diff --git a/.azure-pipelines/azure-pipelines-linux.yml b/.azure-pipelines/azure-pipelines-linux.yml
index 5138edc61..463bb1074 100755
--- a/.azure-pipelines/azure-pipelines-linux.yml
+++ b/.azure-pipelines/azure-pipelines-linux.yml
@@ -12,6 +12,7 @@ jobs:
CONFIG: linux_64_python3.9.____cpython
UPLOAD_PACKAGES: 'True'
DOCKER_IMAGE: quay.io/condaforge/linux-anvil-cos7-x86_64
+ SHORT_CONFIG: linux_64_python3.9.____cpython
timeoutInMinutes: 360
steps:
@@ -42,4 +43,33 @@ jobs:
env:
BINSTAR_TOKEN: $(BINSTAR_TOKEN)
FEEDSTOCK_TOKEN: $(FEEDSTOCK_TOKEN)
- STAGING_BINSTAR_TOKEN: $(STAGING_BINSTAR_TOKEN)
\ No newline at end of file
+ STAGING_BINSTAR_TOKEN: $(STAGING_BINSTAR_TOKEN)
+ - script: |
+ export CI=azure
+ export CI_RUN_ID=$(build.BuildNumber).$(system.JobAttempt)
+ export FEEDSTOCK_NAME=$(basename ${BUILD_REPOSITORY_NAME})
+ export CONDA_BLD_DIR=build_artifacts
+ export ARTIFACT_STAGING_DIR="$(Build.ArtifactStagingDirectory)"
+ # Archive everything in CONDA_BLD_DIR except environments
+ export BLD_ARTIFACT_PREFIX=conda_artifacts
+ if [[ "$AGENT_JOBSTATUS" == "Failed" ]]; then
+ # Archive the CONDA_BLD_DIR environments only when the job fails
+ export ENV_ARTIFACT_PREFIX=conda_envs
+ fi
+ ./.scripts/create_conda_build_artifacts.sh
+ displayName: Prepare conda build artifacts
+ condition: succeededOrFailed()
+
+ - task: PublishPipelineArtifact@1
+ displayName: Store conda build artifacts
+ condition: not(eq(variables.BLD_ARTIFACT_PATH, ''))
+ inputs:
+ targetPath: $(BLD_ARTIFACT_PATH)
+ artifactName: $(BLD_ARTIFACT_NAME)
+
+ - task: PublishPipelineArtifact@1
+ displayName: Store conda build environment artifacts
+ condition: not(eq(variables.ENV_ARTIFACT_PATH, ''))
+ inputs:
+ targetPath: $(ENV_ARTIFACT_PATH)
+ artifactName: $(ENV_ARTIFACT_NAME)
\ No newline at end of file
diff --git a/.azure-pipelines/azure-pipelines-osx.yml b/.azure-pipelines/azure-pipelines-osx.yml
index 46a907b94..7a77ccc90 100755
--- a/.azure-pipelines/azure-pipelines-osx.yml
+++ b/.azure-pipelines/azure-pipelines-osx.yml
@@ -11,6 +11,7 @@ jobs:
osx_64_python3.9.____cpython:
CONFIG: osx_64_python3.9.____cpython
UPLOAD_PACKAGES: 'True'
+ SHORT_CONFIG: osx_64_python3.9.____cpython
timeoutInMinutes: 360
steps:
@@ -30,4 +31,33 @@ jobs:
env:
BINSTAR_TOKEN: $(BINSTAR_TOKEN)
FEEDSTOCK_TOKEN: $(FEEDSTOCK_TOKEN)
- STAGING_BINSTAR_TOKEN: $(STAGING_BINSTAR_TOKEN)
\ No newline at end of file
+ STAGING_BINSTAR_TOKEN: $(STAGING_BINSTAR_TOKEN)
+ - script: |
+ export CI=azure
+ export CI_RUN_ID=$(build.BuildNumber).$(system.JobAttempt)
+ export FEEDSTOCK_NAME=$(basename ${BUILD_REPOSITORY_NAME})
+ export CONDA_BLD_DIR=/Users/runner/miniforge3/conda-bld
+ export ARTIFACT_STAGING_DIR="$(Build.ArtifactStagingDirectory)"
+ # Archive everything in CONDA_BLD_DIR except environments
+ export BLD_ARTIFACT_PREFIX=conda_artifacts
+ if [[ "$AGENT_JOBSTATUS" == "Failed" ]]; then
+ # Archive the CONDA_BLD_DIR environments only when the job fails
+ export ENV_ARTIFACT_PREFIX=conda_envs
+ fi
+ ./.scripts/create_conda_build_artifacts.sh
+ displayName: Prepare conda build artifacts
+ condition: succeededOrFailed()
+
+ - task: PublishPipelineArtifact@1
+ displayName: Store conda build artifacts
+ condition: not(eq(variables.BLD_ARTIFACT_PATH, ''))
+ inputs:
+ targetPath: $(BLD_ARTIFACT_PATH)
+ artifactName: $(BLD_ARTIFACT_NAME)
+
+ - task: PublishPipelineArtifact@1
+ displayName: Store conda build environment artifacts
+ condition: not(eq(variables.ENV_ARTIFACT_PATH, ''))
+ inputs:
+ targetPath: $(ENV_ARTIFACT_PATH)
+ artifactName: $(ENV_ARTIFACT_NAME)
\ No newline at end of file
diff --git a/.azure-pipelines/azure-pipelines-win.yml b/.azure-pipelines/azure-pipelines-win.yml
new file mode 100755
index 000000000..d53a8ff47
--- /dev/null
+++ b/.azure-pipelines/azure-pipelines-win.yml
@@ -0,0 +1,113 @@
+# This file was generated automatically from conda-smithy. To update this configuration,
+# update the conda-forge.yml and/or the recipe/meta.yaml.
+# -*- mode: yaml -*-
+
+jobs:
+- job: win
+ pool:
+ vmImage: windows-2019
+ strategy:
+ matrix:
+ win_64_python3.9.____cpython:
+ CONFIG: win_64_python3.9.____cpython
+ UPLOAD_PACKAGES: 'True'
+ SHORT_CONFIG: win_64_python3.9.____cpython
+ timeoutInMinutes: 360
+ variables:
+ CONDA_BLD_PATH: D:\\bld\\
+
+ steps:
+ - task: PythonScript@0
+ displayName: 'Download Miniforge'
+ inputs:
+ scriptSource: inline
+ script: |
+ import urllib.request
+ url = 'https://github.com/conda-forge/miniforge/releases/latest/download/Mambaforge-Windows-x86_64.exe'
+ path = r"$(Build.ArtifactStagingDirectory)/Miniforge.exe"
+ urllib.request.urlretrieve(url, path)
+
+ - script: |
+ start /wait "" %BUILD_ARTIFACTSTAGINGDIRECTORY%\Miniforge.exe /InstallationType=JustMe /RegisterPython=0 /S /D=C:\Miniforge
+ displayName: Install Miniforge
+
+ - powershell: Write-Host "##vso[task.prependpath]C:\Miniforge\Scripts"
+ displayName: Add conda to PATH
+
+ - script: |
+ call activate base
+ mamba.exe install "python=3.9" conda-build conda pip boa conda-forge-ci-setup=3 "py-lief<0.12" -c conda-forge --strict-channel-priority --yes
+ displayName: Install conda-build
+
+ - script: set PYTHONUNBUFFERED=1
+ displayName: Set PYTHONUNBUFFERED
+
+ # Configure the VM
+ - script: |
+ call activate base
+ conda.exe uninstall --quiet --yes --force conda-forge-ci-setup=3 "py-lief<0.12"
+ pip install --no-deps ".\recipe\."
+ setup_conda_rc .\ ".\recipe" .\.ci_support\%CONFIG%.yaml
+ displayName: conda-forge CI setup
+
+ # Configure the VM.
+ - script: |
+ set "CI=azure"
+ call activate base
+ :: Overriding global run_conda_forge_build_setup_win with local copy.
+ recipe\run_conda_forge_build_setup_win
+ displayName: conda-forge build setup
+
+ - script: |
+ call activate base
+ if EXIST LICENSE.txt (
+ copy LICENSE.txt "recipe\\recipe-scripts-license.txt"
+ )
+ conda.exe mambabuild "recipe" -m .ci_support\%CONFIG%.yaml --suppress-variables
+ displayName: Build recipe
+ env:
+ PYTHONUNBUFFERED: 1
+ - script: |
+ set CI=azure
+ set CI_RUN_ID=$(build.BuildNumber).$(system.JobAttempt)
+ set FEEDSTOCK_NAME=$(build.Repository.Name)
+ set ARTIFACT_STAGING_DIR=$(Build.ArtifactStagingDirectory)
+ set CONDA_BLD_DIR=$(CONDA_BLD_PATH)
+ set BLD_ARTIFACT_PREFIX=conda_artifacts
+ if "%AGENT_JOBSTATUS%" == "Failed" (
+ set ENV_ARTIFACT_PREFIX=conda_envs
+ )
+ call ".scripts\create_conda_build_artifacts.bat"
+ displayName: Prepare conda build artifacts
+ condition: succeededOrFailed()
+
+ - task: PublishPipelineArtifact@1
+ displayName: Store conda build artifacts
+ condition: not(eq(variables.BLD_ARTIFACT_PATH, ''))
+ inputs:
+ targetPath: $(BLD_ARTIFACT_PATH)
+ artifactName: $(BLD_ARTIFACT_NAME)
+
+ - task: PublishPipelineArtifact@1
+ displayName: Store conda build environment artifacts
+ condition: not(eq(variables.ENV_ARTIFACT_PATH, ''))
+ inputs:
+ targetPath: $(ENV_ARTIFACT_PATH)
+ artifactName: $(ENV_ARTIFACT_NAME)
+ - script: |
+ set "FEEDSTOCK_NAME=%BUILD_REPOSITORY_NAME:*/=%"
+ call activate base
+ validate_recipe_outputs "%FEEDSTOCK_NAME%"
+ displayName: Validate Recipe Outputs
+
+ - script: |
+ set "GIT_BRANCH=%BUILD_SOURCEBRANCHNAME%"
+ set "FEEDSTOCK_NAME=%BUILD_REPOSITORY_NAME:*/=%"
+ call activate base
+ upload_package --validate --feedstock-name="%FEEDSTOCK_NAME%" .\ ".\recipe" .ci_support\%CONFIG%.yaml
+ displayName: Upload package
+ env:
+ BINSTAR_TOKEN: $(BINSTAR_TOKEN)
+ FEEDSTOCK_TOKEN: $(FEEDSTOCK_TOKEN)
+ STAGING_BINSTAR_TOKEN: $(STAGING_BINSTAR_TOKEN)
+ condition: and(succeeded(), not(eq(variables['UPLOAD_PACKAGES'], 'False')), not(eq(variables['Build.Reason'], 'PullRequest')))
\ No newline at end of file
diff --git a/.ci_support/win_64_python3.9.____cpython.yaml b/.ci_support/win_64_python3.9.____cpython.yaml
new file mode 100644
index 000000000..53204fbaf
--- /dev/null
+++ b/.ci_support/win_64_python3.9.____cpython.yaml
@@ -0,0 +1,12 @@
+channel_sources:
+- conda-forge
+channel_targets:
+- conda-forge main
+pin_run_as_build:
+ python:
+ min_pin: x.x
+ max_pin: x.x
+python:
+- 3.9.* *_cpython
+target_platform:
+- win-64
diff --git a/.github/workflows/automerge.yml b/.github/workflows/automerge.yml
index 958d4c91a..da1368a12 100644
--- a/.github/workflows/automerge.yml
+++ b/.github/workflows/automerge.yml
@@ -13,7 +13,7 @@ jobs:
uses: actions/checkout@v3
- name: automerge-action
id: automerge-action
- uses: conda-forge/automerge-action@dev
+ uses: conda-forge/automerge-action@main
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
rerendering_github_token: ${{ secrets.RERENDERING_GITHUB_TOKEN }}
diff --git a/.scripts/build_steps.sh b/.scripts/build_steps.sh
index 71a4242a5..8662e69af 100755
--- a/.scripts/build_steps.sh
+++ b/.scripts/build_steps.sh
@@ -37,10 +37,13 @@ mamba install --update-specs --yes --quiet --channel conda-forge \
mamba update --update-specs --yes --quiet --channel conda-forge \
conda-build pip boa conda-forge-ci-setup=3 "py-lief<0.12"
+conda uninstall --quiet --yes --force conda-forge-ci-setup=3 "py-lief<0.12"
+pip install --no-deps ${RECIPE_ROOT}/.
# set up the condarc
setup_conda_rc "${FEEDSTOCK_ROOT}" "${RECIPE_ROOT}" "${CONFIG_FILE}"
-source run_conda_forge_build_setup
+# Overriding global run_conda_forge_build_setup_linux with local copy.
+source ${RECIPE_ROOT}/run_conda_forge_build_setup_linux
# make the build number clobber
make_build_number "${FEEDSTOCK_ROOT}" "${RECIPE_ROOT}" "${CONFIG_FILE}"
diff --git a/.scripts/create_conda_build_artifacts.bat b/.scripts/create_conda_build_artifacts.bat
new file mode 100755
index 000000000..79ce625d8
--- /dev/null
+++ b/.scripts/create_conda_build_artifacts.bat
@@ -0,0 +1,80 @@
+setlocal enableextensions enabledelayedexpansion
+
+rem INPUTS (environment variables that need to be set before calling this script):
+rem
+rem CI (azure/github_actions/UNSET)
+rem CI_RUN_ID (unique identifier for the CI job run)
+rem FEEDSTOCK_NAME
+rem CONFIG (build matrix configuration string)
+rem SHORT_CONFIG (uniquely-shortened configuration string)
+rem CONDA_BLD_DIR (path to the conda-bld directory)
+rem ARTIFACT_STAGING_DIR (use working directory if unset)
+rem BLD_ARTIFACT_PREFIX (prefix for the conda build artifact name, skip if unset)
+rem ENV_ARTIFACT_PREFIX (prefix for the conda build environments artifact name, skip if unset)
+
+rem OUTPUTS
+rem
+rem BLD_ARTIFACT_NAME
+rem BLD_ARTIFACT_PATH
+rem ENV_ARTIFACT_NAME
+rem ENV_ARTIFACT_PATH
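+
+rem Illustrative example (values are hypothetical; adjust to your job). This mirrors
+rem how the Azure Windows pipeline in this repository calls the script:
+rem
+rem   set CI=azure
+rem   set CI_RUN_ID=20220101.1
+rem   set FEEDSTOCK_NAME=example-feedstock
+rem   set CONFIG=win_64_python3.9.____cpython
+rem   set CONDA_BLD_DIR=D:\bld
+rem   set BLD_ARTIFACT_PREFIX=conda_artifacts
+rem   call .scripts\create_conda_build_artifacts.bat
+rem
+rem On success, BLD_ARTIFACT_NAME/BLD_ARTIFACT_PATH describe the zip that the
+rem pipeline then publishes as a build artifact.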
+
+rem Check that the conda-build directory exists
+if not exist %CONDA_BLD_DIR% (
+ echo conda-build directory does not exist
+ exit 1
+)
+
+if not defined ARTIFACT_STAGING_DIR (
+ rem Set staging dir to the working dir
+ set ARTIFACT_STAGING_DIR=%cd%
+)
+
+rem Set a unique ID for the artifact(s), specialized for this particular job run
+set ARTIFACT_UNIQUE_ID=%CI_RUN_ID%_%CONFIG%
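+rem Fall back to the shortened SHORT_CONFIG name when the full ID would be longer than 80 characters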
+if not "%ARTIFACT_UNIQUE_ID%" == "%ARTIFACT_UNIQUE_ID:~0,80%" (
+ set ARTIFACT_UNIQUE_ID=%CI_RUN_ID%_%SHORT_CONFIG%
+)
+
+rem Set a descriptive ID for the archive(s), specialized for this particular job run
+set ARCHIVE_UNIQUE_ID=%CI_RUN_ID%_%CONFIG%
+
+rem Make the build artifact zip
+if defined BLD_ARTIFACT_PREFIX (
+ set BLD_ARTIFACT_NAME=%BLD_ARTIFACT_PREFIX%_%ARTIFACT_UNIQUE_ID%
+ echo BLD_ARTIFACT_NAME: !BLD_ARTIFACT_NAME!
+
+ set "BLD_ARTIFACT_PATH=%ARTIFACT_STAGING_DIR%\%FEEDSTOCK_NAME%_%BLD_ARTIFACT_PREFIX%_%ARCHIVE_UNIQUE_ID%.zip"
+ 7z a "!BLD_ARTIFACT_PATH!" "%CONDA_BLD_DIR%" -xr^^!.git/ -xr^^!_*_env*/ -xr^^!*_cache/ -bb
+ if errorlevel 1 exit 1
+ echo BLD_ARTIFACT_PATH: !BLD_ARTIFACT_PATH!
+
+ if "%CI%" == "azure" (
+ echo ##vso[task.setVariable variable=BLD_ARTIFACT_NAME]!BLD_ARTIFACT_NAME!
+ echo ##vso[task.setVariable variable=BLD_ARTIFACT_PATH]!BLD_ARTIFACT_PATH!
+ )
+ if "%CI%" == "github_actions" (
+ echo ::set-output name=BLD_ARTIFACT_NAME::!BLD_ARTIFACT_NAME!
+ echo ::set-output name=BLD_ARTIFACT_PATH::!BLD_ARTIFACT_PATH!
+ )
+)
+
+rem Make the environments artifact zip
+if defined ENV_ARTIFACT_PREFIX (
+ set ENV_ARTIFACT_NAME=!ENV_ARTIFACT_PREFIX!_%ARTIFACT_UNIQUE_ID%
+ echo ENV_ARTIFACT_NAME: !ENV_ARTIFACT_NAME!
+
+ set "ENV_ARTIFACT_PATH=%ARTIFACT_STAGING_DIR%\%FEEDSTOCK_NAME%_%ENV_ARTIFACT_PREFIX%_%ARCHIVE_UNIQUE_ID%.zip"
+ 7z a "!ENV_ARTIFACT_PATH!" -r "%CONDA_BLD_DIR%"/_*_env*/ -bb
+ if errorlevel 1 exit 1
+ echo ENV_ARTIFACT_PATH: !ENV_ARTIFACT_PATH!
+
+ if "%CI%" == "azure" (
+ echo ##vso[task.setVariable variable=ENV_ARTIFACT_NAME]!ENV_ARTIFACT_NAME!
+ echo ##vso[task.setVariable variable=ENV_ARTIFACT_PATH]!ENV_ARTIFACT_PATH!
+ )
+ if "%CI%" == "github_actions" (
+ echo ::set-output name=ENV_ARTIFACT_NAME::!ENV_ARTIFACT_NAME!
+ echo ::set-output name=ENV_ARTIFACT_PATH::!ENV_ARTIFACT_PATH!
+ )
+)
\ No newline at end of file
diff --git a/.scripts/create_conda_build_artifacts.sh b/.scripts/create_conda_build_artifacts.sh
new file mode 100755
index 000000000..cba0faeea
--- /dev/null
+++ b/.scripts/create_conda_build_artifacts.sh
@@ -0,0 +1,113 @@
+#!/usr/bin/env bash
+
+# INPUTS (environment variables that need to be set before calling this script):
+#
+# CI (azure/github_actions/UNSET)
+# CI_RUN_ID (unique identifier for the CI job run)
+# FEEDSTOCK_NAME
+# CONFIG (build matrix configuration string)
+# SHORT_CONFIG (uniquely-shortened configuration string)
+# CONDA_BLD_DIR (path to the conda-bld directory)
+# ARTIFACT_STAGING_DIR (use working directory if unset)
+# BLD_ARTIFACT_PREFIX (prefix for the conda build artifact name, skip if unset)
+# ENV_ARTIFACT_PREFIX (prefix for the conda build environments artifact name, skip if unset)
+
+# OUTPUTS
+#
+# BLD_ARTIFACT_NAME
+# BLD_ARTIFACT_PATH
+# ENV_ARTIFACT_NAME
+# ENV_ARTIFACT_PATH
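+
+# Illustrative example (values are hypothetical; adjust to your job). This mirrors
+# how the Azure Linux pipeline in this repository calls the script:
+#
+#   export CI=azure
+#   export CI_RUN_ID=20220101.1
+#   export FEEDSTOCK_NAME=example-feedstock
+#   export CONFIG=linux_64_python3.9.____cpython
+#   export CONDA_BLD_DIR=build_artifacts
+#   export BLD_ARTIFACT_PREFIX=conda_artifacts
+#   ./.scripts/create_conda_build_artifacts.sh
+#
+# On success, BLD_ARTIFACT_NAME/BLD_ARTIFACT_PATH describe the zip that the
+# pipeline then publishes as a build artifact.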
+
+source .scripts/logging_utils.sh
+
+# DON'T do set -x, because it results in double echo-ing pipeline commands
+# and that might end up inserting extraneous quotation marks in output variables
+set -e
+
+# Check that the conda-build directory exists
+if [ ! -d "$CONDA_BLD_DIR" ]; then
+ echo "conda-build directory does not exist"
+ exit 1
+fi
+
+# Set staging dir to the working dir, in Windows style if applicable
+if [[ -z "${ARTIFACT_STAGING_DIR}" ]]; then
+ if pwd -W; then
+ ARTIFACT_STAGING_DIR=$(pwd -W)
+ else
+ ARTIFACT_STAGING_DIR=$PWD
+ fi
+fi
+echo "ARTIFACT_STAGING_DIR: $ARTIFACT_STAGING_DIR"
+
+FEEDSTOCK_ROOT=$(cd "$(dirname "$0")/.."; pwd;)
+if [ -z ${FEEDSTOCK_NAME} ]; then
+ export FEEDSTOCK_NAME=$(basename ${FEEDSTOCK_ROOT})
+fi
+
+# Set a unique ID for the artifact(s), specialized for this particular job run
+ARTIFACT_UNIQUE_ID="${CI_RUN_ID}_${CONFIG}"
+if [[ ${#ARTIFACT_UNIQUE_ID} -gt 80 ]]; then
+ ARTIFACT_UNIQUE_ID="${CI_RUN_ID}_${SHORT_CONFIG}"
+fi
+echo "ARTIFACT_UNIQUE_ID: $ARTIFACT_UNIQUE_ID"
+
+# Set a descriptive ID for the archive(s), specialized for this particular job run
+ARCHIVE_UNIQUE_ID="${CI_RUN_ID}_${CONFIG}"
+
+# Make the build artifact zip
+if [[ ! -z "$BLD_ARTIFACT_PREFIX" ]]; then
+ export BLD_ARTIFACT_NAME="${BLD_ARTIFACT_PREFIX}_${ARTIFACT_UNIQUE_ID}"
+ export BLD_ARTIFACT_PATH="${ARTIFACT_STAGING_DIR}/${FEEDSTOCK_NAME}_${BLD_ARTIFACT_PREFIX}_${ARCHIVE_UNIQUE_ID}.zip"
+
+ ( startgroup "Archive conda build directory" ) 2> /dev/null
+
+ # Try 7z and fall back to zip if it fails (for cross-platform use)
+ if ! 7z a "$BLD_ARTIFACT_PATH" "$CONDA_BLD_DIR" '-xr!.git/' '-xr!_*_env*/' '-xr!*_cache/' -bb; then
+ pushd "$CONDA_BLD_DIR"
+ zip -r -y -T "$BLD_ARTIFACT_PATH" . -x '*.git/*' '*_*_env*/*' '*_cache/*'
+ popd
+ fi
+
+ ( endgroup "Archive conda build directory" ) 2> /dev/null
+
+ echo "BLD_ARTIFACT_NAME: $BLD_ARTIFACT_NAME"
+ echo "BLD_ARTIFACT_PATH: $BLD_ARTIFACT_PATH"
+
+ if [[ "$CI" == "azure" ]]; then
+ echo "##vso[task.setVariable variable=BLD_ARTIFACT_NAME]$BLD_ARTIFACT_NAME"
+ echo "##vso[task.setVariable variable=BLD_ARTIFACT_PATH]$BLD_ARTIFACT_PATH"
+ elif [[ "$CI" == "github_actions" ]]; then
+ echo "::set-output name=BLD_ARTIFACT_NAME::$BLD_ARTIFACT_NAME"
+ echo "::set-output name=BLD_ARTIFACT_PATH::$BLD_ARTIFACT_PATH"
+ fi
+fi
+
+# Make the environments artifact zip
+if [[ ! -z "$ENV_ARTIFACT_PREFIX" ]]; then
+ export ENV_ARTIFACT_NAME="${ENV_ARTIFACT_PREFIX}_${ARTIFACT_UNIQUE_ID}"
+ export ENV_ARTIFACT_PATH="${ARTIFACT_STAGING_DIR}/${FEEDSTOCK_NAME}_${ENV_ARTIFACT_PREFIX}_${ARCHIVE_UNIQUE_ID}.zip"
+
+ ( startgroup "Archive conda build environments" ) 2> /dev/null
+
+ # Try 7z and fall back to zip if it fails (for cross-platform use)
+ if ! 7z a "$ENV_ARTIFACT_PATH" -r "$CONDA_BLD_DIR"/'_*_env*/' -bb; then
+ pushd "$CONDA_BLD_DIR"
+ zip -r -y -T "$ENV_ARTIFACT_PATH" . -i '*_*_env*/*'
+ popd
+ fi
+
+ ( endgroup "Archive conda build environments" ) 2> /dev/null
+
+ echo "ENV_ARTIFACT_NAME: $ENV_ARTIFACT_NAME"
+ echo "ENV_ARTIFACT_PATH: $ENV_ARTIFACT_PATH"
+
+ if [[ "$CI" == "azure" ]]; then
+ echo "##vso[task.setVariable variable=ENV_ARTIFACT_NAME]$ENV_ARTIFACT_NAME"
+ echo "##vso[task.setVariable variable=ENV_ARTIFACT_PATH]$ENV_ARTIFACT_PATH"
+ elif [[ "$CI" == "github_actions" ]]; then
+ echo "::set-output name=ENV_ARTIFACT_NAME::$ENV_ARTIFACT_NAME"
+ echo "::set-output name=ENV_ARTIFACT_PATH::$ENV_ARTIFACT_PATH"
+ fi
+fi
\ No newline at end of file
diff --git a/.scripts/run_osx_build.sh b/.scripts/run_osx_build.sh
index caa788e64..fc256303a 100755
--- a/.scripts/run_osx_build.sh
+++ b/.scripts/run_osx_build.sh
@@ -29,6 +29,8 @@ mamba update --update-specs --yes --quiet --channel conda-forge \
conda-build pip boa conda-forge-ci-setup=3 "py-lief<0.12"
+conda uninstall --quiet --yes --force conda-forge-ci-setup=3 "py-lief<0.12"
+pip install --no-deps recipe/.
echo -e "\n\nSetting up the condarc and mangling the compiler."
setup_conda_rc ./ ./recipe ./.ci_support/${CONFIG}.yaml
@@ -46,8 +48,8 @@ else
fi
echo -e "\n\nRunning the build setup script."
-source run_conda_forge_build_setup
-
+# Overriding global run_conda_forge_build_setup_osx with local copy.
+source recipe/run_conda_forge_build_setup_osx
( endgroup "Configuring conda" ) 2> /dev/null
diff --git a/README.md b/README.md
index fef8d7c6a..dc219421f 100644
--- a/README.md
+++ b/README.md
@@ -54,6 +54,13 @@ Current build status
+          win_64_python3.9.____cpython | (build-status badge row for the new Windows variant; the HTML table markup was not preserved)
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 33a441c1f..6b346f505 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -4,4 +4,5 @@
jobs:
- template: ./.azure-pipelines/azure-pipelines-linux.yml
+ - template: ./.azure-pipelines/azure-pipelines-win.yml
- template: ./.azure-pipelines/azure-pipelines-osx.yml
\ No newline at end of file
diff --git a/conda-forge.yml b/conda-forge.yml
index 6e47cfe9e..80f8fb9e8 100644
--- a/conda-forge.yml
+++ b/conda-forge.yml
@@ -6,3 +6,6 @@ github:
tooling_branch_name: main
provider:
linux_ppc64le: travis
+conda_pkg_format: 2
+azure:
+ store_build_artifacts: true
\ No newline at end of file
diff --git a/recipe/SetPageFileSize.ps1 b/recipe/SetPageFileSize.ps1
new file mode 100644
index 000000000..ae5890ee9
--- /dev/null
+++ b/recipe/SetPageFileSize.ps1
@@ -0,0 +1,197 @@
+<#
+.SYNOPSIS
+ Configure Pagefile on Windows machine
+.NOTES
+ Author: Aleksandr Chebotov
+.EXAMPLE
+ SetPageFileSize.ps1 -MinimumSize 4GB -MaximumSize 8GB -DiskRoot "D:"
+#>
+
+# this file taken 1:1 (with the exception of this comment & the parameter echo) from the MIT-licensed
+# https://github.com/al-cheb/configure-pagefile-action/blob/916fa29e5d27bd4e8eef3666869afbaaee27d9eb/scripts/SetPageFileSize.ps1
+
+param(
+ [System.UInt64] $MinimumSize = 8gb ,
+ [System.UInt64] $MaximumSize = 8gb ,
+ [System.String] $DiskRoot = "D:"
+)
+
+# https://referencesource.microsoft.com/#System.IdentityModel/System/IdentityModel/NativeMethods.cs,619688d876febbe1
+# https://www.geoffchappell.com/studies/windows/km/ntoskrnl/api/mm/modwrite/create.htm
+# https://referencesource.microsoft.com/#mscorlib/microsoft/win32/safehandles/safefilehandle.cs,9b08210f3be75520
+# https://referencesource.microsoft.com/#mscorlib/system/security/principal/tokenaccesslevels.cs,6eda91f498a38586
+# https://www.autoitscript.com/forum/topic/117993-api-ntcreatepagingfile/
+
+$source = @'
+using System;
+using System.ComponentModel;
+using System.Diagnostics;
+using System.Runtime.InteropServices;
+using System.Security.Principal;
+using System.Text;
+using Microsoft.Win32;
+using Microsoft.Win32.SafeHandles;
+
+namespace Util
+{
+ class NativeMethods
+ {
+ [StructLayout(LayoutKind.Sequential)]
+ internal struct LUID
+ {
+ internal uint LowPart;
+ internal uint HighPart;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ internal struct LUID_AND_ATTRIBUTES
+ {
+ internal LUID Luid;
+ internal uint Attributes;
+ }
+
+ [StructLayout(LayoutKind.Sequential)]
+ internal struct TOKEN_PRIVILEGE
+ {
+ internal uint PrivilegeCount;
+ internal LUID_AND_ATTRIBUTES Privilege;
+
+ internal static readonly uint Size = (uint)Marshal.SizeOf(typeof(TOKEN_PRIVILEGE));
+ }
+
+ [StructLayoutAttribute(LayoutKind.Sequential, CharSet = CharSet.Unicode)]
+ internal struct UNICODE_STRING
+ {
+ internal UInt16 length;
+ internal UInt16 maximumLength;
+ internal string buffer;
+ }
+
+ [DllImport("kernel32.dll", SetLastError=true)]
+ internal static extern IntPtr LocalFree(IntPtr handle);
+
+ [DllImport("advapi32.dll", ExactSpelling = true, CharSet = CharSet.Unicode, SetLastError = true, PreserveSig = false)]
+ internal static extern bool LookupPrivilegeValueW(
+ [In] string lpSystemName,
+ [In] string lpName,
+ [Out] out LUID luid
+ );
+
+ [DllImport("advapi32.dll", SetLastError = true, PreserveSig = false)]
+ internal static extern bool AdjustTokenPrivileges(
+ [In] SafeCloseHandle tokenHandle,
+ [In] bool disableAllPrivileges,
+ [In] ref TOKEN_PRIVILEGE newState,
+ [In] uint bufferLength,
+ [Out] out TOKEN_PRIVILEGE previousState,
+ [Out] out uint returnLength
+ );
+
+ [DllImport("advapi32.dll", CharSet = CharSet.Auto, SetLastError = true, PreserveSig = false)]
+ internal static extern bool OpenProcessToken(
+ [In] IntPtr processToken,
+ [In] int desiredAccess,
+ [Out] out SafeCloseHandle tokenHandle
+ );
+
+ [DllImport("ntdll.dll", CharSet = CharSet.Unicode, SetLastError = true, CallingConvention = CallingConvention.StdCall)]
+ internal static extern Int32 NtCreatePagingFile(
+ [In] ref UNICODE_STRING pageFileName,
+ [In] ref Int64 minimumSize,
+ [In] ref Int64 maximumSize,
+ [In] UInt32 flags
+ );
+
+ [DllImport("kernel32.dll", CharSet = CharSet.Unicode, SetLastError = true)]
+ internal static extern uint QueryDosDeviceW(
+ string lpDeviceName,
+ StringBuilder lpTargetPath,
+ int ucchMax
+ );
+ }
+
+ public sealed class SafeCloseHandle: SafeHandleZeroOrMinusOneIsInvalid
+ {
+ [DllImport("kernel32.dll", ExactSpelling = true, SetLastError = true)]
+ internal extern static bool CloseHandle(IntPtr handle);
+
+ private SafeCloseHandle() : base(true)
+ {
+ }
+
+ public SafeCloseHandle(IntPtr preexistingHandle, bool ownsHandle) : base(ownsHandle)
+ {
+ SetHandle(preexistingHandle);
+ }
+
+ override protected bool ReleaseHandle()
+ {
+ return CloseHandle(handle);
+ }
+ }
+
+ public class PageFile
+ {
+ public static void SetPageFileSize(long minimumValue, long maximumValue, string lpDeviceName)
+ {
+ SetPageFilePrivilege();
+ StringBuilder lpTargetPath = new StringBuilder(260);
+
+ UInt32 resultQueryDosDevice = NativeMethods.QueryDosDeviceW(lpDeviceName, lpTargetPath, lpTargetPath.Capacity);
+ if (resultQueryDosDevice == 0)
+ {
+ throw new Win32Exception(Marshal.GetLastWin32Error());
+ }
+
+ string pageFilePath = lpTargetPath.ToString() + "\\pagefile.sys";
+
+ NativeMethods.UNICODE_STRING pageFileName = new NativeMethods.UNICODE_STRING
+ {
+ length = (ushort)(pageFilePath.Length * 2),
+ maximumLength = (ushort)(2 * (pageFilePath.Length + 1)),
+ buffer = pageFilePath
+ };
+
+ Int32 resultNtCreatePagingFile = NativeMethods.NtCreatePagingFile(ref pageFileName, ref minimumValue, ref maximumValue, 0);
+ if (resultNtCreatePagingFile != 0)
+ {
+ throw new Win32Exception(Marshal.GetLastWin32Error());
+ }
+
+ Console.WriteLine("PageFile: {0} / {1} bytes for {2}", minimumValue, maximumValue, pageFilePath);
+ }
+
+ static void SetPageFilePrivilege()
+ {
+ const int SE_PRIVILEGE_ENABLED = 0x00000002;
+ const int AdjustPrivileges = 0x00000020;
+ const int Query = 0x00000008;
+
+ NativeMethods.LUID luid;
+ NativeMethods.LookupPrivilegeValueW(null, "SeCreatePagefilePrivilege", out luid);
+
+ SafeCloseHandle hToken;
+ NativeMethods.OpenProcessToken(
+ Process.GetCurrentProcess().Handle,
+ AdjustPrivileges | Query,
+ out hToken
+ );
+
+ NativeMethods.TOKEN_PRIVILEGE previousState;
+ NativeMethods.TOKEN_PRIVILEGE newState;
+ uint previousSize = 0;
+ newState.PrivilegeCount = 1;
+ newState.Privilege.Luid = luid;
+ newState.Privilege.Attributes = SE_PRIVILEGE_ENABLED;
+
+ NativeMethods.AdjustTokenPrivileges(hToken, false, ref newState, NativeMethods.TOKEN_PRIVILEGE.Size, out previousState, out previousSize);
+ }
+ }
+}
+'@
+
+Add-Type -TypeDefinition $source
+
+echo Parameters: $minimumSize, $maximumSize, $diskRoot
+# Set SetPageFileSize
+[Util.PageFile]::SetPageFileSize($minimumSize, $maximumSize, $diskRoot)
\ No newline at end of file
diff --git a/recipe/conda_forge_ci_setup/__init__.py b/recipe/conda_forge_ci_setup/__init__.py
new file mode 100644
index 000000000..69bc0c445
--- /dev/null
+++ b/recipe/conda_forge_ci_setup/__init__.py
@@ -0,0 +1 @@
+"""conda-forge-ci-utils"""
diff --git a/recipe/conda_forge_ci_setup/build_utils.py b/recipe/conda_forge_ci_setup/build_utils.py
new file mode 100644
index 000000000..9633259d3
--- /dev/null
+++ b/recipe/conda_forge_ci_setup/build_utils.py
@@ -0,0 +1,314 @@
+import os
+import sys
+import subprocess
+import platform
+
+try:
+ from ruamel_yaml import safe_load, safe_dump
+except ImportError:
+ from yaml import safe_load, safe_dump
+
+import click
+
+
+from conda_forge_ci_setup.upload_or_check_non_existence import retry_upload_or_check
+
+from .feedstock_outputs import STAGING
+
+
+call = subprocess.check_call
+
+_global_config = {
+ "channels": {
+ "sources": ["conda-forge", "defaults"],
+ "targets": [["conda-forge", "main"]],
+ }
+}
+
+
+arg_feedstock_root = click.argument(
+ "feedstock_root", type=click.Path(exists=True, file_okay=False, dir_okay=True)
+)
+arg_recipe_root = click.argument(
+ "recipe_root", type=click.Path(exists=True, file_okay=False, dir_okay=True)
+)
+arg_config_file = click.argument(
+ "config_file", type=click.Path(exists=True, file_okay=True, dir_okay=False)
+)
+
+
+def update_global_config(feedstock_root):
+ """Merge the conda-forge.yml with predefined system defaults"""
+ if os.path.exists(os.path.join(feedstock_root, "conda-forge.yml")):
+ with open(os.path.join(feedstock_root, "conda-forge.yml")) as f:
+ repo_config = safe_load(f)
+ for k1, k2 in [("channels", "sources"), ("channels", "targets")]:
+ if (k1 in repo_config) and (k2 in repo_config[k1]):
+ _global_config[k1][k2] = repo_config[k1][k2]
+
+
+def fail_if_outdated_windows_ci(feedstock_root):
+ if sys.platform != "win32":
+ return
+
+ if "APPVEYOR_ACCOUNT_NAME" in os.environ:
+ provider = "appveyor"
+ if os.environ["APPVEYOR_ACCOUNT_NAME"] != "conda-forge":
+ return
+ if "APPVEYOR_PULL_REQUEST_NUMBER" not in os.environ:
+ return
+ elif "BUILD_REPOSITORY_NAME" in os.environ:
+ provider = "azure"
+ if not os.environ["BUILD_REPOSITORY_NAME"].startswith("conda-forge/"):
+ return
+ if "SYSTEM_PULLREQUEST_PULLREQUESTID" not in os.environ:
+ return
+ else:
+ return
+
+ with open(os.path.join(feedstock_root, "conda-forge.yml")) as f:
+ config = safe_load(f)
+ if "provider" in config and "win" in config["provider"]:
+ provider_cfg = config["provider"]["win"]
+ if provider_cfg != "azure":
+ return
+ if provider == "appveyor":
+ raise RuntimeError(
+ "This PR needs a rerender to switch from appveyor to azure")
+ if (
+ provider == "azure"
+ and (
+ os.getenv("UPLOAD_PACKAGES", "False") == "False"
+ or os.path.exists(".appveyor.yml")
+ )
+ ):
+ raise RuntimeError(
+ "This PR needs a rerender to switch from appveyor to azure")
+
+
+def fail_if_travis_not_allowed_for_arch(config_file, feedstock_root):
+ specific_config = safe_load(open(config_file))
+ if "channel_targets" in specific_config:
+ channels = [c.strip().split(" ") for c in specific_config["channel_targets"]]
+ else:
+ update_global_config(feedstock_root)
+ channels = _global_config["channels"]["targets"]
+
+ upload_to_conda_forge = any(owner == "conda-forge" for owner, _ in channels)
+
+ if (
+ upload_to_conda_forge
+ and os.environ.get("CI", None) == "travis"
+ and (
+ platform.uname().machine.lower() in ["x86_64", "amd64"]
+ or platform.system().lower() != "linux"
+ )
+ ):
+ raise RuntimeError("Travis CI cannot be used on x86_64 in conda-forge!")
+
+
+def maybe_use_dot_conda(feedstock_root):
+ """Maybe set the .condarc to use .conda files."""
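+    # For example, "conda_pkg_format: 2" in conda-forge.yml (as set in this
+    # repository's conda-forge.yml) makes conda-build produce .conda packages.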
+ if os.path.exists(os.path.join(feedstock_root, "conda-forge.yml")):
+ with open(os.path.join(feedstock_root, "conda-forge.yml")) as f:
+ repo_config = safe_load(f)
+ pkg_format = repo_config.get("conda_pkg_format", None)
+ if pkg_format is not None:
+ try:
+ call([
+ "conda", "config", "--env", "--set",
+ "conda_build.pkg_format", str(pkg_format)
+ ])
+ except subprocess.CalledProcessError:
+ pass
+
+
+@click.command()
+@arg_feedstock_root
+@arg_recipe_root
+@arg_config_file
+def setup_conda_rc(feedstock_root, recipe_root, config_file):
+
+ fail_if_outdated_windows_ci(feedstock_root)
+
+ fail_if_travis_not_allowed_for_arch(config_file, feedstock_root)
+
+ maybe_use_dot_conda(feedstock_root)
+
+ with open(config_file) as f:
+ specific_config = safe_load(f)
+ if "channel_sources" in specific_config:
+ # Due to rendering we may have more than one row for channel_sources
+ # if nothing gets zipped with it
+ first_row = specific_config["channel_sources"][0] # type: str
+ channels = [c.strip() for c in first_row.split(",")]
+ else:
+ update_global_config(feedstock_root)
+ channels = _global_config["channels"]["sources"]
+
+ try:
+ call(["conda", "config", "--env", "--remove", "channels", "defaults"])
+ except subprocess.CalledProcessError:
+ pass
+
+ for c in reversed(channels):
+ call(["conda", "config", "--env", "--add", "channels", c])
+
+ call(["conda", "config", "--env", "--set", "show_channel_urls", "true"])
+
+
+@click.command()
+@arg_feedstock_root
+@arg_recipe_root
+@arg_config_file
+@click.option("--validate", is_flag=True)
+@click.option("--private", is_flag=True)
+@click.option("--feedstock-name", type=str, default=None)
+def upload_package(feedstock_root, recipe_root, config_file, validate, private, feedstock_name):
+ if feedstock_name is None and validate:
+ raise RuntimeError("You must supply the --feedstock-name option if validating!")
+
+ specific_config = safe_load(open(config_file))
+ if "channel_targets" in specific_config:
+ channels = [c.strip().split(" ") for c in specific_config["channel_targets"]]
+ source_channels = ",".join(
+ [c.strip() for c in specific_config["channel_sources"]])
+ else:
+ update_global_config(feedstock_root)
+ channels = _global_config["channels"]["targets"]
+ source_channels = ",".join(_global_config["channels"]["sources"])
+
+ if "UPLOAD_ON_BRANCH" in os.environ:
+ if "GIT_BRANCH" not in os.environ:
+ print(
+ "WARNING: UPLOAD_ON_BRANCH env variable set, "
+ "but GIT_BRANCH not set. Skipping check")
+ else:
+ if os.environ["UPLOAD_ON_BRANCH"] != os.environ["GIT_BRANCH"]:
+ print(
+ "The branch {} is not configured to be "
+ "uploaded".format(os.environ["GIT_BRANCH"]))
+ return
+
+ upload_to_conda_forge = any(owner == "conda-forge" for owner, _ in channels)
+ if upload_to_conda_forge and "channel_sources" in specific_config:
+ allowed_channels = [
+ "conda-forge", "conda-forge/label/", "defaults", "c4aarch64",
+ "c4armv7l"]
+ for source_channel in source_channels.split(","):
+ if source_channel.startswith('https://conda-web.anaconda.org/'):
+ source_channel = source_channel[len('https://conda-web.anaconda.org/'):]
+ for c in allowed_channels:
+ if source_channel.startswith(c):
+ break
+ else:
+ print(
+ "Uploading to %s with source channel '%s' "
+ "is not allowed" % ("conda-forge", source_channel))
+ return
+
+ # get the git sha of the current commit
+ git_sha = subprocess.run(
+ "git rev-parse HEAD",
+ check=True,
+ stdout=subprocess.PIPE,
+ shell=True,
+ cwd=feedstock_root,
+ ).stdout.decode("utf-8").strip()
+ if len(git_sha) == 0:
+ git_sha = None
+ print("Did not find git SHA for this build!")
+ else:
+ print("Found git SHA %s for this build!" % git_sha)
+
+ for owner, channel in channels:
+ if validate and owner == "conda-forge":
+ retry_upload_or_check(
+ feedstock_name, recipe_root, STAGING, channel,
+ [config_file], validate=True, git_sha=git_sha)
+ else:
+ retry_upload_or_check(
+ feedstock_name, recipe_root, owner, channel,
+ [config_file], validate=False, private_upload=private)
+
+
+@click.command()
+@arg_feedstock_root
+@arg_recipe_root
+@arg_config_file
+def make_build_number(feedstock_root, recipe_root, config_file):
+ """
+ General logic
+
+ The purpose of this is to ensure that the new compilers have build
+ numbers > 1000 and legacy compilers have a build number < 1000.
+
+ This is done by reading the build_number_decrement which is rendered
+ into all the recipes.
+
+ For linux and osx we want to avoid building for the legacy compilers
+ with build numbers > 1000
+
+ Example matrix
+ - {'compiler_c': 'toolchain_c', 'build_number_decrement': 1000}
+ - {'compiler_c': 'gcc', 'build_number_decrement': 0}
+
+ """
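+    # Worked example (matching the matrix above): a recipe with build/number 1002
+    # rendered for the toolchain variant (build_number_decrement: 1000) is clobbered
+    # to build number 2, while the gcc variant (decrement 0) is left untouched.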
+ specific_config = safe_load(open(config_file))
+ build_number_dec = int(specific_config.get("build_number_decrement", [0])[0])
+ if build_number_dec == 0:
+ return
+
+ use_legacy_compilers = False
+ for key in {"c", "cxx", "fortran"}:
+ if "toolchain_{}".format(key) in specific_config.get(
+ '{}_compiler'.format(key), ""):
+ use_legacy_compilers = True
+ break
+
+ import conda_build.api
+
+ rendered_recipe = conda_build.api.render(
+ recipe_path=recipe_root, variants=specific_config
+ )
+ build_numbers = set()
+ for recipe, _, _ in rendered_recipe:
+ build_numbers.add(int(recipe.get_value("build/number")))
+ if len(build_numbers) > 1:
+ raise ValueError("More than one build number found, giving up")
+ if len(build_numbers) == 0:
+ print("> conda-forge:: No build number found. Presuming build string")
+ return
+ try:
+ build_number_int = build_numbers.pop()
+
+ if build_number_int < 1000:
+ if not use_legacy_compilers:
+ raise ValueError(
+                    "Only legacy compilers are valid with build numbers < 1000"
+ )
+ new_build_number = build_number_int
+ else:
+ new_build_number = build_number_int - build_number_dec
+
+ config_dir, filename = os.path.split(config_file)
+ with open(os.path.join(config_dir, "clobber_" + filename), "w") as fo:
+ data = {"build": {"number": new_build_number}}
+ print("> conda-forge:: Build number clobber {} -> {}".format(
+ build_number_int, new_build_number))
+ safe_dump(data, fo)
+ except ValueError:
+ # This is a NON string build number
+ # we have this for things like the blas mutex and a few other similar cases
+        print("> conda-forge:: No build number clobber generated!")
+ import traceback
+ traceback.print_exc()
+
+
+@click.command()
+@arg_feedstock_root
+@arg_recipe_root
+@arg_config_file
+def mangle_compiler(feedstock_root, recipe_root, config_file):
+ """Try hard to break the compilers for osx"""
+ # TODO
diff --git a/recipe/conda_forge_ci_setup/feedstock_outputs.py b/recipe/conda_forge_ci_setup/feedstock_outputs.py
new file mode 100644
index 000000000..9845b0d3e
--- /dev/null
+++ b/recipe/conda_forge_ci_setup/feedstock_outputs.py
@@ -0,0 +1,171 @@
+import os
+import hashlib
+import json
+
+import conda_build
+import conda_build.config
+import requests
+import click
+
+VALIDATION_ENDPOINT = "https://conda-forge.herokuapp.com"
+STAGING = "cf-staging"
+OUTPUTS_REPO = "https://github.com/conda-forge/feedstock-outputs.git"
+OUTPUTS_REPO_RAW = "https://raw.githubusercontent.com/conda-forge/feedstock-outputs/main/" # noqa
+
+
+def _get_sharded_path(output):
+ chars = [c for c in output if c.isalnum()]
+ while len(chars) < 3:
+ chars.append("z")
+
+ return "/".join(["outputs", chars[0], chars[1], chars[2], output + ".json"])
+
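+# e.g. _get_sharded_path("numpy") -> "outputs/n/u/m/numpy.json"; names with fewer
+# than three alphanumeric characters are padded with "z".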
+
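+# For reference: split_pkg("linux-64/example-pkg-1.2.3-py39_0.tar.bz2") (a
+# hypothetical file name) returns ("linux-64", "example-pkg", "1.2.3", "py39_0").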
+def split_pkg(pkg):
+ if pkg.endswith(".tar.bz2"):
+ pkg = pkg[:-len(".tar.bz2")]
+ elif pkg.endswith(".conda"):
+ pkg = pkg[:-len(".conda")]
+ else:
+ raise RuntimeError("Can only process packages that end in .tar.bz2 or .conda!")
+ plat, pkg_name = pkg.split(os.path.sep)
+ name_ver, build = pkg_name.rsplit('-', 1)
+ name, ver = name_ver.rsplit('-', 1)
+ return plat, name, ver, build
+
+
+def _unix_dist_path(path):
+ return "/".join(path.split(os.sep)[-2:])
+
+
+def _compute_md5sum(pth):
+ h = hashlib.md5()
+
+ with open(pth, 'rb') as fp:
+ chunk = 0
+ while chunk != b'':
+ chunk = fp.read(1024)
+ h.update(chunk)
+
+ return h.hexdigest()
+
+
+def request_copy(feedstock, dists, channel, git_sha=None, comment_on_error=True):
+ checksums = {}
+ for path in dists:
+ dist = _unix_dist_path(path)
+ checksums[dist] = _compute_md5sum(path)
+
+ if "FEEDSTOCK_TOKEN" not in os.environ or os.environ["FEEDSTOCK_TOKEN"] is None:
+ print(
+ "ERROR you must have defined a FEEDSTOCK_TOKEN in order to "
+ "perform output copies to the production channels!"
+ )
+ return False
+
+ headers = {"FEEDSTOCK_TOKEN": os.environ["FEEDSTOCK_TOKEN"]}
+ json_data = {
+ "feedstock": feedstock,
+ "outputs": checksums,
+ "channel": channel,
+ "comment_on_error": comment_on_error,
+ }
+ if git_sha is not None:
+ json_data["git_sha"] = git_sha
+ r = requests.post(
+ "%s/feedstock-outputs/copy" % VALIDATION_ENDPOINT,
+ headers=headers,
+ json=json_data,
+ )
+
+ try:
+ results = r.json()
+ except Exception as e:
+ print(
+ "ERROR getting output validation information "
+ "from the webservice:",
+ repr(e)
+ )
+ results = {}
+
+ print("copy results:\n%s" % json.dumps(results, indent=2))
+
+ return r.status_code == 200
+
+
+def is_valid_feedstock_output(project, outputs):
+ """Test if feedstock outputs are valid (i.e., the outputs are allowed for that
+ feedstock). Optionally register them if they do not exist.
+
+ Parameters
+ ----------
+ project : str
+ The GitHub repo.
+ outputs : list of str
+        A list of outputs to validate. The list entries should be the
+ full names with the platform directory, version/build info, and file extension
+ (e.g., `noarch/blah-fa31b0-2020.04.13.15.54.07-py_0.tar.bz2`).
+
+ Returns
+ -------
+ valid : dict
+ A dict keyed on output name with True if it is valid and False
+ otherwise.
+ """
+ if project.endswith("-feedstock"):
+ feedstock = project[:-len("-feedstock")]
+ else:
+ feedstock = project
+
+ valid = {o: False for o in outputs}
+
+ for dist in outputs:
+ try:
+ _, o, _, _ = split_pkg(dist)
+ except RuntimeError:
+ continue
+
+ opth = _get_sharded_path(o)
+ url = OUTPUTS_REPO_RAW + opth
+ res = requests.get(url)
+
+ if not res.ok:
+ # no output exists and we can add it
+ valid[dist] = True
+ else:
+ # make sure feedstock is ok
+ data = res.json()
+ valid[dist] = feedstock in data["feedstocks"]
+
+ return valid
+
+
+@click.command()
+@click.argument("feedstock_name", type=str)
+def main(feedstock_name):
+ """Validate the feedstock outputs."""
+
+ paths = (
+ [
+ os.path.join('noarch', p)
+ for p in os.listdir(os.path.join(conda_build.config.croot, 'noarch')) # noqa
+ ]
+ + [
+ os.path.join(conda_build.config.subdir, p)
+ for p in os.listdir(os.path.join(conda_build.config.croot, conda_build.config.subdir)) # noqa
+ ])
+ built_distributions = [
+ path
+ for path in paths
+ if (path.endswith('.tar.bz2') or path.endswith(".conda"))
+ ]
+
+ results = is_valid_feedstock_output(feedstock_name, built_distributions)
+
+ print("validation results:\n%s" % json.dumps(results, indent=2))
+ print("NOTE: Any outputs marked as False are not allowed for this feedstock.")
+
+ # FIXME: removing this for now - we can add extra arguments for us to
+ # compute the output names properly later
+ # if not all(v for v in results.values()):
+ # sys.exit(1)
diff --git a/recipe/conda_forge_ci_setup/ff_ci_pr_build.py b/recipe/conda_forge_ci_setup/ff_ci_pr_build.py
new file mode 100755
index 000000000..63b584d9c
--- /dev/null
+++ b/recipe/conda_forge_ci_setup/ff_ci_pr_build.py
@@ -0,0 +1,201 @@
+#!/usr/bin/env python
+
+"""
+Fast finish old PR builds on CIs
+
+Using various CI's (CircleCI, Travis CI, and AppVeyor) APIs and information
+about the current build for the relevant CI, this script checks to see if the
+current PR build is the most recent one. It does this by comparing the current
+PR build's build number to other build numbers of builds for this PR. If it is
+not the most recent build for the PR, then this script exits with a failure.
+Thus it can fail the build; stopping it from proceeding further. However, if
+it is the most recent build number or if it is not a PR (e.g. a build on a
+normal branch), then the build proceeds without issues.
+"""
+
+
+try:
+ from future_builtins import (
+ map,
+ filter,
+ )
+except ImportError:
+ pass
+
+import argparse
+import codecs
+import contextlib
+import json
+import os
+import sys
+
+try:
+ from urllib.request import (
+ Request,
+ urlopen,
+ )
+except ImportError:
+ from urllib2 import (
+ Request,
+ urlopen,
+ )
+
+
+def request_json(url, headers={}):
+ request = Request(url, headers=headers)
+ with contextlib.closing(urlopen(request)) as response:
+ reader = codecs.getreader("utf-8")
+ return json.load(reader(response))
+
+
+def circle_check_latest_pr_build(repo, pr, build_num):
+ # Not a PR so it is latest.
+ if pr is None:
+ return True
+
+ headers = {
+ "Accept": "application/json",
+ }
+ url = "https://circleci.com/api/v1.1/project/github/{repo}/tree/pull/{pr}"
+
+ builds = request_json(url.format(repo=repo, pr=pr), headers=headers)
+
+ # Parse the response to get a list of build numbers for this PR.
+ job_name = os.environ.get("CIRCLE_JOB")
+ same_param_builds = []
+ for b in builds:
+ b_params = b.get("build_parameters") or {}
+ if b_params.get("CIRCLE_JOB") == job_name:
+ same_param_builds.append(b)
+ pr_build_nums = set(map(lambda b: int(b["build_num"]), same_param_builds))
+ pr_build_nums.add(build_num)
+
+ # Check if our build number is the latest (largest)
+ # out of all of the builds for this PR.
+ if build_num < max(pr_build_nums):
+ return False
+ else:
+ return True
+
+
+def travis_check_latest_pr_build(repo, pr, build_num):
+ # Not a PR so it is latest.
+ if pr is None:
+ return True
+
+ headers = {
+ "Accept": "application/vnd.travis-ci.2+json",
+ }
+ url = "https://api.travis-ci.org/repos/{repo}/builds?event_type=pull_request"
+
+ data = request_json(url.format(repo=repo), headers=headers)
+
+ # Parse the response to get a list of build numbers for this PR.
+ builds = data["builds"]
+ pr_builds = filter(lambda b: b["pull_request_number"] == pr, builds)
+ pr_build_nums = set(map(lambda b: int(b["number"]), pr_builds))
+ pr_build_nums.add(build_num)
+
+ # Check if our build number is the latest (largest)
+ # out of all of the builds for this PR.
+ if build_num < max(pr_build_nums):
+ return False
+ else:
+ return True
+
+
+def appveyor_check_latest_pr_build(repo, pr, build_num, total_builds=50):
+ # Not a PR so it is latest.
+ if pr is None:
+ return True
+
+ headers = {
+ "Accept": "application/json",
+ }
+ url = "https://ci.appveyor.com/api/projects/{repo}/history?recordsNumber={total_builds}"
+
+ data = request_json(url.format(repo=repo, total_builds=total_builds), headers=headers)
+
+ # Parse the response to get a list of build numbers for this PR.
+ builds = data["builds"]
+ pr_builds = filter(lambda b: b.get("pullRequestId", "") == str(pr), builds)
+ pr_build_nums = set(map(lambda b: int(b["buildNumber"]), pr_builds))
+ pr_build_nums.add(build_num)
+
+ # Check if our build number is the latest (largest)
+ # out of all of the builds for this PR.
+ if build_num < max(pr_build_nums):
+ return False
+ else:
+ return True
+
+
+def main(*args):
+ if not args:
+ args = sys.argv[1:]
+
+ parser = argparse.ArgumentParser(
+ description=__doc__.strip().splitlines()[0]
+ )
+ parser.add_argument(
+ "-v",
+ "--verbose",
+ action="store_true",
+ help="Whether to include output",
+ )
+ parser.add_argument(
+ "--ci",
+ required=True,
+ choices=[
+ "circle",
+ "travis",
+ "appveyor",
+ ],
+ help="Which CI to check for an outdated build",
+ )
+ parser.add_argument(
+ "repo",
+ type=str,
+ help="GitHub repo name (e.g. `user/repo`.)",
+ )
+ parser.add_argument(
+ "bld",
+ type=int,
+ help="CI build number for this pull request",
+ )
+ parser.add_argument(
+ "pr",
+ nargs="?",
+ default="",
+ help="GitHub pull request number of this build",
+ )
+
+ params = parser.parse_args(args)
+ verbose = params.verbose
+ ci = params.ci
+ repo = params.repo
+ bld = params.bld
+ try:
+ pr = int(params.pr)
+ except ValueError:
+ pr = None
+
+ if verbose:
+ print("Checking to see if this PR build is outdated.")
+
+ exit_code = 0
+ if ci == "circle":
+ exit_code = int(circle_check_latest_pr_build(repo, pr, bld) is False)
+ elif ci == "travis":
+ exit_code = int(travis_check_latest_pr_build(repo, pr, bld) is False)
+ elif ci == "appveyor":
+ exit_code = int(appveyor_check_latest_pr_build(repo, pr, bld) is False)
+
+ if verbose and exit_code == 1:
+ print("Failing outdated PR build to end it.")
+
+ return exit_code
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/recipe/conda_forge_ci_setup/mangle_homebrew.py b/recipe/conda_forge_ci_setup/mangle_homebrew.py
new file mode 100644
index 000000000..2d8d1b1d4
--- /dev/null
+++ b/recipe/conda_forge_ci_setup/mangle_homebrew.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+import os
+import shutil
+import click
+
+
+@click.command()
+def main():
+ """Mangle homebrew paths for OSX"""
+ # make the mangled path
+ mangled_dir = "/usr/local/conda_mangled"
+ os.makedirs(mangled_dir, exist_ok=True)
+
+ excluded_dirs = [
+ "conda_mangled",
+ "miniconda",
+ ]
+
+ # move all of the stuff except miniconda
+ potential_dirs = os.listdir("/usr/local")
+ for _pth in potential_dirs:
+ pth = os.path.join("/usr/local", _pth)
+ if _pth in excluded_dirs:
+ continue
+ mangled_pth = os.path.join(mangled_dir, _pth)
+ shutil.move(pth, mangled_pth)
+ print("MOVED %s -> %s" % (pth, mangled_pth), flush=True)
diff --git a/recipe/conda_forge_ci_setup/upload_or_check_non_existence.py b/recipe/conda_forge_ci_setup/upload_or_check_non_existence.py
new file mode 100644
index 000000000..6425572df
--- /dev/null
+++ b/recipe/conda_forge_ci_setup/upload_or_check_non_existence.py
@@ -0,0 +1,310 @@
+#!/usr/bin/env python
+from __future__ import print_function
+
+import contextlib
+import os
+import shutil
+import subprocess
+import click
+import tempfile
+import time
+
+from binstar_client.utils import get_server_api
+import binstar_client.errors
+from conda_build.conda_interface import subdir as conda_subdir
+from conda_build.conda_interface import get_index
+import conda_build.api
+import conda_build.config
+
+from .feedstock_outputs import request_copy, split_pkg
+
+
+def get_built_distribution_names_and_subdirs(recipe_dir, variant):
+ additional_config = {}
+ for v in variant:
+ variant_dir, base_name = os.path.split(v)
+ clobber_file = os.path.join(variant_dir, 'clobber_' + base_name)
+ if os.path.exists(clobber_file):
+ additional_config = {
+ 'clobber_sections_file': clobber_file
+ }
+ break
+
+ metas = conda_build.api.render(
+ recipe_dir,
+ variant_config_files=variant,
+ finalize=False,
+ bypass_env_check=True,
+ **additional_config)
+
+ # Print the skipped distributions
+ skipped_distributions = [m for m, _, _ in metas if m.skip()]
+ for m in skipped_distributions:
+ print("{} configuration was skipped in build/skip.".format(m.name()))
+
+ subdirs = set([m.config.target_subdir for m, _, _ in metas if not m.skip()])
+ return set([m.name() for m, _, _ in metas if not m.skip()]), subdirs
+
+
+@contextlib.contextmanager
+def get_temp_token(token):
+ dn = tempfile.mkdtemp()
+ fn = os.path.join(dn, "binstar.token")
+ with open(fn, "w") as fh:
+ fh.write(token)
+ yield fn
+ try:
+ shutil.rmtree(dn)
+ except Exception:
+        print(f"Failed to remove temporary directory '{dn}'")
+
+
+def built_distribution_already_exists(cli, name, version, fname, owner):
+ """
+ Checks to see whether the built recipe (aka distribution) already
+ exists on the owner/user's binstar account.
+
+ """
+ folder, basename = os.path.split(fname)
+ _, platform = os.path.split(folder)
+ distro_name = '{}/{}'.format(platform, basename)
+
+ try:
+ dist_info = cli.distribution(owner, name, version, distro_name)
+ except binstar_client.errors.NotFound:
+ dist_info = {}
+
+ exists = bool(dist_info)
+    # Unfortunately, we cannot check the md5 equality of the built distribution, as
+ # this will depend on fstat information such as modification date (because
+ # distributions are tar files). Therefore we can only assume that the distribution
+ # just built, and the one on anaconda.org are the same.
+ # if exists:
+ # md5_on_binstar = dist_info.get('md5')
+ # with open(fname, 'rb') as fh:
+ # md5_of_build = hashlib.md5(fh.read()).hexdigest()
+ #
+ # if md5_on_binstar != md5_of_build:
+ # raise ValueError('This build ({}), and the build already on binstar '
+ # '({}) are different.'.format(md5_of_build, md5_on_binstar)) # noqa
+ return exists
+
+
+def upload(token_fn, path, owner, channels, private_upload=False):
+ cmd = ['anaconda', '--quiet', '-t', token_fn,
+ 'upload', path, '--user={}'.format(owner),
+ '--channel={}'.format(channels)]
+ if private_upload:
+ cmd.append("--private")
+ subprocess.check_call(cmd, env=os.environ)
+
+
+def delete_dist(token_fn, path, owner, channels):
+ parts = path.split(os.sep)
+ path = os.path.join(parts[-2], parts[-1])
+ _, name, ver, _ = split_pkg(path)
+ subprocess.check_call(
+ [
+ 'anaconda', '--quiet', '-t', token_fn,
+ 'remove', '-f', '%s/%s/%s/%s/%s' % (
+ owner, name, ver, parts[-2], parts[-1]),
+ ],
+ env=os.environ
+ )
+
+
+def distribution_exists_on_channel(binstar_cli, meta, fname, owner, channel='main'):
+ """
+ Determine whether a distribution exists on a specific channel.
+
+ Note from @pelson: As far as I can see, there is no easy way to do this on binstar.
+
+ """
+ channel_url = '/'.join([owner, 'label', channel])
+ fname = os.path.basename(fname)
+
+ distributions_on_channel = get_index([channel_url],
+ prepend=False, use_cache=False)
+
+ try:
+ on_channel = (distributions_on_channel[fname]['subdir'] ==
+ conda_subdir)
+ except KeyError:
+ on_channel = False
+
+ return on_channel
+
+
+def upload_or_check(
+ feedstock,
+ recipe_dir,
+ owner,
+ channel,
+ variant,
+ validate=False,
+ git_sha=None,
+ private_upload=False,
+ prod_owner="conda-forge",
+ comment_on_error=True,
+):
+ if validate and "STAGING_BINSTAR_TOKEN" in os.environ:
+ token = os.environ["STAGING_BINSTAR_TOKEN"]
+ print("Using STAGING_BINSTAR_TOKEN for anaconda.org uploads to %s." % owner)
+ else:
+ token = os.environ.get('BINSTAR_TOKEN')
+ print("Using BINSTAR_TOKEN for anaconda.org uploads to %s." % owner)
+
+    # On Azure, secret variables are not substituted in PR builds, so instead of
+    # being empty (as on other CIs) the token holds a literal placeholder like '$(secret-name)'
+ if token and token.startswith('$('):
+ token = None
+
+ cli = get_server_api(token=token)
+
+ allowed_dist_names, allowed_subdirs = get_built_distribution_names_and_subdirs(
+ recipe_dir, variant
+ )
+
+ # The list of built distributions
+ paths = set()
+ for subdir in list(allowed_subdirs) + [conda_build.config.subdir, 'noarch']:
+ if not os.path.exists(os.path.join(conda_build.config.croot, subdir)):
+ continue
+ for p in os.listdir(os.path.join(conda_build.config.croot, subdir)):
+ paths.add(os.path.join(subdir, p))
+
+ built_distributions = [
+ (
+ split_pkg(path)[1],
+ split_pkg(path)[2],
+ os.path.join(conda_build.config.croot, path)
+ )
+ # TODO: flip this over to .conda when that format
+ # is in flight
+ for path in paths if (path.endswith('.tar.bz2') or path.endswith(".conda"))
+ if split_pkg(path)[1] in allowed_dist_names
+ ]
+
+ # This is the actual fix where we create the token file once and reuse it
+ # for all uploads
+ if token:
+ with get_temp_token(cli.token) as token_fn:
+ if validate:
+ to_copy_paths = []
+ for name, version, path in built_distributions:
+ need_copy = True
+ for i in range(0, 5):
+ time.sleep(i*15)
+ if built_distribution_already_exists(
+ cli, name, version, path, prod_owner
+ ):
+ # package already in production
+ need_copy = False
+ break
+ elif not built_distribution_already_exists(
+ cli, name, version, path, owner
+ ):
+ upload(token_fn, path, owner, channel)
+ break
+ else:
+ print(
+ "Distribution {} already exists on {}. "
+ "Waiting another {} seconds to "
+ "try uploading again.".format(path, owner, (i+1) * 15))
+ else:
+ print(
+ "WARNING: Distribution {} already existed in "
+ "{} for a while. Deleting and "
+ "re-uploading.".format(path, owner)
+ )
+ delete_dist(token_fn, path, owner, channel)
+ upload(token_fn, path, owner, channel)
+
+ if need_copy:
+ to_copy_paths.append(path)
+
+ if to_copy_paths and not request_copy(
+ feedstock,
+ to_copy_paths,
+ channel,
+ git_sha=git_sha,
+ comment_on_error=comment_on_error,
+ ):
+ raise RuntimeError(
+ "copy from staging to production channel failed")
+ else:
+ return True
+ else:
+ for name, version, path in built_distributions:
+ if not built_distribution_already_exists(
+ cli, name, version, path, owner
+ ):
+ upload(token_fn, path, owner, channel, private_upload=private_upload)
+ else:
+ print(
+ 'Distribution {} already exists for {}'.format(path, owner))
+ return True
+ else:
+ for name, version, path in built_distributions:
+ if not built_distribution_already_exists(cli, name, version, path, owner):
+ print(
+ "Distribution {} is new for {}, but no upload is taking place "
+ "because the BINSTAR_TOKEN/STAGING_BINSTAR_TOKEN "
+ "is missing or empty.".format(path, owner))
+ else:
+ print('Distribution {} already exists for {}'.format(path, owner))
+ return False
+
+
+def retry_upload_or_check(
+ feedstock,
+ recipe_dir,
+ owner,
+ channel,
+ variant,
+ validate=False,
+ git_sha=None,
+ private_upload=False,
+):
+    # Perform a backoff in case we fail. This should limit failures caused by
+    # transient issues with the anaconda.org API.
+ n_try = 10
+ for i in range(1, n_try):
+ try:
+ res = upload_or_check(
+ feedstock, recipe_dir, owner, channel, variant,
+ validate=validate, git_sha=git_sha,
+ comment_on_error=True if i == n_try-1 else False,
+ private_upload=private_upload
+ )
+ return res
+ except Exception as e:
+ # exponential backoff, wait at least 10 seconds
+ timeout = max(1.75 ** i, 10)
+ print(
+ "Failed to upload due to {}. Trying again in {} seconds".format(
+ e, timeout))
+ time.sleep(timeout)
+ raise TimeoutError("Did not manage to upload package. Failing.")
+
+
+@click.command()
+@click.argument('recipe_dir',
+ type=click.Path(exists=True, file_okay=False, dir_okay=True),
+ ) # help='the conda recipe directory'
+@click.argument('owner') # help='the binstar owner/user'
+@click.option('--channel', default='main',
+ help='the anaconda label channel')
+@click.option('--variant', '-m', multiple=True,
+ type=click.Path(exists=True, file_okay=True, dir_okay=False),
+ help="path to conda_build_config.yaml defining your base matrix")
+def main(recipe_dir, owner, channel, variant):
+ """
+ Upload or check consistency of a built version of a conda recipe with binstar.
+ Note: The existence of the BINSTAR_TOKEN environment variable determines
+ whether the upload should actually take place."""
+ return retry_upload_or_check(None, recipe_dir, owner, channel, variant)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/recipe/cross_compile_support.sh b/recipe/cross_compile_support.sh
new file mode 100644
index 000000000..bf8f1c8d7
--- /dev/null
+++ b/recipe/cross_compile_support.sh
@@ -0,0 +1,33 @@
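+# Cross-compilation support: when the .ci_support config's target_platform (for
+# example linux-aarch64, a hypothetical value here) differs from the platform that
+# conda reports, this script flags the build as cross-compiling, records
+# build_platform in the config, and prepares a sysroot environment plus a QEMU
+# emulator path for Linux-to-Linux cross builds.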
+BUILD_PLATFORM=$(conda info --json | jq -r .platform)
+
+if [ -f ${CI_SUPPORT}/${CONFIG}.yaml ]; then
+ HOST_PLATFORM=$(cat ${CI_SUPPORT}/${CONFIG}.yaml | shyaml get-value target_platform.0 ${BUILD_PLATFORM})
+fi
+
+HOST_PLATFORM=${HOST_PLATFORM:-${BUILD_PLATFORM}}
+
+if [[ "${HOST_PLATFORM}" != "${BUILD_PLATFORM}" ]]; then
+ echo "export CONDA_BUILD_CROSS_COMPILATION=1" >> "${CONDA_PREFIX}/etc/conda/activate.d/conda-forge-ci-setup-activate.sh"
+ export CONDA_BUILD_CROSS_COMPILATION=1
+ if [ -f ${CI_SUPPORT}/${CONFIG}.yaml ]; then
+ echo "build_platform:" >> ${CI_SUPPORT}/${CONFIG}.yaml
+ echo "- ${BUILD_PLATFORM}" >> ${CI_SUPPORT}/${CONFIG}.yaml
+ fi
+ if [[ "${BUILD_PLATFORM}" == "linux-64" && "${HOST_PLATFORM}" == linux-* ]]; then
+ mamba create -n sysroot_${HOST_PLATFORM} --yes --quiet sysroot_${HOST_PLATFORM}
+ if [[ -f ${CI_SUPPORT}/../recipe/yum_requirements.txt ]]; then
+ for pkg in $(cat ${CI_SUPPORT}/../recipe/yum_requirements.txt); do
+ if [[ "${pkg}" != "#"* && "${pkg}" != "" ]]; then
+ mamba install "${pkg}-cos7-${HOST_PLATFORM:6}" -n sysroot_${HOST_PLATFORM} --yes --quiet || true
+ fi
+ done
+ fi
+ export QEMU_LD_PREFIX=$(find ${CONDA_PREFIX}/envs/sysroot_${HOST_PLATFORM} -name sysroot | head -1)
+ if [ -f ${CI_SUPPORT}/${CONFIG}.yaml ]; then
+ echo "CMAKE_CROSSCOMPILING_EMULATOR: " >> ${CI_SUPPORT}/${CONFIG}.yaml
+ echo "- /usr/bin/qemu-$(echo $HOST_PLATFORM | cut -b 7-)-static" >> ${CI_SUPPORT}/${CONFIG}.yaml
+ echo "CROSSCOMPILING_EMULATOR: " >> ${CI_SUPPORT}/${CONFIG}.yaml
+ echo "- /usr/bin/qemu-$(echo $HOST_PLATFORM | cut -b 7-)-static" >> ${CI_SUPPORT}/${CONFIG}.yaml
+ fi
+ fi
+fi
diff --git a/recipe/download_osx_sdk.sh b/recipe/download_osx_sdk.sh
new file mode 100644
index 000000000..bf8f1c8d7
--- /dev/null
+++ b/recipe/download_osx_sdk.sh
@@ -0,0 +1,33 @@
+BUILD_PLATFORM=$(conda info --json | jq -r .platform)
+
+if [ -f ${CI_SUPPORT}/${CONFIG}.yaml ]; then
+ HOST_PLATFORM=$(cat ${CI_SUPPORT}/${CONFIG}.yaml | shyaml get-value target_platform.0 ${BUILD_PLATFORM})
+fi
+
+HOST_PLATFORM=${HOST_PLATFORM:-${BUILD_PLATFORM}}
+
+if [[ "${HOST_PLATFORM}" != "${BUILD_PLATFORM}" ]]; then
+ echo "export CONDA_BUILD_CROSS_COMPILATION=1" >> "${CONDA_PREFIX}/etc/conda/activate.d/conda-forge-ci-setup-activate.sh"
+ export CONDA_BUILD_CROSS_COMPILATION=1
+ if [ -f ${CI_SUPPORT}/${CONFIG}.yaml ]; then
+ echo "build_platform:" >> ${CI_SUPPORT}/${CONFIG}.yaml
+ echo "- ${BUILD_PLATFORM}" >> ${CI_SUPPORT}/${CONFIG}.yaml
+ fi
+ if [[ "${BUILD_PLATFORM}" == "linux-64" && "${HOST_PLATFORM}" == linux-* ]]; then
+ mamba create -n sysroot_${HOST_PLATFORM} --yes --quiet sysroot_${HOST_PLATFORM}
+ if [[ -f ${CI_SUPPORT}/../recipe/yum_requirements.txt ]]; then
+ for pkg in $(cat ${CI_SUPPORT}/../recipe/yum_requirements.txt); do
+ if [[ "${pkg}" != "#"* && "${pkg}" != "" ]]; then
+ mamba install "${pkg}-cos7-${HOST_PLATFORM:6}" -n sysroot_${HOST_PLATFORM} --yes --quiet || true
+ fi
+ done
+ fi
+ export QEMU_LD_PREFIX=$(find ${CONDA_PREFIX}/envs/sysroot_${HOST_PLATFORM} -name sysroot | head -1)
+ if [ -f ${CI_SUPPORT}/${CONFIG}.yaml ]; then
+ echo "CMAKE_CROSSCOMPILING_EMULATOR: " >> ${CI_SUPPORT}/${CONFIG}.yaml
+ echo "- /usr/bin/qemu-$(echo $HOST_PLATFORM | cut -b 7-)-static" >> ${CI_SUPPORT}/${CONFIG}.yaml
+ echo "CROSSCOMPILING_EMULATOR: " >> ${CI_SUPPORT}/${CONFIG}.yaml
+ echo "- /usr/bin/qemu-$(echo $HOST_PLATFORM | cut -b 7-)-static" >> ${CI_SUPPORT}/${CONFIG}.yaml
+ fi
+ fi
+fi
diff --git a/recipe/install_cuda.bat b/recipe/install_cuda.bat
new file mode 100644
index 000000000..0e77ae86d
--- /dev/null
+++ b/recipe/install_cuda.bat
@@ -0,0 +1,237 @@
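+:: Install the requested CUDA toolkit version (passed as %1) on a Windows CI worker
+:: by downloading the official NVIDIA installer, verifying its MD5 checksum, and
+:: running it silently with a reduced set of components.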
+set "CUDA_VERSION=%1"
+
+:: We define a default subset of components to be installed for faster installation times
+:: and reduced storage usage (CI is limited to 10GB). Full list of components is available at
+:: https://docs.nvidia.com/cuda/archive/%CUDA_VERSION%/cuda-installation-guide-microsoft-windows/index.html
+set "VAR=nvcc_%CUDA_VERSION% cuobjdump_%CUDA_VERSION% nvprune_%CUDA_VERSION% cupti_%CUDA_VERSION%"
+set "VAR=%VAR% memcheck_%CUDA_VERSION% nvdisasm_%CUDA_VERSION% nvprof_%CUDA_VERSION% cublas_%CUDA_VERSION%"
+set "VAR=%VAR% cublas_dev_%CUDA_VERSION% cudart_%CUDA_VERSION% cufft_%CUDA_VERSION% cufft_dev_%CUDA_VERSION%"
+set "VAR=%VAR% curand_%CUDA_VERSION% curand_dev_%CUDA_VERSION% cusolver_%CUDA_VERSION% cusolver_dev_%CUDA_VERSION%"
+set "VAR=%VAR% cusparse_%CUDA_VERSION% cusparse_dev_%CUDA_VERSION% npp_%CUDA_VERSION% npp_dev_%CUDA_VERSION%"
+set "VAR=%VAR% nvrtc_%CUDA_VERSION% nvrtc_dev_%CUDA_VERSION% nvml_dev_%CUDA_VERSION%"
+set "VAR=%VAR% visual_studio_integration_%CUDA_VERSION%"
+set "CUDA_COMPONENTS=%VAR%"
+
+if "%CUDA_VERSION%" == "9.2" goto cuda92
+if "%CUDA_VERSION%" == "10.0" goto cuda100
+if "%CUDA_VERSION%" == "10.1" goto cuda101
+if "%CUDA_VERSION%" == "10.2" goto cuda102
+if "%CUDA_VERSION%" == "11.0" goto cuda110
+if "%CUDA_VERSION%" == "11.1" goto cuda111
+if "%CUDA_VERSION%" == "11.2" goto cuda1122
+if "%CUDA_VERSION%" == "11.3" goto cuda1131
+if "%CUDA_VERSION%" == "11.4" goto cuda114
+if "%CUDA_VERSION%" == "11.5" goto cuda1151
+if "%CUDA_VERSION%" == "11.6" goto cuda116
+if "%CUDA_VERSION%" == "11.7" goto cuda117
+
+echo CUDA '%CUDA_VERSION%' is not supported
+exit /b 1
+
+:: Define URLs per version
+:cuda92
+set "CUDA_NETWORK_INSTALLER_URL=https://developer.nvidia.com/compute/cuda/9.2/Prod2/network_installers2/cuda_9.2.148_win10_network"
+set "CUDA_NETWORK_INSTALLER_CHECKSUM=2bf9ae67016867b68f361bf50d2b9e7b"
+set "CUDA_INSTALLER_URL=https://developer.nvidia.com/compute/cuda/9.2/Prod2/local_installers2/cuda_9.2.148_win10"
+set "CUDA_INSTALLER_CHECKSUM=f6c170a7452098461070dbba3e6e58f1"
+set "CUDA_PATCH_URL=https://developer.nvidia.com/compute/cuda/9.2/Prod2/patches/1/cuda_9.2.148.1_windows"
+set "CUDA_PATCH_CHECKSUM=09e20653f1346d2461a9f8f1a7178ba2"
+set "CUDA_COMPONENTS=%CUDA_COMPONENTS% nvgraph_%CUDA_VERSION% nvgraph_dev_%CUDA_VERSION%"
+goto cuda_common
+
+
+:cuda100
+set "CUDA_NETWORK_INSTALLER_URL=https://developer.nvidia.com/compute/cuda/10.0/Prod/network_installers/cuda_10.0.130_win10_network"
+set "CUDA_NETWORK_INSTALLER_CHECKSUM=3312deac9c939bd78d0e7555606c22fc"
+set "CUDA_INSTALLER_URL=https://developer.nvidia.com/compute/cuda/10.0/Prod/local_installers/cuda_10.0.130_411.31_win10"
+set "CUDA_INSTALLER_CHECKSUM=90fafdfe2167ac25432db95391ca954e"
+set "CUDA_COMPONENTS=%CUDA_COMPONENTS% nvgraph_%CUDA_VERSION% nvgraph_dev_%CUDA_VERSION%"
+goto cuda_common
+
+
+:cuda101
+set "CUDA_NETWORK_INSTALLER_URL=http://developer.download.nvidia.com/compute/cuda/10.1/Prod/network_installers/cuda_10.1.243_win10_network.exe"
+set "CUDA_NETWORK_INSTALLER_CHECKSUM=fae0c958440511576691b825d4599e93"
+set "CUDA_INSTALLER_URL=http://developer.download.nvidia.com/compute/cuda/10.1/Prod/local_installers/cuda_10.1.243_426.00_win10.exe"
+set "CUDA_INSTALLER_CHECKSUM=b54cf32683f93e787321dcc2e692ff69"
+set "CUDA_COMPONENTS=%CUDA_COMPONENTS% nvgraph_%CUDA_VERSION% nvgraph_dev_%CUDA_VERSION%"
+goto cuda_common
+
+
+:cuda102
+set "CUDA_NETWORK_INSTALLER_URL=http://developer.download.nvidia.com/compute/cuda/10.2/Prod/network_installers/cuda_10.2.89_win10_network.exe"
+set "CUDA_NETWORK_INSTALLER_CHECKSUM=60e0f16845d731b690179606f385041e"
+set "CUDA_INSTALLER_URL=http://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda_10.2.89_441.22_win10.exe"
+set "CUDA_INSTALLER_CHECKSUM=d9f5b9f24c3d3fc456a3c789f9b43419"
+set "CUDA_PATCH_URL=http://developer.download.nvidia.com/compute/cuda/10.2/Prod/patches/1/cuda_10.2.1_win10.exe"
+set "CUDA_PATCH_CHECKSUM=9d751ae129963deb7202f1d85149c69d"
+set "CUDA_COMPONENTS=%CUDA_COMPONENTS% nvgraph_%CUDA_VERSION% nvgraph_dev_%CUDA_VERSION%"
+goto cuda_common
+
+
+:cuda110
+set "CUDA_NETWORK_INSTALLER_URL=http://developer.download.nvidia.com/compute/cuda/11.0.3/network_installers/cuda_11.0.3_win10_network.exe"
+set "CUDA_NETWORK_INSTALLER_CHECKSUM=1b88bf7bb8e50207bbb53ed2033f93f3"
+set "CUDA_INSTALLER_URL=http://developer.download.nvidia.com/compute/cuda/11.0.3/local_installers/cuda_11.0.3_451.82_win10.exe"
+set "CUDA_INSTALLER_CHECKSUM=80ae0fdbe04759123f3cab81f2aadabd"
+set "CUDA_COMPONENTS=%CUDA_COMPONENTS% nsight_nvtx_%CUDA_VERSION%"
+goto cuda_common
+
+
+:cuda111
+set "CUDA_NETWORK_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.1.1/network_installers/cuda_11.1.1_win10_network.exe"
+set "CUDA_NETWORK_INSTALLER_CHECKSUM=7e36e50ee486a84612adfd85500a9971"
+set "CUDA_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.1.1/local_installers/cuda_11.1.1_456.81_win10.exe"
+set "CUDA_INSTALLER_CHECKSUM=a89dfad35fc1adf02a848a9c06cfff15"
+set "CUDA_COMPONENTS=%CUDA_COMPONENTS% nsight_nvtx_%CUDA_VERSION%"
+goto cuda_common
+
+
+:cuda112
+set "CUDA_NETWORK_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.2.0/network_installers/cuda_11.2.0_win10_network.exe"
+set "CUDA_NETWORK_INSTALLER_CHECKSUM=ab02a25eed1201cc3e414be943a242df"
+set "CUDA_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.2.0/local_installers/cuda_11.2.0_460.89_win10.exe"
+set "CUDA_INSTALLER_CHECKSUM=92f38c37ce9c6c11d27c10701b040256"
+set "CUDA_COMPONENTS=%CUDA_COMPONENTS% nsight_nvtx_%CUDA_VERSION%"
+goto cuda_common
+
+
+:cuda1121
+set "CUDA_NETWORK_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.2.1/network_installers/cuda_11.2.1_win10_network.exe"
+set "CUDA_NETWORK_INSTALLER_CHECKSUM=de16fac595def6da33424e8bb5539bab"
+set "CUDA_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.2.1/local_installers/cuda_11.2.1_461.09_win10.exe"
+set "CUDA_INSTALLER_CHECKSUM=c34b541d8706b5aa0d8ba7313fff78e7"
+set "CUDA_COMPONENTS=%CUDA_COMPONENTS% nsight_nvtx_%CUDA_VERSION%"
+goto cuda_common
+
+
+:cuda1122
+set "CUDA_NETWORK_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.2.2/network_installers/cuda_11.2.2_win10_network.exe"
+set "CUDA_NETWORK_INSTALLER_CHECKSUM=67257f6a471ffbd49068793a699cecb7"
+set "CUDA_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.2.2/local_installers/cuda_11.2.2_461.33_win10.exe"
+set "CUDA_INSTALLER_CHECKSUM=f9da6687d8a4f137ff14f8389b496e0a"
+set "CUDA_COMPONENTS=%CUDA_COMPONENTS% nsight_nvtx_%CUDA_VERSION%"
+goto cuda_common
+
+:cuda113
+set "CUDA_NETWORK_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.3.0/network_installers/cuda_11.3.0_win10_network.exe"
+set "CUDA_NETWORK_INSTALLER_CHECKSUM=dddd7b22fcbb530b467db764eeb8439f"
+set "CUDA_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.3.0/local_installers/cuda_11.3.0_465.89_win10.exe"
+set "CUDA_INSTALLER_CHECKSUM=564c6ecf0b82f481d291519387e71db5"
+set "CUDA_COMPONENTS=%CUDA_COMPONENTS% nsight_nvtx_%CUDA_VERSION%"
+goto cuda_common
+
+:cuda1131
+set "CUDA_NETWORK_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.3.1/network_installers/cuda_11.3.1_win10_network.exe"
+set "CUDA_NETWORK_INSTALLER_CHECKSUM=8e812588cd299fe6e8d1e85b55bddf28"
+set "CUDA_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.3.1/local_installers/cuda_11.3.1_465.89_win10.exe"
+set "CUDA_INSTALLER_CHECKSUM=7bf61cf7b059ba08197c70035879c352"
+set "CUDA_COMPONENTS=%CUDA_COMPONENTS% nsight_nvtx_%CUDA_VERSION%"
+goto cuda_common
+
+:cuda114
+set "CUDA_NETWORK_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.4.0/network_installers/cuda_11.4.0_win10_network.exe"
+set "CUDA_NETWORK_INSTALLER_CHECKSUM=140811a2ca1a0993fcc8ee1a16d21a79"
+set "CUDA_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.4.0/local_installers/cuda_11.4.0_471.11_win10.exe"
+set "CUDA_INSTALLER_CHECKSUM=423695ea246810200e210f07a0e0bd43"
+set "CUDA_COMPONENTS=%CUDA_COMPONENTS% nsight_nvtx_%CUDA_VERSION%"
+goto cuda_common
+
+:cuda115
+set "CUDA_NETWORK_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.5.0/network_installers/cuda_11.5.0_win10_network.exe"
+set "CUDA_NETWORK_INSTALLER_CHECKSUM=499fb5f0d25424a4a52f901a78beceef"
+set "CUDA_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.5.0/local_installers/cuda_11.5.0_496.13_win10.exe"
+set "CUDA_INSTALLER_CHECKSUM=9ae3759817c87dc8ae6f0d38cb164361"
+set "CUDA_COMPONENTS=%CUDA_COMPONENTS% nsight_nvtx_%CUDA_VERSION%"
+goto cuda_common
+
+:cuda1151
+set "CUDA_NETWORK_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.5.1/network_installers/cuda_11.5.1_windows_network.exe"
+set "CUDA_NETWORK_INSTALLER_CHECKSUM=0e70240480f3d63bc17adcb046c01580"
+set "CUDA_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.5.1/local_installers/cuda_11.5.1_496.13_windows.exe"
+set "CUDA_INSTALLER_CHECKSUM=74d4a0723ca179f56f6877e72c9b1694"
+set "CUDA_COMPONENTS=%CUDA_COMPONENTS% nsight_nvtx_%CUDA_VERSION%"
+goto cuda_common
+
+:cuda116
+set "CUDA_NETWORK_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.6.0/network_installers/cuda_11.6.0_windows_network.exe"
+set "CUDA_NETWORK_INSTALLER_CHECKSUM=99a5d04c00eeac430c7f34b013c5b7c6"
+set "CUDA_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.6.0/local_installers/cuda_11.6.0_511.23_windows.exe"
+set "CUDA_INSTALLER_CHECKSUM=7a91a7a7696e869ff8d90c52faf48f40"
+set "CUDA_COMPONENTS=%CUDA_COMPONENTS% nsight_nvtx_%CUDA_VERSION%"
+goto cuda_common
+
+:cuda117
+set "CUDA_NETWORK_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.7.0/network_installers/cuda_11.7.0_windows_network.exe"
+set "CUDA_NETWORK_INSTALLER_CHECKSUM=89397d589806387de679b97565a2e800"
+set "CUDA_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.7.0/local_installers/cuda_11.7.0_516.01_windows.exe"
+set "CUDA_INSTALLER_CHECKSUM=a2388d0044b2dd6a3469938eb6108c85"
+set "CUDA_COMPONENTS=%CUDA_COMPONENTS% nsight_nvtx_%CUDA_VERSION%"
+goto cuda_common
+
+
+:: The actual installation logic
+:cuda_common
+
+:: We expect this CUDA_PATH
+set "CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION%"
+
+echo Downloading CUDA version %CUDA_VERSION% installer from %CUDA_INSTALLER_URL%
+echo Expected MD5: %CUDA_INSTALLER_CHECKSUM%
+
+:: Download installer
+curl --retry 3 -k -L %CUDA_INSTALLER_URL% --output cuda_installer.exe
+if errorlevel 1 (
+ echo Problem downloading installer...
+ exit /b 1
+)
+:: Check md5
+openssl md5 cuda_installer.exe | findstr %CUDA_INSTALLER_CHECKSUM%
+if errorlevel 1 (
+ echo Checksum does not match!
+ exit /b 1
+)
+:: Run installer
+start /wait cuda_installer.exe -s %CUDA_COMPONENTS%
+if errorlevel 1 (
+ echo Problem installing CUDA toolkit...
+ exit /b 1
+)
+del cuda_installer.exe
+
+:: If patches are needed, download and apply
+if not "%CUDA_PATCH_URL%"=="" (
+ echo This version requires an additional patch
+ curl --retry 3 -k -L %CUDA_PATCH_URL% --output cuda_patch.exe
+ if errorlevel 1 (
+ echo Problem downloading patch installer...
+ exit /b 1
+ )
+ openssl md5 cuda_patch.exe | findstr %CUDA_PATCH_CHECKSUM%
+ if errorlevel 1 (
+ echo Checksum does not match!
+ exit /b 1
+ )
+ start /wait cuda_patch.exe -s
+ if errorlevel 1 (
+ echo Problem running patch installer...
+ exit /b 1
+ )
+ del cuda_patch.exe
+)
+
+:: This should exist by now!
+if not exist "%CUDA_PATH%\bin\nvcc.exe" (
+ echo CUDA toolkit installation failed!
+ exit /b 1
+)
+
+:: Notes about nvcuda.dll
+:: ----------------------
+:: We should also provide the drivers (nvcuda.dll), but the installer will not
+:: proceed without a physical Nvidia card attached (not the case in the CI).
+:: Expanding `\Display.Driver\nvcuda.64.dl_` to `C:\Windows\System32`
+:: does not work anymore (.dl_ files are not PE-COFF according to Dependencies.exe).
+:: Forcing this results in a DLL error 193. In short, there is no way to provide
+:: nvcuda.dll on a GPU-less machine without breaking the EULA (i.e. zipping nvcuda.dll
+:: from a working installation).
diff --git a/recipe/meta.yaml b/recipe/meta.yaml
index 7dc15a553..cf3357e6a 100644
--- a/recipe/meta.yaml
+++ b/recipe/meta.yaml
@@ -10,8 +10,8 @@ source:
sha256: f6c45d5788f51dbe1cc55e1010f3e9ebd18b6c0f21907fc35499468a59827eef
build:
- number: 1
- skip: True # [win or py != 39]
+ number: 2
+ skip: True # [py != 39]
requirements:
host:
diff --git a/recipe/run_conda_forge_build_setup_linux b/recipe/run_conda_forge_build_setup_linux
new file mode 100755
index 000000000..e16cdc9d0
--- /dev/null
+++ b/recipe/run_conda_forge_build_setup_linux
@@ -0,0 +1,101 @@
+#!/bin/bash
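+# Set up a conda-forge CI build on Linux: configure conda, set CONDA_BLD_PATH and
+# CPU_COUNT, work around the missing libcuda.so in the CUDA 9.2 images, record the
+# CI provider in the .ci_support config, write an activation script with the
+# resulting variables, and source the cross-compilation / macOS SDK helpers.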
+
+export PYTHONUNBUFFERED=1
+
+conda config --env --set show_channel_urls true
+conda config --env --set auto_update_conda false
+conda config --env --set add_pip_as_python_dependency false
+# Otherwise packages that don't explicitly pin openssl in their requirements
+# are forced to the newest OpenSSL version, even if their dependencies don't
+# support it.
+conda config --env --append aggressive_update_packages ca-certificates # add something to make sure the key exists
+conda config --env --remove-key aggressive_update_packages
+conda config --env --append aggressive_update_packages ca-certificates
+conda config --env --append aggressive_update_packages certifi
+
+export "CONDA_BLD_PATH=${FEEDSTOCK_ROOT}/build_artifacts"
+
+set +u
+
+# Don't set the number of CPUs for some CI systems
+if [[ "${CI}" == "drone" || "${CI}" == "github_actions" ]]; then
+ unset CPU_COUNT
+else
+ # 2 cores available on CircleCI workers: https://discuss.circleci.com/t/what-runs-on-the-node-container-by-default/1443
+ # 2 cores available on TravisCI workers: https://docs.travis-ci.com/user/reference/overview/
+ # CPU_COUNT is passed through conda build: https://github.com/conda/conda-build/pull/1149
+ export CPU_COUNT="${CPU_COUNT:-2}"
+fi
+
+# Need strict priority for
+# - pypy as defaults is not fixed
+# - cos7 as defaults is not fixed
+# but people can turn this off
+conda config --env --set channel_priority $(cat ${FEEDSTOCK_ROOT}/conda-forge.yml | shyaml get-value channel_priority strict || echo strict)
+
+# the upstream image nvidia/cuda:9.2-devel-centos6 (on which linux-anvil-cuda:9.2 is based)
+# does not contain libcuda.so; it should be installed in ${CUDA_HOME}/compat-${CUDA_VER},
+# however cuda-compat-${CUDA_VER}.x86_64.rpm is only packaged for 10.x; we abuse
+# cuda-compat-10.0 for this, since the actual dependency containing libcuda for 9.2
+# (xorg-x11-drv-nvidia-libs) pulls in a huge amount of dependencies;
+# this cannot be fixed in the conda-forge linux-anvil-cuda images for licensing reasons
+# (we cannot add CUDA packages to our image layers), so we add it here for CI purposes only.
+if [[ ! -z "$CUDA_HOME" && -d /usr/local/cuda-9.2 ]]; then
+ # note: $CUDA_HOME is just a symlink to /usr/local/cuda-${CUDA_VER}
+
+ # register cuda-repo with installer, cf.
+ # https://developer.download.nvidia.com/compute/cuda/repos/rhel6/x86_64/
+ if [[ "$(uname -m)" == "x86_64" ]]; then
+ curl -O https://developer.download.nvidia.com/compute/cuda/repos/rhel6/x86_64/cuda-repo-rhel6-9.2.148-1.x86_64.rpm
+ fi
+ if [[ "$(uname -m)" == "ppc64le" ]]; then
+ curl -O https://developer.download.nvidia.com/compute/cuda/repos/rhel7/ppc64le/cuda-repo-rhel7-9.2.148-1.ppc64le.rpm
+ fi
+ sudo yum localinstall -y cuda-repo-*.rpm
+ rm cuda-repo-*.rpm
+ # install latest cuda-compat-10-0
+ sudo yum install -y cuda-compat-10-0.$(uname -m) ;
+ # note: this path is added to ldconfig in linux-anvil-cuda:9.2
+ if [[ ! -f "/usr/local/cuda-10.0/compat/libcuda.so" ]]; then exit 1; fi
+fi
+
+if [ ! -z "$CONFIG" ]; then
+ if [ ! -z "$CI" ]; then
+ echo "" >> ${CI_SUPPORT}/${CONFIG}.yaml
+ echo "CI:" >> ${CI_SUPPORT}/${CONFIG}.yaml
+ echo "- ${CI}" >> ${CI_SUPPORT}/${CONFIG}.yaml
+ echo "" >> ${CI_SUPPORT}/${CONFIG}.yaml
+ fi
+ cat ${CI_SUPPORT}/${CONFIG}.yaml
+fi
+
+set -u
+
+mkdir -p "${CONDA_PREFIX}/etc/conda/activate.d"
+echo "export CONDA_BLD_PATH='${CONDA_BLD_PATH}'" > "${CONDA_PREFIX}/etc/conda/activate.d/conda-forge-ci-setup-activate.sh"
+if [ -n "${CPU_COUNT-}" ]; then
+ echo "export CPU_COUNT='${CPU_COUNT}'" >> "${CONDA_PREFIX}/etc/conda/activate.d/conda-forge-ci-setup-activate.sh"
+fi
+echo "export PYTHONUNBUFFERED='${PYTHONUNBUFFERED}'" >> "${CONDA_PREFIX}/etc/conda/activate.d/conda-forge-ci-setup-activate.sh"
+
+# Export CONDA_OVERRIDE_CUDA to allow __cuda to be detected on CI systems without GPUs
+CUDA_VERSION="$(cat ${CI_SUPPORT}/${CONFIG}.yaml | shyaml get-value cuda_compiler_version.0 None)"
+if [[ "$CUDA_VERSION" != "None" ]]; then
+ export CONDA_OVERRIDE_CUDA="${CUDA_VERSION}"
+ echo "export CONDA_OVERRIDE_CUDA='${CONDA_OVERRIDE_CUDA}'" >> "${CONDA_PREFIX}/etc/conda/activate.d/conda-forge-ci-setup-activate.sh"
+fi
+
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+source ${SCRIPT_DIR}/cross_compile_support.sh
+
+if [ -f ${CI_SUPPORT}/${CONFIG}.yaml ]; then
+ need_osx_sdk=$(cat ${CI_SUPPORT}/${CONFIG}.yaml | shyaml get-value MACOSX_DEPLOYMENT_TARGET.0 0)
+ if [[ "$need_osx_sdk" != "0" ]]; then
+ OSX_SDK_DIR=$(mktemp -d)
+ source ${SCRIPT_DIR}/download_osx_sdk.sh
+ fi
+fi
+
+conda info
+conda config --env --show-sources
+conda list --show-channel-urls
diff --git a/recipe/run_conda_forge_build_setup_osx b/recipe/run_conda_forge_build_setup_osx
new file mode 100755
index 000000000..b21dbe463
--- /dev/null
+++ b/recipe/run_conda_forge_build_setup_osx
@@ -0,0 +1,73 @@
+#!/bin/bash
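+# Set up a conda-forge CI build on macOS: configure conda, set CPU_COUNT, record the
+# CI provider in the .ci_support config, source the cross-compilation and macOS SDK
+# helpers, and prune unneeded tool directories from PATH on Azure.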
+
+# 2 cores available on Travis CI macOS workers: https://docs.travis-ci.com/user/ci-environment/#Virtualization-environments
+# CPU_COUNT is passed through conda build: https://github.com/conda/conda-build/pull/1149
+export CPU_COUNT="${CPU_COUNT:-2}"
+
+export PYTHONUNBUFFERED=1
+
+conda config --env --set show_channel_urls true
+conda config --env --set auto_update_conda false
+conda config --env --set add_pip_as_python_dependency false
+# Otherwise packages that don't explicitly pin openssl in their requirements
+# are forced to the newest OpenSSL version, even if their dependencies don't
+# support it.
+conda config --env --append aggressive_update_packages ca-certificates # add something to make sure the key exists
+conda config --env --remove-key aggressive_update_packages
+conda config --env --append aggressive_update_packages ca-certificates
+conda config --env --append aggressive_update_packages certifi
+
+# Need strict channel priority for pypy, as defaults is not fixed,
+# but people can turn this off
+conda config --env --set channel_priority $(cat ./conda-forge.yml | shyaml get-value channel_priority strict || echo strict)
+
+# CONDA_PREFIX might be unset
+export CONDA_PREFIX="${CONDA_PREFIX:-$(conda info --json | jq -r .root_prefix)}"
+
+mkdir -p "${CONDA_PREFIX}/etc/conda/activate.d"
+echo "export CPU_COUNT='${CPU_COUNT}'" > "${CONDA_PREFIX}/etc/conda/activate.d/conda-forge-ci-setup-activate.sh"
+echo "export PYTHONUNBUFFERED='${PYTHONUNBUFFERED}'" >> "${CONDA_PREFIX}/etc/conda/activate.d/conda-forge-ci-setup-activate.sh"
+
+if [[ "${OSX_SDK_DIR:-}" == "" ]]; then
+ OSX_SDK_DIR="$(xcode-select -p)/Platforms/MacOSX.platform/Developer/SDKs"
+ USING_SYSTEM_SDK_DIR=1
+fi
+SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+CI_SUPPORT=$PWD/.ci_support
+source ${SCRIPT_DIR}/cross_compile_support.sh
+source ${SCRIPT_DIR}/download_osx_sdk.sh
+
+if [[ "$MACOSX_DEPLOYMENT_TARGET" == 10.* && "${USING_SYSTEM_SDK_DIR:-}" == "1" ]]; then
+ # set minimum sdk version to our target
+ plutil -replace MinimumSDKVersion -string ${MACOSX_SDK_VERSION} $(xcode-select -p)/Platforms/MacOSX.platform/Info.plist
+ plutil -replace DTSDKName -string macosx${MACOSX_SDK_VERSION}internal $(xcode-select -p)/Platforms/MacOSX.platform/Info.plist
+fi
+
+if [ ! -z "$CONFIG" ]; then
+ if [ ! -z "$CI" ]; then
+ echo "CI:" >> ${CI_SUPPORT}/${CONFIG}.yaml
+ echo "- ${CI}" >> ${CI_SUPPORT}/${CONFIG}.yaml
+ echo "" >> ${CI_SUPPORT}/${CONFIG}.yaml
+ fi
+ cat ${CI_SUPPORT}/${CONFIG}.yaml
+fi
+
+conda info
+conda config --env --show-sources
+conda list --show-channel-urls
+
+if [[ "${CI:-}" == "azure" ]]; then
+ PATH=$(echo $PATH | sed 's#/Users/runner/.yarn/bin:##g')
+ PATH=$(echo $PATH | sed 's#/Users/runner/Library/Android/sdk/tools:##g')
+ PATH=$(echo $PATH | sed 's#/Users/runner/Library/Android/sdk/platform-tools:##g')
+ PATH=$(echo $PATH | sed 's#/Users/runner/Library/Android/sdk/ndk-bundle:##g')
+ PATH=$(echo $PATH | sed 's#/usr/local/lib/ruby/gems/2.7.0/bin:##g')
+ PATH=$(echo $PATH | sed 's#/usr/local/opt/ruby@2.7/bin:##g')
+ PATH=$(echo $PATH | sed 's#/Users/runner/.cargo/bin:##g')
+ PATH=$(echo $PATH | sed 's#/usr/local/opt/curl/bin:##g')
+ PATH=$(echo $PATH | sed 's#/usr/local/opt/pipx_bin:##g')
+ PATH=$(echo $PATH | sed 's#/Users/runner/.dotnet/tools:##g')
+ PATH=$(echo $PATH | sed 's#/Users/runner/.ghcup/bin:##g')
+ PATH=$(echo $PATH | sed 's#/Users/runner/hostedtoolcache/stack/2.7.3/x64:##g')
+ export PATH
+fi
diff --git a/recipe/run_conda_forge_build_setup_win.bat b/recipe/run_conda_forge_build_setup_win.bat
new file mode 100755
index 000000000..5e5feb075
--- /dev/null
+++ b/recipe/run_conda_forge_build_setup_win.bat
@@ -0,0 +1,123 @@
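+:: Set up a conda-forge CI build on Windows: configure conda, optionally resize the
+:: pagefile, trim problematic directories from PATH, install CUDA when requested, and
+:: write an activation script with the resulting environment variables.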
+
+:: 2 cores available on Appveyor workers: https://www.appveyor.com/docs/build-environment/#build-vm-configurations
+:: CPU_COUNT is passed through conda build: https://github.com/conda/conda-build/pull/1149
+set CPU_COUNT=2
+
+set PYTHONUNBUFFERED=1
+
+conda.exe config --set show_channel_urls true
+conda.exe config --set auto_update_conda false
+conda.exe config --set add_pip_as_python_dependency false
+:: Otherwise packages that don't explicitly pin openssl in their requirements
+:: are forced to the newest OpenSSL version, even if their dependencies don't
+:: support it.
+conda.exe config --env --append aggressive_update_packages ca-certificates
+conda.exe config --env --remove-key aggressive_update_packages
+conda.exe config --env --append aggressive_update_packages ca-certificates
+conda.exe config --env --append aggressive_update_packages certifi
+
+(type conda-forge.yml | shyaml get-value channel_priority strict || echo strict) > tmpFile
+set /p channel_priority= < tmpFile
+del tmpFile
+conda.exe config --set channel_priority %channel_priority%
+
+:: Set the conda-build working directory to a smaller path
+if "%CONDA_BLD_PATH%" == "" (
+ set "CONDA_BLD_PATH=C:\\bld\\"
+)
+
+:: Increase pagefile size, cf. https://github.com/conda-forge/conda-forge-ci-setup-feedstock/issues/155
+:: Both in the recipe and in the final package, this script is co-located with SetPageFileSize.ps1, see meta.yaml
+set ThisScriptsDirectory=%~dp0
+set EntryPointPath=%ThisScriptsDirectory%SetPageFileSize.ps1
+:: Only run if SET_PAGEFILE is set; EntryPointPath needs to be set outside if-condition when not using EnableDelayedExpansion.
+if "%SET_PAGEFILE%" NEQ "" (
+ if "%CI%" == "azure" (
+ REM use a different drive than the CONDA_BLD_PATH location for the pagefile
+ if "%CONDA_BLD_PATH%" == "C:\\bld\\" (
+ echo CONDA_BLD_PATH=%CONDA_BLD_PATH%; Setting pagefile size to 8GB on D:
+ REM Inspired by:
+ REM https://blog.danskingdom.com/allow-others-to-run-your-powershell-scripts-from-a-batch-file-they-will-love-you-for-it/
+ REM Drive-letter needs to be escaped in quotes
+ PowerShell -NoProfile -ExecutionPolicy Bypass -Command "& '%EntryPointPath%' -MinimumSize 8GB -MaximumSize 8GB -DiskRoot \"D:\""
+ )
+ if "%CONDA_BLD_PATH%" == "D:\\bld\\" (
+ echo CONDA_BLD_PATH=%CONDA_BLD_PATH%; Setting pagefile size to 8GB on C:
+ PowerShell -NoProfile -ExecutionPolicy Bypass -Command "& '%EntryPointPath%' -MinimumSize 8GB -MaximumSize 8GB -DiskRoot \"C:\""
+ )
+ )
+)
+
+call conda activate base
+
+if "%CI%" == "" (
+ echo "Not running on CI"
+) else (
+ echo CI: >> .ci_support\%CONFIG%.yaml
+ echo - %CI% >> .ci_support\%CONFIG%.yaml
+)
+
+:: Remove some directories from PATH
+set "PATH=%PATH:C:\ProgramData\Chocolatey\bin;=%"
+set "PATH=%PATH:C:\Program Files (x86)\sbt\bin;=%"
+set "PATH=%PATH:C:\Rust\.cargo\bin;=%"
+set "PATH=%PATH:C:\Program Files\Git\usr\bin;=%"
+set "PATH=%PATH:C:\Program Files\Git\cmd;=%"
+set "PATH=%PATH:C:\Program Files\Git\mingw64\bin;=%"
+set "PATH=%PATH:C:\Program Files (x86)\Subversion\bin;=%"
+set "PATH=%PATH:C:\Program Files\CMake\bin;=%"
+set "PATH=%PATH:C:\Program Files\OpenSSL\bin;=%"
+set "PATH=%PATH:C:\Strawberry\c\bin;=%"
+set "PATH=%PATH:C:\Strawberry\perl\bin;=%"
+set "PATH=%PATH:C:\Strawberry\perl\site\bin;=%"
+set "PATH=%PATH:c:\tools\php;=%"
+
+:: On azure, there are libcrypto*.dll & libssl*.dll under
+:: C:\Windows\System32, which should not be there (no vendor dlls in windows folder).
+:: They would be found before the openssl libs of the conda environment, so we delete them.
+if defined CI (
+ DEL C:\Windows\System32\libcrypto-1_1-x64.dll || (Echo Ignoring failure to delete C:\Windows\System32\libcrypto-1_1-x64.dll)
+ DEL C:\Windows\System32\libssl-1_1-x64.dll || (Echo Ignoring failure to delete C:\Windows\System32\libssl-1_1-x64.dll)
+ DEL C:\Windows\System32\msmpi.dll || (Echo Ignoring failure to delete C:\Windows\System32\msmpi.dll)
+ DEL C:\Windows\System32\msmpires.dll || (Echo Ignoring failure to delete C:\Windows\System32\msmpires.dll)
+)
+
+:: Turn paths like C:\hostedtoolcache\windows\Ruby\2.5.7\x64\bin into garbage by
+:: stripping "ostedtoolcache", so those tools are no longer found
+set "PATH=%PATH:ostedtoolcache=%"
+
+:: Install CUDA drivers if needed
+for %%i in ("%~dp0.") do set "SCRIPT_DIR=%%~fi"
+<.ci_support\%CONFIG%.yaml shyaml get-value cuda_compiler_version.0 None > cuda.version
+ "%CONDA_PREFIX%\etc\conda\activate.d\conda-forge-ci-setup-activate.bat"
+echo set "CPU_COUNT=%CPU_COUNT%" >> "%CONDA_PREFIX%\etc\conda\activate.d\conda-forge-ci-setup-activate.bat"
+echo set "PYTHONUNBUFFERED=%PYTHONUNBUFFERED%" >> "%CONDA_PREFIX%\etc\conda\activate.d\conda-forge-ci-setup-activate.bat"
+echo set "PATH=%PATH%" >> "%CONDA_PREFIX%\etc\conda\activate.d\conda-forge-ci-setup-activate.bat"
+if not "%CUDA_PATH%" == "" (
+ echo set "CUDA_PATH=%CUDA_PATH%" >> "%CONDA_PREFIX%\etc\conda\activate.d\conda-forge-ci-setup-activate.bat"
+ echo set "CUDA_HOME=%CUDA_PATH%" >> "%CONDA_PREFIX%\etc\conda\activate.d\conda-forge-ci-setup-activate.bat"
+ REM Export CONDA_OVERRIDE_CUDA to allow __cuda to be detected on CI systems without GPUs
+ echo set "CONDA_OVERRIDE_CUDA=%CONDA_OVERRIDE_CUDA%" >> "%CONDA_PREFIX%\etc\conda\activate.d\conda-forge-ci-setup-activate.bat"
+)
+
+conda.exe info
+conda.exe config --show-sources
+conda.exe list --show-channel-urls
diff --git a/recipe/setup.py b/recipe/setup.py
new file mode 100644
index 000000000..54bd1c3e7
--- /dev/null
+++ b/recipe/setup.py
@@ -0,0 +1,45 @@
+import os
+from setuptools import setup, find_packages
+
+__version__ = "0.0.1"
+
+if "RECIPE_DIR" in os.environ:
+ pth = os.path.join(os.environ["RECIPE_DIR"], "meta.yaml")
+else:
+ pth = os.path.join(os.path.dirname(__file__), "meta.yaml")
+
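+# Extract the package version from the '{% set version = "..." %}' jinja line in
+# meta.yaml, falling back to the default above if it cannot be found.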
+if os.path.exists(pth):
+ with open(pth, "r") as fp:
+ for line in fp.readlines():
+ if line.startswith("{% set version"):
+ __version__ = eval(
+ line
+ .strip()
+ .split("=")[1]
+ .strip()
+ .replace("%}", "")
+ .strip()
+ )
+ break
+
+setup(
+ name="conda_forge_ci_setup",
+ version=__version__,
+ description="conda-forge-ci-utils",
+ author="conda-forge/core",
+ author_email="conda-forge/core@github.com",
+ url="https://conda-forge.org",
+ packages=find_packages(),
+ entry_points={
+ "console_scripts": [
+ "ff_ci_pr_build = conda_forge_ci_setup.ff_ci_pr_build:main",
+ "upload_or_check_non_existence = conda_forge_ci_setup.upload_or_check_non_existence:main", # noqa
+ "setup_conda_rc = conda_forge_ci_setup.build_utils:setup_conda_rc",
+ "upload_package = conda_forge_ci_setup.build_utils:upload_package",
+ "mangle_compiler = conda_forge_ci_setup.build_utils:mangle_compiler", # noqa
+ "make_build_number = conda_forge_ci_setup.build_utils:make_build_number", # noqa
+ "mangle_homebrew = conda_forge_ci_setup.mangle_homebrew:main",
+ "validate_recipe_outputs = conda_forge_ci_setup.feedstock_outputs:main", # noqa
+ ]
+ },
+)