Manual Hub Push #140

Workflow file for this run

name: Manual Hub Push

on:
  workflow_dispatch:
    inputs:
      release_token:
        description: 'Your release token'
        required: true
      triggered_by:
        description: 'CD | TAG | MANUAL'
        required: false
        default: MANUAL

#on:
#  pull_request:
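
# token-check runs first; hub-release depends on it via "needs"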
jobs:
  token-check:
    runs-on: ubuntu-latest
    steps:
      # the comparison must sit inside a single ${{ }} expression; interpolating
      # both values into a plain string would make the condition always truthy
      - run: echo "success!"
        if: ${{ github.event.inputs.release_token == env.release_token }}
        env:
          release_token: ${{ secrets.CAS_RELEASE_TOKEN }}
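
  # hub-release: prepare one executor per backend (Torch, ONNX, TensorRT) and push it to Jina Hub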
  hub-release:
    needs: token-check
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Set envs and versions
        run: |
          VCS_REF=${{ github.ref }}
          echo "VCS_REF=$VCS_REF" >> $GITHUB_ENV
          echo "Will push $VCS_REF"
          CAS_VERSION=$(sed -n '/^__version__/p' ./server/clip_server/__init__.py | cut -d \' -f2)
          V_CAS_VERSION=v${CAS_VERSION}
          CAS_MINOR_VERSION=${CAS_VERSION%.*}
          CAS_MAJOR_VERSION=${CAS_MINOR_VERSION%.*}
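          # choose the Hub tag aliases depending on how this run was triggered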
if [[ "${{ github.event.inputs.triggered_by }}" == "CD" ]]; then
# on every CD release
echo "TAG_ALIAS=\
-t latest \
" >> $GITHUB_ENV
echo "GPU_TAG_ALIAS=\
-t latest-gpu \
" >> $GITHUB_ENV
elif [[ "${{ github.event.inputs.triggered_by }}" == "TAG" ]]; then
# on every tag release
echo "TAG_ALIAS=\
-t latest \
-t ${CAS_VERSION} \
-t ${CAS_MINOR_VERSION} \
" >> $GITHUB_ENV
echo "GPU_TAG_ALIAS=\
-t latest-gpu \
-t ${CAS_VERSION}-gpu \
-t ${CAS_MINOR_VERSION}-gpu \
" >> $GITHUB_ENV
elif [[ "${{ github.event.inputs.triggered_by }}" == "MANUAL" ]]; then
# on every manual release
echo "TAG_ALIAS=\
-t ${CAS_VERSION} \
" >> $GITHUB_ENV
echo "GPU_TAG_ALIAS=\
-t ${CAS_VERSION}-gpu \
" >> $GITHUB_ENV
else
echo "TAG_ALIAS=\
-t latest \
" >> $GITHUB_ENV
echo "GPU_TAG_ALIAS=\
-t latest-gpu \
" >> $GITHUB_ENV
fi
echo "CAS_VERSION=${CAS_VERSION}" >> $GITHUB_ENV
      - name: Prepare environment
        run: |
          python -m pip install --upgrade jina yq
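      # Each push step below rewrites server/clip_server/__init__.py so that only the backend
      # being published is imported, regenerates config.yml, manifest.yml and requirements.txt,
      # copies the matching Dockerfile, then pushes the executor to Jina Hub with the tag aliases set above.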
      - name: Push Torch Executor
        id: push_torch_executor
        run: |
          # FIX the import issue
          echo -e "\
          __version__ = '$CAS_VERSION'
          from .executors.clip_torch import CLIPEncoder\n\
          " > server/clip_server/__init__.py
          echo -e "\
          jtype: CLIPEncoder\n\
          metas:\n\
            py_modules:\n\
              - clip_server/__init__.py\n\
          " > server/config.yml
          echo -e "\
          manifest_version: 1\n\
          name: CLIPTorchEncoder\n\
          description: Embed images and sentences into fixed-length vectors with CLIP\n\
          url: https://github.com/jina-ai/clip-as-service\n\
          keywords: [clip, clip-model, clip-as-service, pytorch]\n\
          " > server/manifest.yml
          python scripts/get-requirements.py "" server/requirements.txt
          cp .github/README-exec/torch.readme.md server/README.md
          exec_name=`yq -r .name server/manifest.yml`
          echo executor name is $exec_name
          cp Dockerfiles/base.Dockerfile server/Dockerfile
          JINA_AUTH_TOKEN=${{secrets.JINAHUB_TOKEN}} jina hub push --force $exec_name --secret ${{secrets.TORCH_EXEC_SECRET}} server ${{env.TAG_ALIAS}}
          cp Dockerfiles/cuda.Dockerfile server/Dockerfile
          JINA_AUTH_TOKEN=${{secrets.JINAHUB_TOKEN}} jina hub push --force $exec_name --secret ${{secrets.TORCH_EXEC_SECRET}} server ${{env.GPU_TAG_ALIAS}}
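      # Same procedure for the ONNX backend; sed switches the Dockerfiles from BACKEND_TAG=torch to BACKEND_TAG=onnx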
      - name: Push Onnx Executor
        id: push_onnx_executor
        run: |
          # FIX the import issue
          echo -e "\
          __version__ = '$CAS_VERSION'
          from .executors.clip_onnx import CLIPEncoder\n\
          " > server/clip_server/__init__.py
          echo -e "\
          jtype: CLIPEncoder\n\
          metas:\n\
            py_modules:\n\
              - clip_server/__init__.py\n\
          " > server/config.yml
          echo -e "\
          manifest_version: 1\n\
          name: CLIPOnnxEncoder\n\
          description: Embed images and sentences into fixed-length vectors with CLIP\n\
          url: https://github.com/jina-ai/clip-as-service\n\
          keywords: [clip, clip-model, clip-as-service, onnx, onnx-runtime]\n\
          " > server/manifest.yml
          python scripts/get-requirements.py onnx server/requirements.txt
          cp .github/README-exec/onnx.readme.md server/README.md
          exec_name=`yq -r .name server/manifest.yml`
          echo executor name is $exec_name
          cp Dockerfiles/base.Dockerfile server/Dockerfile
          sed -i 's/ARG BACKEND_TAG=torch/ARG BACKEND_TAG=onnx/g' server/Dockerfile
          JINA_AUTH_TOKEN=${{secrets.JINAHUB_TOKEN}} jina hub push --force $exec_name --secret ${{secrets.ONNX_EXEC_SECRET}} server ${{env.TAG_ALIAS}}
          cp Dockerfiles/cuda.Dockerfile server/Dockerfile
          sed -i 's/ARG BACKEND_TAG=torch/ARG BACKEND_TAG=onnx/g' server/Dockerfile
          JINA_AUTH_TOKEN=${{secrets.JINAHUB_TOKEN}} jina hub push --force $exec_name --secret ${{secrets.ONNX_EXEC_SECRET}} server ${{env.GPU_TAG_ALIAS}}
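      # TensorRT backend is prepared the same way, but the hub push itself is still commented out (see FIXME below)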
      - name: Push TensorRT Executor
        id: push_tensorrt_executor
        run: |
          # FIX the import issue
          echo -e "\
          __version__ = '$CAS_VERSION'
          from .executors.clip_tensorrt import CLIPEncoder\n\
          " > server/clip_server/__init__.py
          echo -e "\
          jtype: CLIPEncoder\n\
          metas:\n\
            py_modules:\n\
              - clip_server/__init__.py\n\
          " > server/config.yml
          echo -e "\
          manifest_version: 1\n\
          name: CLIPTensorRTEncoder\n\
          description: Embed images and sentences into fixed-length vectors with CLIP\n\
          url: https://github.com/jina-ai/clip-as-service\n\
          keywords: [clip, clip-model, clip-as-service, onnx, tensorrt]\n\
          " > server/manifest.yml
          python scripts/get-requirements.py tensorrt server/requirements.txt
          cp Dockerfiles/tensorrt.Dockerfile server/Dockerfile
          exec_name=`yq -r .name server/manifest.yml`
          echo executor name is $exec_name
          # FIXME: hub upload disabled while debugging
          # JINA_AUTH_TOKEN=${{secrets.JINAHUB_TOKEN}} jina hub push --force $exec_name --secret ${{secrets.TENSORRT_EXEC_SECRET}} server ${{env.TAG_ALIAS}}