Showing 2 changed files with 293 additions and 0 deletions.
.github/workflows/pipeline_local.yaml
@@ -0,0 +1,124 @@
#------------------------------------------------------------------------------#
name: Pipeline Local
#------------------------------------------------------------------------------#
# Global workflow environment variables
env:
  EAGER_CONDA_ENV: "nf-core-eager-2.2.0dev"
  EAGER_NF_REV: "7b51863957"
  PHYLO_CONDA_ENV: "plague-phylogeography-0.1.4dev"
  CONDA_ENVS_PATH: "/home/runner/miniconda/envs:/usr/share/miniconda/envs"
  CONDA_PKGS_DIRS: "/home/runner/miniconda/pkgs"
  GH_RESOURCES: "--max_memory 6.GB --max_cpus 2"
  SQLITE_DB_PATH: "/home/runner/.nextflow/assets/${{github.repository}}/results/ncbimeta_db/update/latest/output/database/yersinia_pestis_db.sqlite"
#------------------------------------------------------------------------------#
# Workflow conditions
on:
  push:
    branches:
      - '*'
    paths:
      - '.github/workflows/pipeline_local.yaml'
      - 'main.nf'
      - 'nextflow.config'
      - 'custom/local_data_eager.tsv'
  pull_request:
    branches:
      - '*'
  release:
    types: [published]
#------------------------------------------------------------------------------#
jobs:
  #----------------------------------------------------------------------------#
  # Install dependencies
  local:
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      #------------------------------------------------------------------------#
      # Checkout Repository
      - name: checkout repo
        uses: actions/checkout@v2
      # Install nextflow
      - name: install nextflow
        run: |
          wget -qO- get.nextflow.io | bash
          sudo mv nextflow /usr/local/bin/
      # Setup conda
      - name: setup conda
        uses: goanpeca/setup-miniconda@v1
        with:
          auto-update-conda: true
      #------------------------------------------------------------------------#
      # Restore (cache) conda environments
      - name: cache eager env
        uses: actions/cache@v2
        with:
          path: /home/runner/miniconda/envs/nf-core-eager-2.2.0dev
          key: eager-env-${{ runner.os }}-7b51863957

      - name: cache plague-phylogeography env
        uses: actions/cache@v2
        with:
          path: /home/runner/miniconda/envs/plague-phylogeography-0.1.4dev
          key: plague-phylogeography-env-${{ runner.os }}-${{ hashFiles('environment.yaml') }}

      - name: cache nextstrain env
        uses: actions/cache@v2
        with:
          path: /home/runner/miniconda/envs/nextstrain-8.0.0
          key: nextstrain-env-${{ runner.os }}-${{ hashFiles('config/nextstrain.yaml') }}
      #------------------------------------------------------------------------#
      # Check conda environments
      - name: check cache
        run: conda info --envs
      #------------------------------------------------------------------------#
      # Download pipelines and install
      - name: install
        shell: bash -l {0}
        run: scripts/install.sh ${{github.repository}} ${{ github.sha }}
      #------------------------------------------------------------------------#
      # Analyze Local Data
      - name: pipeline local
        shell: bash -l {0}
        run: |
          conda activate ${PHYLO_CONDA_ENV}
          nextflow run -r ${{ github.sha }} ${{github.repository}} \
            --skip_assembly_download \
            --skip_outgroup_download \
            --skip_sra_download \
            --eager_tsv "custom/local_data_eager.tsv" \
            --assembly_local "custom/*.fna" \
            --outdir test \
            ${GH_RESOURCES}
          conda deactivate
      #------------------------------------------------------------------------#
      # Artifact Upload
      - name: artifact multiqc-eager
        uses: actions/upload-artifact@v2
        with:
          name: multiqc-eager
          path: test/eager/MultiQC/

      - name: artifact snippy-pairwise
        uses: actions/upload-artifact@v2
        with:
          name: snippy-pairwise
          path: test/snippy_pairwise/output10X/

      - name: artifact iqtree
        uses: actions/upload-artifact@v2
        with:
          name: iqtree
          path: test/iqtree/

      - name: artifact multiqc
        uses: actions/upload-artifact@v2
        with:
          name: multiqc
          path: test/multiqc/multiqc_report.html

      - name: artifact trace
        uses: actions/upload-artifact@v2
        with:
          name: trace
          path: test/trace/
process_docs.py
@@ -0,0 +1,169 @@
#!/usr/bin/env python3
"""
@author: Katherine Eaton
Generate process docs for ReadTheDocs from nextflow file.
./process_docs.py --nf ../main.nf --rst ../docs/process/process_all.rst
"""

# -----------------------------------------------------------------------------#
#                            Modules and Packages                              #
# -----------------------------------------------------------------------------#
import argparse  # Command-line argument parsing
import os  # File path checking
import sys  # Exit with an error code

# This program should only be called from the command-line
if __name__ != "__main__":
    quit()

# -----------------------------------------------------------------------------#
#                              Argument Parsing                                #
# -----------------------------------------------------------------------------#
parser = argparse.ArgumentParser(
    description="Generate process docs for ReadTheDocs from nextflow file.",
    add_help=True
)

parser.add_argument(
    "--nf",
    help="Path to the nextflow pipeline file.",
    action="store",
    dest="nfPath",
    required=True,
)

parser.add_argument(
    "--rst",
    help="Path to the output rst docs file.",
    action="store",
    dest="rstPath",
    required=True,
)

# Retrieve user parameters
args = vars(parser.parse_args())

nf_path = args["nfPath"]
rst_path = args["rstPath"]

# -----------------------------------------------------------------------------#
#                               Error Catching                                 #
# -----------------------------------------------------------------------------#
# Check if NF file exists
if not os.path.exists(nf_path):
    print("An error occurred while trying to open", nf_path)
    sys.exit(1)

# -----------------------------------------------------------------------------#
#                           Constants and Variables                            #
# -----------------------------------------------------------------------------#
rst_file = open(rst_path, "w")
H1_CHAR = "-"
H2_CHAR = "*"
H3_CHAR = "-"
TABLE_CHAR = "="
TABLE_COL_WIDTH = 40
# -----------------------------------------------------------------------------#
#                                  Processing                                  #
# -----------------------------------------------------------------------------#
with open(nf_path, "r") as nf_file:
    for line in nf_file:
        line = line.strip()
        # Skip everything that is not process
        if line.startswith("process"):
            # Parse the process lines
            split_process = line.split(" ")
            process_name = split_process[1].strip("{")
            format_process_name = process_name.replace("_", " ").title()
            rst_file.write("\n"
                           + format_process_name
                           + "\n"
                           + H2_CHAR * len(format_process_name)
                           + "\n\n")
            line = nf_file.readline().strip()
            # Begin the process docstring
            if line == ("/*"):
                # Write the process description.
                process_description = nf_file.readline().strip()
                rst_file.write(process_description + "\n")
                # Process the IO docs
                line = nf_file.readline().strip()

                io_doc_exists = False
                while not line == ("*/"):
                    # Blank lines signal table line
                    if not line:
                        rst_file.write(TABLE_CHAR * TABLE_COL_WIDTH
                                       + " "
                                       + TABLE_CHAR * TABLE_COL_WIDTH
                                       + " "
                                       + TABLE_CHAR * TABLE_COL_WIDTH
                                       + "\n")
                        line = nf_file.readline().strip()
                        continue
                    if line in ["Input:", "Output:", "Publish:"]:
                        io_doc_exists = True
                        io_section = line
                        # Write input column headers
                        rst_file.write(io_section
                                       + " " * (TABLE_COL_WIDTH - len(io_section) + 1)
                                       + "Type"
                                       + " " * (TABLE_COL_WIDTH - len("Type") + 1)
                                       + "Description"
                                       + " " * (TABLE_COL_WIDTH - len("Description") + 1)
                                       + "\n"
                                       + TABLE_CHAR * TABLE_COL_WIDTH
                                       + " "
                                       + TABLE_CHAR * TABLE_COL_WIDTH
                                       + " "
                                       + TABLE_CHAR * TABLE_COL_WIDTH
                                       + "\n")
                        line = nf_file.readline().strip()
                        continue
                    io_split = line.split("(")
                    # Retrieve the input value
                    io_name = io_split[0]
                    # Retrieve the input type
                    io_split = io_split[1].split(")")
                    io_type = io_split[0]
                    # Retrieve the input description
                    io_split = io_split[1].split(": ")
                    io_desc = io_split[1]
                    # Figure out where to put the process links
                    if "process" in io_desc:
                        # process :ref:`ncbimeta_db_update<NCBImeta DB Update>`
                        io_desc_split = io_desc.split("process ")
                        io_process_name = io_desc_split[1].strip(".")
                        io_process_link = ("process :ref:`"
                                           + io_process_name
                                           + "<"
                                           + io_process_name.title()
                                           + ">`")
                        io_desc = io_desc_split[0] + io_process_link
                    rst_file.write(io_name
                                   + " " * (TABLE_COL_WIDTH - len(io_name) + 1)
                                   + io_type
                                   + " " * (TABLE_COL_WIDTH - len(io_type) + 1)
                                   + io_desc
                                   + " " * (TABLE_COL_WIDTH - len(io_desc) + 1)
                                   + "\n")
                    # Read in the next IO line
                    line = nf_file.readline().strip()
                # Read past the ending docstring
                if line == "*/":
                    line = nf_file.readline().strip()

            # Process the script code
            if line == "script:" or line == "shell:":
                script_type = line.strip(":")
                # Skip the current line which is """ or '''
                line = nf_file.readline().strip()
                # Check if the immediate next line is """ or '''
                line = nf_file.readline().strip()
                if line != "'''" and line != '"""':
                    rst_file.write("\n" + "**" + script_type + "**::" + "\n")
                while line != "'''" and line != '"""':
                    rst_file.write("\t" + line + "\n")
                    line = nf_file.readline().strip()
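
For reference, the parser above implies a particular layout for each process in main.nf: the line after /* is a one-line description, the Input:/Output:/Publish: sections list entries as name(type): description (a description ending in "process X." is rewritten as a :ref: link), blank lines close a table section, and a script: or shell: block delimited by triple quotes follows immediately after the closing */. A minimal sketch of that layout, using hypothetical process and channel names rather than ones taken from main.nf:

process example_alignment{
  /*
  Align sequencing reads against the reference genome.

  Input:
  ch_reads(fastq): Trimmed reads from process eager.

  Output:
  ch_bam(bam): Sorted alignments for variant calling.

  Publish:
  example_alignment.bam(bam): Final alignment file.
  */
  script:
  """
  bwa mem reference.fasta reads.fastq | samtools sort -o example_alignment.bam
  """
}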