diff --git a/.editorconfig b/.editorconfig index 95549501..b6b31907 100644 --- a/.editorconfig +++ b/.editorconfig @@ -8,12 +8,9 @@ trim_trailing_whitespace = true indent_size = 4 indent_style = space -[*.{yml,yaml}] +[*.{md,yml,yaml,html,css,scss,js}] indent_size = 2 -[*.json] -insert_final_newline = unset - # These files are edited and tested upstream in nf-core/modules [/modules/nf-core/**] charset = unset diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index b62cd67b..de10fd6d 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -15,8 +15,7 @@ Contributions to the code are even more welcome ;) If you'd like to write some code for nf-core/quantms, the standard workflow is as follows: -1. Check that there isn't already an issue about your idea in the [nf-core/quantms issues](https://github.com/nf-core/quantms/issues) to avoid duplicating work - * If there isn't one already, please create one so that others know you're working on this +1. Check that there isn't already an issue about your idea in the [nf-core/quantms issues](https://github.com/nf-core/quantms/issues) to avoid duplicating work. If there isn't one already, please create one so that others know you're working on this 2. [Fork](https://help.github.com/en/github/getting-started-with-github/fork-a-repo) the [nf-core/quantms repository](https://github.com/nf-core/quantms) to your GitHub account 3. Make the necessary changes / additions within your forked repository following [Pipeline conventions](#pipeline-contribution-conventions) 4. Use `nf-core schema build` and add any new parameters to the pipeline JSON schema (requires [nf-core tools](https://github.com/nf-core/tools) >= 1.10). @@ -49,9 +48,9 @@ These tests are run both with the latest available version of `Nextflow` and als :warning: Only in the unlikely and regretful event of a release happening with a bug. -* On your own fork, make a new branch `patch` based on `upstream/master`. 
-* Fix the bug, and bump version (X.Y.Z+1). -* A PR should be made on `master` from patch to directly this particular bug. +- On your own fork, make a new branch `patch` based on `upstream/master`. +- Fix the bug, and bump version (X.Y.Z+1). +- A PR should be made on `master` from patch to directly this particular bug. ## Getting help @@ -73,7 +72,7 @@ If you wish to contribute a new step, please use the following coding standards: 6. Add sanity checks and validation for all relevant parameters. 7. Perform local tests to validate that the new code works as expected. 8. If applicable, add a new test command in `.github/workflow/ci.yml`. -9. Update MultiQC config `assets/multiqc_config.yaml` so relevant suffixes, file name clean up and module plots are in the appropriate order. If applicable, add a [MultiQC](https://https://multiqc.info/) module. +9. Update MultiQC config `assets/multiqc_config.yml` so relevant suffixes, file name clean up and module plots are in the appropriate order. If applicable, add a [MultiQC](https://https://multiqc.info/) module. 10. Add a description of the output files and if relevant any appropriate images from the MultiQC report to `docs/output.md`. ### Default values @@ -92,8 +91,8 @@ The process resources can be passed on to the tool dynamically within the proces Please use the following naming schemes, to make it easy to understand what is going where. 
-* initial process channel: `ch_output_from_` -* intermediate and terminal channels: `ch__for_` +- initial process channel: `ch_output_from_` +- intermediate and terminal channels: `ch__for_` ### Nextflow version bumping diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 1c3996fa..34a329f8 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -2,7 +2,6 @@ name: Bug report description: Report something that is broken or incorrect labels: bug body: - - type: markdown attributes: value: | diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index d80cfc57..809d25a8 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -10,16 +10,15 @@ Remember that PRs should be made against the dev branch, unless you're preparing Learn more about contributing: [CONTRIBUTING.md](https://github.com/nf-core/quantms/tree/master/.github/CONTRIBUTING.md) --> - ## PR checklist - [ ] This comment contains a description of changes (with reason). - [ ] If you've fixed a bug or added code that should be tested, add tests! - - [ ] If you've added a new tool - have you followed the pipeline conventions in the [contribution docs](https://github.com/nf-core/quantms/tree/master/.github/CONTRIBUTING.md) - - [ ] If necessary, also make a PR on the nf-core/quantms _branch_ on the [nf-core/test-datasets](https://github.com/nf-core/test-datasets) repository. + - [ ] If you've added a new tool - have you followed the pipeline conventions in the [contribution docs](https://github.com/nf-core/quantms/tree/master/.github/CONTRIBUTING.md) + - [ ] If necessary, also make a PR on the nf-core/quantms _branch_ on the [nf-core/test-datasets](https://github.com/nf-core/test-datasets) repository. - [ ] Make sure your code lints (`nf-core lint`). -- [ ] Ensure the test suite passes (`nextflow run . -profile test,docker` --outdir `). 
+- [ ] Ensure the test suite passes (`nextflow run . -profile test,docker --outdir <OUTDIR>`). - [ ] Usage Documentation in `docs/usage.md` is updated. - [ ] Output Documentation in `docs/output.md` is updated. - [ ] `CHANGELOG.md` is updated. diff --git a/.github/workflows/awstest.yml b/.github/workflows/awstest.yml index 468e8e32..90119c07 100644 --- a/.github/workflows/awstest.yml +++ b/.github/workflows/awstest.yml @@ -10,6 +10,7 @@ jobs: if: github.repository == 'nf-core/quantms' runs-on: ubuntu-latest steps: + # Launch workflow using Tower CLI tool action - name: Launch workflow via tower uses: nf-core/tower-action@v3 diff --git a/.github/workflows/branch.yml b/.github/workflows/branch.yml index 729c489a..7068c251 100644 --- a/.github/workflows/branch.yml +++ b/.github/workflows/branch.yml @@ -13,8 +13,7 @@ jobs: - name: Check PRs if: github.repository == 'nf-core/quantms' run: | - { [[ ${{github.event.pull_request.head.repo.full_name }} == nf-core/quantms ]] && [[ $GITHUB_HEAD_REF = "dev" ]]; } || [[ $GITHUB_HEAD_REF == "patch" ]] - + { [[ ${{github.event.pull_request.head.repo.full_name }} == nf-core/quantms ]] && [[ $GITHUB_HEAD_REF = "dev" ]]; } || [[ $GITHUB_HEAD_REF == "patch" ]] # If the above check failed, post a comment on the PR explaining the failure # NOTE - this doesn't currently work if the PR is coming from a fork, due to limitations in GitHub actions secrets @@ -43,4 +42,4 @@ jobs: Thanks again for your contribution!
repo-token: ${{ secrets.GITHUB_TOKEN }} allow-repeats: false - +# diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 30f52d7a..ea24604a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -24,12 +24,12 @@ jobs: # Nextflow versions include: # Test pipeline minimum Nextflow version - - NXF_VER: '21.10.3' - NXF_EDGE: '' + - NXF_VER: "21.10.3" + NXF_EDGE: "" # Test latest edge release of Nextflow - - NXF_VER: '' - NXF_EDGE: '1' - test_profile: ['test', 'test_lfq', 'test_dia'] + - NXF_VER: "" + NXF_EDGE: "1" + test_profile: ["test", "test_lfq", "test_dia"] steps: - name: Check out pipeline code uses: actions/checkout@v2 diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml index fda934c0..e9cf5de3 100644 --- a/.github/workflows/linting.yml +++ b/.github/workflows/linting.yml @@ -1,6 +1,7 @@ name: nf-core linting # This workflow is triggered on pushes and PRs to the repository. -# It runs the `nf-core lint` and markdown lint tests to ensure that the code meets the nf-core guidelines +# It runs the `nf-core lint` and markdown lint tests to ensure +# that the code meets the nf-core guidelines. on: push: pull_request: @@ -8,42 +9,6 @@ on: types: [published] jobs: - Markdown: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions/setup-node@v2 - - name: Install markdownlint - run: npm install -g markdownlint-cli - - name: Run Markdownlint - run: markdownlint . - - # If the above check failed, post a comment on the PR explaining the failure - - name: Post PR comment - if: failure() - uses: mshick/add-pr-comment@v1 - with: - message: | - ## Markdown linting is failing - - To keep the code consistent with lots of contributors, we run automated code consistency checks. 
- To fix this CI test, please run: - - * Install `markdownlint-cli` - * On Mac: `brew install markdownlint-cli` - * Everything else: [Install `npm`](https://www.npmjs.com/get-npm) then [install `markdownlint-cli`](https://www.npmjs.com/package/markdownlint-cli) (`npm install -g markdownlint-cli`) - * Fix the markdown errors - * Automatically: `markdownlint . --fix` - * Manually resolve anything left from `markdownlint .` - - Once you push these changes the test should pass, and you can hide this comment :+1: - - We highly recommend setting up markdownlint in your code editor so that this formatting is done automatically on save. Ask about it on Slack for help! - - Thanks again for your contribution! - repo-token: ${{ secrets.GITHUB_TOKEN }} - allow-repeats: false - EditorConfig: runs-on: ubuntu-latest steps: @@ -55,49 +20,24 @@ jobs: run: npm install -g editorconfig-checker - name: Run ECLint check - run: editorconfig-checker -exclude README.md $(git ls-files | grep -v test) + run: editorconfig-checker -exclude README.md $(find .* -type f | grep -v '.git\|.py\|.md\|json\|yml\|yaml\|html\|css\|work\|.nextflow\|build\|nf_core.egg-info\|log.txt\|Makefile') - YAML: + Prettier: runs-on: ubuntu-latest steps: - - name: Checkout - uses: actions/checkout@master - - name: 'Yamllint' - uses: karancode/yamllint-github-action@master - with: - yamllint_file_or_dir: '.' - yamllint_config_filepath: '.yamllint.yml' - - # If the above check failed, post a comment on the PR explaining the failure - - name: Post PR comment - if: failure() - uses: mshick/add-pr-comment@v1 - with: - message: | - ## YAML linting is failing - - To keep the code consistent with lots of contributors, we run automated code consistency checks. 
- To fix this CI test, please run: - - * Install `yamllint` - * Install `yamllint` following [this](https://yamllint.readthedocs.io/en/stable/quickstart.html#installing-yamllint) - instructions or alternative install it in your [conda environment](https://anaconda.org/conda-forge/yamllint) - * Fix the markdown errors - * Run the test locally: `yamllint $(find . -type f -name "*.yml" -o -name "*.yaml") -c ./.yamllint.yml` - * Fix any reported errors in your YAML files + - uses: actions/checkout@v2 - Once you push these changes the test should pass, and you can hide this comment :+1: + - uses: actions/setup-node@v2 - We highly recommend setting up yaml-lint in your code editor so that this formatting is done automatically on save. Ask about it on Slack for help! + - name: Install Prettier + run: npm install -g prettier - Thanks again for your contribution! - repo-token: ${{ secrets.GITHUB_TOKEN }} - allow-repeats: false + - name: Run Prettier --check + run: prettier --check ${GITHUB_WORKSPACE} nf-core: runs-on: ubuntu-latest steps: - - name: Check out pipeline code uses: actions/checkout@v2 @@ -110,8 +50,8 @@ jobs: - uses: actions/setup-python@v1 with: - python-version: '3.6' - architecture: 'x64' + python-version: "3.6" + architecture: "x64" - name: Install dependencies run: | @@ -139,3 +79,4 @@ jobs: lint_results.md PR_number.txt +# diff --git a/.github/workflows/linting_comment.yml b/.github/workflows/linting_comment.yml index 44d72994..91c487a1 100644 --- a/.github/workflows/linting_comment.yml +++ b/.github/workflows/linting_comment.yml @@ -1,4 +1,3 @@ - name: nf-core linting comment # This workflow is triggered after the linting action is complete # It posts an automated comment to the PR, even if the PR is coming from a fork @@ -27,4 +26,4 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} number: ${{ steps.pr_number.outputs.pr_number }} path: linting-logs/lint_results.md - +# diff --git a/.gitpod.yml b/.gitpod.yml index b7d4cee1..c452ee93 100644 --- 
a/.gitpod.yml +++ b/.gitpod.yml @@ -2,13 +2,13 @@ image: nfcore/gitpod:latest vscode: extensions: # based on nf-core.nf-core-extensionpack - - codezombiech.gitignore # Language support for .gitignore files + - codezombiech.gitignore # Language support for .gitignore files # - cssho.vscode-svgviewer # SVG viewer - - davidanson.vscode-markdownlint # Markdown/CommonMark linting and style checking for Visual Studio Code - - eamodio.gitlens # Quickly glimpse into whom, why, and when a line or code block was changed - - EditorConfig.EditorConfig # override user/workspace settings with settings found in .editorconfig files - - Gruntfuggly.todo-tree # Display TODO and FIXME in a tree view in the activity bar - - mechatroner.rainbow-csv # Highlight columns in csv files in different colors + - davidanson.vscode-markdownlint # Markdown/CommonMark linting and style checking for Visual Studio Code + - eamodio.gitlens # Quickly glimpse into whom, why, and when a line or code block was changed + - EditorConfig.EditorConfig # override user/workspace settings with settings found in .editorconfig files + - Gruntfuggly.todo-tree # Display TODO and FIXME in a tree view in the activity bar + - mechatroner.rainbow-csv # Highlight columns in csv files in different colors # - nextflow.nextflow # Nextflow syntax highlighting - - oderwat.indent-rainbow # Highlight indentation level - - streetsidesoftware.code-spell-checker # Spelling checker for source code + - oderwat.indent-rainbow # Highlight indentation level + - streetsidesoftware.code-spell-checker # Spelling checker for source code diff --git a/.markdownlint.yml b/.markdownlint.yml deleted file mode 100644 index 9e605fcf..00000000 --- a/.markdownlint.yml +++ /dev/null @@ -1,14 +0,0 @@ -# Markdownlint configuration file -default: true -line-length: false -ul-indent: - indent: 4 -no-duplicate-header: - siblings_only: true -no-inline-html: - allowed_elements: - - img - - p - - kbd - - details - - summary diff --git a/.nf-core.yml 
b/.nf-core.yml index a8df8cd8..778ae193 100644 --- a/.nf-core.yml +++ b/.nf-core.yml @@ -1,6 +1,4 @@ repository_type: pipeline lint: - files_unchanged: - - assets/multiqc_config.yaml files_exist: - conf/igenomes.config diff --git a/.prettierrc.yml b/.prettierrc.yml new file mode 100644 index 00000000..c81f9a76 --- /dev/null +++ b/.prettierrc.yml @@ -0,0 +1 @@ +printWidth: 120 diff --git a/.yamllint.yml b/.yamllint.yml deleted file mode 100644 index f2aa633c..00000000 --- a/.yamllint.yml +++ /dev/null @@ -1,8 +0,0 @@ -extends: default - -rules: - document-start: disable - line-length: disable - truthy: disable - indentation: disable - empty-lines: disable diff --git a/CHANGELOG.md b/CHANGELOG.md index 42a8f816..dfa2cb82 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,29 +9,29 @@ Initial release of nf-core/quantms, created with the [nf-core](https://nf-co.re/ ### `Added` -* New pipeline for DDA-LFQ data analysis -* New pipeline for DDA-ISO data analysis -* New datasets for DDA-LFQ and DDA-ISO data analsysis -* Documentation added for DDA pipeline -* First pipeline for DIA-LFQ data analsysis +- New pipeline for DDA-LFQ data analysis +- New pipeline for DDA-ISO data analysis +- New datasets for DDA-LFQ and DDA-ISO data analysis +- Documentation added for DDA pipeline +- First pipeline for DIA-LFQ data analysis ### `Fixed` -* This is the first release - no reported issues +- This is the first release - no reported issues ### `Dependencies` The pipeline is using Nextflow DSL2, each process will be run with its own [Biocontainer](https://biocontainers.pro/#/registry). This means that on occasion it is entirely possible for the pipeline to be using different versions of the same tool. However, the overall software dependency changes compared to the last release have been listed below for reference.
-| Dependency | Version | -|------------------|--------------| -| `comet` | 2021010 | -| `msgf+` | 2022.01.07 | -| `openms` | 2.8.0 | -| `sdrf-pipelines` | 0.0.21 | -| `percolator` | 3.5 | -| `pmultiqc` | 0.0.10 | -| `luciphor` | 2020_04_03 | -| `dia-nn` | 1.8 | +| Dependency | Version | +| ---------------- | ---------- | +| `comet` | 2021010 | +| `msgf+` | 2022.01.07 | +| `openms` | 2.8.0 | +| `sdrf-pipelines` | 0.0.21 | +| `percolator` | 3.5 | +| `pmultiqc` | 0.0.10 | +| `luciphor` | 2020_04_03 | +| `dia-nn` | 1.8 | ### `Deprecated` diff --git a/CITATIONS.md b/CITATIONS.md index 789ef03d..339d5c35 100644 --- a/CITATIONS.md +++ b/CITATIONS.md @@ -10,48 +10,60 @@ ## Pipeline tools -* [thermorawfileparser](https://pubmed.ncbi.nlm.nih.gov/31755270/) - > Hulstaert N, Shofstahl J, Sachsenberg T, Walzer M, Barsnes H, Martens L, Perez-Riverol Y. ThermoRawFileParser: Modular, Scalable, and Cross-Platform RAW File Conversion. J Proteome Res. 2020 Jan 3;19(1):537-542. doi: 10.1021/acs.jproteome.9b00328. Epub 2019 Dec 6. PMID: 31755270; PMCID: PMC7116465. +- [thermorawfileparser](https://pubmed.ncbi.nlm.nih.gov/31755270/) -* [sdrf-pipelines](https://pubmed.ncbi.nlm.nih.gov/34615866/) - > Dai C, Füllgrabe A, Pfeuffer J, Solovyeva EM, Deng J, Moreno P, Kamatchinathan S, Kundu DJ, George N, Fexova S, Grüning B, Föll MC, Griss J, Vaudel M, Audain E, Locard-Paulet M, Turewicz M, Eisenacher M, Uszkoreit J, Van Den Bossche T, Schwämmle V, Webel H, Schulze S, Bouyssié D, Jayaram S, Duggineni VK, Samaras P, Wilhelm M, Choi M, Wang M, Kohlbacher O, Brazma A, Papatheodorou I, Bandeira N, Deutsch EW, Vizcaíno JA, Bai M, Sachsenberg T, Levitsky LI, Perez-Riverol Y. A proteomics sample metadata representation for multiomics integration and big data analysis. Nat Commun. 2021 Oct 6;12(1):5854. doi: 10.1038/s41467-021-26111-3. PMID: 34615866; PMCID: PMC8494749. + > Hulstaert N, Shofstahl J, Sachsenberg T, Walzer M, Barsnes H, Martens L, Perez-Riverol Y. 
ThermoRawFileParser: Modular, Scalable, and Cross-Platform RAW File Conversion. J Proteome Res. 2020 Jan 3;19(1):537-542. doi: 10.1021/acs.jproteome.9b00328. Epub 2019 Dec 6. PMID: 31755270; PMCID: PMC7116465. -* [OpenMS](https://pubmed.ncbi.nlm.nih.gov/27312411/) - > Röst HL., Sachsenberg T., Aiche S., Bielow C., Weisser H., Aicheler F., Andreotti S., Ehrlich HC., Gutenbrunner P., Kenar E., Liang X., Nahnsen S., Nilse L., Pfeuffer J., Rosenberger G., Rurik M., Schmitt U., Veit J., Walzer M., Wojnar D., Wolski WE., Schilling O., Choudhary JS, Malmström L., Aebersold R., Reinert K., Kohlbacher O. (2016). OpenMS: a flexible open-source software platform for mass spectrometry data analysis. Nature methods, 13(9), 741–748. doi: 10.1038/nmeth.3959. PubMed PMID: 27575624; PubMed Central PMCID: PMC5617107. +- [sdrf-pipelines](https://pubmed.ncbi.nlm.nih.gov/34615866/) -* [DIA-NN](https://pubmed.ncbi.nlm.nih.gov/31768060/) - > Demichev V, Messner CB, Vernardis SI, Lilley KS, Ralser M. DIA-NN: neural networks and interference correction enable deep proteome coverage in high throughput. Nat Methods. 2020 Jan;17(1):41-44. doi: 10.1038/s41592-019-0638-x. Epub 2019 Nov 25. PMID: 31768060; PMCID: PMC6949130. + > Dai C, Füllgrabe A, Pfeuffer J, Solovyeva EM, Deng J, Moreno P, Kamatchinathan S, Kundu DJ, George N, Fexova S, Grüning B, Föll MC, Griss J, Vaudel M, Audain E, Locard-Paulet M, Turewicz M, Eisenacher M, Uszkoreit J, Van Den Bossche T, Schwämmle V, Webel H, Schulze S, Bouyssié D, Jayaram S, Duggineni VK, Samaras P, Wilhelm M, Choi M, Wang M, Kohlbacher O, Brazma A, Papatheodorou I, Bandeira N, Deutsch EW, Vizcaíno JA, Bai M, Sachsenberg T, Levitsky LI, Perez-Riverol Y. A proteomics sample metadata representation for multiomics integration and big data analysis. Nat Commun. 2021 Oct 6;12(1):5854. doi: 10.1038/s41467-021-26111-3. PMID: 34615866; PMCID: PMC8494749. 
-* [MSstats](https://www.ncbi.nlm.nih.gov/pubmed/24794931/) - > Choi M., Chang CY., Clough T., Broudy D., Killeen T., MacLean B., Vitek O. (2014). MSstats: an R package for statistical analysis of quantitative mass spectrometry-based proteomic experiments. Bioinformatics (Oxford, England), 30(17), 2524–2526. doi: 10.1093/bioinformatics/btu305. PubMed PMID: 24794931. +- [OpenMS](https://pubmed.ncbi.nlm.nih.gov/27312411/) -* [Comet](https://www.ncbi.nlm.nih.gov/pubmed/23148064/) - > Eng JK., Jahan TA., Hoopmann MR. (2013). Comet: an open-source MS/MS sequence database search tool. Proteomics, 13(1), 22–24. doi: 10.1002/pmic.201200439. PubMed PMID: 23148064 + > Röst HL., Sachsenberg T., Aiche S., Bielow C., Weisser H., Aicheler F., Andreotti S., Ehrlich HC., Gutenbrunner P., Kenar E., Liang X., Nahnsen S., Nilse L., Pfeuffer J., Rosenberger G., Rurik M., Schmitt U., Veit J., Walzer M., Wojnar D., Wolski WE., Schilling O., Choudhary JS, Malmström L., Aebersold R., Reinert K., Kohlbacher O. (2016). OpenMS: a flexible open-source software platform for mass spectrometry data analysis. Nature methods, 13(9), 741–748. doi: 10.1038/nmeth.3959. PubMed PMID: 27575624; PubMed Central PMCID: PMC5617107. -* [MS-GF+](https://www.ncbi.nlm.nih.gov/pubmed/25358478/) - > Kim S., Pevzner PA. (2014). MS-GF+ makes progress towards a universal database search tool for proteomics. Nature communications, 5, 5277. doi: 10.1038/ncomms6277. PubMed PMID: 25358478; PubMed Central PMCID: PMC5036525 +- [DIA-NN](https://pubmed.ncbi.nlm.nih.gov/31768060/) -* [Epifany](https://pubmed.ncbi.nlm.nih.gov/31975601/) - > Pfeuffer J, Sachsenberg T, Dijkstra TMH, Serang O, Reinert K, Kohlbacher O. EPIFANY: A Method for Efficient High-Confidence Protein Inference. J Proteome Res. 2020 Mar 6;19(3):1060-1072. doi: 10.1021/acs.jproteome.9b00566. Epub 2020 Feb 13. PMID: 31975601; PMCID: PMC7583457. + > Demichev V, Messner CB, Vernardis SI, Lilley KS, Ralser M. 
DIA-NN: neural networks and interference correction enable deep proteome coverage in high throughput. Nat Methods. 2020 Jan;17(1):41-44. doi: 10.1038/s41592-019-0638-x. Epub 2019 Nov 25. PMID: 31768060; PMCID: PMC6949130. -* [Triqler](https://pubmed.ncbi.nlm.nih.gov/30482846/) - > The M, Käll L. Integrated Identification and Quantification Error Probabilities for Shotgun Proteomics. Mol Cell Proteomics. 2019 Mar;18(3):561-570. doi: 10.1074/mcp.RA118.001018. Epub 2018 Nov 27. PMID: 30482846; PMCID: PMC6398204. +- [MSstats](https://www.ncbi.nlm.nih.gov/pubmed/24794931/) -* [luciphor](https://pubmed.ncbi.nlm.nih.gov/23918812/) - > Fermin D, Walmsley SJ, Gingras AC, Choi H, Nesvizhskii AI. LuciPHOr: algorithm for phosphorylation site localization with false localization rate estimation using modified target-decoy approach. Mol Cell Proteomics. 2013 Nov;12(11):3409-19. doi: 10.1074/mcp.M113.028928. Epub 2013 Aug 5. PMID: 23918812; PMCID: PMC3820951. + > Choi M., Chang CY., Clough T., Broudy D., Killeen T., MacLean B., Vitek O. (2014). MSstats: an R package for statistical analysis of quantitative mass spectrometry-based proteomic experiments. Bioinformatics (Oxford, England), 30(17), 2524–2526. doi: 10.1093/bioinformatics/btu305. PubMed PMID: 24794931. + +- [Comet](https://www.ncbi.nlm.nih.gov/pubmed/23148064/) + + > Eng JK., Jahan TA., Hoopmann MR. (2013). Comet: an open-source MS/MS sequence database search tool. Proteomics, 13(1), 22–24. doi: 10.1002/pmic.201200439. PubMed PMID: 23148064 + +- [MS-GF+](https://www.ncbi.nlm.nih.gov/pubmed/25358478/) + + > Kim S., Pevzner PA. (2014). MS-GF+ makes progress towards a universal database search tool for proteomics. Nature communications, 5, 5277. doi: 10.1038/ncomms6277. PubMed PMID: 25358478; PubMed Central PMCID: PMC5036525 + +- [Epifany](https://pubmed.ncbi.nlm.nih.gov/31975601/) + + > Pfeuffer J, Sachsenberg T, Dijkstra TMH, Serang O, Reinert K, Kohlbacher O. 
EPIFANY: A Method for Efficient High-Confidence Protein Inference. J Proteome Res. 2020 Mar 6;19(3):1060-1072. doi: 10.1021/acs.jproteome.9b00566. Epub 2020 Feb 13. PMID: 31975601; PMCID: PMC7583457. + +- [Triqler](https://pubmed.ncbi.nlm.nih.gov/30482846/) + + > The M, Käll L. Integrated Identification and Quantification Error Probabilities for Shotgun Proteomics. Mol Cell Proteomics. 2019 Mar;18(3):561-570. doi: 10.1074/mcp.RA118.001018. Epub 2018 Nov 27. PMID: 30482846; PMCID: PMC6398204. + +- [luciphor](https://pubmed.ncbi.nlm.nih.gov/23918812/) + > Fermin D, Walmsley SJ, Gingras AC, Choi H, Nesvizhskii AI. LuciPHOr: algorithm for phosphorylation site localization with false localization rate estimation using modified target-decoy approach. Mol Cell Proteomics. 2013 Nov;12(11):3409-19. doi: 10.1074/mcp.M113.028928. Epub 2013 Aug 5. PMID: 23918812; PMCID: PMC3820951. ## Software packaging/containerisation tools -* [Anaconda](https://anaconda.com) - > Anaconda Software Distribution. Computer software. Vers. 2-2.4.0. Anaconda, Nov. 2016. Web. +- [Anaconda](https://anaconda.com) + + > Anaconda Software Distribution. Computer software. Vers. 2-2.4.0. Anaconda, Nov. 2016. Web. + +- [Bioconda](https://pubmed.ncbi.nlm.nih.gov/29967506/) + + > Grüning B, Dale R, Sjödin A, Chapman BA, Rowe J, Tomkins-Tinch CH, Valieris R, Köster J; Bioconda Team. Bioconda: sustainable and comprehensive software distribution for the life sciences. Nat Methods. 2018 Jul;15(7):475-476. doi: 10.1038/s41592-018-0046-7. PubMed PMID: 29967506. -* [Bioconda](https://pubmed.ncbi.nlm.nih.gov/29967506/) - > Grüning B, Dale R, Sjödin A, Chapman BA, Rowe J, Tomkins-Tinch CH, Valieris R, Köster J; Bioconda Team. Bioconda: sustainable and comprehensive software distribution for the life sciences. Nat Methods. 2018 Jul;15(7):475-476. doi: 10.1038/s41592-018-0046-7. PubMed PMID: 29967506. 
+- [BioContainers](https://pubmed.ncbi.nlm.nih.gov/28379341/) -* [BioContainers](https://pubmed.ncbi.nlm.nih.gov/28379341/) - > da Veiga Leprevost F, Grüning B, Aflitos SA, Röst HL, Uszkoreit J, Barsnes H, Vaudel M, Moreno P, Gatto L, Weber J, Bai M, Jimenez RC, Sachsenberg T, Pfeuffer J, Alvarez RV, Griss J, Nesvizhskii AI, Perez-Riverol Y. BioContainers: an open-source and community-driven framework for software standardization. Bioinformatics. 2017 Aug 15;33(16):2580-2582. doi: 10.1093/bioinformatics/btx192. PubMed PMID: 28379341; PubMed Central PMCID: PMC5870671. + > da Veiga Leprevost F, Grüning B, Aflitos SA, Röst HL, Uszkoreit J, Barsnes H, Vaudel M, Moreno P, Gatto L, Weber J, Bai M, Jimenez RC, Sachsenberg T, Pfeuffer J, Alvarez RV, Griss J, Nesvizhskii AI, Perez-Riverol Y. BioContainers: an open-source and community-driven framework for software standardization. Bioinformatics. 2017 Aug 15;33(16):2580-2582. doi: 10.1093/bioinformatics/btx192. PubMed PMID: 28379341; PubMed Central PMCID: PMC5870671. -* [Docker](https://dl.acm.org/doi/10.5555/2600239.2600241) +- [Docker](https://dl.acm.org/doi/10.5555/2600239.2600241) -* [Singularity](https://pubmed.ncbi.nlm.nih.gov/28494014/) - > Kurtzer GM, Sochat V, Bauer MW. Singularity: Scientific containers for mobility of compute. PLoS One. 2017 May 11;12(5):e0177459. doi: 10.1371/journal.pone.0177459. eCollection 2017. PubMed PMID: 28494014; PubMed Central PMCID: PMC5426675. +- [Singularity](https://pubmed.ncbi.nlm.nih.gov/28494014/) + > Kurtzer GM, Sochat V, Bauer MW. Singularity: Scientific containers for mobility of compute. PLoS One. 2017 May 11;12(5):e0177459. doi: 10.1371/journal.pone.0177459. eCollection 2017. PubMed PMID: 28494014; PubMed Central PMCID: PMC5426675. 
diff --git a/README.md b/README.md index 66fc708c..2a694fdf 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# ![nf-core/quantms](docs/images/nf-core-quantms_logo_light.png#gh-light-mode-only) ![nf-core/quantms](docs/images/nf-core-quantms_logo_dark.png#gh-dark-mode-only) +# ![nf-core/quantms](docs/images/nf-core-quantms_logo_light.png#gh-light-mode-only) ![nf-core/quantms](docs/images/nf-core-quantms_logo_dark.png#gh-dark-mode-only) [![GitHub Actions CI Status](https://github.com/nf-core/quantms/workflows/nf-core%20CI/badge.svg)](https://github.com/nf-core/quantms/actions?query=workflow%3A%22nf-core+CI%22) [![GitHub Actions Linting Status](https://github.com/nf-core/quantms/workflows/nf-core%20linting/badge.svg)](https://github.com/nf-core/quantms/actions?query=workflow%3A%22nf-core+linting%22) @@ -17,6 +17,7 @@ ## Introduction + **nf-core/quantms** is a bioinformatics best-practice analysis pipeline for Quantitative Mass Spectrometry (MS). Currently, the workflow supports three major MS-based analytical methods: (i) Data dependant acquisition (DDA) label-free and Isobaric quantitation (e.g. TMT, iTRAQ); (ii) Data independent acquisition (DIA) label-free quantification (for details see our in-depth documentation on [quantms](https://quantms.readthedocs.io/en/latest/)).

@@ -26,6 +27,7 @@ The pipeline is built using [Nextflow](https://www.nextflow.io), a workflow tool to run tasks across multiple compute infrastructures in a very portable manner. It uses Docker/Singularity containers making installation trivial and results highly reproducible. The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. Where possible, these processes have been submitted to and installed from [nf-core/modules](https://github.com/nf-core/modules) in order to make them available to all nf-core pipelines, and to everyone within the Nextflow community! + On release, automated continuous integration tests run the pipeline on a full-sized dataset on the AWS cloud infrastructure. This ensures that the pipeline runs on AWS, has sensible resource allocation defaults set to run on real-world datasets, and permits the persistent storage of results to benchmark between pipeline releases and other analysis sources. The results obtained from the full-sized test can be viewed on the [nf-core website](https://nf-co.re/quantms/results). This gives you a hint on which reports and file types are produced by the pipeline in a standard run. The automatic continuous integration tests evaluate different workflows, including the peptide identification, quantification for LFQ, LFQ-DIA, and TMT test datasets. ## Pipeline summary @@ -55,7 +57,7 @@ DDA-ISO: 5. Modification localization [`luciphor`](https://github.com/dfermin/lucXor) 6. Extracts and normalizes isobaric labeling [`IsobaricAnalyzer`](https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_IsobaricAnalyzer.html) 7. 
Protein inference [`ProteinInference`](https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_ProteinInference.html) or [`Epifany`](https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/UTILS_Epifany.html) for bayesian inference. -8. Protein Quantification [`ProteinQuantifier`](https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_ProteinQuantifier.html) +8. Protein Quantification [`ProteinQuantifier`](https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_ProteinQuantifier.html) 9. QC report generation [`pmultiqc`](https://github.com/bigbio/pmultiqc) 10. Normalization, imputation, significance testing with [`MSstats`](https://github.com/VitekLab/MSstats) @@ -74,24 +76,29 @@ DIA-LFQ: 3. Download the pipeline and test it on a minimal dataset with a single command: - ```console - nextflow run nf-core/quantms -profile test,YOURPROFILE --input project.sdrf.tsv --database protein.fasta - ``` + ```console + nextflow run nf-core/quantms -profile test,YOURPROFILE --input project.sdrf.tsv --database protein.fasta + ``` - Note that some form of configuration will be needed so that Nextflow knows how to fetch the required software. This is usually done in the form of a config profile (`YOURPROFILE` in the example command above). You can chain multiple config profiles in a comma-separated string. + Note that some form of configuration will be needed so that Nextflow knows how to fetch the required software. This is usually done in the form of a config profile (`YOURPROFILE` in the example command above). You can chain multiple config profiles in a comma-separated string. - > * The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`. 
- > * Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment. - > * If you are using `singularity` and are persistently observing issues downloading Singularity images directly due to timeout or network issues, then you can use the `--singularity_pull_docker_container` parameter to pull and convert the Docker image instead. Alternatively, you can use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs. - > * If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs. + > - The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`. + > - Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment. 
+ > - If you are using `singularity` and are persistently observing issues downloading Singularity images directly due to timeout or network issues, then you can use the `--singularity_pull_docker_container` parameter to pull and convert the Docker image instead. Alternatively, you can use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs. + > - If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs. + > + > * The pipeline comes with config profiles called `docker`, `singularity`, `podman`, `shifter`, `charliecloud` and `conda` which instruct the pipeline to use the named tool for software management. For example, `-profile test,docker`. + > * Please check [nf-core/configs](https://github.com/nf-core/configs#documentation) to see if a custom config file to run nf-core pipelines already exists for your Institute. If so, you can simply use `-profile ` in your command. This will enable either `docker` or `singularity` and set the appropriate execution settings for your local compute environment. + > * If you are using `singularity`, please use the [`nf-core download`](https://nf-co.re/tools/#downloading-pipelines-for-offline-use) command to download images first, before running the pipeline. Setting the [`NXF_SINGULARITY_CACHEDIR` or `singularity.cacheDir`](https://www.nextflow.io/docs/latest/singularity.html?#singularity-docker-hub) Nextflow options enables you to store and re-use the images from a central location for future pipeline runs. 
+ > * If you are using `conda`, it is highly recommended to use the [`NXF_CONDA_CACHEDIR` or `conda.cacheDir`](https://www.nextflow.io/docs/latest/conda.html) settings to store the environments in a central location for future pipeline runs. 4. Start running your own analysis! - + - ```console - nextflow run nf-core/quantms -profile --input project.sdrf.tsv --database database.fasta - ``` + ```console + nextflow run nf-core/quantms -profile --input project.sdrf.tsv --database database.fasta + ``` ## Documentation @@ -103,8 +110,8 @@ nf-core/quantms was originally written by: Chengxin Dai ([@daichengxin](https:// We thank the following people for their extensive assistance in the development of this pipeline: -* Timo Sachsenberg ([@timosachsenberg](https://github.com/timosachsenberg)) -* Wang Hong ([@WangHong007](https://github.com/WangHong007)) +- Timo Sachsenberg ([@timosachsenberg](https://github.com/timosachsenberg)) +- Wang Hong ([@WangHong007](https://github.com/WangHong007)) @@ -120,6 +127,7 @@ For further information or help, don't hesitate to get in touch on the [Slack `# + An extensive list of references for the tools used by the pipeline can be found in the [`CITATIONS.md`](CITATIONS.md) file. You can cite the `nf-core` publication as follows: diff --git a/assets/email_template.html b/assets/email_template.html index 7a622e34..49215a6a 100644 --- a/assets/email_template.html +++ b/assets/email_template.html @@ -1,53 +1,111 @@ - - - - + + + + - - nf-core/quantms Pipeline Report - - -

+ + + nf-core/quantms Pipeline Report + + +
+ - +

nf-core/quantms v${version}

+

Run Name: $runName

-

nf-core/quantms v${version}

-

Run Name: $runName

- -<% if (!success){ - out << """ -
-

nf-core/quantms execution completed unsuccessfully!

+ <% if (!success){ out << """ +
+

nf-core/quantms execution completed unsuccessfully!

The exit status of the task that caused the workflow execution to fail was: $exitStatus.

The full error message was:

-
${errorReport}
-
- """ -} else { - out << """ -
+
${errorReport}
+
+ """ } else { out << """ +
nf-core/quantms execution completed successfully! -
- """ -} -%> +
+ """ } %> -

The workflow was completed at $dateComplete (duration: $duration)

-

The command used to launch the workflow was as follows:

-
$commandLine
+

The workflow was completed at $dateComplete (duration: $duration)

+

The command used to launch the workflow was as follows:

+
+$commandLine
-

Pipeline Configuration:

- - - <% out << summary.collect{ k,v -> "" }.join("\n") %> - -
$k
$v
+

Pipeline Configuration:

+ + + <% out << summary.collect{ k,v -> " + + + + + " }.join("\n") %> + +
+ $k + +
$v
+
-

nf-core/quantms

-

https://github.com/nf-core/quantms

- -
- - +

nf-core/quantms

+

https://github.com/nf-core/quantms

+
+ diff --git a/assets/multiqc_config.yaml b/assets/multiqc_config.yaml deleted file mode 100644 index d1e7cd4d..00000000 --- a/assets/multiqc_config.yaml +++ /dev/null @@ -1,17 +0,0 @@ -report_comment: > - This report has been generated by the nf-core/quantms - analysis pipeline. For information about how to interpret these results, please see the - documentation. -report_section_order: - software_versions: - order: -1000 - nf-core-quantms-summary: - order: -1001 - pmultiqc: - order: -1002 - -thousandsSep_format: "" -export_plots: true -custom_logo: "./nf-core-quantms_logo_light.png" -custom_logo_url: "https://github.com/bigbio/quantms" -custom_logo_title: "quantms" diff --git a/assets/multiqc_config.yml b/assets/multiqc_config.yml new file mode 100644 index 00000000..292328b0 --- /dev/null +++ b/assets/multiqc_config.yml @@ -0,0 +1,17 @@ +report_comment: > + This report has been generated by the nf-core/quantms + analysis pipeline. For information about how to interpret these results, please see the + documentation. 
+report_section_order: + pmultiqc: + order: 1 + software_versions: + order: -1001 + "nf-core-quantms-summary": + order: -1002 + +thousandsSep_format: "" +export_plots: true +custom_logo: "./nf-core-quantms_logo_light.png" +custom_logo_url: "https://github.com/bigbio/quantms" +custom_logo_title: "quantms" diff --git a/assets/schema_input.json b/assets/schema_input.json index 22d7e10a..09ea57c3 100644 --- a/assets/schema_input.json +++ b/assets/schema_input.json @@ -31,9 +31,6 @@ ] } }, - "required": [ - "sample", - "fastq_1" - ] + "required": ["sample", "fastq_1"] } } diff --git a/conf/test_dia.config b/conf/test_dia.config index c8d5a6ba..f20efe3b 100644 --- a/conf/test_dia.config +++ b/conf/test_dia.config @@ -8,23 +8,23 @@ */ params { - config_profile_name = 'Test profile' - config_profile_description = 'Minimal test dataset to check pipeline function' - // Limit resources so that this can run on GitHub Actions - max_cpus = 2 - max_memory = 6.GB - max_time = 48.h + config_profile_name = 'Test profile' + config_profile_description = 'Minimal test dataset to check pipeline function' + // Limit resources so that this can run on GitHub Actions + max_cpus = 2 + max_memory = 6.GB + max_time = 48.h - // Input data - input = 'https://raw.githubusercontent.com/bigbio/quantms/dev/assets/PXD026600.sdrf.tsv' - database = 'ftp://massive.ucsd.edu/MSV000087597/sequence/REF_EColi_K12_UPS1_combined.fasta' - min_pr_mz = 350 - max_pr_mz = 950 - min_fr_mz = 500 - max_fr_mz = 1500 - min_peptide_length = 15 - max_peptide_length = 30 - max_precursor_charge = 3 - allowed_missed_cleavages = 1 - diann_normalize = false + // Input data + input = 'https://raw.githubusercontent.com/bigbio/quantms/dev/assets/PXD026600.sdrf.tsv' + database = 'ftp://massive.ucsd.edu/MSV000087597/sequence/REF_EColi_K12_UPS1_combined.fasta' + min_pr_mz = 350 + max_pr_mz = 950 + min_fr_mz = 500 + max_fr_mz = 1500 + min_peptide_length = 15 + max_peptide_length = 30 + max_precursor_charge = 3 + 
allowed_missed_cleavages = 1 + diann_normalize = false } diff --git a/conf/test_lfq.config b/conf/test_lfq.config index e15121d5..d8d59c6a 100644 --- a/conf/test_lfq.config +++ b/conf/test_lfq.config @@ -8,23 +8,22 @@ */ params { - config_profile_name = 'Test profile' - config_profile_description = 'Minimal test dataset to check pipeline function' - // Limit resources so that this can run on GitHub Actions - max_cpus = 2 - max_memory = 6.GB - max_time = 48.h - - // Input data - labelling_type = "label free sample" - input = 'https://raw.githubusercontent.com/nf-core/test-datasets/proteomicslfq/testdata/BSA_design_urls.tsv' - database = 'https://raw.githubusercontent.com/nf-core/test-datasets/proteomicslfq/testdata/18Protein_SoCe_Tr_detergents_trace_target_decoy.fasta' - posterior_probabilities = "fit_distributions" - search_engines = "msgf" - decoy_string= "rev" - enable_qc = true - add_triqler_output = true - protein_level_fdr_cutoff = 1.0 - acqusition_method = "dda" + config_profile_name = 'Test profile' + config_profile_description = 'Minimal test dataset to check pipeline function' + // Limit resources so that this can run on GitHub Actions + max_cpus = 2 + max_memory = 6.GB + max_time = 48.h + // Input data + labelling_type = "label free sample" + input = 'https://raw.githubusercontent.com/nf-core/test-datasets/proteomicslfq/testdata/BSA_design_urls.tsv' + database = 'https://raw.githubusercontent.com/nf-core/test-datasets/proteomicslfq/testdata/18Protein_SoCe_Tr_detergents_trace_target_decoy.fasta' + posterior_probabilities = "fit_distributions" + search_engines = "msgf" + decoy_string= "rev" + enable_qc = true + add_triqler_output = true + protein_level_fdr_cutoff = 1.0 + acqusition_method = "dda" } diff --git a/docs/README.md b/docs/README.md index d04abe4d..d7a6ae3e 100644 --- a/docs/README.md +++ b/docs/README.md @@ -2,9 +2,9 @@ The nf-core/quantms documentation is split into the following pages: -* [Usage](usage.md) - * An overview of how the pipeline 
works, how to run it and a description of all of the different command-line flags. -* [Output](output.md) - * An overview of the different results produced by the pipeline and how to interpret them. +- [Usage](usage.md) + - An overview of how the pipeline works, how to run it and a description of all of the different command-line flags. +- [Output](output.md) + - An overview of the different results produced by the pipeline and how to interpret them. You can find a lot more documentation about installing, configuring and running nf-core pipelines on the website: [https://nf-co.re](https://nf-co.re) diff --git a/docs/output.md b/docs/output.md index 7676652c..1c667f94 100644 --- a/docs/output.md +++ b/docs/output.md @@ -48,28 +48,28 @@ The output consists of the following folders (follow the links for a more detail results - spectra data: - - [thermorawfileparser/*.mzML](#spectra) + - [thermorawfileparser/\*.mzML](#spectra) - identification results: - - [searchenginecomet/*.idXML](#identifications) - - [searchenginemsgf/*.idXML](#identifications) + - [searchenginecomet/\*.idXML](#identifications) + - [searchenginemsgf/\*.idXML](#identifications) - consensusID identifications: - - [consensusid/*.idXML](#identifications) + - [consensusid/\*.idXML](#identifications) - pipeline information: - - [pipeline_info/...](#nextflow-pipeline-info) + - [pipeline_info/...](#nextflow-pipeline-info) - DDA-LFQ quantification results: - - [proteomicslfq/out.consensusXML](#consenusxml) - - [proteomicslfq/out_msstats.csv](#msstats-ready-quantity-table) - - [proteomicslfq/out_triqler.tsv](#triqler) - - [proteomicslfq/out.mzTab](#mztab) + - [proteomicslfq/out.consensusXML](#consenusxml) + - [proteomicslfq/out_msstats.csv](#msstats-ready-quantity-table) + - [proteomicslfq/out_triqler.tsv](#triqler) + - [proteomicslfq/out.mzTab](#mztab) - DDA-ISO quantification results: - - [proteinquantifier/out.mzTab](#mztab) - - [proteinquantifier/peptide_out.csv](#tab-based-openms-formats) - - 
[proteinquantifier/protein_out.csv](#tab-based-openms-formats) - - [msstatsconverter/out_msstats.csv](#msstats-ready-quantity-table) + - [proteinquantifier/out.mzTab](#mztab) + - [proteinquantifier/peptide_out.csv](#tab-based-openms-formats) + - [proteinquantifier/protein_out.csv](#tab-based-openms-formats) + - [msstatsconverter/out_msstats.csv](#msstats-ready-quantity-table) - DIA-LFQ quantification results: - - [convert2msstats/out_msstats.csv](#msstats-ready-quantity-table) + - [convert2msstats/out_msstats.csv](#msstats-ready-quantity-table) - MSstats-processed results - - [msstats/out_msstats.mzTab](#msstats-processed-mztab) + - [msstats/out_msstats.mzTab](#msstats-processed-mztab) ## Output description @@ -80,10 +80,7 @@ results
Output files --`pipeline_info/` - - Reports generated by Nextflow: `execution_report.html`, `execution_timeline.html`, `execution_trace.txt` and `pipeline_dag.dot`/`pipeline_dag.svg`. - - Reports generated by the pipeline: `pipeline_report.html`, `pipeline_report.txt` and `software_versions.yml`. The `pipeline_report*` files will only be present if the `--email` / `--email_on_fail` parameter's are used when running the pipeline. - - Reformatted samplesheet files used as input to the pipeline: `samplesheet.valid.csv`. +-`pipeline_info/` - Reports generated by Nextflow: `execution_report.html`, `execution_timeline.html`, `execution_trace.txt` and `pipeline_dag.dot`/`pipeline_dag.svg`. - Reports generated by the pipeline: `pipeline_report.html`, `pipeline_report.txt` and `software_versions.yml`. The `pipeline_report*` files will only be present if the `--email` / `--email_on_fail` parameter's are used when running the pipeline. - Reformatted samplesheet files used as input to the pipeline: `samplesheet.valid.csv`.
@@ -133,8 +130,8 @@ MSstats. Output files - `multiqc//` - - `multiqc_report.html`: a standalone HTML file that can be viewed in your web browser. - - `multiqc_data/`: directory containing parsed statistics from the different tools used in the pipeline. + - `multiqc_report.html`: a standalone HTML file that can be viewed in your web browser. + - `multiqc_data/`: directory containing parsed statistics from the different tools used in the pipeline. diff --git a/docs/usage.md b/docs/usage.md index 8104ae3e..d8dc65a2 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -16,8 +16,8 @@ nextflow run nf-core/quantms --input '/url/path/to/your/experimentX_design.tsv' where the experimental design file has to be one of: -* [Sample-to-data-relationship format](https://pubs.acs.org/doi/abs/10.1021/acs.jproteome.0c00376) (.sdrf.tsv) -* [OpenMS experimental design format](https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/release/latest/html/classOpenMS_1_1ExperimentalDesign.html#details) (.tsv) +- [Sample-to-data-relationship format](https://pubs.acs.org/doi/abs/10.1021/acs.jproteome.0c00376) (.sdrf.tsv) +- [OpenMS experimental design format](https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/release/latest/html/classOpenMS_1_1ExperimentalDesign.html#details) (.tsv) In the respective "comment[file uri]" or "Spectra_Filepath" columns, the raw or mzML files with the mass spectra to be staged have to be listed. URIs are possible, and the root folder as well as the file endings can be changed in the options in case of previously downloaded, moved or converted experiments. @@ -27,9 +27,9 @@ This will launch the pipeline with the `docker` configuration profile. 
See below Note that the pipeline will create the following files in your working directory: ```console -work # Directory containing the nextflow working files -results # Finished results (configurable, see below) -.nextflow_log # Log file from Nextflow +work # Directory containing the nextflow working files + # Finished results in specified location (defined with --outdir) +.nextflow_log # Log file from Nextflow # Other nextflow hidden files, eg. history of pipeline runs and old logs. ``` @@ -57,7 +57,7 @@ This version number will be logged in reports when you run the pipeline, so that Use this parameter to choose a configuration profile. Profiles can give configuration presets for different compute environments. -Several generic profiles are bundled with the pipeline which instruct the pipeline to use software packaged using different methods (Docker, Singularity, Podman, Shifter, Charliecloud, Conda) - see below. When using Biocontainers, most of these software packaging methods pull Docker containers from quay.io e.g [thermorawfilerparser](https://quay.io/repository/biocontainers/thermorawfileparser) except for Singularity which directly downloads Singularity images via https hosted by the [Galaxy project](https://depot.galaxyproject.org/singularity/) and Conda which downloads and installs software locally from [Bioconda](https://bioconda.github.io/). +Several generic profiles are bundled with the pipeline which instruct the pipeline to use software packaged using different methods (Docker, Singularity, Podman, Shifter, Charliecloud, Conda) - see below. 
When using Biocontainers, most of these software packaging methods pull Docker containers from quay.io e.g [FastQC](https://quay.io/repository/biocontainers/fastqc) except for Singularity which directly downloads Singularity images via https hosted by the [Galaxy project](https://depot.galaxyproject.org/singularity/) and Conda which downloads and installs software locally from [Bioconda](https://bioconda.github.io/). > We highly recommend the use of Docker or Singularity containers for full pipeline reproducibility, however when this is not possible, Conda is also supported. @@ -68,25 +68,25 @@ They are loaded in sequence, so later profiles can overwrite earlier profiles. If `-profile` is not specified, the pipeline will run locally and expect all software to be installed and available on the `PATH`. This is _not_ recommended. -* `docker` - * A generic configuration profile to be used with [Docker](https://docker.com/) -* `singularity` - * A generic configuration profile to be used with [Singularity](https://sylabs.io/docs/) -* `podman` - * A generic configuration profile to be used with [Podman](https://podman.io/) -* `shifter` - * A generic configuration profile to be used with [Shifter](https://nersc.gitlab.io/development/shifter/how-to-use/) -* `charliecloud` - * A generic configuration profile to be used with [Charliecloud](https://hpc.github.io/charliecloud/) -* `conda` - * A generic configuration profile to be used with [Conda](https://conda.io/docs/). Please only use Conda as a last resort i.e. when it's not possible to run the pipeline with Docker, Singularity, Podman, Shifter or Charliecloud. 
-* `test` - * A profile with a complete configuration for automated testing - * Includes links to test data so needs no other parameters +- `docker` + - A generic configuration profile to be used with [Docker](https://docker.com/) +- `singularity` + - A generic configuration profile to be used with [Singularity](https://sylabs.io/docs/) +- `podman` + - A generic configuration profile to be used with [Podman](https://podman.io/) +- `shifter` + - A generic configuration profile to be used with [Shifter](https://nersc.gitlab.io/development/shifter/how-to-use/) +- `charliecloud` + - A generic configuration profile to be used with [Charliecloud](https://hpc.github.io/charliecloud/) +- `conda` + - A generic configuration profile to be used with [Conda](https://conda.io/docs/). Please only use Conda as a last resort i.e. when it's not possible to run the pipeline with Docker, Singularity, Podman, Shifter or Charliecloud. +- `test` + - A profile with a complete configuration for automated testing + - Includes links to test data so needs no other parameters ### `-resume` -Specify this when restarting a pipeline. Nextflow will used cached results from any pipeline steps where the inputs are the same, continuing from where it got to previously. +Specify this when restarting a pipeline. Nextflow will use cached results from any pipeline steps where the inputs are the same, continuing from where it got to previously. For input to be considered the same, not only the names must be identical but the files' contents as well. For more info about this parameter, see [this blog post](https://www.nextflow.io/blog/2019/demystifying-nextflow-resume.html). You can also supply a run name to resume a specific run: `-resume [run-name]`. Use the `nextflow log` command to show previous run names. 
@@ -105,11 +105,11 @@ Whilst the default requirements set within the pipeline will hopefully work for For example, if the nf-core/rnaseq pipeline is failing after multiple re-submissions of the `STAR_ALIGN` process due to an exit code of `137` this would indicate that there is an out of memory issue: ```console -[62/149eb0] NOTE: Process `RNASEQ:ALIGN_STAR:STAR_ALIGN (WT_REP1)` terminated with an error exit status (137) -- Execution is retried (1) -Error executing process > 'RNASEQ:ALIGN_STAR:STAR_ALIGN (WT_REP1)' +[62/149eb0] NOTE: Process `NFCORE_RNASEQ:RNASEQ:ALIGN_STAR:STAR_ALIGN (WT_REP1)` terminated with an error exit status (137) -- Execution is retried (1) +Error executing process > 'NFCORE_RNASEQ:RNASEQ:ALIGN_STAR:STAR_ALIGN (WT_REP1)' Caused by: - Process `RNASEQ:ALIGN_STAR:STAR_ALIGN (WT_REP1)` terminated with an error exit status (137) + Process `NFCORE_RNASEQ:RNASEQ:ALIGN_STAR:STAR_ALIGN (WT_REP1)` terminated with an error exit status (137) Command executed: STAR \ @@ -137,87 +137,53 @@ To bypass this error you would need to find exactly which resources are set by t ```nextflow process { - withName: STAR_ALIGN { + withName: 'NFCORE_RNASEQ:RNASEQ:ALIGN_STAR:STAR_ALIGN' { memory = 100.GB } } ``` -> **NB:** We specify just the process name i.e. `STAR_ALIGN` in the config file and not the full task name string that is printed to screen in the error message or on the terminal whilst the pipeline is running i.e. `RNASEQ:ALIGN_STAR:STAR_ALIGN`. You may get a warning suggesting that the process selector isn't recognised but you can ignore that if the process name has been specified correctly. This is something that needs to be fixed upstream in core Nextflow. - -### Tool-specific options - -For the ultimate flexibility, we have implemented and are using Nextflow DSL2 modules in a way where it is possible for both developers and users to change tool-specific command-line arguments (e.g. 
providing an additional command-line argument to the `STAR_ALIGN` process) as well as publishing options (e.g. saving files produced by the `STAR_ALIGN` process that aren't saved by default by the pipeline). In the majority of instances, as a user you won't have to change the default options set by the pipeline developer(s), however, there may be edge cases where creating a simple custom config file can improve the behaviour of the pipeline if for example it is failing due to a weird error that requires setting a tool-specific parameter to deal with smaller / larger genomes. - -The command-line arguments passed to STAR in the `STAR_ALIGN` module are a combination of: - -* Mandatory arguments or those that need to be evaluated within the scope of the module, as supplied in the [`script`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/modules/nf-core/software/star/align/main.nf#L49-L55) section of the module file. - -* An [`options.args`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/modules/nf-core/software/star/align/main.nf#L56) string of non-mandatory parameters that is set to be empty by default in the module but can be overwritten when including the module in the sub-workflow / workflow context via the `addParams` Nextflow option. - -The nf-core/rnaseq pipeline has a sub-workflow (see [terminology](https://github.com/nf-core/modules#terminology)) specifically to align reads with STAR and to sort, index and generate some basic stats on the resulting BAM files using SAMtools. 
At the top of this file we import the `STAR_ALIGN` module via the Nextflow [`include`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/subworkflows/nf-core/align_star.nf#L10) keyword and by default the options passed to the module via the `addParams` option are set as an empty Groovy map [here](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/subworkflows/nf-core/align_star.nf#L5); this in turn means `options.args` will be set to empty by default in the module file too. This is an intentional design choice and allows us to implement well-written sub-workflows composed of a chain of tools that by default run with the bare minimum parameter set for any given tool in order to make it much easier to share across pipelines and to provide the flexibility for users and developers to customise any non-mandatory arguments. - -When including the sub-workflow above in the main pipeline workflow we use the same `include` statement, however, we now have the ability to overwrite options for each of the tools in the sub-workflow including the [`align_options`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/workflows/rnaseq.nf#L225) variable that will be used specifically to overwrite the optional arguments passed to the `STAR_ALIGN` module. In this case, the options to be provided to `STAR_ALIGN` have been assigned sensible defaults by the developer(s) in the pipeline's [`modules.config`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/conf/modules.config#L70-L74) and can be accessed and customised in the [workflow context](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/workflows/rnaseq.nf#L201-L204) too before eventually passing them to the sub-workflow as a Groovy map called `star_align_options`. These options will then be propagated from `workflow -> sub-workflow -> module`. 
- -As mentioned at the beginning of this section it may also be necessary for users to overwrite the options passed to modules to be able to customise specific aspects of the way in which a particular tool is executed by the pipeline. Given that all of the default module options are stored in the pipeline's `modules.config` as a [`params` variable](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/conf/modules.config#L24-L25) it is also possible to overwrite any of these options via a custom config file. - -Say for example we want to append an additional, non-mandatory parameter (i.e. `--outFilterMismatchNmax 16`) to the arguments passed to the `STAR_ALIGN` module. Firstly, we need to copy across the default `args` specified in the [`modules.config`](https://github.com/nf-core/rnaseq/blob/4c27ef5610c87db00c3c5a3eed10b1d161abf575/conf/modules.config#L71) and create a custom config file that is a composite of the default `args` as well as the additional options you would like to provide. This is very important because Nextflow will overwrite the default value of `args` that you provide via the custom config. - -As you will see in the example below, we have: - -* appended `--outFilterMismatchNmax 16` to the default `args` used by the module. -* changed the default `publish_dir` value to where the files will eventually be published in the main results directory. -* appended `'bam':''` to the default value of `publish_files` so that the BAM files generated by the process will also be saved in the top-level results directory for the module. Note: `'out':'log'` means any file/directory ending in `out` will now be saved in a separate directory called `my_star_directory/log/`. 
- -```nextflow -params { - modules { - 'star_align' { - args = "--quantMode TranscriptomeSAM --twopassMode Basic --outSAMtype BAM Unsorted --readFilesCommand zcat --runRNGseed 0 --outFilterMultimapNmax 20 --alignSJDBoverhangMin 1 --outSAMattributes NH HI AS NM MD --quantTranscriptomeBan Singleend --outFilterMismatchNmax 16" - publish_dir = "my_star_directory" - publish_files = ['out':'log', 'tab':'log', 'bam':''] - } - } -} -``` +> **NB:** We specify the full process name i.e. `NFCORE_RNASEQ:RNASEQ:ALIGN_STAR:STAR_ALIGN` in the config file because this takes priority over the short name (`STAR_ALIGN`) and allows existing configuration using the full process name to be correctly overridden. +> +> If you get a warning suggesting that the process selector isn't recognised check that the process name has been specified correctly. ### Updating containers The [Nextflow DSL2](https://www.nextflow.io/docs/latest/dsl2.html) implementation of this pipeline uses one container per process which makes it much easier to maintain and update software dependencies. If for some reason you need to use a different version of a particular tool with the pipeline then you just need to identify the `process` name and override the Nextflow `container` definition for that process using the `withName` declaration. For example, in the [nf-core/viralrecon](https://nf-co.re/viralrecon) pipeline a tool called [Pangolin](https://github.com/cov-lineages/pangolin) has been used during the COVID-19 pandemic to assign lineages to SARS-CoV-2 genome sequenced samples. Given that the lineage assignments change quite frequently it doesn't make sense to re-release the nf-core/viralrecon everytime a new version of Pangolin has been released. However, you can override the default container used by the pipeline by creating a custom config file and passing it as a command-line argument via `-c custom.config`. 1. 
Check the default version used by the pipeline in the module file for [Pangolin](https://github.com/nf-core/viralrecon/blob/a85d5969f9025409e3618d6c280ef15ce417df65/modules/nf-core/software/pangolin/main.nf#L14-L19) -2. Find the latest version of the Biocontainers available on [Quay.io](https://quay.io/repository/biocontainers/pangolin?tag=latest&tab=tags) +2. Find the latest version of the Biocontainer available on [Quay.io](https://quay.io/repository/biocontainers/pangolin?tag=latest&tab=tags) 3. Create the custom config accordingly: - * For Docker: - - ```nextflow - process { - withName: PANGOLIN { - container = 'quay.io/biocontainers/pangolin:3.0.5--pyhdfd78af_0' - } - } - ``` - - * For Singularity: - - ```nextflow - process { - withName: PANGOLIN { - container = 'https://depot.galaxyproject.org/singularity/pangolin:3.0.5--pyhdfd78af_0' - } - } - ``` - - * For Conda: - - ```nextflow - process { - withName: PANGOLIN { - conda = 'bioconda::pangolin=3.0.5' - } - } - ``` + - For Docker: + + ```nextflow + process { + withName: PANGOLIN { + container = 'quay.io/biocontainers/pangolin:3.0.5--pyhdfd78af_0' + } + } + ``` + + - For Singularity: + + ```nextflow + process { + withName: PANGOLIN { + container = 'https://depot.galaxyproject.org/singularity/pangolin:3.0.5--pyhdfd78af_0' + } + } + ``` + + - For Conda: + + ```nextflow + process { + withName: PANGOLIN { + conda = 'bioconda::pangolin=3.0.5' + } + } + ``` > **NB:** If you wish to periodically update individual tool-specific results (e.g. Pangolin) generated by the pipeline then you must ensure to keep the `work/` directory otherwise the `-resume` ability of the pipeline will be compromised and it will restart from scratch. 
diff --git a/modules.json b/modules.json index 283d2342..40640b37 100644 --- a/modules.json +++ b/modules.json @@ -4,10 +4,10 @@ "repos": { "nf-core/modules": { "custom/dumpsoftwareversions": { - "git_sha": "20d8250d9f39ddb05dfb437603aaf99b5c0b2b41" + "git_sha": "e745e167c1020928ef20ea1397b6b4d230681b4d" }, "multiqc": { - "git_sha": "20d8250d9f39ddb05dfb437603aaf99b5c0b2b41" + "git_sha": "e745e167c1020928ef20ea1397b6b4d230681b4d" } } } diff --git a/modules/local/convert2msstats/meta.yml b/modules/local/convert2msstats/meta.yml index 1ffdbfb7..d0ba31fe 100644 --- a/modules/local/convert2msstats/meta.yml +++ b/modules/local/convert2msstats/meta.yml @@ -1,30 +1,30 @@ name: convert2msstats description: A module to convert DIA report files to MSstats keywords: - - DIA-NN - - conversion - - MSstats + - DIA-NN + - conversion + - MSstats tools: - - custom: - description: | - A custom module for DIA-NN report file conversion. - homepage: https://github.com/bigbio/quantms - documentation: https://github.com/bigbio/quantms/tree/readthedocs + - custom: + description: | + A custom module for DIA-NN report file conversion. + homepage: https://github.com/bigbio/quantms + documentation: https://github.com/bigbio/quantms/tree/readthedocs input: - - report: - type: file - description: DIA-NN main report file - - exp_design: - type: file - description: An experimental design file including Sample and replicates column et al. + - report: + type: file + description: DIA-NN main report file + - exp_design: + type: file + description: An experimental design file including Sample and replicates column et al. 
output: - - out_msstats: - type: file - description: MSstats input file - pattern: "*.csv" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - out_msstats: + type: file + description: MSstats input file + pattern: "*.csv" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@Chengxin Dai" + - "@Chengxin Dai" diff --git a/modules/local/diannsearch/meta.yml b/modules/local/diannsearch/meta.yml index b02c62e4..aceee32d 100644 --- a/modules/local/diannsearch/meta.yml +++ b/modules/local/diannsearch/meta.yml @@ -1,44 +1,44 @@ name: diannsearch description: A module for DIA library free analysis based on DIA-NN. keywords: - - DIA-NN - - library free - - DIA + - DIA-NN + - library free + - DIA tools: - - DIA-NN: - description: | - DIA-NN - a universal software for data-independent acquisition (DIA) proteomics data processing by Demichev. - homepage: https://github.com/vdemichev/DiaNN - documentation: https://github.com/vdemichev/DiaNN + - DIA-NN: + description: | + DIA-NN - a universal software for data-independent acquisition (DIA) proteomics data processing by Demichev. + homepage: https://github.com/vdemichev/DiaNN + documentation: https://github.com/vdemichev/DiaNN input: - - lib: - type: file - description: Spectra library file - - spectra: - type: dir - description: The directory for spectra files - - searchdb: - type: file - description: Fasta sequence file - - cfg: - type: dir - description: Specifies a file to load options/commands from. + - lib: + type: file + description: Spectra library file + - spectra: + type: dir + description: The directory for spectra files + - searchdb: + type: file + description: Fasta sequence file + - cfg: + type: dir + description: Specifies a file to load options/commands from. output: - - report: - type: file - description: Main report file. 
A text table containing precursor and protein IDs, as well as plenty of associated information. Most column names are self-explanatory. - pattern: "report.tsv" - - report_stat: - type: file - description: Contains a number of QC metrics which can be used for data filtering, e.g. to exclude failed runs, or as a readout for method optimization. - pattern: "report.stats.tsv" - - log: - type: file - description: DIA-NN log file - pattern: "report.log.txt" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - report: + type: file + description: Main report file. A text table containing precursor and protein IDs, as well as plenty of associated information. Most column names are self-explanatory. + pattern: "report.tsv" + - report_stat: + type: file + description: Contains a number of QC metrics which can be used for data filtering, e.g. to exclude failed runs, or as a readout for method optimization. + pattern: "report.stats.tsv" + - log: + type: file + description: DIA-NN log file + pattern: "report.log.txt" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@Chengxin Dai" + - "@Chengxin Dai" diff --git a/modules/local/generate_diann_cfg/meta.yml b/modules/local/generate_diann_cfg/meta.yml index 296eb108..29238101 100644 --- a/modules/local/generate_diann_cfg/meta.yml +++ b/modules/local/generate_diann_cfg/meta.yml @@ -1,34 +1,34 @@ name: generate_diann_cfg description: A module to generate DIA-NN configuration files, based on input files and params. keywords: - - configure - - DIA-NN + - configure + - DIA-NN tools: - - custom: - description: | - A custom module to generate DIA-NN configuration files from input files and params. 
- homepage: https://github.com/bigbio/quantms - documentation: https://github.com/bigbio/quantms/tree/readthedocs + - custom: + description: | + A custom module to generate DIA-NN configuration files from input files and params. + homepage: https://github.com/bigbio/quantms + documentation: https://github.com/bigbio/quantms/tree/readthedocs input: - - meta: - type: map - description: Groovy Map containing sample information + - meta: + type: map + description: Groovy Map containing sample information output: - - library_config: - type: file - description: DIA-NN configure file for library generation - pattern: "library_config.cfg" - - search_cfg: - type: file - description: DIA-NN configure file for search and quantification - pattern: "diann_config.cfg" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" - - log: - type: file - description: log file - pattern: "*.log" + - library_config: + type: file + description: DIA-NN configure file for library generation + pattern: "library_config.cfg" + - search_cfg: + type: file + description: DIA-NN configure file for search and quantification + pattern: "diann_config.cfg" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" + - log: + type: file + description: log file + pattern: "*.log" authors: - - "@Chengxin Dai" + - "@Chengxin Dai" diff --git a/modules/local/librarygeneration/meta.yml b/modules/local/librarygeneration/meta.yml index 93695a7c..ca920063 100644 --- a/modules/local/librarygeneration/meta.yml +++ b/modules/local/librarygeneration/meta.yml @@ -1,45 +1,45 @@ name: librarygeneration description: A module for library generation based on DIA-NN. keywords: - - DIA-NN - - library free - - DIA + - DIA-NN + - library free + - DIA tools: - - DIA-NN: - description: | - DIA-NN - a universal software for data-independent acquisition (DIA) proteomics data processing by Demichev. 
- homepage: https://github.com/vdemichev/DiaNN - documentation: https://github.com/vdemichev/DiaNN + - DIA-NN: + description: | + DIA-NN - a universal software for data-independent acquisition (DIA) proteomics data processing by Demichev. + homepage: https://github.com/vdemichev/DiaNN + documentation: https://github.com/vdemichev/DiaNN input: - - spectra: - type: file - description: Spectra file - - fasta: - type: file - description: FASTA sequence databases - - cfg: - type: file - description: specifies a configuration file to load options/commands from. + - spectra: + type: file + description: Spectra file + - fasta: + type: file + description: FASTA sequence databases + - cfg: + type: file + description: specifies a configuration file to load options/commands from. output: - - lib_splib: - type: file - description: Spectra library file. - pattern: "*_lib.tsv" - - speclib: - type: file - description: Spectral library file based on speclib format - pattern: "*.tsv.speclib" - - predict_speclib: - type: file - description: Silico-predicted spectral library by deep leaning predictor in DIA-NN - pattern: "*.predicted.speclib" - - log: - type: file - description: DIA-NN log file - pattern: "report.log.txt" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - lib_splib: + type: file + description: Spectra library file. 
 + pattern: "*_lib.tsv" + - speclib: + type: file + description: Spectral library file based on speclib format + pattern: "*.tsv.speclib" + - predict_speclib: + type: file + description: Silico-predicted spectral library by deep learning predictor in DIA-NN + pattern: "*.predicted.speclib" + - log: + type: file + description: DIA-NN log file + pattern: "report.log.txt" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@Chengxin Dai" + - "@Chengxin Dai" diff --git a/modules/local/openms/consensusid/meta.yml b/modules/local/openms/consensusid/meta.yml index da47947a..46f877ce 100644 --- a/modules/local/openms/consensusid/meta.yml +++ b/modules/local/openms/consensusid/meta.yml @@ -1,38 +1,38 @@ name: consensusid description: Computes a consensus from results of multiple peptide identification engines. keywords: - - consensus scoring - - peptide database search - - OpenMS + - consensus scoring + - peptide database search + - OpenMS tools: - - ConsensusID: - description: | - Tool to Computes a consensus from results of multiple peptide identification engines. - homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_ConsensusID.html - documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_ConsensusID.html + - ConsensusID: + description: | + Tool to compute a consensus from results of multiple peptide identification engines. + homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_ConsensusID.html + documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_ConsensusID.html input: - - meta: - type: map - description: Groovy Map containing sample information - - id_file: - type: file - description: | - Identifications from searching a target-decoy database. 
+ - meta: + type: map + description: Groovy Map containing sample information + - id_file: + type: file + description: | + Identifications from searching a target-decoy database. output: - - meta: - type: map - description: Groovy Map containing sample information - - id_files_idx_ForIDPEP_FDR: - type: file - description: | - Identifications with annotated FDR. - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - meta: + type: map + description: Groovy Map containing sample information + - id_files_idx_ForIDPEP_FDR: + type: file + description: | + Identifications with annotated FDR. + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/openms/decoydatabase/meta.yml b/modules/local/openms/decoydatabase/meta.yml index 847bed47..cdefb72d 100644 --- a/modules/local/openms/decoydatabase/meta.yml +++ b/modules/local/openms/decoydatabase/meta.yml @@ -1,32 +1,32 @@ name: decoydatabase description: Create a decoy peptide database from standard FASTA databases. keywords: - - decoy - - fasta - - OpenMS + - decoy + - fasta + - OpenMS tools: - - DecoyDatabase: - description: | - Create a decoy peptide database from standard FASTA databases. - homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/UTILS_DecoyDatabase.html - documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/UTILS_DecoyDatabase.html + - DecoyDatabase: + description: | + Create a decoy peptide database from standard FASTA databases. 
+ homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/UTILS_DecoyDatabase.html + documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/UTILS_DecoyDatabase.html input: - - db_for_decoy: - type: file - description: | - Input standard FASTA databases to create decoy. + - db_for_decoy: + type: file + description: | + Input standard FASTA databases to create decoy. output: - - db_decoy: - type: file - description: | - decoy peptide database - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - db_decoy: + type: file + description: | + decoy peptide database + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/openms/epifany/meta.yml b/modules/local/openms/epifany/meta.yml index 6668de2c..73570def 100644 --- a/modules/local/openms/epifany/meta.yml +++ b/modules/local/openms/epifany/meta.yml @@ -1,33 +1,33 @@ name: epifany description: Runs a Bayesian protein inference. keywords: - - Bayesian - - inference - - OpenMS + - Bayesian + - inference + - OpenMS tools: - - Epifany: - description: | - It is a protein inference engine based on a Bayesian network. - homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/UTILS_Epifany.html - documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/UTILS_Epifany.html + - Epifany: + description: | + It is a protein inference engine based on a Bayesian network. 
+ homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/UTILS_Epifany.html + documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/UTILS_Epifany.html input: - - consus_file: - type: file - description: | - identification results. - pattern: "*.{idXML,consensusXML}" + - consus_file: + type: file + description: | + identification results. + pattern: "*.{idXML,consensusXML}" output: - - epi_inference: - type: file - description: | - identification results with scored/grouped proteins. - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - epi_inference: + type: file + description: | + identification results with scored/grouped proteins. + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/openms/extractpsmfeature/meta.yml b/modules/local/openms/extractpsmfeature/meta.yml index d8555593..cc9c211f 100644 --- a/modules/local/openms/extractpsmfeature/meta.yml +++ b/modules/local/openms/extractpsmfeature/meta.yml @@ -1,38 +1,38 @@ name: extracpsmfeature description: Computes extra features for each input PSM. keywords: - - PSM - - feature - - OpenMS + - PSM + - feature + - OpenMS tools: - - PSMFeatureExtractor: - description: | - Computes extra features for each input PSM. - homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/UTILS_PSMFeatureExtractor.html - documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/UTILS_PSMFeatureExtractor.html + - PSMFeatureExtractor: + description: | + Computes extra features for each input PSM. 
+ homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/UTILS_PSMFeatureExtractor.html + documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/UTILS_PSMFeatureExtractor.html input: - - meta: - type: map - description: Groovy Map containing sample information - - id_file: - type: file - description: | - Input idXML file containing the identifications. + - meta: + type: map + description: Groovy Map containing sample information + - id_file: + type: file + description: | + Input idXML file containing the identifications. output: - - meta: - type: map - description: Groovy Map containing sample information - - id_files_idx_feat: - type: file - description: | - Output file in mzid or idXML format - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - meta: + type: map + description: Groovy Map containing sample information + - id_files_idx_feat: + type: file + description: | + Output file in mzid or idXML format + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/openms/falsediscoveryrate/meta.yml b/modules/local/openms/falsediscoveryrate/meta.yml index 9e186950..e8de713d 100644 --- a/modules/local/openms/falsediscoveryrate/meta.yml +++ b/modules/local/openms/falsediscoveryrate/meta.yml @@ -1,38 +1,38 @@ name: falsediscoveryrate description: Estimates the false discovery rate on peptide and protein level using decoy searches. keywords: - - FDR - - decoy - - OpenMS + - FDR + - decoy + - OpenMS tools: - - FalseDiscoveryRate: - description: | - Tool to estimate the false discovery rate on peptide and protein level. 
- homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_FalseDiscoveryRate.html - documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_FalseDiscoveryRate.html + - FalseDiscoveryRate: + description: | + Tool to estimate the false discovery rate on peptide and protein level. + homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_FalseDiscoveryRate.html + documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_FalseDiscoveryRate.html input: - - meta: - type: map - description: Groovy Map containing sample information - - id_file: - type: file - description: | - Identifications from searching a target-decoy database. + - meta: + type: map + description: Groovy Map containing sample information + - id_file: + type: file + description: | + Identifications from searching a target-decoy database. output: - - meta: - type: map - description: Groovy Map containing sample information - - id_files_idx_ForIDPEP_FDR: - type: file - description: | - Identifications with annotated FDR. - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - meta: + type: map + description: Groovy Map containing sample information + - id_files_idx_ForIDPEP_FDR: + type: file + description: | + Identifications with annotated FDR. 
+ - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/openms/filemerge/meta.yml b/modules/local/openms/filemerge/meta.yml index 1176e227..acb044c2 100644 --- a/modules/local/openms/filemerge/meta.yml +++ b/modules/local/openms/filemerge/meta.yml @@ -1,31 +1,31 @@ name: filemerge description: Merges several MS files into one file. keywords: - - merge - - MS - - OpenMS + - merge + - MS + - OpenMS tools: - - FileMerger: - description: | - Merges several MS files into one file. - homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_FileMerger.html - documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_FileMerger.html + - FileMerger: + description: | + Merges several MS files into one file. + homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_FileMerger.html + documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_FileMerger.html input: - - id_file: - type: file - description: | - Input files separated by blank. + - id_file: + type: file + description: | + Input files separated by blank. 
output: - - id_merge: - type: file - description: Output file - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - id_merge: + type: file + description: Output file + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/openms/idconflictresolver/meta.yml b/modules/local/openms/idconflictresolver/meta.yml index 394efcdf..cf071643 100644 --- a/modules/local/openms/idconflictresolver/meta.yml +++ b/modules/local/openms/idconflictresolver/meta.yml @@ -1,31 +1,31 @@ name: idconflictresolver description: Resolves ambiguous annotations of features with peptide identifications. keywords: - - ambiguous - - OpenMS + - ambiguous + - OpenMS tools: - - IDConflictResolver: - description: | - Resolves ambiguous annotations of features with peptide identifications. - homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_IDConflictResolver.html - documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_IDConflictResolver.html + - IDConflictResolver: + description: | + Resolves ambiguous annotations of features with peptide identifications. 
+ homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_IDConflictResolver.html + documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_IDConflictResolver.html input: - - consus_file: - type: file - description: | - Input file (data annotated with identifications) - pattern: "*.{featureXML,consensusXML}" + - consus_file: + type: file + description: | + Input file (data annotated with identifications) + pattern: "*.{featureXML,consensusXML}" output: - - pro_resconf: - type: file - description: Output file (data with one peptide identification per feature) - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - pro_resconf: + type: file + description: Output file (data with one peptide identification per feature) + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/openms/idfilter/meta.yml b/modules/local/openms/idfilter/meta.yml index 5d2d8211..2d9f2a5c 100644 --- a/modules/local/openms/idfilter/meta.yml +++ b/modules/local/openms/idfilter/meta.yml @@ -1,37 +1,37 @@ name: idfilter description: Filters peptide/protein identification results by different criteria. keywords: - - Filter - - identification - - OpenMS + - Filter + - identification + - OpenMS tools: - - IDFilter: - description: | - Tool to Filters results from protein or peptide identification engines based on different criteria. 
- homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_IDFilter.html - documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_IDFilter.html + - IDFilter: + description: | + Tool to Filters results from protein or peptide identification engines based on different criteria. + homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_IDFilter.html + documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_IDFilter.html input: - - meta: - type: map - description: Groovy Map containing sample information - - id_file: - type: file - description: | - Identifications from searching a target-decoy database. + - meta: + type: map + description: Groovy Map containing sample information + - id_file: + type: file + description: | + Identifications from searching a target-decoy database. output: - - meta: - type: map - description: Groovy Map containing sample information - - id_files_idx_ForIDPEP_FDR: - type: file - description: Output file - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - meta: + type: map + description: Groovy Map containing sample information + - id_files_idx_ForIDPEP_FDR: + type: file + description: Output file + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/openms/idmapper/meta.yml b/modules/local/openms/idmapper/meta.yml index 79117ab3..aafe2002 100644 --- a/modules/local/openms/idmapper/meta.yml +++ b/modules/local/openms/idmapper/meta.yml @@ -1,39 +1,39 @@ name: idmapper description: Assigns protein/peptide identifications to features or 
consensus features. keywords: - - feature - - identification - - OpenMS + - feature + - identification + - OpenMS tools: - - IDMapper: - description: | - Assigns protein/peptide identifications to features or consensus features. - homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_IDMapper.html - documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_IDMapper.html + - IDMapper: + description: | + Assigns protein/peptide identifications to features or consensus features. + homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_IDMapper.html + documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_IDMapper.html input: - - meta: - type: map - description: Groovy Map containing sample information - - id_file: - type: file - description: | - Identifications file - pattern: "*.{idXML, mzid}" - - consensusXML: - type: file - description: Feature map/consensus map file - pattern: "*.{featureXML, consensusXML, mzq}" + - meta: + type: map + description: Groovy Map containing sample information + - id_file: + type: file + description: | + Identifications file + pattern: "*.{idXML, mzid}" + - consensusXML: + type: file + description: Feature map/consensus map file + pattern: "*.{featureXML, consensusXML, mzq}" output: - - id_map: - type: file - description: Output file - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - id_map: + type: file + description: Output file + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/openms/idpep/meta.yml 
b/modules/local/openms/idpep/meta.yml index 15317d47..770ecedf 100644 --- a/modules/local/openms/idpep/meta.yml +++ b/modules/local/openms/idpep/meta.yml @@ -1,38 +1,38 @@ name: idpep description: Tool to estimate the probability of peptide hits to be incorrectly assigned. keywords: - - PSM rescoring - - decoy - - OpenMS + - PSM rescoring + - decoy + - OpenMS tools: - - IDPosteriorErrorProbability: - description: | - Tool to estimate the probability of peptide hits to be incorrectly assigned. - homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_IDPosteriorErrorProbability.html - documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_IDPosteriorErrorProbability.html + - IDPosteriorErrorProbability: + description: | + Tool to estimate the probability of peptide hits to be incorrectly assigned. + homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_IDPosteriorErrorProbability.html + documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_IDPosteriorErrorProbability.html input: - - meta: - type: map - description: Groovy Map containing sample information - - id_file: - type: file - description: | - Identifications from searching a target-decoy database. + - meta: + type: map + description: Groovy Map containing sample information + - id_file: + type: file + description: | + Identifications from searching a target-decoy database. output: - - meta: - type: map - description: Groovy Map containing sample information - - id_files_ForIDPEP: - type: file - description: | - Identifications with annotated FDR. 
- - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - meta: + type: map + description: Groovy Map containing sample information + - id_files_ForIDPEP: + type: file + description: | + Identifications with annotated FDR. + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/openms/idscoreswitcher/meta.yml b/modules/local/openms/idscoreswitcher/meta.yml index e54b8cce..0f1bbd93 100644 --- a/modules/local/openms/idscoreswitcher/meta.yml +++ b/modules/local/openms/idscoreswitcher/meta.yml @@ -1,39 +1,39 @@ name: idscoreswitcher description: Switches between different scores of peptide hits (PSMs) or protein hits in identification data. keywords: - - Switches - - Score - - OpenMS + - Switches + - Score + - OpenMS tools: - - IDScoreSwitcher: - description: | - Switches between different scores of peptide or protein hits in identification data. - homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/UTILS_IDScoreSwitcher.html - documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/UTILS_IDScoreSwitcher.html + - IDScoreSwitcher: + description: | + Switches between different scores of peptide or protein hits in identification data. + homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/UTILS_IDScoreSwitcher.html + documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/UTILS_IDScoreSwitcher.html input: - - meta: - type: map - description: Groovy Map containing sample information - - id_file: - type: file - description: | - Identifications from searching a target-decoy database. 
- pattern: "*.idXML" + - meta: + type: map + description: Groovy Map containing sample information + - id_file: + type: file + description: | + Identifications from searching a target-decoy database. + pattern: "*.idXML" output: - - meta: - type: map - description: Groovy Map containing sample information - - id_score_switcher: - type: file - description: | - Identifications file - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - meta: + type: map + description: Groovy Map containing sample information + - id_score_switcher: + type: file + description: | + Identifications file + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/openms/indexpeptides/meta.yml b/modules/local/openms/indexpeptides/meta.yml index 01a5adfe..cb9611b0 100644 --- a/modules/local/openms/indexpeptides/meta.yml +++ b/modules/local/openms/indexpeptides/meta.yml @@ -1,42 +1,42 @@ name: indexpeptides description: Refreshes the protein references for all peptide hits from an idXML file and adds target/decoy information. keywords: - - decoy - - PeptideIndexer - - OpenMS + - decoy + - PeptideIndexer + - OpenMS tools: - - PeptideIndexer: - description: | - Refreshes the protein references for all peptide hits. - homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_PeptideIndexer.html - documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_PeptideIndexer.html + - PeptideIndexer: + description: | + Refreshes the protein references for all peptide hits. 
+ homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_PeptideIndexer.html + documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_PeptideIndexer.html input: - - meta: - type: map - description: Groovy Map containing sample information - - id_file: - type: file - description: | - Input idXML file containing the identifications. - - database: - type: file - description: | - Input sequence database in FASTA format. + - meta: + type: map + description: Groovy Map containing sample information + - id_file: + type: file + description: | + Input idXML file containing the identifications. + - database: + type: file + description: | + Input sequence database in FASTA format. output: - - meta: - type: map - description: Groovy Map containing sample information - - id_files_idx: - type: file - description: | - Output idXML file. - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - meta: + type: map + description: Groovy Map containing sample information + - id_files_idx: + type: file + description: | + Output idXML file. + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/openms/isobaricanalyzer/meta.yml b/modules/local/openms/isobaricanalyzer/meta.yml index 82fd674f..9c9c1e7c 100644 --- a/modules/local/openms/isobaricanalyzer/meta.yml +++ b/modules/local/openms/isobaricanalyzer/meta.yml @@ -1,37 +1,37 @@ name: isobaricanalyzer description: Extracts and normalizes isobaric labeling information from an LC-MS/MS experiment. 
keywords: - - peak - - OpenMS + - peak + - OpenMS tools: - - IsobaricAnalyzer: - description: | - Extracts and normalizes isobaric labeling information from an LC-MS/MS experiment. - homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_IsobaricAnalyzer.html - documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_IsobaricAnalyzer.html + - IsobaricAnalyzer: + description: | + Extracts and normalizes isobaric labeling information from an LC-MS/MS experiment. + homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_IsobaricAnalyzer.html + documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_IsobaricAnalyzer.html input: - - meta: - type: map - description: Groovy Map containing sample information - - mzml_file: - type: file - description: Input profile data file. - pattern: "*.mzML" + - meta: + type: map + description: Groovy Map containing sample information + - mzml_file: + type: file + description: Input profile data file. 
+ pattern: "*.mzML" output: - - meta: - type: map - description: Groovy Map containing sample information - - iso_consensusXML: - type: file - description: Output consensusXML file with quantitative information - pattern: "*.consensusXML" - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - meta: + type: map + description: Groovy Map containing sample information + - iso_consensusXML: + type: file + description: Output consensusXML file with quantitative information + pattern: "*.consensusXML" + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/openms/msstatsconverter/meta.yml b/modules/local/openms/msstatsconverter/meta.yml index 4251f886..a7750844 100644 --- a/modules/local/openms/msstatsconverter/meta.yml +++ b/modules/local/openms/msstatsconverter/meta.yml @@ -1,35 +1,35 @@ name: msstatsconverter description: Converter to input for MSstats keywords: - - MSstats - - OpenMS + - MSstats + - OpenMS tools: - - MSstatsConverter: - description: | - Converter to input for MSstats - homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/UTILS_MSstatsConverter.html - documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/UTILS_MSstatsConverter.html + - MSstatsConverter: + description: | + Converter to input for MSstats + homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/UTILS_MSstatsConverter.html + documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/UTILS_MSstatsConverter.html input: - - consensusXML: - type: file - description: | - Input consensusXML with peptide intensities 
- pattern: "*.consensusXML" - - exp_file: - type: file - description: Experimental Design file + - consensusXML: + type: file + description: | + Input consensusXML with peptide intensities + pattern: "*.consensusXML" + - exp_file: + type: file + description: Experimental Design file output: - - out_msstats: - type: file - description: Input CSV file for MSstats. - pattern: "*.csv" - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - out_msstats: + type: file + description: Input CSV file for MSstats. + pattern: "*.csv" + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/openms/mzmlindexing/meta.yml b/modules/local/openms/mzmlindexing/meta.yml index aee1ce1a..d433d308 100644 --- a/modules/local/openms/mzmlindexing/meta.yml +++ b/modules/local/openms/mzmlindexing/meta.yml @@ -1,40 +1,40 @@ name: mzmlindexing description: Converts between different MS file formats keywords: - - raw - - mzML - - OpenMS + - raw + - mzML + - OpenMS tools: - - FileConverter: - description: | - Converts between different MS file formats - homepage: http://www.openms.de/doxygen/nightly/html/TOPP_FileConverter.html - documentation: http://www.openms.de/doxygen/nightly/html/TOPP_FileConverter.html + - FileConverter: + description: | + Converts between different MS file formats + homepage: http://www.openms.de/doxygen/nightly/html/TOPP_FileConverter.html + documentation: http://www.openms.de/doxygen/nightly/html/TOPP_FileConverter.html input: - - meta: - type: map - description: | - Groovy Map containing sample information - - mzmlfile: - type: file - description: | - Input file to convert. 
+ - meta: + type: map + description: | + Groovy Map containing sample information + - mzmlfile: + type: file + description: | + Input file to convert. output: - - meta: - type: map - description: | - Groovy Map containing sample information - - mzmls_indexed: - type: file - description: indexed mzML file - pattern: "*.mzML" - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - meta: + type: map + description: | + Groovy Map containing sample information + - mzmls_indexed: + type: file + description: indexed mzML file + pattern: "*.mzML" + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/openms/openmspeakpicker/meta.yml b/modules/local/openms/openmspeakpicker/meta.yml index 0e3c6ad8..1bbc911b 100644 --- a/modules/local/openms/openmspeakpicker/meta.yml +++ b/modules/local/openms/openmspeakpicker/meta.yml @@ -1,36 +1,36 @@ name: openmspeakpicker description: Finds mass spectrometric peaks in profile mass spectra. keywords: - - peak - - OpenMS + - peak + - OpenMS tools: - - PeakPickerHiRes: - description: | - A tool for peak detection in profile data. Executes the peak picking with high_res algorithm. - homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_PeakPickerHiRes.html - documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_PeakPickerHiRes.html + - PeakPickerHiRes: + description: | + A tool for peak detection in profile data. Executes the peak picking with high_res algorithm. 
+ homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_PeakPickerHiRes.html + documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_PeakPickerHiRes.html input: - - meta: - type: map - description: Groovy Map containing sample information - - mzml_file: - type: file - description: Input profile data file. - pattern: "*.mzML" + - meta: + type: map + description: Groovy Map containing sample information + - mzml_file: + type: file + description: Input profile data file. + pattern: "*.mzML" output: - - meta: - type: map - description: Groovy Map containing sample information - - mzmls_picked: - type: file - description: Output peak file - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - meta: + type: map + description: Groovy Map containing sample information + - mzmls_picked: + type: file + description: Output peak file + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/openms/proteininference/meta.yml b/modules/local/openms/proteininference/meta.yml index 0b9d7751..7e9d2687 100644 --- a/modules/local/openms/proteininference/meta.yml +++ b/modules/local/openms/proteininference/meta.yml @@ -1,33 +1,33 @@ name: proteininference description: Computes a protein identification score based on an aggregation of scores of identified peptides. keywords: - - protein - - inference - - OpenMS + - protein + - inference + - OpenMS tools: - - ProteinInference: - description: | - Computes a protein identification score based on an aggregation of scores of identified peptides. 
- homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_ProteinInference.html - documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_ProteinInference.html + - ProteinInference: + description: | + Computes a protein identification score based on an aggregation of scores of identified peptides. + homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_ProteinInference.html + documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_ProteinInference.html input: - - consus_file: - type: file - description: | - identification results. - pattern: "*.{idXML,consensusXML}" + - consus_file: + type: file + description: | + identification results. + pattern: "*.{idXML,consensusXML}" output: - - protein_inference: - type: file - description: | - identification results with scored/grouped proteins. - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - protein_inference: + type: file + description: | + identification results with scored/grouped proteins. + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/openms/proteinquantifier/meta.yml b/modules/local/openms/proteinquantifier/meta.yml index e2589fa1..792a4cbd 100644 --- a/modules/local/openms/proteinquantifier/meta.yml +++ b/modules/local/openms/proteinquantifier/meta.yml @@ -1,39 +1,39 @@ name: proteinquantifier description: Compute peptide and protein abundances from annotated feature/consensus maps or from identification results. 
keywords: - - abundances - - OpenMS + - abundances + - OpenMS tools: - - ProteinQuantifier: - description: | - Compute peptide and protein abundances from annotated feature/consensus maps or from identification results. - homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_ProteinQuantifier.html - documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_ProteinQuantifier.html + - ProteinQuantifier: + description: | + Compute peptide and protein abundances from annotated feature/consensus maps or from identification results. + homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_ProteinQuantifier.html + documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_ProteinQuantifier.html input: - - epi_filt_resolve: - type: file - description: | - Input file (data annotated with identifications) - pattern: "*.{featureXML,consensusXML,idXML}" + - epi_filt_resolve: + type: file + description: | + Input file (data annotated with identifications) + pattern: "*.{featureXML,consensusXML,idXML}" output: - - protein_out: - type: file - description: Output file for protein abundances - pattern: "*.csv" - - peptide_out: - type: file - description: Output file for peptide abundances - pattern: "*.csv" - - out_mztab: - type: file - description: Output file (mzTab) - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - protein_out: + type: file + description: Output file for protein abundances + pattern: "*.csv" + - peptide_out: + type: file + description: Output file for peptide abundances + pattern: "*.csv" + - out_mztab: + type: file + description: Output file (mzTab) + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + 
description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/openms/proteomicslfq/meta.yml b/modules/local/openms/proteomicslfq/meta.yml index 433023ce..062c402c 100644 --- a/modules/local/openms/proteomicslfq/meta.yml +++ b/modules/local/openms/proteomicslfq/meta.yml @@ -1,48 +1,48 @@ name: proteomicslfq description: ProteomicsLFQ performs label-free quantification of peptides and proteins. keywords: - - label-free - - OpenMS - - quantification + - label-free + - OpenMS + - quantification tools: - - ProteomicsLFQ: - description: | - ProteomicsLFQ performs label-free quantification of peptides and proteins. - homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/release/latest/html/UTILS_ProteomicsLFQ.html - documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/release/latest/html/UTILS_ProteomicsLFQ.html + - ProteomicsLFQ: + description: | + ProteomicsLFQ performs label-free quantification of peptides and proteins. + homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/release/latest/html/UTILS_ProteomicsLFQ.html + documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/release/latest/html/UTILS_ProteomicsLFQ.html input: - - mzmls: - type: file - description: Input Spectra in mzML format - pattern: "*.mzML" - - id_files: - type: file - description: Identifications in idXML or mzIdentML format with posterior error probabilities as score type. - - expdes: - type: file - description: An experimental design file - - fasta: - type: file - description: A protein database in with appended decoy sequences in FASTA format. + - mzmls: + type: file + description: Input Spectra in mzML format + pattern: "*.mzML" + - id_files: + type: file + description: Identifications in idXML or mzIdentML format with posterior error probabilities as score type. 
+ - expdes: + type: file + description: An experimental design file + - fasta: + type: file + description: A protein database in with appended decoy sequences in FASTA format. output: - - out_mztab: - type: file - description: mzTab file with analysis results - pattern: "*.mzTab" - - out_consensusXML: - type: file - description: ConsensusXML file for visualization and further processing in OpenMS. - pattern: "*.consensusXML" - - out_msstats: - type: file - description: MSstats file with analysis results for statistical downstream analysis in MSstats. - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - out_mztab: + type: file + description: mzTab file with analysis results + pattern: "*.mzTab" + - out_consensusXML: + type: file + description: ConsensusXML file for visualization and further processing in OpenMS. + pattern: "*.consensusXML" + - out_msstats: + type: file + description: MSstats file with analysis results for statistical downstream analysis in MSstats. + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/openms/thirdparty/luciphoradapter/meta.yml b/modules/local/openms/thirdparty/luciphoradapter/meta.yml index 85112dd2..bff7add3 100644 --- a/modules/local/openms/thirdparty/luciphoradapter/meta.yml +++ b/modules/local/openms/thirdparty/luciphoradapter/meta.yml @@ -1,41 +1,41 @@ name: luciphoradapter description: Modification site localisation using LuciPHOr2. keywords: - - LuciPHOr2 - - modification - - OpenMS + - LuciPHOr2 + - modification + - OpenMS tools: - - LuciphorAdapter: - description: | - Extracts and normalizes isobaric labeling information from an LC-MS/MS experiment. 
- homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_LuciphorAdapter.html - documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_LuciphorAdapter.html + - LuciphorAdapter: + description: | + Extracts and normalizes isobaric labeling information from an LC-MS/MS experiment. + homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_LuciphorAdapter.html + documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_LuciphorAdapter.html input: - - meta: - type: map - description: Groovy Map containing sample information - - mzml_file: - type: file - description: Input spectrum file. - pattern: "*.mzML" - - id_file: - type: file - description: Protein/peptide identifications file - pattern: "*.idXML" + - meta: + type: map + description: Groovy Map containing sample information + - mzml_file: + type: file + description: Input spectrum file. 
+ pattern: "*.mzML" + - id_file: + type: file + description: Protein/peptide identifications file + pattern: "*.idXML" output: - - meta: - type: map - description: Groovy Map containing sample information - - ptm_in_id_luciphor: - type: file - description: Output file - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - meta: + type: map + description: Groovy Map containing sample information + - ptm_in_id_luciphor: + type: file + description: Output file + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/openms/thirdparty/percolator/meta.yml b/modules/local/openms/thirdparty/percolator/meta.yml index 35d34689..e30fcc14 100644 --- a/modules/local/openms/thirdparty/percolator/meta.yml +++ b/modules/local/openms/thirdparty/percolator/meta.yml @@ -1,37 +1,37 @@ name: percolator description: PercolatorAdapter facilitates the input to, the call of and output integration of Percolator. keywords: - - identification - - OpenMS + - identification + - OpenMS tools: - - PercolatorAdapter: - description: | - Percolator (http://percolator.ms/) is a tool to apply semi-supervised learning for peptide identification from shotgun proteomics datasets. - homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_PercolatorAdapter.html - documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_PercolatorAdapter.html + - PercolatorAdapter: + description: | + Percolator (http://percolator.ms/) is a tool to apply semi-supervised learning for peptide identification from shotgun proteomics datasets. 
+ homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_PercolatorAdapter.html + documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_PercolatorAdapter.html input: - - meta: - type: map - description: Groovy Map containing sample information - - id_file: - type: file - description: | - Input idXML file containing the identifications. + - meta: + type: map + description: Groovy Map containing sample information + - id_file: + type: file + description: | + Input idXML file containing the identifications. output: - - meta: - type: map - description: Groovy Map containing sample information - - id_files_perc: - type: file - description: | - Output file in idXML format - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - meta: + type: map + description: Groovy Map containing sample information + - id_files_perc: + type: file + description: | + Output file in idXML format + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/openms/thirdparty/searchenginecomet/meta.yml b/modules/local/openms/thirdparty/searchenginecomet/meta.yml index 026628be..e44f5239 100644 --- a/modules/local/openms/thirdparty/searchenginecomet/meta.yml +++ b/modules/local/openms/thirdparty/searchenginecomet/meta.yml @@ -1,40 +1,40 @@ name: searchenginecomet description: Identifies peptides in MS/MS spectra via Comet. keywords: - - identification - - OpenMS - - comet + - identification + - OpenMS + - comet tools: - - CometAdapter: - description: | - Identifies peptides in MS/MS spectra via Comet. 
- homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_CometAdapter.html - documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_CometAdapter.html + - CometAdapter: + description: | + Identifies peptides in MS/MS spectra via Comet. + homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_CometAdapter.html + documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_CometAdapter.html input: - - meta: - type: map - description: Groovy Map containing sample information - - mzml_file: - type: file - description: Input profile data file. - - database: - type: file - description: fasta file + - meta: + type: map + description: Groovy Map containing sample information + - mzml_file: + type: file + description: Input profile data file. + - database: + type: file + description: fasta file output: - - meta: - type: map - description: Groovy Map containing sample information - - id_files_comet: - type: file - description: Output file - pattern: "*.idXML" - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - meta: + type: map + description: Groovy Map containing sample information + - id_files_comet: + type: file + description: Output file + pattern: "*.idXML" + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/openms/thirdparty/searchenginemsgf/meta.yml b/modules/local/openms/thirdparty/searchenginemsgf/meta.yml index d431b4ca..3828fea2 100644 --- a/modules/local/openms/thirdparty/searchenginemsgf/meta.yml +++ b/modules/local/openms/thirdparty/searchenginemsgf/meta.yml @@ 
-1,40 +1,40 @@ name: searchenginemsgf description: the MS-GF+ protein identification (database search) engine. keywords: - - identification - - OpenMS - - msgf + - identification + - OpenMS + - msgf tools: - - MSGFPlusAdapter: - description: | - Adapter for the MS-GF+ protein identification (database search) engine. - homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_MSGFPlusAdapter.html - documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_MSGFPlusAdapter.html + - MSGFPlusAdapter: + description: | + Adapter for the MS-GF+ protein identification (database search) engine. + homepage: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_MSGFPlusAdapter.html + documentation: https://abibuilder.informatik.uni-tuebingen.de/archive/openms/Documentation/nightly/html/TOPP_MSGFPlusAdapter.html input: - - meta: - type: map - description: Groovy Map containing sample information - - mzml_file: - type: file - description: Input profile data file. - - database: - type: file - description: fasta file + - meta: + type: map + description: Groovy Map containing sample information + - mzml_file: + type: file + description: Input profile data file. 
+ - database: + type: file + description: fasta file output: - - meta: - type: map - description: Groovy Map containing sample information - - id_files_msgf: - type: file - description: Output file - pattern: "*.idXML" - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - meta: + type: map + description: Groovy Map containing sample information + - id_files_msgf: + type: file + description: Output file + pattern: "*.idXML" + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/local/pmultiqc/main.nf b/modules/local/pmultiqc/main.nf index e99806b7..f9f99aa9 100644 --- a/modules/local/pmultiqc/main.nf +++ b/modules/local/pmultiqc/main.nf @@ -30,7 +30,6 @@ process PMULTIQC { --exp_design ${expdesign} \\ --mzMLs ./mzMLs \\ --raw_ids ./raw_ids \\ - --config ./results/multiqc_config.yaml \\ ./results \\ -o . diff --git a/modules/local/pmultiqc/meta.yml b/modules/local/pmultiqc/meta.yml index 92a7e790..d96d5285 100644 --- a/modules/local/pmultiqc/meta.yml +++ b/modules/local/pmultiqc/meta.yml @@ -1,48 +1,48 @@ name: pmultiqc description: A library for proteomics QC report based on MultiQC framework. keywords: - - MultiQC - - QC - - Proteomics + - MultiQC + - QC + - Proteomics tools: - - pmultiqc: - description: | - A library for proteomics QC report based on MultiQC framework. - homepage: https://github.com/bigbio/pmultiqc/ - documentation: https://github.com/bigbio/pmultiqc/ + - pmultiqc: + description: | + A library for proteomics QC report based on MultiQC framework. 
+ homepage: https://github.com/bigbio/pmultiqc/ + documentation: https://github.com/bigbio/pmultiqc/ input: - - expdesign: - type: file - description: experimental design file in openms style - - mzmls: - type: dir - description: mzML files directory - - quantms_results: - type: dir - description: the directory of quantms results including out.mzTab/out_msstats.csv - - raw_ids: - type: dir - description: idXML files directory + - expdesign: + type: file + description: experimental design file in openms style + - mzmls: + type: dir + description: mzML files directory + - quantms_results: + type: dir + description: the directory of quantms results including out.mzTab/out_msstats.csv + - raw_ids: + type: dir + description: idXML files directory output: - - report: - type: file - description: MultiQC report file - pattern: "multiqc_report.html" - - quantmsdb: - type: file - description: Sqlite3 database file stored protein psm and quantificaiton information - pattern: "*.db" - - data: - type: dir - description: MultiQC data dir - pattern: "multiqc_data" - - plots: - type: file - description: Plots created by MultiQC - pattern: "*_data" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - report: + type: file + description: MultiQC report file + pattern: "multiqc_report.html" + - quantmsdb: + type: file + description: Sqlite3 database file stored protein psm and quantificaiton information + pattern: "*.db" + - data: + type: dir + description: MultiQC data dir + pattern: "multiqc_data" + - plots: + type: file + description: Plots created by MultiQC + pattern: "*_data" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@Chengxin Dai" + - "@Chengxin Dai" diff --git a/modules/local/sdrfparsing/meta.yml b/modules/local/sdrfparsing/meta.yml index 94d8518d..ebf3c242 100644 --- a/modules/local/sdrfparsing/meta.yml +++ b/modules/local/sdrfparsing/meta.yml @@ 
-1,40 +1,40 @@ name: SDRFPARSING description: Convert SDRF proteomics files into pipelines config files keywords: - - SDRF - - bioinformatics tools - - OpenMS + - SDRF + - bioinformatics tools + - OpenMS tools: - - sdrf-pipelines: - description: | - Convert SDRF proteomics files into pipelines config files. - homepage: https://github.com/bigbio/sdrf-pipelines - documentation: https://github.com/bigbio/sdrf-pipelines + - sdrf-pipelines: + description: | + Convert SDRF proteomics files into pipelines config files. + homepage: https://github.com/bigbio/sdrf-pipelines + documentation: https://github.com/bigbio/sdrf-pipelines input: - - sdrf_files: - type: file - description: | - A valid sdrf file + - sdrf_files: + type: file + description: | + A valid sdrf file output: - - experimental_design: - type: file - description: experimental design file in OpenMS format - pattern: "experimental_design.tsv" - - openms: - type: file - description: config file with search engine parameters in OpenMS nomenclature - pattern: "openms.tsv" - - mqpar: - type: file - description: maxquant configuration file - pattern: "*.xml" - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - experimental_design: + type: file + description: experimental design file in OpenMS format + pattern: "experimental_design.tsv" + - openms: + type: file + description: config file with search engine parameters in OpenMS nomenclature + pattern: "openms.tsv" + - mqpar: + type: file + description: maxquant configuration file + pattern: "*.xml" + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@Chengxin Dai" + - "@Chengxin Dai" diff --git a/modules/local/thermorawfileparser/meta.yml b/modules/local/thermorawfileparser/meta.yml index 0a74eeec..575791d6 100644 --- 
a/modules/local/thermorawfileparser/meta.yml +++ b/modules/local/thermorawfileparser/meta.yml @@ -1,41 +1,41 @@ name: thermorawfileparser description: convert raw file to mzml files keywords: - - raw - - mzML - - OpenMS + - raw + - mzML + - OpenMS tools: - - thermorawfileparser: - description: | - It takes a thermo RAW file as input and outputs indexed mzML - homepage: https://github.com/compomics/ThermoRawFileParser - documentation: https://github.com/compomics/ThermoRawFileParser + - thermorawfileparser: + description: | + It takes a thermo RAW file as input and outputs indexed mzML + homepage: https://github.com/compomics/ThermoRawFileParser + documentation: https://github.com/compomics/ThermoRawFileParser input: - - meta: - type: map - description: | - Groovy Map containing sample information - - rawfile: - type: file - description: | - thermo RAW file + - meta: + type: map + description: | + Groovy Map containing sample information + - rawfile: + type: file + description: | + thermo RAW file output: - - meta: - type: map - description: | - Groovy Map containing sample information - e.g. [ id:'MD5', enzyme:trypsin ] - - mzml: - type: file - description: indexed mzML - pattern: "*.mzML" - - log: - type: file - description: log file - pattern: "*.log" - - version: - type: file - description: File containing software version - pattern: "*.{version.txt}" + - meta: + type: map + description: | + Groovy Map containing sample information + e.g. 
[ id:'MD5', enzyme:trypsin ] + - mzml: + type: file + description: indexed mzML + pattern: "*.mzML" + - log: + type: file + description: log file + pattern: "*.log" + - version: + type: file + description: File containing software version + pattern: "*.{version.txt}" authors: - - "@daichengxin" + - "@daichengxin" diff --git a/modules/nf-core/modules/custom/dumpsoftwareversions/main.nf b/modules/nf-core/modules/custom/dumpsoftwareversions/main.nf index 934bb467..327d5100 100644 --- a/modules/nf-core/modules/custom/dumpsoftwareversions/main.nf +++ b/modules/nf-core/modules/custom/dumpsoftwareversions/main.nf @@ -15,6 +15,9 @@ process CUSTOM_DUMPSOFTWAREVERSIONS { path "software_versions_mqc.yml", emit: mqc_yml path "versions.yml" , emit: versions + when: + task.ext.when == null || task.ext.when + script: def args = task.ext.args ?: '' template 'dumpsoftwareversions.py' diff --git a/modules/nf-core/modules/custom/dumpsoftwareversions/meta.yml b/modules/nf-core/modules/custom/dumpsoftwareversions/meta.yml index 5b5b8a60..60b546a0 100644 --- a/modules/nf-core/modules/custom/dumpsoftwareversions/meta.yml +++ b/modules/nf-core/modules/custom/dumpsoftwareversions/meta.yml @@ -8,7 +8,7 @@ tools: description: Custom module used to dump software versions within the nf-core pipeline template homepage: https://github.com/nf-core/tools documentation: https://github.com/nf-core/tools - licence: ['MIT'] + licence: ["MIT"] input: - versions: type: file diff --git a/modules/nf-core/modules/multiqc/main.nf b/modules/nf-core/modules/multiqc/main.nf index 3dceb162..1264aac1 100644 --- a/modules/nf-core/modules/multiqc/main.nf +++ b/modules/nf-core/modules/multiqc/main.nf @@ -1,10 +1,10 @@ process MULTIQC { label 'process_medium' - conda (params.enable_conda ? 'bioconda::multiqc=1.11' : null) + conda (params.enable_conda ? 'bioconda::multiqc=1.12' : null) container "${ workflow.containerEngine == 'singularity' && !task.ext.singularity_pull_docker_container ? 
- 'https://depot.galaxyproject.org/singularity/multiqc:1.11--pyhdfd78af_0' : - 'quay.io/biocontainers/multiqc:1.11--pyhdfd78af_0' }" + 'https://depot.galaxyproject.org/singularity/multiqc:1.12--pyhdfd78af_0' : + 'quay.io/biocontainers/multiqc:1.12--pyhdfd78af_0' }" input: path multiqc_files @@ -15,6 +15,9 @@ process MULTIQC { path "*_plots" , optional:true, emit: plots path "versions.yml" , emit: versions + when: + task.ext.when == null || task.ext.when + script: def args = task.ext.args ?: '' """ diff --git a/modules/nf-core/modules/multiqc/meta.yml b/modules/nf-core/modules/multiqc/meta.yml index 63c75a45..6fa891ef 100644 --- a/modules/nf-core/modules/multiqc/meta.yml +++ b/modules/nf-core/modules/multiqc/meta.yml @@ -1,40 +1,40 @@ name: MultiQC description: Aggregate results from bioinformatics analyses across many samples into a single report keywords: - - QC - - bioinformatics tools - - Beautiful stand-alone HTML report + - QC + - bioinformatics tools + - Beautiful stand-alone HTML report tools: - - multiqc: - description: | - MultiQC searches a given directory for analysis logs and compiles a HTML report. - It's a general use tool, perfect for summarising the output from numerous bioinformatics tools. - homepage: https://multiqc.info/ - documentation: https://multiqc.info/docs/ - licence: ['GPL-3.0-or-later'] + - multiqc: + description: | + MultiQC searches a given directory for analysis logs and compiles a HTML report. + It's a general use tool, perfect for summarising the output from numerous bioinformatics tools. 
+ homepage: https://multiqc.info/ + documentation: https://multiqc.info/docs/ + licence: ["GPL-3.0-or-later"] input: - - multiqc_files: - type: file - description: | - List of reports / files recognised by MultiQC, for example the html and zip output of FastQC + - multiqc_files: + type: file + description: | + List of reports / files recognised by MultiQC, for example the html and zip output of FastQC output: - - report: - type: file - description: MultiQC report file - pattern: "multiqc_report.html" - - data: - type: dir - description: MultiQC data dir - pattern: "multiqc_data" - - plots: - type: file - description: Plots created by MultiQC - pattern: "*_data" - - versions: - type: file - description: File containing software versions - pattern: "versions.yml" + - report: + type: file + description: MultiQC report file + pattern: "multiqc_report.html" + - data: + type: dir + description: MultiQC data dir + pattern: "multiqc_data" + - plots: + type: file + description: Plots created by MultiQC + pattern: "*_data" + - versions: + type: file + description: File containing software versions + pattern: "versions.yml" authors: - - "@abhi18av" - - "@bunop" - - "@drpatelh" + - "@abhi18av" + - "@bunop" + - "@drpatelh" diff --git a/nextflow.config b/nextflow.config index 5d42ec6f..104bcecb 100644 --- a/nextflow.config +++ b/nextflow.config @@ -1,7 +1,7 @@ /* -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ nf-core/quantms Nextflow config file -======================================================================================== +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default config options for all compute environments ---------------------------------------------------------------------------------------- */ diff --git a/nextflow_schema.json b/nextflow_schema.json index f94567b4..18cc0583 
100644 --- a/nextflow_schema.json +++ b/nextflow_schema.json @@ -10,9 +10,7 @@ "type": "object", "fa_icon": "fas fa-terminal", "description": "Define where the pipeline should find input data and save output data.", - "required": [ - "input" - ], + "required": ["input"], "properties": { "input": { "type": "string", @@ -98,10 +96,7 @@ "description": "Choose the method to produce decoys from input target database.", "default": "reverse", "fa_icon": "fas fa-list-ol", - "enum": [ - "reverse", - "shuffle" - ] + "enum": ["reverse", "shuffle"] }, "shuffle_max_attempts": { "type": "integer", @@ -117,9 +112,7 @@ } }, "fa_icon": "fas fa-database", - "required": [ - "database" - ] + "required": ["database"] }, "spectrum_preprocessing": { "title": "Spectrum preprocessing", @@ -159,7 +152,15 @@ "description": "label method", "fa_icon": "fas fa-font", "help_text": "Isobaric Quantification method used in the experiment", - "enum": ["label free sample","itraq4plex", "itraq8plex", "tmt6plex", "tmt10plex", "tmt11plex", "tmt16plex"] + "enum": [ + "label free sample", + "itraq4plex", + "itraq8plex", + "tmt6plex", + "tmt10plex", + "tmt11plex", + "tmt16plex" + ] }, "select_activation": { "type": "string", @@ -255,11 +256,7 @@ "description": "Specify the amount of termini matching the enzyme cutting rules for a peptide to be considered. Valid values are `fully` (default), `semi`, or `none`", "default": "fully", "fa_icon": "fas fa-list-ol", - "enum": [ - "fully", - "semi", - "none" - ] + "enum": ["fully", "semi", "none"] }, "allowed_missed_cleavages": { "type": "integer", @@ -278,10 +275,7 @@ "description": "Precursor mass tolerance unit used for database search. 
Possible values are 'ppm' (default) and 'Da'.", "default": "ppm", "fa_icon": "fas fa-sliders-h", - "enum": [ - "Da", - "ppm" - ] + "enum": ["Da", "ppm"] }, "fragment_mass_tolerance": { "type": "number", @@ -296,10 +290,7 @@ "default": "Da", "fa_icon": "fas fa-list-ol", "help_text": "Caution: for Comet we are estimating the `fragment_bin_tolerance` parameter based on this automatically.", - "enum": [ - "Da", - "ppm" - ] + "enum": ["Da", "ppm"] }, "fixed_mods": { "type": "string", @@ -443,20 +434,14 @@ "description": "Do not fail if there are some unmatched peptides. Only activate as last resort, if you know that the rest of your settings are fine!", "default": false, "fa_icon": "far fa-check-square", - "enum": [ - false, - true - ] + "enum": [false, true] }, "IL_equivalent": { "type": "boolean", "description": "Should isoleucine and leucine be treated interchangeably when mapping search engine hits to the database? Default: true", "default": true, "fa_icon": "far fa-check-square", - "enum": [ - true, - false - ] + "enum": [true, false] } }, "fa_icon": "fas fa-project-diagram" @@ -472,10 +457,7 @@ "description": "How to calculate posterior probabilities for PSMs:\n\n* 'percolator' = Re-score based on PSM-feature-based SVM and transform distance\n to hyperplane for posteriors\n* 'fit_distributions' = Fit positive and negative distributions to scores\n (similar to PeptideProphet)", "fa_icon": "fas fa-list-ol", "default": "percolator", - "enum": [ - "percolator", - "fit_distributions" - ] + "enum": ["percolator", "fit_distributions"] }, "psm_pep_fdr_cutoff": { "type": "number", @@ -502,10 +484,7 @@ "description": "Calculate FDR on PSM ('psm-level-fdrs') or peptide level ('peptide-level-fdrs')?", "default": "peptide-level-fdrs", "fa_icon": "fas fa-list-ol", - "enum": [ - "peptide-level-fdrs", - "psm-level-fdrs" - ] + "enum": ["peptide-level-fdrs", "psm-level-fdrs"] }, "train_FDR": { "type": "number", @@ -551,12 +530,7 @@ "description": "How to handle outliers during 
fitting:\n\n* ignore_iqr_outliers (default): ignore outliers outside of `3*IQR` from Q1/Q3 for fitting\n* set_iqr_to_closest_valid: set IQR-based outliers to the last valid value for fitting\n* ignore_extreme_percentiles: ignore everything outside 99th and 1st percentile (also removes equal values like potential censored max values in XTandem)\n* none: do nothing", "default": "none", "fa_icon": "fas fa-list-ol", - "enum": [ - "none", - "ignore_iqr_outliers", - "set_iqr_to_closest_valid", - "ignore_extreme_percentiles" - ] + "enum": ["none", "ignore_iqr_outliers", "set_iqr_to_closest_valid", "ignore_extreme_percentiles"] } }, "fa_icon": "far fa-star-half" @@ -573,11 +547,7 @@ "default": "best", "fa_icon": "fas fa-list-ol", "help_text": "Specifies how search engine results are combined: ConsensusID offers several algorithms that can aggregate results from multiple peptide identification engines ('search engines') into consensus identifications - typically one per MS2 spectrum. This works especially well for search engines that provide more than one peptide hit per spectrum, i.e. that report not just the best hit, but also a list of runner-up candidates with corresponding scores.\n\nThe available algorithms are:\n\n* PEPMatrix: Scoring based on posterior error probabilities (PEPs) and peptide sequence similarities. This algorithm uses a substitution matrix to score the similarity of sequences not listed by all search engines. It requires PEPs as the scores for all peptide hits.\n* PEPIons: Scoring based on posterior error probabilities (PEPs) and fragment ion similarities ('shared peak count'). 
This algorithm, too, requires PEPs as scores.\n* best: For each peptide ID, this uses the best score of any search engine as the consensus score.\n* worst: For each peptide ID, this uses the worst score of any search engine as the consensus score.\n* average: For each peptide ID, this uses the average score of all search engines as the consensus score.\n* ranks: Calculates a consensus score based on the ranks of peptide IDs in the results of different search engines. The final score is in the range (0, 1], with 1 being the best score.\n\nTo make scores comparable, for best, worst and average, PEPs are used as well. Peptide IDs are only considered the same if they map to exactly the same sequence (including modifications and their localization). Also isobaric aminoacids are (for now) only considered equal with the PEPMatrix/PEPIons algorithms.", - "enum": [ - "best", - "PEPMatrix", - "PEPIons" - ] + "enum": ["best", "PEPMatrix", "PEPIons"] }, "consensusid_considered_top_hits": { "type": "integer", @@ -616,20 +586,14 @@ "type": "string", "description": "Unit of 'mz_tolerance'", "default": "ppm", - "enum": [ - "ppm", - "Da" - ], + "enum": ["ppm", "Da"], "fa_icon": "fas fa-list-ol" }, "mz_reference": { "type": "string", "description": "Source of m/z values for peptide identifications.", "default": "peptide", - "enum": [ - "peptide", - "precursor" - ], + "enum": ["peptide", "precursor"], "fa_icon": "fas fa-list-ol" }, "annotate_file_origin": { @@ -643,10 +607,7 @@ "type": "string", "description": "A map may contain multiple features with both identical (possibly modified i.e. not stripped) sequence and charge state. The feature with the 'highest intensity' is very likely the most reliable one. When switched on, the filter removes the sequence annotation from the lower intensity features, thereby resolving the multiplicity. Only the most reliable features for each (possibly modified i.e. 
not stripped) sequence maintain annotated with this peptide sequence.", "default": "off", - "enum": [ - "off", - "highest_intensity" - ], + "enum": ["off", "highest_intensity"], "fa_icon": "fas fa-list-ol" } }, @@ -682,12 +643,7 @@ "type": "string", "description": "How to aggregate scores of peptides matching to the same protein", "default": "best", - "enum": [ - "best", - "product", - "sum", - "maximum" - ], + "enum": ["best", "product", "sum", "maximum"], "fa_icon": "fas fa-list-ol" }, "min_peptides_per_protein": { @@ -706,20 +662,16 @@ "type": "string", "description": "Post-process inference output with greedy resolution of shared peptides based on the parent protein probabilities. Also adds the resolved ambiguity groups to output.", "default": "none", - "enum": [ - "none", - "remove_associations_only", - "remove_proteins_wo_evidence" - ] + "enum": ["none", "remove_associations_only", "remove_proteins_wo_evidence"] }, "keep_best_PSM_only": { - "type":"boolean", + "type": "boolean", "description": "Epifany uses the best PSM per peptide for inference. Discard the rest (true) or keepe.g. for quantification/reporting?", "default": true, "fa_icon": "fas fa-list-ol" }, "update_PSM_probabilities": { - "type":"boolean", + "type": "boolean", "description": "(Experimental:) Update PSM probabilities with their posteriors under consideration of the protein probabilities.", "default": true, "fa_icon": "fas fa-list-ol" @@ -737,10 +689,7 @@ "default": "aggregation", "fa_icon": "fas fa-list-ol", "help_text": "Infer proteins through:\n\n* 'aggregation' = aggregates all peptide scores across a protein (by calculating the maximum) (default)\n* 'bayesian' = compute a posterior probability for every protein based on a Bayesian network (i.e. using Epifany)\n* ('percolator' not yet supported)\n\n**Note:** If protein grouping is performed also depends on the `protein_quant` parameter (i.e. 
if peptides have to be unique or unique to a group only)", - "enum": [ - "aggregation", - "bayesian" - ] + "enum": ["aggregation", "bayesian"] } }, "fa_icon": "fab fa-hubspot" @@ -761,12 +710,7 @@ "type": "string", "description": "Averaging method used to compute protein abundances from peptide abundances.", "default": "median", - "enum": [ - "median", - "mean", - "weighted_mean", - "sum" - ], + "enum": ["median", "mean", "weighted_mean", "sum"], "fa_icon": "fas fa-list-ol" }, "best_charge_and_fraction": { @@ -802,21 +746,14 @@ "type": "string", "description": "Quantify proteins based on:\n\n* 'unique_peptides' = use peptides mapping to single proteins or a group of indistinguishable proteins (according to the set of experimentally identified peptides)\n* 'strictly_unique_peptides' = use peptides mapping to a unique single protein only\n* 'shared_peptides' = use shared peptides, too, but only greedily for its best group (by inference score)", "default": "unique_peptides", - "enum": [ - "unique_peptides", - "strictly_unique_peptides", - "shared_peptides" - ], + "enum": ["unique_peptides", "strictly_unique_peptides", "shared_peptides"], "fa_icon": "fas fa-list-ol" }, "quantification_method": { "type": "string", "description": "Choose between feature-based quantification based on integrated MS1 signals ('feature_intensity'; default) or spectral counting of PSMs ('spectral_counting'). **WARNING:** 'spectral_counting' is not compatible with our MSstats step yet. MSstats will therefore be disabled automatically with that choice.", "default": "feature_intensity", - "enum": [ - "feature_intensity", - "spectral_counting" - ], + "enum": ["feature_intensity", "spectral_counting"], "fa_icon": "fas fa-list-ol" }, "mass_recalibration": { @@ -828,10 +765,7 @@ "type": "string", "description": "Tries a targeted requantification in files where an ID is missing, based on aggregate properties (i.e. RT) of the features in other aligned files (e.g. 'mean' of RT). 
(**WARNING:** increased memory consumption and runtime). 'false' turns this feature off. (default: 'false')", "default": "false", - "enum": [ - "false", - "mean" - ], + "enum": ["false", "mean"], "fa_icon": "fas fa-list-ol" }, "targeted_only": { @@ -844,10 +778,7 @@ "type": "string", "description": "The order in which maps are aligned. Star = all vs. the reference with most IDs (default). TreeGuided = an alignment tree is calculated first based on similarity measures of the IDs in the maps.", "default": "star", - "enum": [ - "star", - "treeguided" - ], + "enum": ["star", "treeguided"], "fa_icon": "far fa-list-ol" }, "quantify_decoys": { @@ -874,10 +805,7 @@ "type": "string", "description": "Proteomics data acquistion method", "default": "dda", - "enum": [ - "dda", - "dia" - ], + "enum": ["dda", "dia"], "fa_icon": "far fa-list-ol" }, "matrix_spec_q": { @@ -911,9 +839,7 @@ "description": "Debug level", "default": 3, "fa_icon": "fas fa-bug", - "enum": [ - 0, 1, 2, 3, 4 - ] + "enum": [0, 1, 2, 3, 4] }, "diann_normalize": { "type": "boolean", @@ -1080,14 +1006,7 @@ "description": "Method used to save pipeline results to output directory.", "help_text": "The Nextflow `publishDir` option specifies which intermediate files should be saved to the output directory. This option tells the pipeline what method should be used to move these files. 
See [Nextflow docs](https://www.nextflow.io/docs/latest/process.html#publishdir) for details.", "fa_icon": "fas fa-copy", - "enum": [ - "symlink", - "rellink", - "link", - "copy", - "copyNoFollow", - "move" - ], + "enum": ["symlink", "rellink", "link", "copy", "copyNoFollow", "move"], "hidden": true }, "email_on_fail": { diff --git a/workflows/quantms.nf b/workflows/quantms.nf index 39e3ee73..0064f9e5 100644 --- a/workflows/quantms.nf +++ b/workflows/quantms.nf @@ -23,7 +23,7 @@ if (params.input) { ch_input = file(params.input) } else { exit 1, 'Input sample ======================================================================================== */ -ch_multiqc_config = file("$projectDir/assets/multiqc_config.yaml", checkIfExists: true) +ch_multiqc_config = file("$projectDir/assets/multiqc_config.yml", checkIfExists: true) ch_multiqc_custom_config = params.multiqc_config ? Channel.fromPath(params.multiqc_config) : Channel.empty()