diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 6ae79d66..1ffa9360 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -77,10 +77,12 @@ jobs:
             --shard ${{ matrix.shard }}/${{ env.SHARDS }} \
             --changed-since HEAD^ \
             --profile "+${{ matrix.profile }}" \
-            --filter pipeline
+            --filter pipeline \
+            --junitxml=test.xml
       - name: Publish Test Report
         uses: mikepenz/action-junit-report@v3
         if: always() # always run even if the previous step fails
         with:
           report_paths: test.xml
+          annotate_only: true
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 09d6d600..653b5572 100755
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## 1.5.2dev (UNRELEASED)
 
+### `Changed`
+
+- [#256](https://github.com/nf-core/demultiplex/pull/256) Updated all modules and subworkflows, and added a new bclconvert test profile.
+
 ## 1.5.1
 
 ### `Fixed`
diff --git a/modules.json b/modules.json
index 8138d097..9c303881 100644
--- a/modules.json
+++ b/modules.json
@@ -12,7 +12,7 @@
             },
             "bcl2fastq": {
                 "branch": "master",
-                "git_sha": "22261833fced91665b986c6833e91a4a5cccb0a9",
+                "git_sha": "90b63cde0f838ca4da3a88a37a5309888cae97b9",
                 "installed_by": ["bcl_demultiplex"]
             },
             "bclconvert": {
@@ -42,7 +42,7 @@
             },
             "fastqc": {
                 "branch": "master",
-                "git_sha": "46eca555142d6e597729fcb682adcc791796f514",
+                "git_sha": "b49b8992e5271ce427f3a7cdb29628fc58400eb5",
                 "installed_by": ["modules"]
             },
             "fqtk": {
@@ -62,7 +62,7 @@
             },
             "multiqc": {
                 "branch": "master",
-                "git_sha": "b80f5fd12ff7c43938f424dd76392a2704fa2396",
+                "git_sha": "19ca321db5d8bd48923262c2eca6422359633491",
                 "installed_by": ["modules"]
             },
             "seqtk/sample": {
@@ -86,7 +86,7 @@
         "nf-core": {
             "bcl_demultiplex": {
                 "branch": "master",
-                "git_sha": "22261833fced91665b986c6833e91a4a5cccb0a9",
+                "git_sha": "a6e6bec1b1da613b89a6c0a3a46713f880cf5aca",
                 "installed_by": ["subworkflows"]
             },
             "fastq_contam_seqtk_kraken": {
@@ -96,12 +96,12 @@
             },
             "utils_nextflow_pipeline": {
                 "branch": "master",
-                "git_sha": "5caf7640a9ef1d18d765d55339be751bb0969dfa",
+                "git_sha": "d20fb2a9cc3e2835e9d067d1046a63252eb17352",
                 "installed_by": ["subworkflows"]
             },
             "utils_nfcore_pipeline": {
                 "branch": "master",
-                "git_sha": "92de218a329bfc9a9033116eb5f65fd270e72ba3",
+                "git_sha": "2fdce49d30c0254f76bc0f13c55c17455c1251ab",
                 "installed_by": ["subworkflows"]
             },
             "utils_nfvalidation_plugin": {
diff --git a/modules/nf-core/bcl2fastq/tests/main.nf.test b/modules/nf-core/bcl2fastq/tests/main.nf.test
index ca03b972..d0e0b9a5 100644
--- a/modules/nf-core/bcl2fastq/tests/main.nf.test
+++ b/modules/nf-core/bcl2fastq/tests/main.nf.test
@@ -15,8 +15,8 @@ nextflow_process {
                 """
                 input[0] = [
                     [ id: 'test', lane:1 ],
-                    file(params.test_data['homo_sapiens']['illumina']['test_flowcell_samplesheet'], checkIfExists: true),
-                    file(params.test_data['homo_sapiens']['illumina']['test_flowcell'], checkIfExists: true)
+                    file(params.modules_testdata_base_path + 'genomics/homo_sapiens/illumina/bcl/flowcell_samplesheet.csv', checkIfExists: true),
+                    file(params.modules_testdata_base_path + 'genomics/homo_sapiens/illumina/bcl/flowcell.tar.gz', checkIfExists: true)
                 ]
                 """
             }
diff --git a/modules/nf-core/fastqc/main.nf b/modules/nf-core/fastqc/main.nf
index d79f1c86..d8989f48 100644
--- a/modules/nf-core/fastqc/main.nf
+++ b/modules/nf-core/fastqc/main.nf
@@ -26,7 +26,10 @@ process FASTQC {
     def rename_to = old_new_pairs*.join(' ').join(' ')
     def renamed_files = old_new_pairs.collect{ old_name, new_name -> new_name }.join(' ')
 
-    def memory_in_mb = MemoryUnit.of("${task.memory}").toUnit('MB')
+    // The total amount of RAM allocated by FastQC is equal to the number of threads defined (--threads) times the amount of RAM defined (--memory)
+    // https://github.com/s-andrews/FastQC/blob/1faeea0412093224d7f6a07f777fad60a5650795/fastqc#L211-L222
+    // Dividing task.memory by task.cpus keeps the total within the amount of RAM requested in the label
+    def memory_in_mb = MemoryUnit.of("${task.memory}").toUnit('MB') / task.cpus
     // FastQC memory value allowed range (100 - 10000)
     def fastqc_memory = memory_in_mb > 10000 ? 10000 : (memory_in_mb < 100 ? 100 : memory_in_mb)
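The comment block above documents the rationale; the arithmetic itself can be sanity-checked in plain Groovy. A minimal sketch with made-up task values (the map literal merely stands in for Nextflow's `task` handle, and memory is already expressed in MB — neither comes from the module):

```groovy
// Hypothetical request: 36 GB of RAM across 6 CPUs (values are illustrative only)
def task = [memory: 36864, cpus: 6]

// Per-thread share, mirroring the module's division by task.cpus
def memory_in_mb = task.memory / task.cpus   // 6144 MB per FastQC thread

// FastQC only accepts --memory values between 100 and 10000 MB, so clamp
def fastqc_memory = memory_in_mb > 10000 ? 10000 : (memory_in_mb < 100 ? 100 : memory_in_mb)

assert fastqc_memory == 6144
// FastQC allocates --threads * --memory in total: 6 * 6144 MB = 36864 MB, exactly the request
```

Without the division, a 6-CPU task would have asked FastQC for six times the requested memory and overrun its scheduler allocation.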
"--sample-names ${sample_names}" : '' """ @@ -35,6 +36,7 @@ process MULTIQC { --force \\ $args \\ $config \\ + $prefix \\ $extra_config \\ $logo \\ $replace \\ diff --git a/modules/nf-core/multiqc/tests/main.nf.test b/modules/nf-core/multiqc/tests/main.nf.test index 6aa27f4c..33316a7d 100644 --- a/modules/nf-core/multiqc/tests/main.nf.test +++ b/modules/nf-core/multiqc/tests/main.nf.test @@ -8,6 +8,8 @@ nextflow_process { tag "modules_nfcore" tag "multiqc" + config "./nextflow.config" + test("sarscov2 single-end [fastqc]") { when { diff --git a/modules/nf-core/multiqc/tests/main.nf.test.snap b/modules/nf-core/multiqc/tests/main.nf.test.snap index 45e95e5d..83fa080c 100644 --- a/modules/nf-core/multiqc/tests/main.nf.test.snap +++ b/modules/nf-core/multiqc/tests/main.nf.test.snap @@ -2,7 +2,7 @@ "multiqc_versions_single": { "content": [ [ - "versions.yml:md5,87904cd321df21fac35d18f0fc01bb19" + "versions.yml:md5,6eb13f3b11bbcbfc98ad3166420ff760" ] ], "meta": { @@ -17,7 +17,7 @@ "multiqc_report.html", "multiqc_data", "multiqc_plots", - "versions.yml:md5,87904cd321df21fac35d18f0fc01bb19" + "versions.yml:md5,6eb13f3b11bbcbfc98ad3166420ff760" ] ], "meta": { @@ -29,7 +29,7 @@ "multiqc_versions_config": { "content": [ [ - "versions.yml:md5,87904cd321df21fac35d18f0fc01bb19" + "versions.yml:md5,6eb13f3b11bbcbfc98ad3166420ff760" ] ], "meta": { @@ -38,4 +38,4 @@ }, "timestamp": "2024-07-10T11:26:56.709849369" } -} \ No newline at end of file +} diff --git a/modules/nf-core/multiqc/tests/nextflow.config b/modules/nf-core/multiqc/tests/nextflow.config new file mode 100644 index 00000000..c537a6a3 --- /dev/null +++ b/modules/nf-core/multiqc/tests/nextflow.config @@ -0,0 +1,5 @@ +process { + withName: 'MULTIQC' { + ext.prefix = null + } +} diff --git a/subworkflows/nf-core/bcl_demultiplex/main.nf b/subworkflows/nf-core/bcl_demultiplex/main.nf index 3816f310..129b2f1b 100644 --- a/subworkflows/nf-core/bcl_demultiplex/main.nf +++ b/subworkflows/nf-core/bcl_demultiplex/main.nf @@ -64,65 +64,67 @@ workflow BCL_DEMULTIPLEX { } // Generate meta for each fastq - ch_fastq_with_meta = ch_fastq - // reshapes the channel from a single emit of [meta, [fastq, fastq, fastq...]] - // to emits per fastq file like [meta, fastq] - .transpose() - .map { fc_meta, fastq -> - def meta = [:] - meta.id = fastq.getSimpleName().toString() - ~/_R[0-9]_001.*$/ - meta.samplename = fastq.getSimpleName().toString() - ~/_S[0-9]+.*$/ - meta.fcid = fc_meta.id - meta.lane = fc_meta.lane - // The buffered input stream allows reading directly from cloud storage - // It will not make a local copy of the file. - fastq.withInputStream { - InputStream gzipStream = new java.util.zip.GZIPInputStream(it) - Reader decoder = new InputStreamReader(gzipStream, 'ASCII') - BufferedReader buffered = new BufferedReader(decoder) - line = buffered.readLine() - buffered.close() - } - if ( line != null && line.startsWith('@') ) { - line = line.substring(1) - // expected format is like: - // xx:yy:FLOWCELLID:LANE:... (seven fields) - fields = line.split(':') - // CASAVA 1.8+ format, from https://support.illumina.com/help/BaseSpace_OLH_009008/Content/Source/Informatics/BS/FileFormat_FASTQ-files_swBS.htm - // "@::::::: :::" - sequencer_serial = fields[0] - run_nubmer = fields[1] - fcid = fields[2] - lane = fields[3] - index = fields[-1] =~ /[GATC+-]/ ? 
fields[-1] : "" - ID = [fcid, lane].join(".") - PU = [fcid, lane, index].findAll().join(".") - PL = "ILLUMINA" - SM = fastq.getSimpleName().toString() - ~/_S[0-9]+.*$/ - meta.readgroup = [ - "ID": ID, - "SM": SM, - "PL": PL, - "PU": PU - ] - meta.empty = false - } else { - println "No reads were found in FASTQ file: ${fastq}" - meta.readgroup = [:] - meta.empty = true - } - return [meta, fastq] + ch_fastq + // reshapes the channel from a single emit of [meta, [fastq, fastq, fastq...]] + // to emits per fastq file like [meta, fastq] + .transpose() + .map { fc_meta, fastq -> + def meta = [:] + meta.id = fastq.getSimpleName().toString() - ~/_R[0-9]_001.*$/ + meta.samplename = fastq.getSimpleName().toString() - ~/_S[0-9]+.*$/ + meta.fcid = fc_meta.id + meta.lane = fc_meta.lane + // The buffered input stream allows reading directly from cloud storage + // It will not make a local copy of the file. + def line = "" + fastq.withInputStream { fq -> + def gzipStream = new java.util.zip.GZIPInputStream(fq) + def decoder = new InputStreamReader(gzipStream, 'ASCII') + def buffered = new BufferedReader(decoder) + line = buffered.readLine() + buffered.close() } - // Group by the meta id so that we can find mate pairs if they exist - .groupTuple(by: [0]) - .map { meta, fastq -> - meta.single_end = fastq.size() == 1 - return [meta, fastq.flatten()] - } - .branch { - fastq : it[0].empty == false - empty_fastq : it[0].empty == true + if ( line != null && line.startsWith('@') ) { + line = line.substring(1) + // expected format is like: + // xx:yy:FLOWCELLID:LANE:... (seven fields) + fields = line.split(':') + // CASAVA 1.8+ format, from https://support.illumina.com/help/BaseSpace_OLH_009008/Content/Source/Informatics/BS/FileFormat_FASTQ-files_swBS.htm + // "@::::::: :::" + sequencer_serial = fields[0] + run_nubmer = fields[1] + fcid = fields[2] + lane = fields[3] + index = fields[-1] =~ /[GATC+-]/ ? fields[-1] : "" + ID = [fcid, lane].join(".") + PU = [fcid, lane, index].findAll().join(".") + PL = "ILLUMINA" + SM = fastq.getSimpleName().toString() - ~/_S[0-9]+.*$/ + meta.readgroup = [ + "ID": ID, + "SM": SM, + "PL": PL, + "PU": PU + ] + meta.empty = false + } else { + println "No reads were found in FASTQ file: ${fastq}" + meta.readgroup = [:] + meta.empty = true } + return [meta, fastq] + } + // Group by the meta id so that we can find mate pairs if they exist + .groupTuple(by: [0]) + .map { meta, fastq -> + meta.single_end = fastq.size() == 1 + return [meta, fastq.flatten()] + } + .branch { + fastq : it[0].empty == false + empty_fastq : it[0].empty == true + } + .set{ch_fastq_with_meta} emit: fastq = ch_fastq_with_meta.fastq diff --git a/subworkflows/nf-core/utils_nextflow_pipeline/main.nf b/subworkflows/nf-core/utils_nextflow_pipeline/main.nf index ac31f28f..28e32b20 100644 --- a/subworkflows/nf-core/utils_nextflow_pipeline/main.nf +++ b/subworkflows/nf-core/utils_nextflow_pipeline/main.nf @@ -2,10 +2,6 @@ // Subworkflow with functionality that may be useful for any Nextflow pipeline // -import org.yaml.snakeyaml.Yaml -import groovy.json.JsonOutput -import nextflow.extension.FilesEx - /* ======================================================================================== SUBWORKFLOW DEFINITION @@ -58,7 +54,7 @@ workflow UTILS_NEXTFLOW_PIPELINE { // Generate version string // def getWorkflowVersion() { - String version_string = "" + def version_string = "" as String if (workflow.manifest.version) { def prefix_v = workflow.manifest.version[0] != 'v' ? 
diff --git a/subworkflows/nf-core/utils_nextflow_pipeline/main.nf b/subworkflows/nf-core/utils_nextflow_pipeline/main.nf
index ac31f28f..28e32b20 100644
--- a/subworkflows/nf-core/utils_nextflow_pipeline/main.nf
+++ b/subworkflows/nf-core/utils_nextflow_pipeline/main.nf
@@ -2,10 +2,6 @@
 //
 // Subworkflow with functionality that may be useful for any Nextflow pipeline
 //
-import org.yaml.snakeyaml.Yaml
-import groovy.json.JsonOutput
-import nextflow.extension.FilesEx
-
 /*
 ========================================================================================
     SUBWORKFLOW DEFINITION
@@ -58,7 +54,7 @@ workflow UTILS_NEXTFLOW_PIPELINE {
 // Generate version string
 //
 def getWorkflowVersion() {
-    String version_string = ""
+    def version_string = "" as String
     if (workflow.manifest.version) {
         def prefix_v = workflow.manifest.version[0] != 'v' ? 'v' : ''
         version_string += "${prefix_v}${workflow.manifest.version}"
@@ -79,10 +75,10 @@ def dumpParametersToJSON(outdir) {
     def timestamp = new java.util.Date().format( 'yyyy-MM-dd_HH-mm-ss')
     def filename  = "params_${timestamp}.json"
     def temp_pf   = new File(workflow.launchDir.toString(), ".${filename}")
-    def jsonStr   = JsonOutput.toJson(params)
-    temp_pf.text  = JsonOutput.prettyPrint(jsonStr)
+    def jsonStr   = groovy.json.JsonOutput.toJson(params)
+    temp_pf.text  = groovy.json.JsonOutput.prettyPrint(jsonStr)
 
-    FilesEx.copyTo(temp_pf.toPath(), "${outdir}/pipeline_info/params_${timestamp}.json")
+    nextflow.extension.FilesEx.copyTo(temp_pf.toPath(), "${outdir}/pipeline_info/params_${timestamp}.json")
     temp_pf.delete()
 }
 
@@ -90,7 +86,7 @@
 // When running with -profile conda, warn if channels have not been set-up appropriately
 //
 def checkCondaChannels() {
-    Yaml parser = new Yaml()
+    def parser = new org.yaml.snakeyaml.Yaml()
     def channels = []
     try {
         def config = parser.load("conda config --show channels".execute().text)
@@ -102,14 +98,16 @@ def checkCondaChannels() {
 
     // Check that all channels are present
     // This channel list is ordered by required channel priority.
-    def required_channels_in_order = ['conda-forge', 'bioconda', 'defaults']
+    def required_channels_in_order = ['conda-forge', 'bioconda']
     def channels_missing = ((required_channels_in_order as Set) - (channels as Set)) as Boolean
 
     // Check that they are in the right order
     def channel_priority_violation = false
-    def n = required_channels_in_order.size()
-    for (int i = 0; i < n - 1; i++) {
-        channel_priority_violation |= !(channels.indexOf(required_channels_in_order[i]) < channels.indexOf(required_channels_in_order[i+1]))
+
+    required_channels_in_order.eachWithIndex { channel, index ->
+        if (index < required_channels_in_order.size() - 1) {
+            channel_priority_violation |= !(channels.indexOf(channel) < channels.indexOf(required_channels_in_order[index+1]))
+        }
     }
 
     if (channels_missing | channel_priority_violation) {
diff --git a/subworkflows/nf-core/utils_nextflow_pipeline/tests/nextflow.config b/subworkflows/nf-core/utils_nextflow_pipeline/tests/nextflow.config
index d0a926bf..a09572e5 100644
--- a/subworkflows/nf-core/utils_nextflow_pipeline/tests/nextflow.config
+++ b/subworkflows/nf-core/utils_nextflow_pipeline/tests/nextflow.config
@@ -3,7 +3,7 @@ manifest {
     author          = """nf-core"""
     homePage        = 'https://127.0.0.1'
     description     = """Dummy pipeline"""
-    nextflowVersion = '!>=23.04.0'
+    nextflowVersion = '!>=23.04.0'
     version         = '9.9.9'
     doi             = 'https://doi.org/10.5281/zenodo.5070524'
 }
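Two things changed in `checkCondaChannels` above: the `defaults` channel is no longer required, and the index loop became `eachWithIndex`. The ordering test is easy to verify in isolation; a sketch with a deliberately mis-ordered, hypothetical channel list:

```groovy
def channels = ['bioconda', 'conda-forge']          // wrong order on purpose
def required_channels_in_order = ['conda-forge', 'bioconda']

def channels_missing = ((required_channels_in_order as Set) - (channels as Set)) as Boolean

def channel_priority_violation = false
required_channels_in_order.eachWithIndex { channel, index ->
    if (index < required_channels_in_order.size() - 1) {
        channel_priority_violation |= !(channels.indexOf(channel) < channels.indexOf(required_channels_in_order[index + 1]))
    }
}

assert !channels_missing            // both required channels are present
assert channel_priority_violation   // but conda-forge must precede bioconda
```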
diff --git a/subworkflows/nf-core/utils_nfcore_pipeline/main.nf b/subworkflows/nf-core/utils_nfcore_pipeline/main.nf
index 14558c39..cbd8495b 100644
--- a/subworkflows/nf-core/utils_nfcore_pipeline/main.nf
+++ b/subworkflows/nf-core/utils_nfcore_pipeline/main.nf
@@ -2,9 +2,6 @@
 //
 // Subworkflow with utility functions specific to the nf-core pipeline template
 //
-import org.yaml.snakeyaml.Yaml
-import nextflow.extension.FilesEx
-
 /*
 ========================================================================================
     SUBWORKFLOW DEFINITION
@@ -34,7 +31,7 @@ workflow UTILS_NFCORE_PIPELINE {
 // Warn if a -profile or Nextflow config has not been provided to run the pipeline
 //
 def checkConfigProvided() {
-    valid_config = true
+    def valid_config = true as Boolean
     if (workflow.profile == 'standard' && workflow.configFiles.size() <= 1) {
         log.warn "[$workflow.manifest.name] You are attempting to run the pipeline without any custom configuration!\n\n" +
             "This will be dependent on your local compute environment but can be achieved via one or more of the following:\n" +
@@ -66,11 +63,13 @@ def checkProfileProvided(nextflow_cli_args) {
 //
 def workflowCitation() {
     def temp_doi_ref = ""
-    String[] manifest_doi = workflow.manifest.doi.tokenize(",")
+    def manifest_doi = workflow.manifest.doi.tokenize(",")
     // Using a loop to handle multiple DOIs
     // Removing `https://doi.org/` to handle pipelines using DOIs vs DOI resolvers
     // Removing ` ` since the manifest.doi is a string and not a proper list
-    for (String doi_ref: manifest_doi) temp_doi_ref += "  https://doi.org/${doi_ref.replace('https://doi.org/', '').replace(' ', '')}\n"
+    manifest_doi.each { doi_ref ->
+        temp_doi_ref += "  https://doi.org/${doi_ref.replace('https://doi.org/', '').replace(' ', '')}\n"
+    }
     return "If you use ${workflow.manifest.name} for your analysis please cite:\n\n" +
         "* The pipeline\n" +
         temp_doi_ref + "\n" +
@@ -84,7 +83,7 @@ def workflowCitation() {
 // Generate workflow version string
 //
 def getWorkflowVersion() {
-    String version_string = ""
+    def version_string = "" as String
     if (workflow.manifest.version) {
         def prefix_v = workflow.manifest.version[0] != 'v' ? 'v' : ''
         version_string += "${prefix_v}${workflow.manifest.version}"
@@ -102,8 +101,8 @@ def getWorkflowVersion() {
 // Get software versions for pipeline
 //
 def processVersionsFromYAML(yaml_file) {
-    Yaml yaml = new Yaml()
-    versions = yaml.load(yaml_file).collectEntries { k, v -> [ k.tokenize(':')[-1], v ] }
+    def yaml = new org.yaml.snakeyaml.Yaml()
+    def versions = yaml.load(yaml_file).collectEntries { k, v -> [ k.tokenize(':')[-1], v ] }
     return yaml.dumpAsMap(versions).trim()
 }
 
@@ -124,7 +123,7 @@ def workflowVersionToYAML() {
 def softwareVersionsToYAML(ch_versions) {
     return ch_versions
         .unique()
-        .map { processVersionsFromYAML(it) }
+        .map { version -> processVersionsFromYAML(version) }
         .unique()
         .mix(Channel.of(workflowVersionToYAML()))
 }
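The `Yaml` import above moved to a fully-qualified name, but the version-collation behaviour is unchanged. A sketch of what `processVersionsFromYAML` does to a single hypothetical versions.yml entry (SnakeYAML ships with Nextflow, so this runs as-is in a Nextflow context; the process name is invented):

```groovy
def yaml_file = '"NFCORE_DEMULTIPLEX:DEMULTIPLEX:FASTQC":\n    fastqc: 0.12.1\n'

def yaml     = new org.yaml.snakeyaml.Yaml()
def versions = yaml.load(yaml_file).collectEntries { k, v -> [ k.tokenize(':')[-1], v ] }

// The fully qualified process name is trimmed to its last component
assert versions == [FASTQC: [fastqc: '0.12.1']]
println yaml.dumpAsMap(versions).trim()   // re-serialised for the merged versions report
```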
@@ -134,19 +133,19 @@
 //
 def paramsSummaryMultiqc(summary_params) {
     def summary_section = ''
-    for (group in summary_params.keySet()) {
+    summary_params.keySet().each { group ->
         def group_params = summary_params.get(group) // This gets the parameters of that particular group
         if (group_params) {
             summary_section += "    <p style=\"font-size:110%\"><b>$group</b></p>\n"
             summary_section += "    <dl class=\"dl-horizontal\">\n"
-            for (param in group_params.keySet()) {
+            group_params.keySet().sort().each { param ->
                 summary_section += "        <dt>$param</dt><dd><samp>${group_params.get(param) ?: '<span style=\"color:#999999;\">N/A</span>'}</samp></dd>\n"
             }
             summary_section += "    </dl>\n"
         }
     }
-    String yaml_file_text = "id: '${workflow.manifest.name.replace('/','-')}-summary'\n"
+    def yaml_file_text = "id: '${workflow.manifest.name.replace('/','-')}-summary'\n" as String
     yaml_file_text += "description: ' - this information is collected when the pipeline is started.'\n"
     yaml_file_text += "section_name: '${workflow.manifest.name} Workflow Summary'\n"
     yaml_file_text += "section_href: 'https://github.com/${workflow.manifest.name}'\n"
@@ -161,7 +160,7 @@
 // nf-core logo
 //
 def nfCoreLogo(monochrome_logs=true) {
-    Map colors = logColours(monochrome_logs)
+    def colors = logColours(monochrome_logs) as Map
     String.format(
         """\n
         ${dashedLine(monochrome_logs)}
@@ -180,7 +179,7 @@ def nfCoreLogo(monochrome_logs=true) {
 // Return dashed line
 //
 def dashedLine(monochrome_logs=true) {
-    Map colors = logColours(monochrome_logs)
+    def colors = logColours(monochrome_logs) as Map
     return "-${colors.dim}----------------------------------------------------${colors.reset}-"
 }
 
@@ -188,7 +187,7 @@ def dashedLine(monochrome_logs=true) {
 // ANSI colours used for terminal logging
 //
 def logColours(monochrome_logs=true) {
-    Map colorcodes = [:]
+    def colorcodes = [:] as Map
 
     // Reset / Meta
     colorcodes['reset'] = monochrome_logs ? '' : "\033[0m"
@@ -287,7 +286,7 @@ def completionEmail(summary_params, email, email_on_fail, plaintext_email, outdi
     }
 
     def summary = [:]
-    for (group in summary_params.keySet()) {
+    summary_params.keySet().sort().each { group ->
         summary << summary_params[group]
     }
@@ -344,10 +343,10 @@ def completionEmail(summary_params, email, email_on_fail, plaintext_email, outdi
     def sendmail_html = sendmail_template.toString()
 
     // Send the HTML e-mail
-    Map colors = logColours(monochrome_logs)
+    def colors = logColours(monochrome_logs) as Map
     if (email_address) {
         try {
-            if (plaintext_email) { throw GroovyException('Send plaintext e-mail, not HTML') }
+            if (plaintext_email) { throw new org.codehaus.groovy.GroovyException('Send plaintext e-mail, not HTML') }
             // Try to send HTML e-mail using sendmail
             def sendmail_tf = new File(workflow.launchDir.toString(), ".sendmail_tmp.html")
             sendmail_tf.withWriter { w -> w << sendmail_html }
@@ -364,13 +363,13 @@ def completionEmail(summary_params, email, email_on_fail, plaintext_email, outdi
     // Write summary e-mail HTML to a file
     def output_hf = new File(workflow.launchDir.toString(), ".pipeline_report.html")
     output_hf.withWriter { w -> w << email_html }
-    FilesEx.copyTo(output_hf.toPath(), "${outdir}/pipeline_info/pipeline_report.html");
+    nextflow.extension.FilesEx.copyTo(output_hf.toPath(), "${outdir}/pipeline_info/pipeline_report.html");
     output_hf.delete()
 
     // Write summary e-mail TXT to a file
     def output_tf = new File(workflow.launchDir.toString(), ".pipeline_report.txt")
     output_tf.withWriter { w -> w << email_txt }
-    FilesEx.copyTo(output_tf.toPath(), "${outdir}/pipeline_info/pipeline_report.txt");
+    nextflow.extension.FilesEx.copyTo(output_tf.toPath(), "${outdir}/pipeline_info/pipeline_report.txt");
     output_tf.delete()
 }
 
@@ -378,7 +377,7 @@
 // Print pipeline summary on completion
 //
 def completionSummary(monochrome_logs=true) {
-    Map colors = logColours(monochrome_logs)
+    def colors = logColours(monochrome_logs) as Map
     if (workflow.success) {
         if (workflow.stats.ignoredCount == 0) {
             log.info "-${colors.purple}[$workflow.manifest.name]${colors.green} Pipeline completed successfully${colors.reset}-"
@@ -395,7 +394,7 @@ def completionSummary(monochrome_logs=true) {
 //
 def imNotification(summary_params, hook_url) {
     def summary = [:]
-    for (group in summary_params.keySet()) {
+    summary_params.keySet().sort().each { group ->
         summary << summary_params[group]
     }
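The same for-loop-to-sorted-each change appears in `completionEmail`, `paramsSummaryMultiqc`, and here in `imNotification`; besides being more idiomatic Groovy, sorting makes the merge order deterministic. A tiny sketch with an invented summary map:

```groovy
def summary_params = [
    'Input/output options' : [input: 'samplesheet.csv'],
    'Core Nextflow options': [revision: 'master'],
]

def summary = [:]
summary_params.keySet().sort().each { group ->
    summary << summary_params[group]     // Map.leftShift merges each group in turn
}

// Groups are visited alphabetically, so key order no longer depends on insertion order
assert summary.keySet() as List == ['revision', 'input']
```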