changeset 0:a4b1ee4b68b1
"planemo upload"
author   | kkonganti
date     | Mon, 05 Jun 2023 16:17:23 -0400
parents  |
children | 365849f031fd
files    | cfsan_bettercallsal.xml
diffstat | 1 files changed, 232 insertions(+), 0 deletions(-)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/cfsan_bettercallsal.xml	Mon Jun 05 16:17:23 2023 -0400
@@ -0,0 +1,232 @@
+<tool id="cfsan_centriflaken" name="Centriflaken" version="0.2.0+galaxy0">
+    <description>An automated pipeline to generate a MAG of interest (E. coli or Salmonella) and perform serotyping.</description>
+    <requirements>
+        <requirement type="package" version="22.04">nextflow</requirement>
+        <requirement type="package">graphviz</requirement>
+    </requirements>
+    <version_command>nextflow -version</version_command>
+    <command detect_errors="exit_code"><![CDATA[
+        mkdir -p cpipes-input || exit 1;
+        pwd_path=\$(pwd);
+        #import re
+        #if (str($input_read_type_cond.input_read_type) == "single_long"):
+            #for _, $unpaired in enumerate($input_read_type_cond.input):
+                #set read1 = str($unpaired.name)
+                #if not str($unpaired.name).endswith(('.fastq', '.fastq.gz')):
+                    #set read1_ext = re.sub('fastqsanger', 'fastq', str($unpaired.ext))
+                    #set read1 = str($unpaired.name) + str('.') + $read1_ext
+                #end if
+                ln -sf '$unpaired' './cpipes-input/$read1';
+            #end for
+        #elif (str($input_read_type_cond.input_read_type) == "paired"):
+            #for _, $pair in enumerate($input_read_type_cond.input_pair)
+                #set read_R1 = re.sub('\:forward', '_forward', str($pair.forward.name))
+                #set read_R2 = re.sub('\:reverse', '_reverse', str($pair.reverse.name))
+                #set read_R1_ext = re.sub('fastqsanger', 'fastq', str($pair.forward.ext))
+                #set read_R2_ext = re.sub('fastqsanger', 'fastq', str($pair.reverse.ext))
+                #if not str($pair.forward.name).endswith(('.fastq', '.fastq.gz')):
+                    #set read_R1 = $read_R1 + str('.') + $read_R1_ext
+                #end if
+                #if not str($pair.reverse.name).endswith(('.fastq', '.fastq.gz')):
+                    #set read_R2 = $read_R2 + str('.') + $read_R2_ext
+                #end if
+                ln -sf '$pair.forward' './cpipes-input/$read_R1';
+                ln -sf '$pair.reverse' './cpipes-input/$read_R2';
+            #end for
+        #end if
+        $__tool_directory__/0.4.0/cpipes
+        --pipeline $input_read_type_cond.pipeline_cond.pipeline
+        #if ($input_read_type_cond.pipeline_cond.pipeline == "centriflaken"):
+            --fq_single_end true
+            --flye_genome_size '${genome_size}'
+            #if ($input_read_type_cond.pipeline_cond.long_read_platform == "nanopore_corr"):
+                --flye_nano_corr true --flye_nano_raw false
+            #elif ($input_read_type_cond.pipeline_cond.long_read_platform == "nanopore_hq"):
+                --flye_nano_hq true --flye_nano_raw false
+            #elif ($input_read_type_cond.pipeline_cond.long_read_platform == "pacbio_raw"):
+                --flye_pacbio_raw true --flye_nano_raw false
+            #elif ($input_read_type_cond.pipeline_cond.long_read_platform == "pacbio_corr"):
+                --flye_pacbio_corr true --flye_nano_raw false
+            #elif ($input_read_type_cond.pipeline_cond.long_read_platform == "pacbio_hifi"):
+                --flye_pacbio_hifi true --flye_nano_raw false
+            #end if
+        #elif ($input_read_type_cond.pipeline_cond.pipeline == "centriflaken_hy"):
+            #if (str($input_read_type_cond.input_read_type) == "single_long"):
+                --fq_single_end true
+            #elif (str($input_read_type_cond.input_read_type) == "paired"):
+                --fq_single_end false --fq2_suffix '${input_read_type_cond.fq2_suffix}'
+            #end if
+        #end if
+        --input \${pwd_path}/cpipes-input
+        --output \${pwd_path}/cpipes-output
+        --fq_suffix '${input_read_type_cond.fq_suffix}'
+        #if ($fq_filter_by_len != ""):
+            --fq_filter_by_len $fq_filter_by_len
+        #end if
+        --fq_filename_delim '${fq_filename_delim}'
+        --fq_filename_delim_idx $fq_filename_delim_idx
+        --centrifuge_extract_bug '${centrifuge_extract_bug}'
+        #if (str($input_read_type_cond.pipeline_cond.rm_dup_seqs) == "true"):
+            --seqkit_rmdup_run true
+        #end if
+        -profile kondagac;
+        mv './cpipes-output/${input_read_type_cond.pipeline_cond.pipeline}-multiqc/multiqc_report.html' './multiqc_report.html' > /dev/null 2>&1 || exit 1;
+        mv './cpipes-output/${input_read_type_cond.pipeline_cond.pipeline}-results/kraken2_extract_contigs' kraken2_extract_contigs > /dev/null 2>&1 || exit 1;
+        rm -rf ./cpipes-output > /dev/null 2>&1 || exit 1;
+        rm -rf ./work > /dev/null 2>&1 || exit 1
+    ]]></command>
+    <inputs>
+        <conditional name="input_read_type_cond">
+            <param name="input_read_type" type="select" label="Select the read collection type">
+                <option value="single_long" selected="true">Unpaired reads (i.e. Single-End short reads or Long reads)</option>
+                <option value="paired">Paired-End reads</option>
+            </param>
+            <when value="single_long">
+                <param name="input" type="data_collection" collection_type="list" format="fastq,fastq.gz"
+                    label="Dataset list of unpaired short reads or long reads" />
+                <conditional name="pipeline_cond">
+                    <param name="pipeline" type="select" label="CPIPES Workflow name"
+                        help="centriflaken: for long reads (Nanopore or PacBio). centriflaken_hy: for unpaired short reads. Default: centriflaken">
+                        <option value="centriflaken" selected="true">centriflaken</option>
+                        <option value="centriflaken_hy">centriflaken_hy</option>
+                    </param>
+                    <when value="centriflaken">
+                        <param name="long_read_platform" type="select" label="Specify the long read sequencing platform and type">
+                            <option value="nanopore_raw" selected="true">Nanopore raw reads, pre-Guppy5 (&lt;20% error)</option>
+                            <option value="nanopore_corr">Nanopore reads that were corrected with other methods (&lt;3% error)</option>
+                            <option value="nanopore_hq">Nanopore high-quality reads, Guppy5+ SUP or Q20 (5% error)</option>
+                            <option value="pacbio_raw">PacBio regular CLR reads (&lt;20% error)</option>
+                            <option value="pacbio_corr">PacBio reads that were corrected with other methods (&lt;3% error)</option>
+                            <option value="pacbio_hifi">PacBio HiFi reads (&lt;1% error)</option>
+                        </param>
+                        <param name="rm_dup_seqs" type="select" label="Remove duplicate sequences"
+                            help="THIS OPTION IS IGNORED IF THE INPUT READS ARE LONG READS.">
+                            <option value="NA" selected="true">N/A</option>
+                        </param>
+                    </when>
+                    <when value="centriflaken_hy">
+                        <param name="long_read_platform" type="select" label="Specify the long read sequencing platform and type"
+                            help="THIS OPTION IS IGNORED IF THE INPUT READS ARE SHORT READS.">
+                            <option value="NA" selected="true">N/A</option>
+                        </param>
+                        <param name="rm_dup_seqs" type="select" label="Remove duplicate sequences"
+                            help="Selecting yes will compare sequence content and remove identical sequences, i.e. only the first occurrence of each sequence record will be saved.">
+                            <option value="true">yes</option>
+                            <option value="false" selected="true">no</option>
+                        </param>
+                    </when>
+                </conditional>
+                <param name="fq_suffix" value=".fastq.gz" type="text" label="Suffix of the Unpaired FASTQ"/>
+            </when>
+            <when value="paired">
+                <param name="input_pair" type="data_collection" collection_type="list:paired" format="fastq,fastq.gz" label="List of Dataset pairs" />
+                <conditional name="pipeline_cond">
+                    <param name="pipeline" type="select" label="CPIPES Workflow name"
+                        help="The centriflaken_hy workflow is selected automatically for paired-end short reads.">
+                        <option value="centriflaken_hy" selected="true">centriflaken_hy</option>
+                    </param>
+                    <when value="centriflaken_hy">
+                        <param name="long_read_platform" type="select" label="Specify the long read sequencing platform and type"
+                            help="THIS OPTION IS IGNORED IF THE INPUT READS ARE SHORT READS.">
+                            <option value="NA" selected="true">N/A</option>
+                        </param>
+                        <param name="rm_dup_seqs" type="select" label="Remove duplicate sequences"
+                            help="Selecting yes will compare sequence content and remove identical sequences, i.e. only the first occurrence of each sequence record will be saved.">
+                            <option value="true">yes</option>
+                            <option value="false" selected="true">no</option>
+                        </param>
+                    </when>
+                </conditional>
+                <param name="fq_suffix" value="_R1_001.fastq.gz" type="text" label="Suffix of the R1 FASTQ"/>
+                <param name="fq2_suffix" value="_R2_001.fastq.gz" type="text" label="Suffix of the R2 FASTQ"/>
+            </when>
+        </conditional>
+        <param name="fq_filter_by_len" optional="true" value="" type="integer" label="Minimum read length to retain before starting the analysis"
+            help="Leave this option empty to use the defaults: 4000 bp for centriflaken (long reads) and 75 bp for centriflaken_hy (short reads)."/>
+        <param name="fq_filename_delim" type="text" value="_" label="File name delimiter by which samples are grouped together (--fq_filename_delim)"
+            help="This is the delimiter by which samples are grouped together for display in the final MultiQC report. For example, if your input data sets are mango_replicate1.fastq.gz, mango_replicate2.fastq.gz, orange_replicate1_maryland.fastq.gz and orange_replicate2_maryland.fastq.gz, then to create the two samples mango and orange, the value for --fq_filename_delim would be _ (underscore) and the value for --fq_filename_delim_idx would be 1, since you want to group by the first word (i.e. mango or orange) after splitting the filename on _ (underscore)."/>
+        <param name="fq_filename_delim_idx" type="integer" value="1" label="File name delimiter index (--fq_filename_delim_idx)" />
+        <param name="centrifuge_extract_bug" type="text" value="Escherichia coli" label="Reads belonging to this taxon are extracted and a MAG is generated to allow for serotyping"/>
+        <param name="genome_size" type="text" optional="true" value="5.5m" label="Estimated genome size" help="For example, 5m or 2.6g.">
+            <validator type="regex" message="Genome size must be a float or integer, optionally followed by a unit prefix (k, m or g)">^([0-9]*[.])?[0-9]+[kmg]?$</validator>
+        </param>
+        <!-- <param name="runtime_profile" type="select" label="Run time profile">
+            <option value="kondagac" selected="true">conda</option>
+            <option value="cingularitygac">singularity</option>
+        </param> -->
+    </inputs>
+    <outputs>
+        <data name="multiqc_report" format="html" label="${input_read_type_cond.pipeline_cond.pipeline}: MultiQC Report on ${on_string}" from_work_dir="multiqc_report.html"/>
+        <collection name="assembled_mags" type="list" label="${input_read_type_cond.pipeline_cond.pipeline}: Assembled MAGs on ${on_string}">
+            <discover_datasets pattern="(?P&lt;name&gt;.*)\.assembly_filtered_contigs\.fasta" ext="fasta" directory="kraken2_extract_contigs"/>
+        </collection>
+    </outputs>
+    <tests>
+        <!-- Test 01: long reads -->
+        <test expect_num_outputs="2">
+            <param name="input">
+                <collection type="list">
+                    <element name="FAL11127.fastq.gz" value="FAL11127.fastq.gz" />
+                    <element name="FAL11341.fastq.gz" value="FAL11341.fastq.gz" />
+                    <element name="FAL11342.fastq.gz" value="FAL11342.fastq.gz" />
+                </collection>
+            </param>
+            <param name="fq_suffix" value=".fastq.gz"/>
+            <output name="multiqc_report" file="multiqc_report.html" ftype="html" compare="sim_size"/>
+            <!-- <output name="assembled_mags" file="FAL11127.assembly_filtered.contigs.fasta" ftype="fasta" compare="sim_size"/> -->
+        </test>
+    </tests>
+    <help><![CDATA[
+
+.. class:: infomark
+
+**Purpose**
+
+The Centriflaken suite of automated data analysis pipelines is based on Nextflow DSL2 and was developed at CFSAN, FDA. These pipelines allow rapid
+and effective construction of metagenome-assembled genomes (MAGs) to enable bacterial source-tracking. They are based on methods described in our
+previous publication (Maguire *et al*, 2021; doi: https://doi.org/10.1371/journal.pone.0245172).
+
+----
+
+.. class:: infomark
+
+**Testing and Validation**
+
+The CPIPES - Centriflaken Nextflow pipeline has been wrapped to make it work in Galaxy. It takes either paired-end or unpaired short reads, or long reads, generates MAGs and performs
+in silico analysis (i.e., virulence gene finding). Additionally, AMR gene finding is included in Centriflaken and performed on the MAGs
+of interest. The final summary plots and tables can be downloaded from the MultiQC HTML report generated as part of the pipeline.
+The Centriflaken pipeline was validated with data from our previously published method (Maguire *et al*, 2021; doi: https://doi.org/10.1371/journal.pone.0245172) and was able to replicate the detection
+and classification of STECs for each sample. We tested the pipeline with Nanopore data obtained from 21 additional enriched samples from
+irrigation water and were able to perform the entire precision metagenomics analysis in less than 5 hours for all of them. All the original testing and validation was
+done on the command line on the CFSAN Raven2 HPC Cluster.
+
+----
+
+.. class:: infomark
+
+**Outputs**
+
+The main output files are:
+
+    ::
+
+        - MultiQC Report: Contains a brief summary report including any serotyping and AMR result tables.
+            Please note that due to MultiQC customizations, the preview (eye icon) will not
+            work within Galaxy for the MultiQC report. Please download the file by clicking
+            on the floppy icon and view it in your browser on your local desktop/workstation.
+        - Final assembly: Contains contigs and possibly scaffolds.
+
+    ]]></help>
+    <citations>
+        <citation type="bibtex">
+            @misc{gitlabCPIPES,
+                author = {Konganti, Kranti},
+                year = {2022},
+                title = {CPIPES - Centriflaken},
+                publisher = {GitLab},
+                journal = {GitLab repository},
+                url = {https://cfsan-git.fda.gov/Kranti.Konganti/cpipes}}
+        </citation>
+    </citations>
+</tool>
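
For orientation, the wrapper above stages the selected FASTQ files into a cpipes-input directory and then launches cpipes with the flags shown in the command block. The following is a minimal command-line sketch of that invocation, assuming the long-read centriflaken workflow, the wrapper's default parameter values and a local cpipes 0.4.0 checkout; the staging directory, read path and file name are illustrative only:

    # Stage the reads the way the wrapper does: symlinks into a staging directory
    # (FAL11127.fastq.gz is one of the test datasets named in the tool XML).
    mkdir -p cpipes-input
    ln -sf /path/to/FAL11127.fastq.gz ./cpipes-input/FAL11127.fastq.gz

    # Launch the centriflaken workflow with the wrapper's default values
    # (genome size, suffix, delimiter, index and target organism as in the XML).
    ./0.4.0/cpipes \
        --pipeline centriflaken \
        --fq_single_end true \
        --flye_genome_size '5.5m' \
        --input "$PWD"/cpipes-input \
        --output "$PWD"/cpipes-output \
        --fq_suffix '.fastq.gz' \
        --fq_filename_delim '_' \
        --fq_filename_delim_idx 1 \
        --centrifuge_extract_bug 'Escherichia coli' \
        -profile kondagac

After the run, the wrapper moves the MultiQC report and the kraken2_extract_contigs directory out of cpipes-output into the Galaxy outputs, as shown by the mv commands at the end of the command block.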