Mercurial > repos > kkonganti > cfsan_cronology
diff cfsan_cronology.xml @ 0:c8597e9e1a97
"planemo upload"
author | kkonganti |
---|---|
date | Mon, 27 Nov 2023 12:37:44 -0500 |
parents | |
children | c6327baca625 |
line wrap: on
line diff
<tool id="cfsan_cronology" name="cronology" version="0.1.0">
    <description>An automated workflow for Cronobacter isolate assembly, sequence typing and traceback.</description>
    <requirements>
        <requirement type="package" version="23.04">nextflow</requirement>
        <requirement type="package" version="1.0.0">micromamba</requirement>
        <requirement type="package">graphviz</requirement>
    </requirements>
    <version_command>nextflow -version</version_command>
    <!-- Stage input FASTQ files into ./cpipes-input via symlinks (normalizing
         Galaxy's "fastqsanger" extension and ":forward"/":reverse" pair names),
         run the bundled cpipes "cronology" Nextflow pipeline, then surface the
         MultiQC report and MASH-distance tree at the job working directory root
         so the <outputs> from_work_dir/discover_datasets declarations find them. -->
    <command detect_errors="exit_code"><![CDATA[
        mkdir -p cpipes-input || exit 1;
        pwd_path=\$(pwd);
        #import re
        #if (str($input_read_type_cond.input_read_type) == "single_long"):
            #for _, $unpaired in enumerate($input_read_type_cond.input):
                #set read1 = str($unpaired.name)
                #if not str($unpaired.name).endswith(('.fastq', '.fastq.gz')):
                    #set read1_ext = re.sub('fastqsanger', 'fastq', str($unpaired.ext))
                    #set read1 = str($unpaired.name) + str('.') + $read1_ext
                #end if
                ln -sf '$unpaired' './cpipes-input/$read1';
            #end for
        #elif (str($input_read_type_cond.input_read_type) == "paired"):
            #for _, $pair in enumerate($input_read_type_cond.input_pair)
                #set read_R1 = re.sub('\:forward', '_forward', str($pair.forward.name))
                #set read_R2 = re.sub('\:reverse', '_reverse', str($pair.reverse.name))
                #set read_R1_ext = re.sub('fastqsanger', 'fastq', str($pair.forward.ext))
                #set read_R2_ext = re.sub('fastqsanger', 'fastq', str($pair.reverse.ext))
                #if not str($pair.forward.name).endswith(('.fastq', '.fastq.gz')):
                    #set read_R1 = $read_R1 + str('.') + $read_R1_ext
                #end if
                #if not str($pair.reverse.name).endswith(('.fastq', '.fastq.gz')):
                    #set read_R2 = $read_R2 + str('.') + $read_R2_ext
                #end if
                ln -sf '$pair.forward' './cpipes-input/$read_R1';
                ln -sf '$pair.reverse' './cpipes-input/$read_R2';
            #end for
        #end if
        $__tool_directory__/0.1.0/cpipes
            --pipeline cronology
            --input \${pwd_path}/cpipes-input
            --output \${pwd_path}/cpipes-output
            --fq_suffix '${input_read_type_cond.fq_suffix}'
        #if (str($input_read_type_cond.input_read_type) == "single_long"):
            --fq_single_end true
        #elif (str($input_read_type_cond.input_read_type) == "paired"):
            --fq_single_end false --fq2_suffix '${input_read_type_cond.fq2_suffix}'
        #end if
            --ref_acc $refgenome
            --tuspy_n $tuspy_n
            --fq_filename_delim '${fq_filename_delim}'
            --fq_filename_delim_idx $fq_filename_delim_idx
            -profile kondagac;
        mv './cpipes-output/cronology-multiqc/multiqc_report.html' './multiqc_report.html' > /dev/null 2>&1 || exit 1;
        mv './cpipes-output/mashtree/hitsTree.dnd' './hitsTree.dnd' > /dev/null 2>&1 || exit 1;
    ]]></command>
    <inputs>
        <conditional name="input_read_type_cond">
            <param name="input_read_type" type="select" label="Select the read collection type">
                <option value="single_long" selected="true">Single-End short reads</option>
                <option value="paired">Paired-End short reads</option>
            </param>
            <when value="single_long">
                <param name="input" type="data_collection" collection_type="list" format="fastq,fastq.gz"
                    label="Dataset list of unpaired short reads or long reads" />
                <param name="fq_suffix" value=".fastq.gz" type="text" label="Suffix of the Single-End FASTQ"/>
            </when>
            <when value="paired">
                <param name="input_pair" type="data_collection" collection_type="list:paired" format="fastq,fastq.gz" label="List of Dataset pairs" />
                <param name="fq_suffix" value="_R1_001.fastq.gz" type="text" label="Suffix of the R1 FASTQ"
                    help="For any data sets downloaded from NCBI into Galaxy, change this to _forward.fastq.gz suffix."/>
                <param name="fq2_suffix" value="_R2_001.fastq.gz" type="text" label="Suffix of the R2 FASTQ"
                    help="For any data sets downloaded from NCBI into Galaxy, change this to _reverse.fastq.gz suffix."/>
            </when>
        </conditional>
        <!-- FIX(review): HTML markup in the help attribute must be entity-escaped;
             a raw "<" inside an attribute value is not well-formed XML. -->
        <param name="refgenome" optional="true" value="GCF_003516125" type="text"
            label="NCBI reference genome accession"
            help="Is the reference genome other than &lt;i&gt;Cronobacter sakazakii&lt;/i&gt;? Reference genome FASTA is used as a model for gene prediction. DO NOT ENTER THE DECIMAL PART (Ex: GCF_003516125.1)." />
        <!-- FIX(review): help text previously said "Default value of 2" while the
             declared default is 10; aligned the help with the actual default. -->
        <param name="tuspy_n" optional="true" value="10" type="integer" label="Enter the number of top unique hits to retain after initial MASH screen step"
            help="These hits will be used to build a genome distance based tree for your experiment run. Default value of 10 is suitable for almost all scenarios."/>
        <param name="fq_filename_delim" type="text" value="_" label="File name delimitor by which samples are grouped together (--fq_filename_delim)"
            help="This is the delimitor by which samples are grouped together to display in the final MultiQC report. For example, if your input data sets are mango_replicate1.fastq.gz, mango_replicate2.fastq.gz, orange_replicate1_maryland.fastq.gz, orange_replicate2_maryland.fastq.gz, then to create 2 samples mango and orange, the value for --fq_filename_delim would be _ (underscore) and the value for --fq_filename_delim_idx would be 1, since you want to group by the first word (i.e. mango or orange) after splitting the filename based on _ (underscore)."/>
        <param name="fq_filename_delim_idx" type="integer" value="1" label="File name delimitor index (--fq_filename_delim_idx)" />
    </inputs>
    <outputs>
        <data name="multiqc_report" format="html" label="cronology: MultiQC Report on ${on_string}" from_work_dir="multiqc_report.html"/>
        <data name="mashtree" format="nwk" label="cronology: Genome distance based tree on ${on_string}" from_work_dir="hitsTree.dnd"/>
        <!-- FIX(review): the regex named-group syntax (?P<name>...) must be
             entity-escaped inside XML attribute values: (?P&lt;name&gt;...). -->
        <collection name="itol_metadata" type="list" label="cronology: iTOL Metadata: ${on_string}">
            <discover_datasets pattern="(?P&lt;name&gt;.*)\.txt" ext="txt" match_relative_path="true" directory="./cpipes-output/cat_unique"/>
        </collection>
        <collection name="gene_models" type="list" label="cronology: Predicted gene models: ${on_string}">
            <discover_datasets pattern="(?P&lt;name&gt;.*)\.gff" ext="gff" match_relative_path="true" recurse="true" directory="./cpipes-output/prokka"/>
        </collection>
        <collection name="assemblies" type="list" label="cronology: Polished genome assemblies: ${on_string}">
            <discover_datasets pattern="(?P&lt;name&gt;.*)\.fa" ext="fa" match_relative_path="true" directory="./cpipes-output/polypolish"/>
        </collection>
    </outputs>
    <tests>
        <!--Test 01: long reads-->
        <test expect_num_outputs="2">
            <param name="input">
                <collection type="list">
                    <element name="FAL11127.fastq.gz" value="FAL11127.fastq.gz" />
                    <element name="FAL11341.fastq.gz" value="FAL11341.fastq.gz" />
                    <element name="FAL11342.fastq.gz" value="FAL11342.fastq.gz" />
                </collection>
            </param>
            <param name="fq_suffix" value=".fastq.gz"/>
            <output name="multiqc_report" file="multiqc_report.html" ftype="html" compare="sim_size"/>
            <!-- <output name="assembled_mags" file="FAL11127.assembly_filtered.contigs.fasta" ftype="fasta" compare="sim_size"/> -->
        </test>
    </tests>
    <!-- NOTE(review): the help text below describes Salmonella serotyping with
         MASH/sourmash/kma/salmon and appears copied from the author's
         "bettercallsal" tool; it contradicts this tool's <description>
         (Cronobacter isolate assembly, sequence typing and traceback).
         Confirm with the author and replace with cronology-specific help. -->
    <help><![CDATA[

.. class:: infomark

**Purpose**

cronology is an automated workflow to assign Salmonella serotype based on NCBI Pathogen Detection Project for Salmonella.
It uses MASH to reduce the search space followed by additional genome filtering with sourmash. It then performs genome based
alignment with kma followed by count generation using salmon. This workflow can be used to analyze shotgun metagenomics
datasets, quasi-metagenomic datasets (enriched for Salmonella) and target enriched datasets (enriched with molecular baits specific for Salmonella)
and is especially useful in a case where a sample is of multi-serovar mixture.

It is written in Nextflow and is part of the modular data analysis pipelines (CFSAN PIPELINES or CPIPES for short) at CFSAN.


----

.. class:: infomark

**Testing and Validation**

The CPIPES - cronology Nextflow pipeline has been wrapped to make it work in Galaxy. It takes in either paired or unpaired short reads list as an input
and generates a MultiQC report in the final step. The pipeline has been tested on 2x300 bp MiSeq and 2x150 bp NextSeq simulated reads and has been shown to call multiple
Salmonella serotypes with up to ~95% accuracy. The pipeline has also been tested on metagenomics data sets from Peach and Papaya outbreaks as discussed in
our publication (https://www.frontiersin.org/articles/10.3389/fmicb.2023.1200983/full). All the original testing and validation was
done on the command line on the CFSAN Raven2 HPC Cluster.


----

.. class:: infomark

**Outputs**

The main output file is a:

  ::

    - MultiQC Report: Contains a brief summary report including any serotyping and AMR result tables.
        Please note that due to MultiQC customizations, the preview (eye icon) will not
        work within Galaxy for the MultiQC report. Please download the file by clicking
        on the floppy icon and view it in your browser on your local desktop/workstation.
        You can export the tables and plots from the downloaded MultiQC report.

    ]]></help>
    <!-- NOTE(review): this citation (title and DOI) is the bettercallsal /
         Salmonella serotyping paper, not a cronology publication — verify. -->
    <citations>
        <citation type="bibtex">
            @article{cronology,
            author = {Konganti, Kranti},
            year = {2023},
            month = {August},
            title = {cronology: better calling of Salmonella serotypes from enrichment cultures using shotgun metagenomic profiling and its application in an outbreak setting},
            journal = {Frontiers in Microbiology},
            doi = {10.3389/fmicb.2023.1200983},
            url = {https://www.frontiersin.org/articles/10.3389/fmicb.2023.1200983/full}}
        </citation>
    </citations>
</tool>