// Subworkflow to fetch sample and reference data from --fasta/--reads/--ref_fasta/--ref_reads

// Set path variables
output_directory = file(params.output_directory)
assembly_directory = file(params.assembly_directory)
log_directory = file(params.log_directory)

ref_id_file = file(params.ref_id_file)

// Set ref_mode
ref_mode = params.ref_mode

// Set file headers
assembly_header = "Isolate_ID\tRead_Type\tRead_Location\tAssembly_Path\n"

// Set paths to accessory files/scripts
assembly_log = file("${log_directory}/Assembly_Data.tsv")
user_snpdiffs_list = file("${log_directory}/Imported_SNPDiffs.txt")
findReads = file("${projectDir}/bin/fetchReads.py")
userSNPDiffs = file("${projectDir}/bin/userSNPDiffs.py")

// Set SKESA cores to 4 or fewer
skesa_cpus = (params.cores as Integer) >= 4 ? 4 : params.cores as Integer
println "params.cores: ${params.cores}"
println "skesa_cpus: ${skesa_cpus}"

// Entry workflow: run fetchData and publish its three output channels.
workflow {
    main:
    input_data = fetchData()
    query_data = input_data.query_data
    reference_data = input_data.reference_data
    // FIX: was assigned to 'snpdiffs_data' but published as 'snpdiff_data',
    // which is undefined at the publish site; use one consistent name.
    snpdiff_data = input_data.snpdiff_data

    publish:
    query_data >> 'query_data.tsv'
    reference_data >> 'reference_data.tsv'
    snpdiff_data >> 'snpdiff_data.tsv'
}

// Top-level workflow //
// Gathers query/reference isolates from FASTAs, reads, and pre-computed
// .snpdiffs files, assembling reads with SKESA where no assembly exists.
workflow fetchData{

    emit:
    query_data      // (Isolate_ID, Assembly_Path) tuples for query isolates
    reference_data  // (Isolate_ID, Assembly_Path) tuples for reference isolates
    snpdiff_data    // (Query_ID, Reference_ID, SNPDiff_Path) tuples

    main:
    // Get any excluded IDs
    ("${params.exclude}" != "" ? processExclude() : Channel.empty()).set{exclude_ids}

    // Process snpdiffs alignments
    // If assembly file cannot be found, it will be 'null'
    ("${params.snpdiffs}" != "" ? processSNPDiffs() : Channel.empty()).set{user_snpdiffs}

    // Any snpdiffs whose query (it[1]) or reference (it[10]) ID is excluded
    // is marked "Exclude" by its file path (it[0]).
    excluded_snpdiffs = user_snpdiffs.map{it -> tuple(it[1],it[0])}
        .concat(user_snpdiffs.map{it -> tuple(it[10],it[0])})
        .join(exclude_ids,by:0)
        .unique{it -> it[1]}
        .map{it -> tuple(it[1],"Exclude")}

    // Generate return channel: 3-item tuple (Query_ID, Reference_ID, SNPDiff_Path)
    snpdiff_data = user_snpdiffs
        .map{it -> tuple(it[0],it[1],it[10])}
        .join(excluded_snpdiffs,by:0,remainder:true)
        .filter{it -> it[0].toString() != "null"}
        .filter{it -> it[3].toString() != "Exclude"}
        .unique{it -> it[0]}
        .map{it -> tuple(it[1],it[2],it[0])}
        .collect().flatten().collate(3)

    // Get assembly data from snpdiffs: (ID, Assembly_Path, 'SNPDiff') for both
    // the query (it[1]/it[2]) and reference (it[10]/it[11]) sides.
    snpdiff_assemblies = user_snpdiffs.map{it-> tuple(it[1],it[2])}
        .concat(user_snpdiffs.map{it-> tuple(it[10],it[11])})
        .join(exclude_ids,by:0,remainder:true)
        .filter{it -> it[0].toString() != "null"}
        .filter{it -> it[2].toString() != "Exclude"}
        .map{it -> tuple(it[0],it[1],'SNPDiff')}
        .collect().flatten().collate(3)

    // Subset to snpdiffs entries where an assembly path was actually located.
    assembled_snpdiffs = snpdiff_assemblies
        .filter{it -> it[1].toString() != "null"}
        .unique{it->it[0]}.collect().flatten().collate(3)

    // Process any data provided as assemblies
    // Returns 2-item tuples with the following format: (Isolate_ID, Assembly_Path)
    ("${params.fasta}" != "" ? fetchQueryFasta() : Channel.empty()).set{query_fasta}
    ("${params.ref_fasta}" != "" ? fetchRefFasta() : Channel.empty()).set{ref_fasta}

    // All isolates that already have an assembly, minus exclusions.
    pre_assembled = assembled_snpdiffs
        .map{it -> tuple(it[0],it[1])}
        .concat(query_fasta)
        .concat(ref_fasta)
        .unique{it -> it[0]}
        .join(exclude_ids,by:0,remainder:true)
        .filter{it -> it[0].toString() != "null"}
        .filter{it -> it[2].toString() != "Exclude"}
        .map{it->tuple(it[0],it[1])}
        .collect().flatten().collate(2)

    // Process any data provided as reads
    // Returns 3-item tuples with the following format: (Isolate_ID, Read_Type, Read_Path)
    ("${params.reads}" != "" ? fetchQueryReads() : Channel.empty()).set{query_reads}
    ("${params.ref_reads}" != "" ? fetchRefReads() : Channel.empty()).set{ref_reads}

    all_reads = query_reads
        .concat(ref_reads)
        .unique{it->it[0]}
        .join(exclude_ids,by:0,remainder:true)
        .filter{it -> it[0].toString() != "null"}
        .filter{it -> it[3].toString() != "Exclude"}
        .map{it->tuple(it[0],it[1],it[2])}
        .collect().flatten().collate(3)

    // Figure out if any assembly is necessary:
    //   assembly: isolate has an assembly but no reads
    //   read:     isolate has reads but no assembly (must be assembled)
    //   combo:    isolate has both; prefer the existing assembly
    fasta_read_combo = all_reads.join(pre_assembled,by:0,remainder: true) |
        branch{it ->
            assembly: it[1].toString() == "null"
                return(tuple(it[0],it[2]))
            read: it[3].toString() == "null"
                return(tuple(it[0],it[1],it[2]))
            combo: true
                return(tuple(it[0],it[3]))}

    // Assemble reads if necessary
    assembled_reads = fasta_read_combo.read
        .collect().flatten().collate(3) | assembleReads

    // If runmode is 'assemble', tasks are complete
    if(params.runmode == "assemble"){
        query_data = Channel.empty()
        reference_data = Channel.empty()
    } else{

        // If FASTAs are provided via data and snpdiffs, use snpdiffs (as it's already been used)
        user_fastas = query_fasta
            .concat(ref_fasta)
            .concat(assembled_reads)
            .unique{it -> it[0]}
            .join(exclude_ids,by:0,remainder:true)
            .filter{it -> it[0].toString() != "null"}
            .filter{it -> it[2].toString() != "Exclude"}
            .map{it->tuple(it[0],it[1],'User')}
            .collect().flatten().collate(3)
            .join(assembled_snpdiffs,by:0,remainder:true)
            .filter{it -> it[3].toString() == "null"}
            .map{it->tuple(it[0],it[1])}

        // Get all assemblies
        all_assembled = assembled_snpdiffs
            .map{it -> tuple(it[0],it[1])}
            .concat(user_fastas)
            .unique{it->it[0]}.collect().flatten().collate(2)

        // Get data for isolates where a SNPDiff was provided, but no FASTA could be located
        no_assembly = snpdiff_assemblies
            .map{it -> tuple(it[0],it[1])}
            .filter{it -> it[1].toString() == "null"}
            .unique{it -> it[0]}
            .join(all_assembled,by:0,remainder:true)
            .filter{it -> it[2].toString() == "null"}
            .map{it->tuple(it[0],it[1])}
            .collect().flatten().collate(2)

        // Compile all samples
        all_samples = all_assembled
            .concat(no_assembly)
            .unique{it-> it[0]}.collect().flatten().collate(2)

        // If no reference data is provided return a blank channel
        if(!ref_mode){
            reference_data = Channel.empty()

            query_data = all_samples
                .unique{it -> it[0]}
                .collect().flatten().collate(2)

        } else{

            // Process additional reference IDs
            ("${params.ref_id}" != "" ? processRefIDs() : Channel.empty()).set{user_ref_ids}

            all_ref_ids = ref_fasta.map{it->tuple(it[0])}
                .concat(ref_reads.map{it->tuple(it[0])})
                .concat(user_ref_ids)
                .unique{it-> it[0]}.collect().flatten().collate(1)
                .map{it -> tuple(it[0],"Reference")}
                .join(exclude_ids,by:0,remainder:true)
                .filter{it -> it[0].toString() != "null"}
                .filter{it -> it[2].toString() != "Exclude"}
                .map{it -> tuple(it[0],it[1])}

            reference_data = all_samples
                .join(all_ref_ids,by:0,remainder:true)
                .filter{it -> it[2].toString() == "Reference"}
                .map{it->tuple(it[0],it[1])}
                .unique{it -> it[0]}
                .collect().flatten().collate(2)

            // Save reference data to file
            reference_data
                .collect{it -> it[0]}
                | saveRefIDs

            if(params.runmode == "screen" || params.runmode == "align"){
                // Queries are all samples that are not references
                query_data = all_samples
                    .join(all_ref_ids,by:0,remainder:true)
                    .filter{it -> it[2].toString() != "Reference"}
                    .map{it->tuple(it[0],it[1])}
                    .unique{it -> it[0]}
                    .collect().flatten().collate(2)
            } else if(params.runmode == "snp"){
                // In 'snp' mode, every sample (references included) is a query
                query_data = all_samples
                    .unique{it -> it[0]}
                    .collect().flatten().collate(2)
            }
        }
    }
}

// Fetching preassembled data //
workflow fetchQueryFasta{

    emit:
    query_fasta

    main:

    // If --fasta is set, grab assembly paths and characterize assemblies
    ("${params.fasta}" != "" ? getAssemblies(params.fasta) : Channel.empty()).set{query_fasta}
}
workflow fetchRefFasta{

    emit:
    ref_fasta

    main:

    // If --ref_fasta is set, grab assembly paths and characterize assemblies
    ("${params.ref_fasta}" != "" ? getAssemblies(params.ref_fasta) : Channel.empty()).set{ref_fasta}
}
// Resolve a FASTA location (directory, single file, or list-of-paths file)
// into (Sample_Name, FASTA_Path) tuples. Sample names are the file basename
// with params.trim_name removed.
workflow getAssemblies{

    take:
    fasta_loc

    emit:
    fasta_data

    main:
    def trim_this = "${params.trim_name}"

    if(fasta_loc == ""){
        error "No assembly data provided via --fasta/--ref_fasta"
    } else{

        fasta_dir = file(fasta_loc)

        // If --fasta is a directory...
        if(fasta_dir.isDirectory()){
            ch_fasta = Channel.fromPath(["${fasta_dir}/*.fa","${fasta_dir}/*.fasta","${fasta_dir}/*.fna"])
        }
        // If --fasta is a file...
        else if(fasta_dir.isFile()){

            // Check if it is a single fasta file...
            if(fasta_dir.getExtension() == "fa" || fasta_dir.getExtension() == "fna" || fasta_dir.getExtension() == "fasta"){
                ch_fasta = Channel.from(fasta_dir).map{it-> file(it)}
            }
            // Otherwise, assume a file with paths to FASTAs
            else{
                ch_fasta = Channel.from(fasta_dir.readLines()).filter{ file -> file =~ /\.(fa|fasta|fna)$/}.map{it-> file(it)}
            }
        } else{
            error "$fasta_dir is not a valid directory or file..."
        }
        fasta_data = ch_fasta
            .filter { file(it).exists() }
            .map { filePath ->
                def fileName = file(filePath).getBaseName()
                def sampleName = fileName.replaceAll(trim_this, "")
                tuple(sampleName, filePath)}
    }
}
// Resolve --snpdiffs (directory, single file, or list-of-paths file) and
// extract per-alignment metadata via userSNPDiffs.py as 19-column tuples.
workflow processSNPDiffs{

    emit:
    snpdiffs_data

    main:

    if("${params.snpdiffs}" == ""){
        error "No assembly data provided via --snpdiffs"
    } else{

        snpdiffs_dir = file("${params.snpdiffs}")

        // If --snpdiffs is a directory...
        if(snpdiffs_dir.isDirectory()){
            ch_snpdiffs = Channel.fromPath("${snpdiffs_dir}/*.snpdiffs")
        }
        // If --snpdiffs is a file...
        else if(snpdiffs_dir.isFile()){

            // Check if it is a single snpdiffs file...
            if(snpdiffs_dir.getExtension() == "snpdiffs"){
                ch_snpdiffs = Channel.from(snpdiffs_dir)
            }
            // Otherwise, assume a file with paths to SNPDiffs
            else{
                ch_snpdiffs = Channel.from(snpdiffs_dir.readLines()).filter{it->it.endsWith('.snpdiffs') }
            }
        } else{
            error "$snpdiffs_dir is not a valid directory or file..."
        }

        snpdiffs_data = ch_snpdiffs
            .filter { file(it).exists() }
            .collect() | getSNPDiffsData | splitCsv | collect | flatten | collate(19)

        // (1) SNPDiffs_File, (2) Query_ID, (3) Query_Assembly, (4) Query_Contig_Count, (5) Query_Assembly_Bases,
        // (6) Query_N50, (7) Query_N90, (8) Query_L50, (9) Query_L90, (10) Query_SHA256,
        // (11) Reference_ID, (12) Reference_Assembly, (13) Reference_Contig_Count, (14) Reference_Assembly_Bases,
        // (15) Reference_N50, (16) Reference_N90, (17) Reference_L50, (18) Reference_L90, (19) Reference_SHA256
    }
}
// Writes the snpdiffs path list to disk and runs userSNPDiffs.py over it,
// emitting its CSV stdout for parsing upstream.
process getSNPDiffsData{
    executor = 'local'
    cpus = 1
    maxForks = 1

    input:
    val(snpdiffs_paths)

    output:
    stdout

    script:

    user_snpdiffs_list.write(snpdiffs_paths.join('\n') + "\n")
    """
    $params.load_python_module
    python ${userSNPDiffs} --snpdiffs_file "${user_snpdiffs_list}" --trim_name "${params.trim_name}"
    """
}


// Fetching read data //
workflow fetchQueryReads{

    emit:
    query_reads

    main:

    // If --reads is set, locate read files and pair them up
    ("${params.reads}" != "" ? processReads(params.reads,params.readext,params.forward,params.reverse) : Channel.empty()).set{query_reads}
}
workflow fetchRefReads{

    emit:
    ref_reads

    main:

    // If --ref_reads is set, locate read files and pair them up
    ("${params.ref_reads}" != "" ? processReads(params.ref_reads,params.ref_readext,params.ref_forward,params.ref_reverse) : Channel.empty()).set{ref_reads}
}
// Resolve a read location (directory, or file listing directories) into
// (Isolate_ID, Read_Type, Read_Path) tuples via fetchReads.py.
workflow processReads{

    take:
    read_loc
    read_ext
    forward
    reverse

    emit:
    read_info

    main:

    if(read_loc == ""){
        error "No data provided to --reads/--ref_reads"
    } else{

        read_dir = file(read_loc)

        // If --reads is a single directory, get all reads from that directory
        if(read_dir.isDirectory()){
            read_info = fetchReads(read_dir,read_ext,forward,reverse) | splitCsv
        }

        // If --reads is a file including paths to many directories, process reads from all directories
        else if(read_dir.isFile()){
            read_info = fetchReads(Channel.from(read_dir.readLines()),read_ext,forward,reverse) | splitCsv
        }
        // Error if --reads doesn't point to a valid file or directory
        else{
            error "$read_dir is neither a valid file or directory..."
        }
    }
}
// Runs fetchReads.py against one read directory; emits CSV rows on stdout.
process fetchReads{

    executor = 'local'
    cpus = 1
    maxForks = 1

    input:
    val dir // Directory containing read files
    val read_ext // Extention for read files (e.g., fastq.gz or fq)
    val forward_suffix // Identifier for forward reads (e.g., _1.fastq or _R1_001.fq.gz)
    val reverse_suffix // Identifier for reverse reads (e.g., _2.fastq or _R2_001.fq.gz)

    output:
    stdout

    script:

    if(!file(dir).isDirectory()){
        error "$dir is not a valid directory..."
    } else{
        """
        $params.load_python_module
        python ${findReads} --read_dir ${dir} --read_filetype ${read_ext} --forward_suffix ${forward_suffix} --reverse_suffix ${reverse_suffix} --trim_name ${params.trim_name}
        """
    }
}

// Fetch reference IDs //
// Split the comma-separated --ref_id list into trimmed, de-duplicated IDs.
workflow processRefIDs{

    emit:
    ref_ids

    main:
    def trim_this = "${params.trim_name}"

    ref_ids = params.ref_id
        .tokenize(',')
        .unique()
        .collect { it ->
            "${it}".replaceAll(trim_this, "")}
        .flatten()
}

// Fetch excluded IDs //
// Split the comma-separated --exclude list into (ID, "Exclude") tuples.
workflow processExclude{

    emit:
    exclude_ids

    main:
    def trim_this = "${params.trim_name}"

    exclude_ids = Channel.from(params.exclude
        .tokenize(',')
        .collect { it -> "${it}".replaceAll(trim_this, "")})
        .map{it -> tuple(it.toString(),"Exclude")}
        .unique{it -> it[0]}
}

// Appends the collected reference IDs to ref_id_file (no task command needed).
process saveRefIDs{
    executor = 'local'
    cpus = 1
    maxForks = 1

    input:
    val(ref_ids)

    script:
    ref_id_file.append(ref_ids.join('\n') + '\n')
    """
    """
}

// Assembly //
// Assemble (Isolate_ID, Read_Type, Read_Path) entries with SKESA, log the
// results, and emit (Isolate_ID, Assembly_Path) tuples.
workflow assembleReads{

    take:
    to_assemble

    emit:
    assembled_data

    main:

    // Run SKESA on each entry
    assembly_output = skesaAssemble(to_assemble).splitCsv()

    // Print log of assemblies
    assembly_output.map {it -> it.join("\t")}.collect() | saveAssemblyLog

    // Return assembly data
    assembled_data = assembly_output.map{it->tuple(it[0],it[3])}
}
// Runs SKESA for one isolate (paired or single reads); echoes a CSV record
// of (sample, read type, read location, assembly path) on success.
process skesaAssemble{
    // label 'skesaMem'

    cpus = skesa_cpus

    input:
    tuple val(sample_name),val(read_type),val(read_location)

    output:
    stdout

    script:
    assembly_file = file("${assembly_directory}/${sample_name}.fasta")

    // Ensure folder exists and file doesn't
    if(!assembly_directory.isDirectory()){
        error "$assembly_directory is not a valid directory..."
    } else if(assembly_file.isFile()){
        error "$assembly_file already exists..."
    } else if(read_type == "Paired"){
        forward_reverse = read_location.split(";")
        """
        $params.load_skesa_module
        skesa --cores ${skesa_cpus} --use_paired_ends --fastq ${forward_reverse[0]} ${forward_reverse[1]} --contigs_out ${assembly_file}
        echo "${sample_name},${read_type},${read_location},${assembly_file}"
        """
    } else if(read_type == "Single"){
        """
        $params.load_skesa_module
        skesa --cores ${skesa_cpus} --fastq ${read_location} --contigs_out ${assembly_file}
        echo "${sample_name},${read_type},${read_location},${assembly_file}"
        """
    } else{
        error "read_type should be Paired or Single, not $read_type..."
    }
}
// Writes the tab-separated assembly log (header + one row per assembly).
process saveAssemblyLog{
    executor = 'local'
    cpus = 1
    maxForks = 1

    input:
    val(assembly_data)

    script:
    assembly_log.write(assembly_header)
    assembly_log.append(assembly_data.join('\n') + '\n')
    """
    """
}