#!/usr/bin/env python3
"""SalmID - rapid k-mer based Salmonella identifier from sequence data."""

import gzip
import os
import pickle
import sys

from argparse import ArgumentParser

SalmID_version = '0.1.23'


def reverse_complement(sequence):
    """Return the reverse complement of a nucleotide sequence.

    IUPAC ambiguous nucleotide codes are supported; the input must be
    upper case (the complement table has upper-case keys only)."""
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N', 'M': 'K', 'R': 'Y', 'W': 'W',
                  'S': 'S', 'Y': 'R', 'K': 'M', 'V': 'B', 'H': 'D', 'D': 'H', 'B': 'V'}
    return "".join(complement[base] for base in reversed(sequence))


def parse_args():
    """Parse the input arguments, use '-h' for help."""
    parser = ArgumentParser(description='SalmID - rapid Kmer based Salmonella identifier from sequence data')
    # inputs
    parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + SalmID_version)
    parser.add_argument(
        '-i', '--input_file', type=str, required=False, default='None', metavar='your_fastqgz',
        help='Single fastq.gz file input, include path to file if file is not in same directory ')
    parser.add_argument(
        '-e', '--extension', type=str, required=False, default='.fastq.gz', metavar='file_extension',
        help='File extension, if specified without "--input_dir", SalmID will attempt to ID all files\n' +
             ' with this extension in current directory, otherwise files in input directory')
    parser.add_argument(
        '-d', '--input_dir', type=str, required=False, default='.', metavar='directory',
        help='Directory which contains data for identification, when not specified files in current directory will be analyzed.')
    parser.add_argument(
        '-r', '--report', type=str, required=False, default='percentage', metavar='percentage, coverage or taxonomy',
        help='Report either percentage ("percentage") of clade specific kmers recovered, average kmer-coverage ("cov"), or '
             'taxonomy (taxonomic species ID, plus observed mean k-mer coverages and expected coverage).')
    parser.add_argument(
        '-m', '--mode', type=str, required=False, default='quick', metavar='quick or thorough',
        help='Quick [quick] or thorough [thorough] mode')
    # no arguments at all: print usage instead of silently analyzing '.'
    if len(sys.argv) == 1:
        parser.print_help(sys.stderr)
        sys.exit(1)
    return parser.parse_args()


def _open_fastq(file):
    """Open a fastq file for text reading, transparently handling .gz files."""
    if file.endswith(".gz"):
        return gzip.open(file, "rt")
    return open(file, "r")


def get_av_read_length(file):
    """Sample up to the first 100 reads from a fastq(.gz) file and return the
    average read length (0.0 for a file with no reads).

    Bug fix: the original always divided by a hard-coded 100, under-reporting
    the average for files with fewer than 100 reads; it also never closed the
    file handle."""
    n_reads = 0
    total_length = 0
    with _open_fastq(file) as fastq:
        for i, line in enumerate(fastq, start=1):
            if i % 4 == 2:  # the sequence is the 2nd line of each 4-line fastq record
                total_length += len(line.strip())
                n_reads += 1
            if n_reads == 100:
                break
    if n_reads == 0:
        return 0.0
    return total_length / n_reads


def createKmerDict_reads(list_of_strings, kmer):
    """Count occurrence of k-mers (and their reverse complements) in a list of
    nucleotide sequences.

    Args:
        list_of_strings(list of str): nucleotide sequences as a list of strings
        kmer(int): length of the k-mer to count

    Returns:
        dict: dictionary with upper-case kmers as keys, counts for each kmer as values

    Bug fix: the original tested membership with the raw-case k-mer but stored
    counts under the upper-cased key, so lower-case input was miscounted and
    would raise KeyError inside reverse_complement; sequences are now
    upper-cased once before counting."""
    kmer_table = {}
    for string in list_of_strings:
        sequence = string.strip('\n').upper()
        if len(sequence) >= kmer:
            for i in range(len(sequence) - kmer + 1):
                new_mer = sequence[i:i + kmer]
                new_mer_rc = reverse_complement(new_mer)
                kmer_table[new_mer] = kmer_table.get(new_mer, 0) + 1
                kmer_table[new_mer_rc] = kmer_table.get(new_mer_rc, 0) + 1
    return kmer_table


def target_read_kmerizer_multi(file, k, kmerDict_1, kmerDict_2, mode):
    """Screen the reads of a fastq(.gz) file against two k-mer sets.

    A read is assigned to a target when the k-mer taken from the centre of the
    read is present in the corresponding set.  In 'quick' mode scanning stops
    once ~800 kb of reads matching set 2 have been collected.

    Returns:
        tuple: (k-mer count dict for target 1, k-mer count dict for target 2,
                mean k-mer coverage for target 1 or None if no reads matched,
                mean k-mer coverage for target 2 or None if no reads matched,
                total number of reads seen)
    """
    mean_1 = None
    mean_2 = None
    reads_1 = []
    reads_2 = []
    total_reads = 0
    matched_bases_2 = 0  # drives the 'quick' mode early exit
    with _open_fastq(file) as fastq:
        for i, line in enumerate(fastq, start=1):
            if i % 4 == 2:
                total_reads += 1
                start = (len(line) - k) // 2
                centre_mer = line[start:start + k]
                if centre_mer in kmerDict_1:
                    reads_1.append(line)
                if centre_mer in kmerDict_2:
                    matched_bases_2 += len(line)
                    reads_2.append(line)
            if mode == 'quick' and matched_bases_2 >= 800000:
                break
    if reads_1:
        kmer_dict_1 = createKmerDict_reads(reads_1, k)
        mean_1 = sum(kmer_dict_1.values()) / len(kmer_dict_1)
    else:
        kmer_dict_1 = {}
    if reads_2:
        kmer_dict_2 = createKmerDict_reads(reads_2, k)
        mean_2 = sum(kmer_dict_2.values()) / len(kmer_dict_2)
    else:
        kmer_dict_2 = {}
    return kmer_dict_1, kmer_dict_2, mean_1, mean_2, total_reads


def mean_cov_selected_kmers(iterable, kmer_dict, clade_specific_kmers):
    '''
    Given an iterable (list, set, dictionary) returns mean coverage for the kmers in iterable
    :param iterable: set, list or dictionary containing kmers
    :param kmer_dict: dictionary with kmers as keys, kmer-frequency as value
    :param clade_specific_kmers: list, dict or set of clade specific kmers
    :return: mean frequency as float

    NOTE: the summed counts are divided by the total number of clade-specific
    k-mers (not just the observed ones), so unobserved k-mers count as zero.
    '''
    if len(iterable) == 0:
        return 0
    return sum(kmer_dict[value] for value in iterable) / len(clade_specific_kmers)


def kmer_lists(query_fastq_gz, k,
               allmers, allmers_rpoB,
               uniqmers_bongori,
               uniqmers_I,
               uniqmers_IIa,
               uniqmers_IIb,
               uniqmers_IIIa,
               uniqmers_IIIb,
               uniqmers_IV,
               uniqmers_VI,
               uniqmers_VII,
               uniqmers_VIII,
               uniqmers_bongori_rpoB,
               uniqmers_S_enterica_rpoB,
               uniqmers_Escherichia_rpoB,
               uniqmers_Listeria_ss_rpoB,
               uniqmers_Lmono_rpoB,
               mode):
    """Screen one fastq(.gz) file and return, per clade, the percentage of
    clade-specific k-mers recovered and their mean coverage.

    Returns:
        tuple: (locus_scores, coverages, total_reads).  Both lists are ordered
        Listeria ss, L. monocytogenes, Escherichia, S. bongori (rpoB),
        S. enterica (rpoB), then the ten invA clades.

    Bug fix: the original tested `target_mers_invA == 0` (a set compared to an
    int, always False); had the branch ever been taken, the function would
    have raised UnboundLocalError at the return.  An empty set is now detected
    correctly and the scores are still computed (they all evaluate to 0), so
    the return value is always defined."""
    dict_invA, dict_rpoB, mean_invA, mean_rpoB, total_reads = target_read_kmerizer_multi(
        query_fastq_gz, k, allmers, allmers_rpoB, mode)
    target_mers_invA = set(dict_invA)
    target_mers_rpoB = set(dict_rpoB)
    if not target_mers_invA:
        print('No reads found matching invA, no Salmonella in sample?')

    def percent(uniqmers, target_mers):
        # percentage of this clade's specific k-mers observed in the sample
        return (len(uniqmers & target_mers) / len(uniqmers)) * 100

    locus_scores = [
        percent(uniqmers_Listeria_ss_rpoB, target_mers_rpoB),
        percent(uniqmers_Lmono_rpoB, target_mers_rpoB),
        percent(uniqmers_Escherichia_rpoB, target_mers_rpoB),
        percent(uniqmers_bongori_rpoB, target_mers_rpoB),
        percent(uniqmers_S_enterica_rpoB, target_mers_rpoB),
        percent(uniqmers_bongori, target_mers_invA),
        percent(uniqmers_I, target_mers_invA),
        percent(uniqmers_IIa, target_mers_invA),
        percent(uniqmers_IIb, target_mers_invA),
        percent(uniqmers_IIIa, target_mers_invA),
        percent(uniqmers_IIIb, target_mers_invA),
        percent(uniqmers_IV, target_mers_invA),
        percent(uniqmers_VI, target_mers_invA),
        percent(uniqmers_VII, target_mers_invA),
        percent(uniqmers_VIII, target_mers_invA),
    ]
    coverages = [
        mean_cov_selected_kmers(uniqmers_Listeria_ss_rpoB & target_mers_rpoB, dict_rpoB, uniqmers_Listeria_ss_rpoB),
        mean_cov_selected_kmers(uniqmers_Lmono_rpoB & target_mers_rpoB, dict_rpoB, uniqmers_Lmono_rpoB),
        mean_cov_selected_kmers(uniqmers_Escherichia_rpoB & target_mers_rpoB, dict_rpoB, uniqmers_Escherichia_rpoB),
        mean_cov_selected_kmers(uniqmers_bongori_rpoB & target_mers_rpoB, dict_rpoB, uniqmers_bongori_rpoB),
        mean_cov_selected_kmers(uniqmers_S_enterica_rpoB & target_mers_rpoB, dict_rpoB, uniqmers_S_enterica_rpoB),
        mean_cov_selected_kmers(uniqmers_bongori & target_mers_invA, dict_invA, uniqmers_bongori),
        mean_cov_selected_kmers(uniqmers_I & target_mers_invA, dict_invA, uniqmers_I),
        mean_cov_selected_kmers(uniqmers_IIa & target_mers_invA, dict_invA, uniqmers_IIa),
        mean_cov_selected_kmers(uniqmers_IIb & target_mers_invA, dict_invA, uniqmers_IIb),
        mean_cov_selected_kmers(uniqmers_IIIa & target_mers_invA, dict_invA, uniqmers_IIIa),
        mean_cov_selected_kmers(uniqmers_IIIb & target_mers_invA, dict_invA, uniqmers_IIIb),
        mean_cov_selected_kmers(uniqmers_IV & target_mers_invA, dict_invA, uniqmers_IV),
        mean_cov_selected_kmers(uniqmers_VI & target_mers_invA, dict_invA, uniqmers_VI),
        mean_cov_selected_kmers(uniqmers_VII & target_mers_invA, dict_invA, uniqmers_VII),
        mean_cov_selected_kmers(uniqmers_VIII & target_mers_invA, dict_invA, uniqmers_VIII),
    ]
    return locus_scores, coverages, total_reads


def report_taxon(locus_covs, average_read_length, number_of_reads):
    """Turn the per-clade coverage list from kmer_lists() into a taxonomy call.

    Args:
        locus_covs: the 15-element coverage list (5 rpoB taxa + 10 invA clades)
        average_read_length: mean read length of the sample
        number_of_reads: total number of reads screened

    Returns:
        tuple: ((rpoB taxon, coverage), (invA taxon, coverage),
                expected genome coverage estimated from total sequenced bases
                over an assumed genome size of 3 Mb for Listeria / 5 Mb otherwise)
    """
    list_taxa = ['Listeria ss', 'Listeria monocytogenes', 'Escherichia sp.',
                 'Salmonella bongori (rpoB)', 'Salmonella enterica (rpoB)',
                 'Salmonella bongori (invA)', 'S. enterica subsp. enterica (invA)',
                 'S. enterica subsp. salamae (invA: clade a)', 'S. enterica subsp. salamae (invA: clade b)',
                 'S. enterica subsp. arizonae (invA)', 'S. enterica subsp. diarizonae (invA)',
                 'S. enterica subsp. houtenae (invA)', 'S. enterica subsp. indica (invA)',
                 'S. enterica subsp. VII (invA)', 'S. enterica subsp. salamae (invA: clade VIII)']
    if sum(locus_covs) < 1:
        return ('No rpoB matches!', 0), ('No invA matches!', 0), 0.0
    # rpoB call: indices 0-4; index 0 ('Listeria ss') is only reported when it
    # is the best hit AND the non-Listeria-ss signal is negligible
    if sum(locus_covs[0:5]) > 0:
        best_rpoB = max(range(1, 5), key=lambda x: locus_covs[x])  # best excluding Listeria ss
        all_rpoB = max(range(0, 5), key=lambda x: locus_covs[x])   # best including Listeria ss
        if locus_covs[best_rpoB] != 0 and all_rpoB == 0:
            rpoB = (list_taxa[best_rpoB], locus_covs[best_rpoB])
        elif all_rpoB == 0 and round(sum(locus_covs[1:5]), 1) < 1:
            rpoB = (list_taxa[0], locus_covs[0])
        else:
            rpoB = (list_taxa[best_rpoB], locus_covs[best_rpoB])
    else:
        rpoB = ('No rpoB matches!', 0)
    # invA call: best of indices 5 and up
    if sum(locus_covs[5:]) > 0:
        best_invA = max(range(5, len(locus_covs)), key=lambda x: locus_covs[x])
        invA = (list_taxa[best_invA], locus_covs[best_invA])
    else:
        invA = ('No invA matches!', 0)
    genome_size = 3000000 if 'Listeria' in rpoB[0] else 5000000
    return rpoB, invA, (average_read_length * number_of_reads) / genome_size


def main():
    """Command line entry point: load the clade-specific k-mer sets shipped
    next to this script and report on every requested fastq(.gz) file."""
    ex_dir = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
    args = parse_args()
    input_file = args.input_file
    if input_file != 'None':
        files = [input_file]
    else:
        extension = args.extension
        inputdir = args.input_dir
        files = [inputdir + '/' + f for f in os.listdir(inputdir) if f.endswith(extension)]
    report = args.report
    mode = args.mode
    # NOTE(review): pickle.load on bundled data files; do not point these
    # paths at untrusted input.
    with open(ex_dir + "/invA_mers_dict", "rb") as f_invA:
        sets_dict_invA = pickle.load(f_invA)
    allmers = sets_dict_invA['allmers']
    uniqmers_I = sets_dict_invA['uniqmers_I']
    uniqmers_IIa = sets_dict_invA['uniqmers_IIa']
    uniqmers_IIb = sets_dict_invA['uniqmers_IIb']
    uniqmers_IIIa = sets_dict_invA['uniqmers_IIIa']
    uniqmers_IIIb = sets_dict_invA['uniqmers_IIIb']
    uniqmers_IV = sets_dict_invA['uniqmers_IV']
    uniqmers_VI = sets_dict_invA['uniqmers_VI']
    uniqmers_VII = sets_dict_invA['uniqmers_VII']
    uniqmers_VIII = sets_dict_invA['uniqmers_VIII']
    uniqmers_bongori = sets_dict_invA['uniqmers_bongori']

    with open(ex_dir + "/rpoB_mers_dict", "rb") as f_rpoB:
        sets_dict = pickle.load(f_rpoB)
    allmers_rpoB = sets_dict['allmers']
    uniqmers_bongori_rpoB = sets_dict['uniqmers_bongori']
    uniqmers_S_enterica_rpoB = sets_dict['uniqmers_S_enterica']
    uniqmers_Escherichia_rpoB = sets_dict['uniqmers_Escherichia']
    uniqmers_Listeria_ss_rpoB = sets_dict['uniqmers_Listeria_ss']
    uniqmers_Lmono_rpoB = sets_dict['uniqmers_L_mono']

    def screen(query_file):
        # single place for the 20-argument kmer_lists call (was triplicated)
        return kmer_lists(query_file, 27,
                          allmers, allmers_rpoB,
                          uniqmers_bongori,
                          uniqmers_I,
                          uniqmers_IIa,
                          uniqmers_IIb,
                          uniqmers_IIIa,
                          uniqmers_IIIb,
                          uniqmers_IV,
                          uniqmers_VI,
                          uniqmers_VII,
                          uniqmers_VIII,
                          uniqmers_bongori_rpoB,
                          uniqmers_S_enterica_rpoB,
                          uniqmers_Escherichia_rpoB,
                          uniqmers_Listeria_ss_rpoB,
                          uniqmers_Lmono_rpoB,
                          mode)

    if report == 'taxonomy':
        print('file\trpoB\tinvA\texpected coverage')
        for f in files:
            locus_scores, coverages, reads = screen(f)
            pretty_covs = [round(cov, 1) for cov in coverages]
            # renamed from 'report' in the original, which shadowed the CLI option
            taxon = report_taxon(pretty_covs, get_av_read_length(f), reads)
            print(f.split('/')[-1] + '\t' + taxon[0][0] + '[' + str(taxon[0][1]) + ']' + '\t' + taxon[1][0] +
                  '[' + str(taxon[1][1]) + ']' +
                  '\t' + str(round(taxon[2], 1)))
    else:
        print(
            'file\tListeria sensu stricto (rpoB)\tL. monocytogenes (rpoB)\tEscherichia spp. (rpoB)\tS. bongori (rpoB)\tS. enterica' +
            '(rpoB)\tS. bongori (invA)\tsubsp. I (invA)\tsubsp. II (clade a: invA)\tsubsp. II' +
            ' (clade b: invA)\tsubsp. IIIa (invA)\tsubsp. IIIb (invA)\tsubsp.IV (invA)\tsubsp. VI (invA)\tsubsp. VII (invA)' +
            '\tsubsp. II (clade VIII : invA)')
        if report == 'percentage':
            for f in files:
                locus_scores, coverages, reads = screen(f)
                pretty_scores = [str(round(score)) for score in locus_scores]
                print(f.split('/')[-1] + '\t' + '\t'.join(pretty_scores))
        else:
            for f in files:
                locus_scores, coverages, reads = screen(f)
                pretty_covs = [str(round(cov, 1)) for cov in coverages]
                print(f.split('/')[-1] + '\t' + '\t'.join(pretty_covs))


if __name__ == '__main__':
    main()