rliterman@0: #!/usr/bin/env python3 rliterman@0: rliterman@0: import sys rliterman@0: import os rliterman@0: import pandas as pd rliterman@0: import re rliterman@0: from pybedtools import BedTool,helpers,Interval rliterman@0: import warnings rliterman@0: import numpy as np rliterman@0: import hashlib rliterman@0: from Bio import SeqIO rliterman@0: import subprocess rliterman@0: import uuid rliterman@0: import traceback rliterman@0: import shutil rliterman@0: import time rliterman@0: import argparse rliterman@0: rliterman@0: warnings.filterwarnings("ignore") rliterman@0: rliterman@0: def parseMUmmerReport(mum_report_dir,report_id): rliterman@0: report_data = [] rliterman@0: rliterman@0: with open(mum_report_dir+"/"+report_id+".report", 'r') as report_file: rliterman@0: rliterman@0: for line in report_file.readlines(): rliterman@0: split_line = re.split(r'\s+', line.strip()) rliterman@0: if len(split_line) == 3: rliterman@0: report_data.append( rliterman@0: { rliterman@0: 'Measure': split_line[0].split("(", 1)[0], rliterman@0: 'Ref_Value':split_line[1].split("(", 1)[0], rliterman@0: 'Query_Value':split_line[2].split("(", 1)[0] rliterman@0: }) rliterman@0: rliterman@0: report_data = pd.DataFrame(report_data) rliterman@0: report_data = report_data[report_data['Measure'].isin(['TotalSeqs','TotalBases','AlignedBases', rliterman@0: 'TotalSNPs','TotalGSNPs','TotalIndels','TotalGIndels', rliterman@0: 'Breakpoints','Relocations','Translocations','Inversions', rliterman@0: 'Insertions','TandemIns'])] rliterman@0: rliterman@0: report_data = report_data.drop_duplicates(subset=['Measure']) # Drop M-to-M rliterman@0: rliterman@0: report_data = report_data.astype({"Ref_Value":"int","Query_Value":"int"}) rliterman@0: rliterman@0: # Fetch assembly data rliterman@0: ref_bases = report_data[report_data['Measure'] == 'TotalBases']['Ref_Value'].iloc[0] rliterman@0: query_bases = report_data[report_data['Measure'] == 'TotalBases']['Query_Value'].iloc[0] rliterman@0: rliterman@0: # Fetch alignment data rliterman@0: ref_aligned = report_data[report_data['Measure'] == 'AlignedBases']['Ref_Value'].iloc[0] rliterman@0: query_aligned = report_data[report_data['Measure'] == 'AlignedBases']['Query_Value'].iloc[0] rliterman@0: rliterman@0: percent_ref_aligned = 100*(float(ref_aligned)/ref_bases) rliterman@0: percent_query_aligned = 100*(float(query_aligned)/query_bases) rliterman@0: rliterman@0: # Fetch SNP data rliterman@0: g_snps = report_data[report_data['Measure'] == 'TotalGSNPs']['Ref_Value'].iloc[0] rliterman@0: g_indels = report_data[report_data['Measure'] == 'TotalGIndels']['Ref_Value'].iloc[0] rliterman@0: rliterman@0: ref_breakpoints = report_data[report_data['Measure'] == 'Breakpoints']['Ref_Value'].iloc[0] rliterman@0: query_breakpoints = report_data[report_data['Measure'] == 'Breakpoints']['Query_Value'].iloc[0] rliterman@0: rliterman@0: ref_relocations = report_data[report_data['Measure'] == 'Relocations']['Ref_Value'].iloc[0] rliterman@0: query_relocations = report_data[report_data['Measure'] == 'Relocations']['Query_Value'].iloc[0] rliterman@0: rliterman@0: ref_translocations = report_data[report_data['Measure'] == 'Translocations']['Ref_Value'].iloc[0] rliterman@0: query_translocations = report_data[report_data['Measure'] == 'Translocations']['Query_Value'].iloc[0] rliterman@0: rliterman@0: ref_inversions = report_data[report_data['Measure'] == 'Inversions']['Ref_Value'].iloc[0] rliterman@0: query_inversions = report_data[report_data['Measure'] == 'Inversions']['Query_Value'].iloc[0] rliterman@0: rliterman@0: ref_insertions = report_data[report_data['Measure'] == 'Insertions']['Ref_Value'].iloc[0] rliterman@0: query_insertions = report_data[report_data['Measure'] == 'Insertions']['Query_Value'].iloc[0] rliterman@0: rliterman@0: ref_tandem = report_data[report_data['Measure'] == 'TandemIns']['Ref_Value'].iloc[0] rliterman@0: query_tandem = report_data[report_data['Measure'] == 'TandemIns']['Query_Value'].iloc[0] rliterman@0: rliterman@0: return [ref_bases,percent_ref_aligned, rliterman@0: query_bases,percent_query_aligned, rliterman@0: g_snps,g_indels, rliterman@0: ref_breakpoints,query_breakpoints, rliterman@0: ref_relocations,query_relocations, rliterman@0: ref_translocations,query_translocations, rliterman@0: ref_inversions,query_inversions, rliterman@0: ref_insertions,query_insertions, rliterman@0: ref_tandem,query_tandem] rliterman@0: rliterman@0: def parseMUmmerCoords(mum_coords_dir,report_id,reference_chr_bed,query_chr_bed): rliterman@0: rliterman@0: coords_file = pd.read_csv(mum_coords_dir+"/"+report_id+".1coords",sep="\t",index_col=False, rliterman@0: names=['Ref_Start','Ref_End','Query_Start','Query_End', rliterman@0: 'Ref_Aligned','Query_Aligned','Perc_Iden', rliterman@0: 'Ref_Length','Query_Length','Ref_Cov', rliterman@0: 'Query_Cov','Ref_Contig','Query_Contig'], rliterman@0: dtype={ rliterman@0: 'Ref_Start': int, rliterman@0: 'Ref_End': int, rliterman@0: 'Query_Start': int, rliterman@0: 'Query_End': int, rliterman@0: 'Ref_Aligned': int, rliterman@0: 'Query_Aligned': int, rliterman@0: 'Perc_Iden': float, rliterman@0: 'Ref_Length': int, rliterman@0: 'Query_Length': int, rliterman@0: 'Ref_Cov': float, rliterman@0: 'Query_Cov': float, rliterman@0: 'Ref_Contig': str, rliterman@0: 'Query_Contig': str rliterman@0: }) rliterman@0: rliterman@0: if coords_file.shape[0] > 0: rliterman@0: rliterman@0: coords_file['Ref_Start'] = (coords_file['Ref_Start'] - 1) rliterman@0: coords_file = coords_file.apply(adjust_query_row, axis=1) rliterman@0: rliterman@0: merged_ref_bed = BedTool.from_dataframe(coords_file[['Ref_Contig','Ref_Start','Ref_End']]).sort().merge() rliterman@0: merged_query_bed = BedTool.from_dataframe(coords_file[['Query_Contig','Query_Start','Query_End']]).sort().merge() rliterman@0: rliterman@0: covered_ref_regions = reference_chr_bed.intersect(merged_ref_bed) rliterman@0: uncovered_ref_regions = reference_chr_bed.subtract(merged_ref_bed) rliterman@0: rliterman@0: covered_query_regions = query_chr_bed.intersect(merged_query_bed) rliterman@0: uncovered_query_regions = query_chr_bed.subtract(merged_query_bed) rliterman@0: else: rliterman@0: merged_ref_bed = BedTool([]) rliterman@0: merged_query_bed = BedTool([]) rliterman@0: rliterman@0: covered_ref_regions = BedTool([]) rliterman@0: uncovered_ref_regions = BedTool([]) rliterman@0: rliterman@0: covered_query_regions = BedTool([]) rliterman@0: uncovered_query_regions = BedTool([]) rliterman@0: rliterman@0: ref_genome_size = calculate_total_length(reference_chr_bed) rliterman@0: ref_covered_size = calculate_total_length(covered_ref_regions) rliterman@0: ref_uncovered_size = calculate_total_length(uncovered_ref_regions) rliterman@0: rliterman@0: query_genome_size = calculate_total_length(query_chr_bed) rliterman@0: query_covered_size = calculate_total_length(covered_query_regions) rliterman@0: query_uncovered_size = calculate_total_length(uncovered_query_regions) rliterman@0: rliterman@0: assert ref_covered_size + ref_uncovered_size == ref_genome_size rliterman@0: assert query_covered_size + query_uncovered_size == query_genome_size rliterman@0: rliterman@0: # Create return_df rliterman@0: return_columns = ['Ref_Contig','Ref_Start','Ref_End','Ref_Length','Ref_Aligned','Query_Contig','Query_Start','Query_End','Query_Length','Query_Aligned','Perc_Iden'] rliterman@0: rliterman@0: return_df = pd.DataFrame(columns=return_columns) rliterman@0: rliterman@0: if coords_file.shape[0] > 0: rliterman@0: return_df = pd.concat([return_df,coords_file[return_columns]],ignore_index=True) rliterman@0: rliterman@0: if uncovered_ref_regions.count() > 0: rliterman@0: return_uncovered_df = uncovered_ref_regions.to_dataframe().sort_values(['chrom','start']).rename(columns={'chrom':'Ref_Contig','start':'Ref_Start','end':'Ref_End'}).assign(Ref_Aligned = ".",Query_Aligned = ".",Query_Contig=".",Query_Start=".",Query_End=".",Ref_Length = ".",Query_Length=".",Perc_Iden = ".") rliterman@0: return_df = pd.concat([return_df,return_uncovered_df[return_columns]],ignore_index=True) rliterman@0: rliterman@0: if uncovered_query_regions.count() > 0: rliterman@0: return_uncovered_df = uncovered_query_regions.to_dataframe().sort_values(['chrom','start']).rename(columns={'chrom':'Query_Contig','start':'Query_Start','end':'Query_End'}).assign(Ref_Aligned = ".",Query_Aligned = ".",Ref_Contig=".",Ref_Start=".",Ref_End=".",Ref_Length = ".",Query_Length=".",Perc_Iden = ".") rliterman@0: return_df = pd.concat([return_df,return_uncovered_df[return_columns]],ignore_index=True) rliterman@0: rliterman@0: return return_df rliterman@0: rliterman@0: def adjust_query_row(row): rliterman@0: if set(['Query_Start', 'Query_End', 'Query_Contig']).issubset(row.index): rliterman@0: if row['Query_Start'] > row['Query_End']: rliterman@0: row['Query_Start'], row['Query_End'] = row['Query_End'] - 1, row['Query_Start'] rliterman@0: else: rliterman@0: row['Query_Start'] = row['Query_Start'] - 1 rliterman@0: return row rliterman@0: rliterman@0: def parseMUmmerSNPs(mum_snps_dir,report_id,coords_file): rliterman@0: rliterman@0: return_columns = ['Ref_Contig','Start_Ref','Ref_Pos', rliterman@0: 'Query_Contig','Start_Query','Query_Pos', rliterman@0: 'Ref_Loc','Query_Loc', rliterman@0: 'Ref_Start','Ref_End', rliterman@0: 'Query_Start','Query_End', rliterman@0: 'Ref_Base','Query_Base', rliterman@0: 'Dist_to_Ref_End','Dist_to_Query_End', rliterman@0: 'Ref_Aligned','Query_Aligned', rliterman@0: 'Query_Direction','Perc_Iden','Cat'] rliterman@0: rliterman@0: snp_file = pd.read_csv(mum_snps_dir+"/"+report_id+".snps",sep="\t",index_col=False, rliterman@0: names=['Ref_Pos','Ref_Base','Query_Base','Query_Pos', rliterman@0: 'SNP_Buffer','Dist_to_End','Ref_Length','Query_Length', rliterman@0: 'Ref_Direction','Query_Direction','Ref_Contig','Query_Contig']) rliterman@0: rliterman@0: int_columns = ['Ref_Pos', 'Query_Pos', 'Dist_to_End','Ref_Length', 'Query_Length'] rliterman@0: contig_columns = ['Ref_Contig','Query_Contig'] rliterman@0: rliterman@0: if snp_file.shape[0] == 0: rliterman@0: return pd.DataFrame(columns=return_columns) rliterman@0: else: rliterman@0: for col in int_columns: rliterman@0: snp_file.loc[:, col] = snp_file.loc[:, col].astype(float).astype(int) rliterman@0: for col in contig_columns: rliterman@0: snp_file.loc[:, col] = snp_file.loc[:, col].astype(str) rliterman@0: coords_file.loc[:, col] = coords_file.loc[:, col].astype(str) rliterman@0: rliterman@0: snp_file['Start_Ref'] = snp_file['Ref_Pos'] - 1 rliterman@0: snp_file['Start_Query'] = snp_file['Query_Pos'] - 1 rliterman@0: rliterman@0: snp_file['Dist_to_Ref_End'] = [min([x,y]) for x,y in zip(snp_file['Ref_Pos'], snp_file['Ref_Length'] - snp_file['Ref_Pos'])] rliterman@0: snp_file['Dist_to_Query_End'] = [min([x,y]) for x,y in zip(snp_file['Query_Pos'],snp_file['Query_Length'] - snp_file['Query_Pos'])] rliterman@0: rliterman@0: snp_file['Ref_Loc'] = ["/".join([str(x[0]),str(x[1])]) for x in list(zip(snp_file.Ref_Contig, snp_file.Ref_Pos))] rliterman@0: snp_file['Query_Loc'] = ["/".join([str(x[0]),str(x[1])]) for x in list(zip(snp_file.Query_Contig, snp_file.Query_Pos))] rliterman@0: rliterman@0: total_snp_count = snp_file.shape[0] rliterman@0: rliterman@0: valid_bases = ['a', 'A', 'c', 'C', 'g', 'G', 't', 'T',"."] rliterman@0: rliterman@0: invalid_file = snp_file[(~snp_file['Ref_Base'].isin(valid_bases)) | (~snp_file['Query_Base'].isin(valid_bases))] rliterman@0: snp_file = snp_file[(snp_file['Ref_Base'].isin(valid_bases)) & (snp_file['Query_Base'].isin(valid_bases))] rliterman@0: indel_file = snp_file[(snp_file['Query_Base'] == ".") | (snp_file['Ref_Base'] == ".")] rliterman@0: snp_file = snp_file[~((snp_file['Query_Base'] == ".") | (snp_file['Ref_Base'] == "."))] rliterman@0: rliterman@0: if total_snp_count >= 10000: rliterman@0: return (snp_file.shape[0],indel_file.shape[0],invalid_file.shape[0]) rliterman@0: rliterman@0: else: rliterman@0: if snp_file.shape[0] == 0: rliterman@0: snp_coords = pd.DataFrame(columns=return_columns) rliterman@0: else: rliterman@0: snp_coords = makeSNPCoords(snp_file,coords_file,snp_file.shape[0]) rliterman@0: snp_coords['Cat'] = "SNP" rliterman@0: rliterman@0: if indel_file.shape[0] == 0: rliterman@0: indel_coords = pd.DataFrame(columns=return_columns) rliterman@0: else: rliterman@0: indel_coords = makeSNPCoords(indel_file,coords_file,indel_file.shape[0]) rliterman@0: indel_coords['Cat'] = "Indel" rliterman@0: rliterman@0: if invalid_file.shape[0] == 0: rliterman@0: invalid_coords = pd.DataFrame(columns=return_columns) rliterman@0: else: rliterman@0: invalid_coords = makeSNPCoords(invalid_file,coords_file,invalid_file.shape[0]) rliterman@0: invalid_coords['Cat'] = "Invalid" rliterman@0: rliterman@0: return_df = pd.concat([snp_coords,indel_coords,invalid_coords],ignore_index=True)[return_columns] rliterman@0: rliterman@0: assert return_df.shape[0] == total_snp_count rliterman@0: return return_df rliterman@0: rliterman@0: def makeSNPCoords(snp_df, coords_df, row_count): rliterman@0: snp_coords = pd.merge(snp_df, coords_df, how='left').dropna() rliterman@0: rliterman@0: condition = ((snp_coords['Ref_Start'] <= snp_coords['Ref_Pos']) & rliterman@0: (snp_coords['Ref_Pos'] <= snp_coords['Ref_End']) & rliterman@0: (snp_coords['Query_Start'] <= snp_coords['Query_Pos']) & rliterman@0: (snp_coords['Query_Pos'] <= snp_coords['Query_End'])) rliterman@0: snp_coords = snp_coords[condition] rliterman@0: rliterman@0: if row_count != snp_coords.shape[0]: rliterman@0: snp_coords.sort_values(by=['Ref_Aligned', 'Perc_Iden'], ascending=False, inplace=True) rliterman@0: snp_coords.drop_duplicates(subset=['Ref_Loc','Query_Loc'], keep='first', inplace=True) rliterman@0: rliterman@0: return snp_coords rliterman@0: rliterman@0: def fasta_info(file_path): rliterman@0: records = list(SeqIO.parse(file_path, 'fasta')) rliterman@0: contig_count = int(len(records)) rliterman@0: lengths = sorted([len(record) for record in records], reverse=True) rliterman@0: assembly_bases = sum(lengths) rliterman@0: rliterman@0: with open(file_path, 'rb') as file: rliterman@0: sha256 = hashlib.sha256(file.read()).hexdigest() rliterman@0: rliterman@0: cumulative_length = 0 rliterman@0: n50 = None rliterman@0: n90 = None rliterman@0: l50 = None rliterman@0: l90 = None rliterman@0: rliterman@0: for i, length in enumerate(lengths, start=1): rliterman@0: cumulative_length += length rliterman@0: if cumulative_length >= assembly_bases * 0.5 and n50 is None: rliterman@0: n50 = length rliterman@0: l50 = i rliterman@0: if cumulative_length >= assembly_bases * 0.9 and n90 is None: rliterman@0: n90 = length rliterman@0: l90 = i rliterman@0: if n50 is not None and n90 is not None: rliterman@0: break rliterman@0: rliterman@0: return [file_path,contig_count,assembly_bases,n50,n90,l50,l90,sha256] rliterman@0: rliterman@0: def fasta_to_bedtool(fasta_file): rliterman@0: intervals = [] rliterman@0: for record in SeqIO.parse(fasta_file, "fasta"): rliterman@0: chrom_name = record.id rliterman@0: chrom_length = len(record.seq) rliterman@0: interval = Interval(chrom_name, 0, chrom_length) rliterman@0: intervals.append(interval) rliterman@0: bedtool = BedTool(intervals).sort() rliterman@0: return bedtool rliterman@0: rliterman@0: def calculate_total_length(bedtool): rliterman@0: return sum(len(interval) for interval in bedtool) rliterman@0: rliterman@0: def get_kmers(command, log_file, retries=10, delay=5, timeout=30): rliterman@0: kmer_set = set() rliterman@0: rliterman@0: for attempt in range(retries): rliterman@0: try: rliterman@0: process = subprocess.run(command, capture_output=True, text=True, timeout=timeout) rliterman@0: kmer_set.update(process.stdout.strip().split('\n')) rliterman@0: rliterman@0: if len(kmer_set) > 1: rliterman@0: return kmer_set rliterman@0: rliterman@0: except subprocess.TimeoutExpired: rliterman@0: with open(log_file, "a") as log: rliterman@0: log.write(f"Kmer command timed out after {timeout} seconds\n") rliterman@0: time.sleep(delay) rliterman@0: rliterman@0: except subprocess.CalledProcessError as e: rliterman@0: with open(log_file, "a") as log: rliterman@0: log.write(f"Kmer command errored out: {e}\n") rliterman@0: time.sleep(delay) rliterman@0: rliterman@0: # Wait before retrying if this is not the last attempt rliterman@0: if attempt < retries - 1: rliterman@0: time.sleep(delay) rliterman@0: else: rliterman@0: raise RuntimeError(f"Command failed after {retries} attempts: {command}") rliterman@0: rliterman@0: return None rliterman@0: rliterman@0: def compare_kmers(query_file,reference_file,log_file): rliterman@0: rliterman@0: ref_command = ["kmercountexact.sh", f"in={reference_file}", "threads=1", "fastadump=f", "out=stdout", "|", "cut", "-f1"] rliterman@0: query_command = ["kmercountexact.sh", f"in={query_file}", "threads=1", "fastadump=f", "out=stdout", "|", "cut", "-f1"] rliterman@0: rliterman@0: ref_kmers = get_kmers(ref_command,log_file) rliterman@0: with open(log_file, "a") as log: rliterman@0: log.write(f"Fetched reference kmers...\n") rliterman@0: rliterman@0: query_kmers = get_kmers(query_command,log_file) rliterman@0: with open(log_file, "a") as log: rliterman@0: log.write(f"Fetched query kmers...\n") rliterman@0: rliterman@0: intersection = len(ref_kmers.intersection(query_kmers)) rliterman@0: similarity = 100*(intersection/(len(ref_kmers) + len(query_kmers) - intersection)) rliterman@0: unique_ref = len(ref_kmers.difference(query_kmers)) rliterman@0: unique_query = len(query_kmers.difference(ref_kmers)) rliterman@0: rliterman@0: return [len(ref_kmers),len(query_kmers), rliterman@0: unique_ref,unique_query, rliterman@0: intersection,similarity] rliterman@0: rliterman@0: #### 01: Read in arguments #### rliterman@0: run_failed = False rliterman@0: rliterman@0: parser = argparse.ArgumentParser(description='CSP2 MUMmer Compilation') rliterman@0: parser.add_argument('--query', type=str, help='Query') rliterman@0: parser.add_argument('--query_fasta', type=str, help='Query Fasta') rliterman@0: parser.add_argument('--reference', type=str, help='Reference') rliterman@0: parser.add_argument('--reference_fasta', type=str, help='Reference Fasta') rliterman@0: parser.add_argument('--mummer_dir', type=str, help='MUMmer Directory') rliterman@0: parser.add_argument('--snpdiffs_dir', type=str, help='snpdiffs Directory') rliterman@0: parser.add_argument('--temp_dir', type=str, default='', help='Temporary Directory') rliterman@0: parser.add_argument('--log_file', type=str, help='Log File') rliterman@0: args = parser.parse_args() rliterman@0: rliterman@0: query = args.query rliterman@0: query_fasta = args.query_fasta rliterman@0: reference = args.reference rliterman@0: reference_fasta = args.reference_fasta rliterman@0: rliterman@0: mummer_dir = os.path.normpath(os.path.abspath(args.mummer_dir)) rliterman@0: snpdiffs_dir = os.path.normpath(os.path.abspath(args.snpdiffs_dir)) rliterman@0: log_file = os.path.normpath(os.path.abspath(args.log_file)) rliterman@0: rliterman@0: with open(log_file, "w") as log: rliterman@0: log.write(f"CSP2 MUMmer Compilation Log: {query}__vs__{reference}\n") rliterman@0: log.write(f"\t- Query Fasta: {query_fasta}\n") rliterman@0: log.write(f"\t- Reference Fasta: {reference_fasta}\n") rliterman@0: log.write(f"\t - MUMmer Directory: {mummer_dir}\n") rliterman@0: log.write(f"\t - snpdiffs Directory: {snpdiffs_dir}\n") rliterman@0: rliterman@0: # Set TMP rliterman@0: global temp_dir rliterman@0: if args.temp_dir != "": rliterman@0: random_temp_id = str(uuid.uuid4()) rliterman@0: temp_dir = f"{os.path.normpath(os.path.abspath(args.temp_dir))}/{random_temp_id}" rliterman@0: try: rliterman@0: os.mkdir(temp_dir) rliterman@0: helpers.set_tempdir(temp_dir) rliterman@0: with open(log_file, "a") as log: rliterman@0: log.write(f"\t - Temporary Directory: {temp_dir}\n") rliterman@0: rliterman@0: except OSError as e: rliterman@0: run_failed = True rliterman@0: print(f"Error: Failed to create directory '{temp_dir}': {e}") rliterman@0: else: rliterman@0: temp_dir = "" rliterman@0: rliterman@0: with open(log_file, "a") as log: rliterman@0: log.write("-------------------------------------------------------\n\n") rliterman@0: rliterman@0: try: rliterman@0: # Get query data rliterman@0: query_data = [query] + fasta_info(query_fasta) rliterman@0: query_string = [x+":"+str(y) for x,y in zip(['Query_ID','Query_Assembly','Query_Contig_Count','Query_Assembly_Bases', rliterman@0: 'Query_N50','Query_N90','Query_L50','Query_L90','Query_SHA256'],query_data)] rliterman@0: rliterman@0: with open(log_file, "a") as log: rliterman@0: log.write("Fetched Query Data...\n") rliterman@0: rliterman@0: # Get reference data rliterman@0: reference_data = [reference] + fasta_info(reference_fasta) rliterman@0: reference_string = [x+":"+str(y) for x,y in zip(['Reference_ID','Reference_Assembly','Reference_Contig_Count','Reference_Assembly_Bases', rliterman@0: 'Reference_N50','Reference_N90','Reference_L50','Reference_L90','Reference_SHA256'],reference_data)] rliterman@0: rliterman@0: with open(log_file, "a") as log: rliterman@0: log.write("Fetched Reference Data...\n") rliterman@0: rliterman@0: # Get kmer distance rliterman@0: [ref_kmers,query_kmers, rliterman@0: unique_ref_kmers,unique_query_kmers, rliterman@0: kmer_intersection,kmer_similarity] = compare_kmers(query_fasta,reference_fasta,log_file) rliterman@0: rliterman@0: with open(log_file, "a") as log: rliterman@0: log.write("Fetched Kmer Data...\n") rliterman@0: rliterman@0: # Create reference BED file using fasta seq lengths rliterman@0: query_chr_bed = fasta_to_bedtool(query_fasta) rliterman@0: reference_chr_bed = fasta_to_bedtool(reference_fasta) rliterman@0: rliterman@0: with open(log_file, "a") as log: rliterman@0: log.write("Created BED files...\n") rliterman@0: rliterman@0: # Set report ID rliterman@0: report_id = query + "__vs__" + reference rliterman@0: snpdiffs_file = snpdiffs_dir + "/" + report_id + ".snpdiffs" rliterman@0: rliterman@0: # Create NA variables for all downstream options rliterman@0: median_percent_identity = "NA" rliterman@0: median_alignment_length = "NA" rliterman@0: rliterman@0: total_snp_count = "NA" rliterman@0: total_indel_count = "NA" rliterman@0: total_invalid_count = "NA" rliterman@0: rliterman@0: #### 02: Read in MUMmer report data #### rliterman@0: [ref_bases,percent_ref_aligned, rliterman@0: query_bases,percent_query_aligned, rliterman@0: g_snps,g_indels, rliterman@0: ref_breakpoints,query_breakpoints, rliterman@0: ref_relocations,query_relocations, rliterman@0: ref_translocations,query_translocations, rliterman@0: ref_inversions,query_inversions, rliterman@0: ref_insertions,query_insertions, rliterman@0: ref_tandem,query_tandem] = parseMUmmerReport(mummer_dir,report_id) rliterman@0: rliterman@0: with open(log_file, "a") as log: rliterman@0: log.write("Parsed MUMmer report...\n") rliterman@0: rliterman@0: if percent_ref_aligned > 0: rliterman@0: rliterman@0: #### 03: Process MUMmer coords file #### rliterman@0: coords_file = parseMUmmerCoords(mummer_dir,report_id,reference_chr_bed,query_chr_bed) rliterman@0: with open(log_file, "a") as log: rliterman@0: log.write("Parsed MUMmer coords...\n") rliterman@0: rliterman@0: aligned_coords = coords_file[~((coords_file['Query_Contig'] == ".") | (coords_file['Ref_Contig'] == "."))] rliterman@0: if aligned_coords.shape[0] > 0: rliterman@0: aligned_coords['Perc_Iden'] = aligned_coords['Perc_Iden'].astype(float) rliterman@0: aligned_coords['Ref_Aligned'] = aligned_coords['Ref_Aligned'].astype(int) rliterman@0: rliterman@0: median_percent_identity = np.median(aligned_coords['Perc_Iden']) rliterman@0: median_alignment_length = np.median(aligned_coords['Ref_Aligned']) rliterman@0: rliterman@0: ##### 04: Process MUMmer SNP file #### rliterman@0: processed_snps = parseMUmmerSNPs(mummer_dir,report_id,aligned_coords) rliterman@0: with open(log_file, "a") as log: rliterman@0: log.write("Parsed MUMmer SNPs...\n") rliterman@0: # Check if processed_snps is a df or a tuple rliterman@0: if isinstance(processed_snps, tuple): rliterman@0: total_snp_count,total_indel_count,total_invalid_count = processed_snps rliterman@0: else: rliterman@0: total_snp_count = processed_snps[processed_snps['Cat'] == "SNP"].shape[0] rliterman@0: total_indel_count = processed_snps[processed_snps['Cat'] == "Indel"].shape[0] rliterman@0: total_invalid_count = processed_snps[processed_snps['Cat'] == "Invalid"].shape[0] rliterman@0: rliterman@0: # Clean up pybedtools temp rliterman@0: helpers.cleanup(verbose=False,remove_all = False) rliterman@0: with open(log_file, "a") as log: rliterman@0: log.write("Cleaned up TMP...\n") rliterman@0: rliterman@0: # Create header rliterman@0: percent_ref_aligned = f"{percent_ref_aligned:.2f}" if percent_ref_aligned != "NA" else percent_ref_aligned rliterman@0: percent_query_aligned = f"{percent_query_aligned:.2f}" if percent_query_aligned != "NA" else percent_query_aligned rliterman@0: median_percent_identity = f"{median_percent_identity:.2f}" if median_percent_identity != "NA" else median_percent_identity rliterman@0: median_alignment_length = f"{median_alignment_length:.2f}" if median_alignment_length != "NA" else median_alignment_length rliterman@0: total_snp_count = f"{total_snp_count:.0f}" if total_snp_count != "NA" else total_snp_count rliterman@0: total_indel_count = f"{total_indel_count:.0f}" if total_indel_count != "NA" else total_indel_count rliterman@0: total_invalid_count = f"{total_invalid_count:.0f}" if total_invalid_count != "NA" else total_invalid_count rliterman@0: ref_breakpoints = f"{ref_breakpoints:.0f}" rliterman@0: ref_relocations = f"{ref_relocations:.0f}" rliterman@0: ref_translocations = f"{ref_translocations:.0f}" rliterman@0: ref_inversions = f"{ref_inversions:.0f}" rliterman@0: ref_insertions = f"{ref_insertions:.0f}" rliterman@0: ref_tandem = f"{ref_tandem:.0f}" rliterman@0: query_breakpoints = f"{query_breakpoints:.0f}" rliterman@0: query_relocations = f"{query_relocations:.0f}" rliterman@0: query_translocations = f"{query_translocations:.0f}" rliterman@0: query_inversions = f"{query_inversions:.0f}" rliterman@0: query_insertions = f"{query_insertions:.0f}" rliterman@0: query_tandem = f"{query_tandem:.0f}" rliterman@0: g_snps = f"{g_snps:.0f}" rliterman@0: g_indels = f"{g_indels:.0f}" rliterman@0: ref_kmers = f"{ref_kmers:.0f}" rliterman@0: query_kmers = f"{query_kmers:.0f}" rliterman@0: unique_ref_kmers = f"{unique_ref_kmers:.0f}" rliterman@0: unique_query_kmers = f"{unique_query_kmers:.0f}" rliterman@0: kmer_intersection = f"{kmer_intersection:.0f}" rliterman@0: kmer_similarity = f"{kmer_similarity:.2f}" rliterman@0: rliterman@0: snpdiffs_header=[] rliterman@0: snpdiffs_header.append("#\t" + rliterman@0: "\t".join(query_string) + rliterman@0: "\t" + "\t".join(reference_string) + rliterman@0: "\t" + "\t".join([ rliterman@0: "SNPs:"+total_snp_count, rliterman@0: "Reference_Percent_Aligned:"+percent_ref_aligned, rliterman@0: "Query_Percent_Aligned:"+percent_query_aligned, rliterman@0: "Median_Percent_Identity:"+median_percent_identity, rliterman@0: "Median_Alignment_Length:"+median_alignment_length, rliterman@0: "Kmer_Similarity:"+kmer_similarity, rliterman@0: "Shared_Kmers:"+kmer_intersection, rliterman@0: "Reference_Unique_Kmers:"+unique_ref_kmers, rliterman@0: "Query_Unique_Kmers:"+unique_query_kmers, rliterman@0: "Reference_Breakpoints:"+ref_breakpoints, rliterman@0: "Query_Breakpoints:"+query_breakpoints, rliterman@0: "Reference_Relocations:"+ref_relocations, rliterman@0: "Query_Relocations:"+query_relocations, rliterman@0: "Reference_Translocations:"+ref_translocations, rliterman@0: "Query_Translocations:"+query_translocations, rliterman@0: "Reference_Inversions:"+ref_inversions, rliterman@0: "Query_Inversions:"+query_inversions, rliterman@0: "Reference_Insertions:"+ref_insertions, rliterman@0: "Query_Insertions:"+query_insertions, rliterman@0: "Reference_Tandem:"+ref_tandem, rliterman@0: "Query_Tandem:"+query_tandem, rliterman@0: "Indels:"+total_indel_count, rliterman@0: "Invalid:"+total_invalid_count, rliterman@0: "gSNPs:"+g_snps, rliterman@0: "gIndels:"+g_indels])) rliterman@0: rliterman@0: with open(log_file, "a") as log: rliterman@0: log.write("Prepped snpdiffs output...\n") rliterman@0: rliterman@0: with open(snpdiffs_file,"w") as file: rliterman@0: file.write("\n".join(snpdiffs_header) + "\n") rliterman@0: for index, row in coords_file.iterrows(): rliterman@0: file.write("##\t" + "\t".join(map(str, row))+"\n") rliterman@0: if isinstance(processed_snps, tuple): rliterman@0: pass rliterman@0: else: rliterman@0: for index, row in processed_snps.iterrows(): rliterman@0: file.write("\t".join(map(str, row))+"\n") rliterman@0: rliterman@0: with open(log_file, "a") as log: rliterman@0: log.write("Saved snpdiffs file...\n") rliterman@0: rliterman@0: print(",".join([query,reference,snpdiffs_file])) rliterman@0: rliterman@0: except: rliterman@0: run_failed = True rliterman@0: print("Exception occurred:\n", traceback.format_exc()) rliterman@0: finally: rliterman@0: helpers.cleanup(verbose=False, remove_all=False) rliterman@0: if temp_dir != "": rliterman@0: shutil.rmtree(temp_dir) rliterman@0: if run_failed: rliterman@0: sys.exit(1)