annotate CSP2/bin/runSNPPipeline.py @ 28:893a6993efe3

"planemo upload"
author rliterman
date Wed, 04 Dec 2024 13:48:13 -0500
parents 792274118b2e
children 93393808f415
rev   line source
rliterman@0 1 #!/usr/bin/env python3
rliterman@0 2
rliterman@0 3 import sys
rliterman@0 4 import os
rliterman@0 5 import pandas as pd
rliterman@0 6 import datetime
rliterman@0 7 from pybedtools import BedTool,helpers
rliterman@0 8 import concurrent.futures
rliterman@0 9 import time
rliterman@0 10 from Bio.Seq import Seq
rliterman@0 11 from Bio.SeqRecord import SeqRecord
rliterman@0 12 from Bio.Align import MultipleSeqAlignment
rliterman@0 13 from Bio import AlignIO
rliterman@0 14 from itertools import combinations
rliterman@0 15 import numpy as np
rliterman@0 16 import uuid
rliterman@0 17 import traceback
rliterman@0 18 import shutil
rliterman@0 19 import argparse
rliterman@0 20
rliterman@0 21 def fetchHeaders(snpdiffs_file):
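# Parse the single header line of a .snpdiffs file into a one-row DataFrame.
# Each tab-separated field after the leading "#" is expected to be "Key:Value";
# the path of the snpdiffs file is added as a 'File_Path' column.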
rliterman@0 22
rliterman@0 23 with open(snpdiffs_file, 'r') as file:
rliterman@0 24 top_line = file.readline().strip().split('\t')[1:]
rliterman@0 25
rliterman@0 26 header_cols = [item.split(':')[0] for item in top_line]
rliterman@0 27 header_vals = [item.split(':')[1] for item in top_line]
rliterman@0 28
rliterman@0 29 header_data = pd.DataFrame(columns = header_cols)
rliterman@0 30 header_data.loc[0] = header_vals
rliterman@0 31 header_data.loc[:, 'File_Path'] = snpdiffs_file
rliterman@0 32
rliterman@0 33 return header_data
rliterman@0 34
rliterman@0 35 def processBED(bed_rows,snpdiffs_orientation):
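# Convert the "##"-prefixed alignment rows of a snpdiffs file into a typed DataFrame.
# When the snpdiffs file is reversed relative to the chosen reference (orientation -1),
# the reference and query column sets are swapped before typing. Rows lacking
# coordinates on either genome (".") are dropped; an empty DataFrame is returned if nothing remains.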
rliterman@0 36
rliterman@0 37 bed_columns = ['Ref_Contig','Ref_Start','Ref_End','Ref_Length','Ref_Aligned',
rliterman@0 38 'Query_Contig','Query_Start','Query_End','Query_Length','Query_Aligned',
rliterman@0 39 'Perc_Iden']
rliterman@0 40
rliterman@0 41 reverse_columns = ['Query_Contig','Query_Start','Query_End','Query_Length','Query_Aligned',
rliterman@0 42 'Ref_Contig','Ref_Start','Ref_End','Ref_Length','Ref_Aligned',
rliterman@0 43 'Perc_Iden']
rliterman@0 44
rliterman@0 45 int_columns = ['Ref_Start', 'Ref_End', 'Ref_Length', 'Ref_Aligned',
rliterman@0 46 'Query_Start', 'Query_End', 'Query_Length', 'Query_Aligned']
rliterman@0 47
rliterman@0 48 float_columns = ['Perc_Iden']
rliterman@0 49
rliterman@0 50 if len(bed_rows) > 0:
rliterman@0 51
rliterman@0 52 bed_df = pd.DataFrame(bed_rows, columns=bed_columns)
rliterman@0 53
rliterman@0 54 # Swap columns if reversed
rliterman@0 55 if snpdiffs_orientation == -1:
rliterman@0 56 bed_df = bed_df[reverse_columns].copy()
rliterman@0 57 bed_df.columns = bed_columns
rliterman@0 58
rliterman@0 59 # Gather covered loci
rliterman@0 60 covered_bed_df = bed_df[(bed_df['Ref_Start'] != ".") & (bed_df['Query_Start'] != ".")].copy()
rliterman@0 61 if covered_bed_df.shape[0] > 0:
rliterman@0 62 for col in int_columns:
rliterman@0 63 covered_bed_df.loc[:, col] = covered_bed_df.loc[:, col].astype(float).astype(int)
rliterman@0 64 for col in float_columns:
rliterman@0 65 covered_bed_df.loc[:, col] = covered_bed_df.loc[:, col].astype(float)
rliterman@0 66
rliterman@0 67 return covered_bed_df
rliterman@0 68 else:
rliterman@0 69 return pd.DataFrame(columns=bed_columns)
rliterman@0 70
rliterman@0 71 def processSNPs(snp_rows,snpdiffs_orientation):
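# Convert raw SNP rows into a typed DataFrame. If the snpdiffs file is reversed
# (orientation -1), reference and query columns are swapped first. Bases from
# reverse-strand query alignments (Query_Direction == '-1') are replaced with their
# reverse complement, numeric columns are cast, and Query_Direction is dropped on return.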
rliterman@0 72
rliterman@0 73 snp_columns = ['Ref_Contig','Start_Ref','Ref_Pos',
rliterman@0 74 'Query_Contig','Start_Query','Query_Pos',
rliterman@0 75 'Ref_Loc','Query_Loc',
rliterman@0 76 'Ref_Start','Ref_End',
rliterman@0 77 'Query_Start','Query_End',
rliterman@0 78 'Ref_Base','Query_Base',
rliterman@0 79 'Dist_to_Ref_End','Dist_to_Query_End',
rliterman@0 80 'Ref_Aligned','Query_Aligned',
rliterman@0 81 'Query_Direction','Perc_Iden','Cat']
rliterman@0 82
rliterman@0 83 return_columns = ['Ref_Contig','Start_Ref','Ref_Pos',
rliterman@0 84 'Query_Contig','Start_Query','Query_Pos',
rliterman@0 85 'Ref_Loc','Query_Loc',
rliterman@0 86 'Ref_Start','Ref_End',
rliterman@0 87 'Query_Start','Query_End',
rliterman@0 88 'Ref_Base','Query_Base',
rliterman@0 89 'Dist_to_Ref_End','Dist_to_Query_End',
rliterman@0 90 'Ref_Aligned','Query_Aligned',
rliterman@0 91 'Perc_Iden','Cat']
rliterman@0 92
rliterman@0 93 reverse_columns = ['Query_Contig','Start_Query','Query_Pos',
rliterman@0 94 'Ref_Contig','Start_Ref','Ref_Pos',
rliterman@0 95 'Query_Loc','Ref_Loc',
rliterman@0 96 'Query_Start','Query_End',
rliterman@0 97 'Ref_Start','Ref_End',
rliterman@0 98 'Query_Base','Ref_Base',
rliterman@0 99 'Dist_to_Query_End','Dist_to_Ref_End',
rliterman@0 100 'Query_Aligned','Ref_Aligned',
rliterman@0 101 'Query_Direction','Perc_Iden','Cat']
rliterman@0 102
rliterman@0 103 reverse_complement = {'A':'T','T':'A','G':'C','C':'G',
rliterman@0 104 'a':'T','t':'A','c':'G','g':'C'}
rliterman@0 105
rliterman@0 106 # Columns to convert to integer
rliterman@0 107 int_columns = ['Start_Ref', 'Ref_Pos', 'Start_Query', 'Query_Pos',
rliterman@0 108 'Dist_to_Ref_End', 'Dist_to_Query_End', 'Ref_Aligned', 'Query_Aligned']
rliterman@0 109
rliterman@0 110 # Columns to convert to float
rliterman@0 111 float_columns = ['Perc_Iden']
rliterman@0 112
rliterman@0 113 if len(snp_rows) > 0:
rliterman@0 114 snp_df = pd.DataFrame(snp_rows, columns= snp_columns).copy()
rliterman@0 115
rliterman@0 116 if snpdiffs_orientation == -1:
rliterman@0 117 snp_df = snp_df[reverse_columns].copy()
rliterman@0 118 snp_df.columns = snp_columns
rliterman@0 119
rliterman@0 120 # Replace Query_Base and Ref_Base with their reverse complement when Query_Direction is '-1' and the base is one of A/T/G/C (either case)
rliterman@0 121 snp_df.loc[snp_df['Query_Direction'] == '-1','Query_Base'] = snp_df.loc[snp_df['Query_Direction'] == '-1','Query_Base'].apply(lambda x: reverse_complement[x] if x in reverse_complement else x)
rliterman@0 122 snp_df.loc[snp_df['Query_Direction'] == '-1','Ref_Base'] = snp_df.loc[snp_df['Query_Direction'] == '-1','Ref_Base'].apply(lambda x: reverse_complement[x] if x in reverse_complement else x)
rliterman@0 123
rliterman@0 124 for col in int_columns:
rliterman@0 125 snp_df.loc[:, col] = snp_df.loc[:, col].astype(float).astype(int)
rliterman@0 126 for col in float_columns:
rliterman@0 127 snp_df.loc[:, col] = snp_df.loc[:, col].astype(float)
rliterman@0 128
rliterman@0 129 else:
rliterman@0 130 snp_df = pd.DataFrame(columns = return_columns)
rliterman@0 131
rliterman@0 132 return snp_df[return_columns]
rliterman@0 133
rliterman@0 134 def swapHeader(header_data):
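# Swap every 'Reference_*' header column with its 'Query_*' counterpart so that
# downstream code can always treat the requested reference isolate as the reference.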
rliterman@0 135
rliterman@0 136 raw_header_cols = [x for x in header_data.columns]
rliterman@0 137 reverse_header_cols = [item.replace('Reference', 'temp').replace('Query', 'Reference').replace('temp', 'Query') for item in raw_header_cols]
rliterman@0 138 reversed_header_data = header_data[reverse_header_cols].copy()
rliterman@0 139 reversed_header_data.columns = raw_header_cols
rliterman@0 140
rliterman@0 141 return reversed_header_data
rliterman@0 142
rliterman@0 143 def parseSNPDiffs(snpdiffs_file,snpdiffs_orientation):
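# Split a snpdiffs file into its two data sections: lines starting with "#\t" are
# header lines (already handled by fetchHeaders), "##\t" lines are alignment/BED rows,
# and all remaining lines are per-site SNP rows.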
rliterman@0 144
rliterman@0 145 bed_rows = []
rliterman@0 146 snp_rows = []
rliterman@0 147
rliterman@0 148 with open(snpdiffs_file, 'r') as file:
rliterman@0 149 lines = file.readlines()
rliterman@0 150
rliterman@0 151 for line in lines:
rliterman@0 152 if line[0:2] == "#\t":
rliterman@0 153 pass
rliterman@0 154 elif line[0:3] == "##\t":
rliterman@0 155 bed_rows.append(line.strip().split("\t")[1:])
rliterman@0 156 else:
rliterman@0 157 snp_rows.append(line.strip().split("\t"))
rliterman@0 158
rliterman@0 159 bed_df = processBED(bed_rows,snpdiffs_orientation)
rliterman@0 160 snp_df = processSNPs(snp_rows,snpdiffs_orientation)
rliterman@0 161 return (bed_df,snp_df)
rliterman@0 162
rliterman@0 163 def calculate_total_length(bedtool):
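# Total number of bases covered by the intervals of a BedTool object.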
rliterman@0 164 return sum(len(interval) for interval in bedtool)
rliterman@0 165
rliterman@0 166 def filterSNPs(raw_snp_df,bed_df,log_file, min_len, min_iden, ref_edge, query_edge, density_windows, max_snps):
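# Apply the site-level filter cascade. Every site starts as "SNP" and is reassigned
# if it fails one of the following filters, applied in order: Purged_Length /
# Purged_Identity / Purged_LengthIdentity (alignment too short or too divergent),
# Purged_Invalid, Purged_Indel, Purged_Heterozygous (one reference position with
# multiple query bases), Purged_Duplicate (only the longest, highest-identity hit is
# kept), Purged_Density (too many SNPs inside a density window), and
# Filtered_Query_Edge / Filtered_Ref_Edge / Filtered_Both_Edge (too close to a contig
# end). All sites are returned with their final category in the 'Cat' column.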
rliterman@0 167
rliterman@0 168 if temp_dir != "":
rliterman@0 169 helpers.set_tempdir(temp_dir)
rliterman@0 170
rliterman@0 171 # Grab raw data
rliterman@0 172 total_snp_count = raw_snp_df.shape[0]
rliterman@0 173
rliterman@0 174 # Get unique SNPs relative to the reference genome
rliterman@0 175 unique_ref_snps = raw_snp_df['Ref_Loc'].unique()
rliterman@0 176 unique_snp_count = len(unique_ref_snps)
rliterman@0 177
rliterman@0 178 snp_tally_df = pd.DataFrame()
rliterman@0 179
rliterman@0 180 with open(log_file,"a+") as log:
rliterman@0 181 log.write(f"\n\t- Raw SNP + indel + invalid site count: {total_snp_count}\n")
rliterman@0 182 log.write(f"\n\t- Unique SNP positions in reference genome: {unique_snp_count}\n")
rliterman@0 183
rliterman@0 184 # Set all sites to SNP
rliterman@0 185 raw_snp_df['Filter_Cat'] = "SNP"
rliterman@0 186
rliterman@0 187 # Filter out SNPs based on --min_len and --min_iden
rliterman@0 188 reject_length = raw_snp_df.loc[(raw_snp_df['Ref_Aligned'] < min_len) & (raw_snp_df['Perc_Iden'] >= min_iden)].copy()
rliterman@0 189 if reject_length.shape[0] > 0:
rliterman@0 190 with open(log_file,"a+") as log:
rliterman@0 191 log.write(f"\t\t- Purged (Alignment Length): {reject_length.shape[0]}\n")
rliterman@0 192 reject_length['Filter_Cat'] = "Purged_Length"
rliterman@0 193 snp_tally_df = pd.concat([snp_tally_df,reject_length]).reset_index(drop=True)
rliterman@0 194
rliterman@0 195 reject_iden = raw_snp_df.loc[(raw_snp_df['Ref_Aligned'] >= min_len) & (raw_snp_df['Perc_Iden'] < min_iden)].copy()
rliterman@0 196 if reject_iden.shape[0] > 0:
rliterman@0 197 with open(log_file,"a+") as log:
rliterman@0 198 log.write(f"\t\t- Purged (Alignment Identity): {reject_iden.shape[0]}\n")
rliterman@0 199 reject_iden['Filter_Cat'] = "Purged_Identity"
rliterman@0 200 snp_tally_df = pd.concat([snp_tally_df,reject_iden]).reset_index(drop=True)
rliterman@0 201
rliterman@0 202 reject_lenIden = raw_snp_df.loc[(raw_snp_df['Ref_Aligned'] < min_len) & (raw_snp_df['Perc_Iden'] < min_iden)].copy()
rliterman@0 203 if reject_lenIden.shape[0] > 0:
rliterman@0 204 with open(log_file,"a+") as log:
rliterman@0 205 log.write(f"\t\t- Purged (Alignment Length + Identity): {reject_lenIden.shape[0]}\n")
rliterman@0 206 reject_lenIden['Filter_Cat'] = "Purged_LengthIdentity"
rliterman@0 207 snp_tally_df = pd.concat([snp_tally_df,reject_lenIden]).reset_index(drop=True)
rliterman@0 208
rliterman@0 209 pass_filter = raw_snp_df.loc[(raw_snp_df['Ref_Aligned'] >= min_len) & (raw_snp_df['Perc_Iden'] >= min_iden)].copy().reset_index(drop=True)
rliterman@0 210
rliterman@0 211 # Invalid processing
rliterman@0 212 reject_invalid = pass_filter[pass_filter['Cat'] == "Invalid"].copy()
rliterman@0 213 if reject_invalid.shape[0] > 0:
rliterman@0 214 with open(log_file,"a+") as log:
rliterman@0 215 log.write(f"\t\t- Purged (Invalid Base): {reject_invalid.shape[0]}\n")
rliterman@0 216 reject_invalid['Filter_Cat'] = "Purged_Invalid"
rliterman@0 217 snp_tally_df = pd.concat([snp_tally_df,reject_invalid]).reset_index(drop=True)
rliterman@0 218 pass_filter = pass_filter.loc[pass_filter['Cat'] != "Invalid"].copy()
rliterman@0 219
rliterman@0 220 # Indel processing
rliterman@0 221 reject_indel = pass_filter[pass_filter['Cat'] == "Indel"].copy()
rliterman@0 222 if reject_indel.shape[0] > 0:
rliterman@0 223 with open(log_file,"a+") as log:
rliterman@0 224 log.write(f"\t\t- Purged (Indel): {reject_indel.shape[0]}\n")
rliterman@0 225 reject_indel['Filter_Cat'] = "Purged_Indel"
rliterman@0 226 snp_tally_df = pd.concat([snp_tally_df,reject_indel]).reset_index(drop=True)
rliterman@0 227 pass_filter = pass_filter.loc[pass_filter['Cat'] != "Indel"].copy()
rliterman@0 228
rliterman@0 229 # Check for heterozygous SNPs
rliterman@0 230 check_heterozygous = pass_filter.groupby('Ref_Loc').filter(lambda x: x['Query_Base'].nunique() > 1)
rliterman@0 231 if check_heterozygous.shape[0] > 0:
rliterman@0 232 reject_heterozygous = pass_filter.loc[pass_filter['Ref_Loc'].isin(check_heterozygous['Ref_Loc'])].copy()
rliterman@0 233 reject_heterozygous['Filter_Cat'] = "Purged_Heterozygous"
rliterman@0 234 with open(log_file,"a+") as log:
rliterman@0 235 log.write(f"\t\t- Purged (Heterozygotes): {reject_heterozygous.shape[0]}\n")
rliterman@0 236 snp_tally_df = pd.concat([snp_tally_df,reject_heterozygous]).reset_index(drop=True)
rliterman@0 237 pass_filter = pass_filter.loc[~pass_filter['Ref_Loc'].isin(check_heterozygous['Ref_Loc'])].copy()
rliterman@0 238
rliterman@0 239 # Check for duplicate SNPs and take the longest, best hit
rliterman@0 240 check_duplicates = pass_filter.groupby('Ref_Loc').filter(lambda x: x.shape[0] > 1)
rliterman@0 241 if check_duplicates.shape[0] > 0:
rliterman@0 242 reject_duplicate = pass_filter.loc[pass_filter['Ref_Loc'].isin(check_duplicates['Ref_Loc'])].copy()
rliterman@0 243 pass_filter = pass_filter.loc[~pass_filter['Ref_Loc'].isin(check_duplicates['Ref_Loc'])].copy()
rliterman@0 244
rliterman@0 245 best_snp = reject_duplicate.sort_values(by=['Ref_Aligned', 'Perc_Iden'], ascending=[False, False]).drop_duplicates(subset='Ref_Loc', keep='first')
rliterman@0 246 pass_filter = pd.concat([pass_filter,best_snp]).reset_index(drop=True)
rliterman@0 247
rliterman@0 248 dup_snps = reject_duplicate.loc[~reject_duplicate.index.isin(best_snp.index)].copy()
rliterman@0 249 dup_snps['Filter_Cat'] = "Purged_Duplicate"
rliterman@0 250
rliterman@0 251 snp_tally_df = pd.concat([snp_tally_df,dup_snps]).reset_index(drop=True)
rliterman@0 252
rliterman@0 253 with open(log_file,"a+") as log:
rliterman@0 254 log.write(f"\t\t- Purged (Duplicates): {dup_snps.shape[0]}\n")
rliterman@0 255
rliterman@0 256 # Assert that Ref_Loc and Query_Loc are unique in pass_filter
rliterman@0 257 helpers.cleanup(verbose=False,remove_all = False)
rliterman@0 258 assert pass_filter['Ref_Loc'].nunique() == pass_filter.shape[0]
rliterman@0 259 assert pass_filter['Query_Loc'].nunique() == pass_filter.shape[0]
rliterman@0 260
rliterman@0 261 # Density filtering
rliterman@0 262 density_locs = []
rliterman@0 263 ref_locs = pass_filter['Ref_Loc'].tolist()
rliterman@0 264
rliterman@0 265 if len(density_windows) == 0:
rliterman@0 266 with open(log_file,"a+") as log:
rliterman@0 267 log.write("\n\t- Density filtering disabled...\n")
rliterman@0 268 elif len(ref_locs) > 0:
rliterman@0 269 density_df = pd.DataFrame([item.split('/') for item in ref_locs], columns=['Ref_Contig','Ref_End'])
rliterman@0 270 density_df['Ref_Start'] = density_df['Ref_End'].astype(float).astype(int) - 1
rliterman@0 271 density_bed = BedTool.from_dataframe(density_df[['Ref_Contig','Ref_Start','Ref_End']])
rliterman@0 272
rliterman@0 273 # For each density window, remove all SNPs that fall in a window with > max_snps
rliterman@0 274 for i in range(0,len(density_windows)):
rliterman@0 275 window_df = density_bed.window(density_bed,c=True, w=density_windows[i]).to_dataframe()
rliterman@0 276 problematic_windows = window_df[window_df['name'] > max_snps[i]].copy()
rliterman@0 277 if not problematic_windows.empty:
rliterman@0 278 temp_locs = []
rliterman@0 279 for _, row in problematic_windows.iterrows():
rliterman@0 280 purge_window_df = window_df[window_df['chrom'] == row['chrom']].copy()
rliterman@0 281 purge_window_df['Dist'] = abs(purge_window_df['end'] - row['end'])
rliterman@0 282 window_snps = purge_window_df.sort_values(by=['Dist'],ascending=True).head(row['name'])
rliterman@0 283 temp_locs = temp_locs + ["/".join([str(x[0]),str(x[1])]) for x in list(zip(window_snps.chrom, window_snps.end))]
rliterman@0 284 density_locs.extend(list(set(temp_locs)))
rliterman@0 285
rliterman@0 286 density_locs = list(set(density_locs))
rliterman@0 287 reject_density = pass_filter[pass_filter['Ref_Loc'].isin(density_locs)].copy()
rliterman@0 288
rliterman@0 289 if reject_density.shape[0] > 0:
rliterman@0 290 with open(log_file,"a+") as log:
rliterman@0 291 log.write(f"\t\t- Purged (Density): {reject_density.shape[0]}\n")
rliterman@0 292 reject_density['Filter_Cat'] = "Purged_Density"
rliterman@0 293 snp_tally_df = pd.concat([snp_tally_df,reject_density]).reset_index(drop=True)
rliterman@0 294 pass_filter = pass_filter[~pass_filter['Ref_Loc'].isin(density_locs)].copy()
rliterman@0 295
rliterman@0 296 reject_query_edge = pass_filter[(pass_filter['Dist_to_Query_End'] < query_edge) & (pass_filter['Dist_to_Ref_End'] >= ref_edge)].copy()
rliterman@0 297 reject_ref_edge = pass_filter[(pass_filter['Dist_to_Ref_End'] < ref_edge) & (pass_filter['Dist_to_Query_End'] >= query_edge)].copy()
rliterman@0 298 reject_both_edge = pass_filter[(pass_filter['Dist_to_Query_End'] < query_edge) & (pass_filter['Dist_to_Ref_End'] < ref_edge)].copy()
rliterman@0 299
rliterman@0 300 if reject_query_edge.shape[0] > 0:
rliterman@0 301 with open(log_file,"a+") as log:
rliterman@0 302 log.write(f"\t\t- Purged (Query Edge): {reject_query_edge.shape[0]}\n")
rliterman@0 303 reject_query_edge['Filter_Cat'] = "Filtered_Query_Edge"
rliterman@0 304 snp_tally_df = pd.concat([snp_tally_df,reject_query_edge]).reset_index(drop=True)
rliterman@0 305
rliterman@0 306 if reject_ref_edge.shape[0] > 0:
rliterman@0 307 with open(log_file,"a+") as log:
rliterman@0 308 log.write(f"\t\t- Purged (Ref Edge): {reject_ref_edge.shape[0]}\n")
rliterman@0 309 reject_ref_edge['Filter_Cat'] = "Filtered_Ref_Edge"
rliterman@0 310 snp_tally_df = pd.concat([snp_tally_df,reject_ref_edge]).reset_index(drop=True)
rliterman@0 311
rliterman@0 312 if reject_both_edge.shape[0] > 0:
rliterman@0 313 with open(log_file,"a+") as log:
rliterman@0 314 log.write(f"\t\t- Purged (Both Edge): {reject_both_edge.shape[0]}\n")
rliterman@0 315 reject_both_edge['Filter_Cat'] = "Filtered_Both_Edge"
rliterman@0 316 snp_tally_df = pd.concat([snp_tally_df,reject_both_edge]).reset_index(drop=True)
rliterman@0 317
rliterman@0 318 pass_filter = pass_filter[(pass_filter['Dist_to_Query_End'] >= query_edge) & (pass_filter['Dist_to_Ref_End'] >= ref_edge)].copy()
rliterman@0 319
rliterman@0 320 helpers.cleanup(verbose=False,remove_all = False)
rliterman@0 321
rliterman@0 322 assert snp_tally_df.shape[0] + pass_filter.shape[0] == total_snp_count
rliterman@0 323 return_df = pd.concat([pass_filter,snp_tally_df]).reset_index(drop=True).sort_values(by=['Ref_Loc'])
rliterman@0 324
rliterman@0 325 return return_df.drop(columns=['Cat']).rename({'Filter_Cat':'Cat'}, axis=1)
rliterman@0 326
rliterman@0 327 def screenSNPDiffs(snpdiffs_file,trim_name, min_cov, min_len, min_iden, ref_edge, query_edge, density_windows, max_snps,reference_id,log_directory):
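# Screen one snpdiffs comparison against the reference isolate. The function checks the
# file orientation, halts early if raw reference coverage is below --min_cov or if more
# than 10,000 raw SNPs/indels/invalid sites were reported, and otherwise filters the
# alignments and SNPs, writing a per-comparison log and, when SNPs are present, a
# *_SNPs.tsv table. It returns (summary statistics row, BED-style table of well-aligned
# reference regions, filtered SNP table).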
rliterman@0 328
rliterman@0 329 screen_start_time = time.time()
rliterman@0 330
rliterman@0 331 if temp_dir != "":
rliterman@0 332 helpers.set_tempdir(temp_dir)
rliterman@0 333
rliterman@0 334 # Set CSP2 variables to NA
rliterman@0 335 csp2_screen_snps = purged_length = purged_identity = purged_invalid = purged_indel = purged_lengthIdentity = purged_duplicate = purged_het = purged_density = filtered_ref_edge = filtered_query_edge = filtered_both_edge = "NA"
rliterman@0 336 filtered_snp_df = pd.DataFrame()
rliterman@0 337 good_reference_bed_df = pd.DataFrame()
rliterman@0 338
rliterman@0 339 # Ensure snpdiffs file exists
rliterman@0 340 if not os.path.exists(snpdiffs_file) or not snpdiffs_file.endswith('.snpdiffs'):
rliterman@0 341 run_failed = True
rliterman@0 342 sys.exit(f"Invalid snpdiffs file provided: {snpdiffs_file}")
rliterman@0 343
rliterman@0 344 # Ensure header can be read in
rliterman@0 345 try:
rliterman@0 346 header_data = fetchHeaders(snpdiffs_file)
rliterman@0 347 header_query = header_data['Query_ID'][0].replace(trim_name,'')
rliterman@0 348 header_ref = header_data['Reference_ID'][0].replace(trim_name,'')
rliterman@0 349 except:
rliterman@0 350 run_failed = True
rliterman@0 351 sys.exit(f"Error reading headers from snpdiffs file: {snpdiffs_file}")
rliterman@0 352
rliterman@0 353 # Check snpdiffs orientation
rliterman@0 354 if (header_ref == reference_id):
rliterman@0 355 snpdiffs_orientation = 1
rliterman@0 356 query_id = header_query
rliterman@0 357 elif (header_query == reference_id):
rliterman@0 358 snpdiffs_orientation = -1
rliterman@0 359 query_id = header_ref
rliterman@0 360 header_data = swapHeader(header_data)
rliterman@0 361 else:
rliterman@0 362 run_failed = True
rliterman@0 363 sys.exit(f"Error: Reference ID not found in header of {snpdiffs_file}...")
rliterman@0 364
rliterman@0 365 # Establish log file
rliterman@0 366 log_file = f"{log_directory}/{query_id}__vs__{reference_id}.log"
rliterman@0 367 with open(log_file,"w+") as log:
rliterman@0 368 log.write("Reference Screening for SNP Pipeline Analysis\n")
rliterman@0 369 log.write(f"Query Isolate: {query_id}\n")
rliterman@0 370 log.write(f"Reference Isolate: {reference_id}\n")
rliterman@0 371 log.write(str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))+"\n")
rliterman@0 372 log.write("-------------------------------------------------------\n\n")
rliterman@0 373 if snpdiffs_orientation == 1:
rliterman@0 374 log.write("\t- SNPDiffs file is in the forward orientation\n")
rliterman@0 375 log.write("-------------------------------------------------------\n\n")
rliterman@0 376 else:
rliterman@0 377 log.write("\t- SNPDiffs file is in the reverse orientation\n")
rliterman@0 378 log.write("-------------------------------------------------------\n\n")
rliterman@0 379
rliterman@0 380
rliterman@0 381 # Set variables from header data
rliterman@0 382 raw_snps = int(header_data['SNPs'][0])
rliterman@0 383 raw_indels = int(header_data['Indels'][0])
rliterman@0 384 raw_invalid = int(header_data['Invalid'][0])
rliterman@0 385
rliterman@0 386 kmer_similarity = float(header_data['Kmer_Similarity'][0])
rliterman@0 387 shared_kmers = int(header_data['Shared_Kmers'][0])
rliterman@0 388 query_unique_kmers = int(header_data['Query_Unique_Kmers'][0])
rliterman@0 389 reference_unique_kmers = int(header_data['Reference_Unique_Kmers'][0])
rliterman@0 390 mummer_gsnps = int(header_data['gSNPs'][0])
rliterman@0 391 mummer_gindels = int(header_data['gIndels'][0])
rliterman@0 392
rliterman@0 393 query_bases = int(header_data['Query_Assembly_Bases'][0])
rliterman@0 394 reference_bases = int(header_data['Reference_Assembly_Bases'][0])
rliterman@0 395
rliterman@0 396 query_contigs = int(header_data['Query_Contig_Count'][0])
rliterman@0 397 reference_contigs = int(header_data['Reference_Contig_Count'][0])
rliterman@0 398
rliterman@0 399 raw_query_percent_aligned = float(header_data['Query_Percent_Aligned'][0])
rliterman@0 400 raw_ref_percent_aligned = float(header_data['Reference_Percent_Aligned'][0])
rliterman@0 401
rliterman@0 402 # If the reference is not covered by at least min_cov, STOP
rliterman@0 403 if raw_ref_percent_aligned < min_cov:
rliterman@0 404 query_percent_aligned = raw_query_percent_aligned
rliterman@0 405 reference_percent_aligned = raw_ref_percent_aligned
rliterman@0 406 screen_category = "Low_Coverage"
rliterman@0 407 with open(log_file,"a+") as log:
rliterman@0 408 log.write(f"\t- Reference genome coverage: {raw_ref_percent_aligned}% \n")
rliterman@0 409 log.write(f"\t- Query covers less than --min_cov ({min_cov}%)...Screen halted...\n")
rliterman@0 410 log.write("-------------------------------------------------------\n\n")
rliterman@0 411
rliterman@0 412 elif raw_snps + raw_indels + raw_invalid > 10000:
rliterman@0 413 query_percent_aligned = raw_query_percent_aligned
rliterman@0 414 reference_percent_aligned = raw_ref_percent_aligned
rliterman@0 415 screen_category = "SNP_Cutoff"
rliterman@0 416 with open(log_file,"a+") as log:
rliterman@0 417 log.write(f"\t- {raw_snps + raw_indels + raw_invalid} SNPs, indels, or invalid sites detected...\n")
rliterman@0 418 log.write("\t- > 10,000 SNPs, indels, or invalid sites detected by MUMmer...Screen halted...\n")
rliterman@0 419 log.write("-------------------------------------------------------\n\n")
rliterman@0 420
rliterman@0 421 else:
rliterman@0 422
rliterman@0 423 ##### 02: Read in BED/SNP data #####
rliterman@0 424 with open(log_file,"a+") as log:
rliterman@0 425 log.write("Step 1: Reading in snpdiffs BED/SNP data...")
rliterman@0 426 try:
rliterman@0 427 bed_df,snp_df = parseSNPDiffs(snpdiffs_file,snpdiffs_orientation)
rliterman@0 428
rliterman@0 429 with open(log_file,"a+") as log:
rliterman@0 430 log.write("Done!\n")
rliterman@0 431 log.write("-------------------------------------------------------\n\n")
rliterman@0 432
rliterman@0 433 except Exception as e:
rliterman@0 434 run_failed = True
rliterman@0 435 with open(log_file,"a+") as log:
rliterman@0 436 log.write(f"\nError reading BED/SNP data from file: {snpdiffs_file}\n{str(e)}")
rliterman@0 437 sys.exit(f"Error reading BED/SNP data from file: {snpdiffs_file}\n{str(e)}")
rliterman@0 438
rliterman@0 439 ##### 03: Filter genome overlaps #####
rliterman@0 440 with open(log_file,"a+") as log:
rliterman@0 441 log.write("Step 2: Filtering for short overlaps and low percent identity...")
rliterman@0 442
rliterman@0 443 good_bed_df = bed_df[(bed_df['Ref_Aligned'] >= min_len) & (bed_df['Perc_Iden'] >= min_iden)].copy()
rliterman@0 444
rliterman@0 445 if good_bed_df.shape[0] == 0:
rliterman@0 446 screen_category = "Low_Quality_Coverage"
rliterman@0 447 with open(log_file,"a+") as log:
rliterman@0 448 log.write(f"\n\t- After filtering based on --min_len ({min_len}) and --min_iden ({min_iden}), no valid alignments remain...Screen halted...\n")
rliterman@0 449 log.write("-------------------------------------------------------\n\n")
rliterman@0 450
rliterman@0 451
rliterman@0 452 else:
rliterman@0 453 # Create a BED file for alignments that pass basic QC
rliterman@0 454 good_query_bed_df = good_bed_df[['Query_Contig','Query_Start','Query_End']].copy()
rliterman@0 455 good_reference_bed_df = good_bed_df[['Ref_Contig','Ref_Start','Ref_End']].copy()
rliterman@0 456 good_reference_bed_df.loc[:, 'Query_ID'] = query_id
rliterman@0 457
rliterman@0 458 good_query_aligned = calculate_total_length(BedTool.from_dataframe(good_query_bed_df).sort().merge())
rliterman@0 459 good_reference_aligned = calculate_total_length(BedTool.from_dataframe(good_reference_bed_df[['Ref_Contig','Ref_Start','Ref_End']]).sort().merge())
rliterman@0 460
rliterman@0 461 query_percent_aligned = (good_query_aligned / query_bases) * 100
rliterman@0 462 reference_percent_aligned = (good_reference_aligned / reference_bases) * 100
rliterman@0 463
rliterman@0 464 if reference_percent_aligned < min_cov:
rliterman@0 465 screen_category = "Low_Quality_Coverage"
rliterman@0 466 with open(log_file,"a+") as log:
rliterman@0 467 log.write(f"\n\t- Raw reference genome coverage was {raw_ref_percent_aligned}% \n")
rliterman@0 468 log.write(f"\t- After filtering based on --min_len ({min_len}) and --min_iden ({min_iden}), reference genome coverage was {reference_percent_aligned:.2f}% \n")
rliterman@0 469 log.write(f"\t- Query covers less than --min_cov ({min_cov}%) of reference after filtering...Screen halted...\n")
rliterman@0 470 log.write("-------------------------------------------------------\n\n")
rliterman@0 471
rliterman@0 472 else:
rliterman@0 473 screen_category = "Pass"
rliterman@0 474 with open(log_file,"a+") as log:
rliterman@0 475 log.write("Done!\n")
rliterman@0 476 log.write(f"\t- Raw reference genome coverage was {raw_ref_percent_aligned}% \n")
rliterman@0 477 log.write(f"\t- After filtering based on --min_len ({min_len}) and --min_iden ({min_iden}), reference genome coverage was {reference_percent_aligned:.2f}% \n")
rliterman@0 478 log.write("-------------------------------------------------------\n\n")
rliterman@0 479
rliterman@0 480
rliterman@0 481 # Filter SNPs
rliterman@0 482 with open(log_file,"a+") as log:
rliterman@0 483 log.write("Step 3: Filtering SNPs to get final SNP distances...")
rliterman@0 484
rliterman@0 485 if raw_snps == 0:
rliterman@0 486 csp2_screen_snps = purged_length = purged_identity = purged_lengthIdentity = purged_indel = purged_invalid = purged_duplicate = purged_het = purged_density = filtered_ref_edge = filtered_query_edge = filtered_both_edge = 0
rliterman@0 487 with open(log_file,"a+") as log:
rliterman@0 488 log.write("Done!\n")
rliterman@0 489 log.write("\t- No SNPs detected in MUMmer output, no filtering required\n")
rliterman@0 490 log.write("-------------------------------------------------------\n\n")
rliterman@0 491
rliterman@0 492 else:
rliterman@0 493 filtered_snp_df = filterSNPs(snp_df,bed_df,log_file, min_len, min_iden, ref_edge, query_edge, density_windows, max_snps)
rliterman@0 494
rliterman@0 495 csp2_screen_snps = filtered_snp_df[filtered_snp_df.Cat == "SNP"].shape[0]
rliterman@0 496 purged_length = filtered_snp_df[filtered_snp_df.Cat == "Purged_Length"].shape[0]
rliterman@0 497 purged_identity = filtered_snp_df[filtered_snp_df.Cat == "Purged_Identity"].shape[0]
rliterman@0 498 purged_lengthIdentity = filtered_snp_df[filtered_snp_df.Cat == "Purged_LengthIdentity"].shape[0]
rliterman@0 499 purged_invalid = filtered_snp_df[filtered_snp_df.Cat == "Purged_Invalid"].shape[0]
rliterman@0 500 purged_indel = filtered_snp_df[filtered_snp_df.Cat == "Purged_Indel"].shape[0]
rliterman@0 501 purged_het = filtered_snp_df[filtered_snp_df.Cat == "Purged_Heterozygous"].shape[0]
rliterman@0 502 purged_duplicate = filtered_snp_df[filtered_snp_df.Cat == "Purged_Duplicate"].shape[0]
rliterman@0 503 purged_density = filtered_snp_df[filtered_snp_df.Cat == "Purged_Density"].shape[0]
rliterman@0 504 filtered_query_edge = filtered_snp_df[filtered_snp_df.Cat == "Filtered_Query_Edge"].shape[0]
rliterman@0 505 filtered_ref_edge = filtered_snp_df[filtered_snp_df.Cat == "Filtered_Ref_Edge"].shape[0]
rliterman@0 506 filtered_both_edge = filtered_snp_df[filtered_snp_df.Cat == "Filtered_Both_Edge"].shape[0]
rliterman@0 507
rliterman@0 508 # Write filtered SNP data to file
rliterman@0 509 snp_file = log_file.replace(".log","_SNPs.tsv")
rliterman@0 510 filtered_snp_df.to_csv(snp_file, sep="\t", index=False)
rliterman@0 511
rliterman@0 512 filtered_snp_df.loc[:, 'Query_ID'] = query_id
rliterman@0 513
rliterman@0 514 with open(log_file,"a+") as log:
rliterman@0 515 log.write("Done!\n")
rliterman@0 516 log.write(f"\t- {csp2_screen_snps} SNPs detected between {query_id} and {reference_id} after filtering\n")
rliterman@0 517 log.write(f"\t- Full SNP data saved to {snp_file}\n")
rliterman@0 518 log.write("-------------------------------------------------------\n\n")
rliterman@0 519
rliterman@0 520 screen_end_time = time.time()
rliterman@0 521 with open(log_file,"a+") as log:
rliterman@0 522 log.write(f"Screening Time: {screen_end_time - screen_start_time:.2f} seconds\n")
rliterman@0 523
rliterman@0 524 # Clean up pybedtools temp
rliterman@0 525 helpers.cleanup(verbose=False, remove_all=False)
rliterman@0 526
rliterman@0 527 return ([str(item) for item in [query_id,reference_id,screen_category,csp2_screen_snps,
rliterman@0 528 f"{query_percent_aligned:.2f}",f"{reference_percent_aligned:.2f}",
rliterman@0 529 query_contigs,query_bases,reference_contigs,reference_bases,
rliterman@0 530 raw_snps,purged_length,purged_identity,purged_lengthIdentity,purged_invalid,purged_indel,purged_duplicate,purged_het,purged_density,
rliterman@0 531 filtered_query_edge,filtered_ref_edge,filtered_both_edge,
rliterman@0 532 kmer_similarity,shared_kmers,query_unique_kmers,reference_unique_kmers,
rliterman@0 533 mummer_gsnps,mummer_gindels]],good_reference_bed_df,filtered_snp_df)
rliterman@0 534
rliterman@0 535 def assessCoverage(query_id,site_list):
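# For SNP positions that a query has no call for, intersect those positions with the
# query's passing reference alignments (pass_filter_coverage_df, set globally after
# screening) and label each site as 'Ref_Base' (covered by a passing alignment, so the
# query is assumed to match the reference there) or 'Uncovered'.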
rliterman@0 536
rliterman@0 537 if temp_dir != "":
rliterman@0 538 helpers.set_tempdir(temp_dir)
rliterman@0 539
rliterman@0 540 if len(site_list) == 0:
rliterman@0 541 return pd.DataFrame(columns=['Ref_Loc','Query_ID','Cat'])
rliterman@0 542 else:
rliterman@0 543 coverage_df = pass_filter_coverage_df[pass_filter_coverage_df['Query_ID'] == query_id].copy()
rliterman@0 544
rliterman@0 545 if coverage_df.shape[0] == 0:
rliterman@0 546 uncovered_loc_df = pd.DataFrame({
rliterman@0 547 'Ref_Loc': site_list,
rliterman@0 548 'Query_ID': [query_id] * len(site_list),
rliterman@0 549 'Cat': ["Uncovered"] * len(site_list)
rliterman@0 550 })
rliterman@0 551 return uncovered_loc_df
rliterman@0 552 else:
rliterman@0 553 coverage_bed = BedTool.from_dataframe(coverage_df[['Ref_Contig','Ref_Start','Ref_End']]).sort()
rliterman@0 554 snp_bed_df = pd.DataFrame([item.split('/') for item in site_list], columns=['Ref_Contig','Ref_End'])
rliterman@0 555 snp_bed_df['Ref_Start'] = snp_bed_df['Ref_End'].astype(float).astype(int) - 1
rliterman@0 556 snp_bed_df['Ref_Loc'] = site_list
rliterman@0 557 snp_bed = BedTool.from_dataframe(snp_bed_df[['Ref_Contig','Ref_Start','Ref_End','Ref_Loc']]).sort()
rliterman@0 558
rliterman@0 559 # Ref_Locs from snp_bed that intersect with coverage_bed go into covered_locs, the rest go into uncovered_locs
rliterman@0 560 covered_locs = snp_bed.intersect(coverage_bed, wa=True)
rliterman@0 561 uncovered_locs = snp_bed.intersect(coverage_bed, v=True, wa=True)
rliterman@0 562
rliterman@0 563 covered_loc_df = pd.DataFrame({
rliterman@0 564 'Ref_Loc': [snp.fields[3] for snp in covered_locs],
rliterman@0 565 'Query_ID': [query_id] * covered_locs.count(),
rliterman@0 566 'Cat': ["Ref_Base"] * covered_locs.count()
rliterman@0 567 }) if covered_locs.count() > 0 else pd.DataFrame(columns=['Ref_Loc','Query_ID','Cat'])
rliterman@0 568
rliterman@0 569 uncovered_loc_df = pd.DataFrame({
rliterman@0 570 'Ref_Loc': [snp.fields[3] for snp in uncovered_locs],
rliterman@0 571 'Query_ID': [query_id] * uncovered_locs.count(),
rliterman@0 572 'Cat': ["Uncovered"] * uncovered_locs.count()
rliterman@0 573 }) if uncovered_locs.count() > 0 else pd.DataFrame(columns=['Ref_Loc','Query_ID','Cat'])
rliterman@0 574
rliterman@0 575 # Clean up pybedtools temp
rliterman@0 576 helpers.cleanup(verbose=False, remove_all=False)
rliterman@0 577
rliterman@0 578 return pd.concat([covered_loc_df.drop_duplicates(['Ref_Loc']),uncovered_loc_df])
rliterman@0 579
rliterman@0 580 def getPairwise(chunk, sequences, ids):
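# For each pair of aligned sequences in the chunk, count the positions where both
# isolates have an A/C/G/T call (either case; "cocalled" sites) and the number of those
# cocalled positions where the bases differ (the pairwise SNP distance).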
rliterman@0 581 results = []
rliterman@0 582
rliterman@0 583 for i, j in chunk:
rliterman@0 584 seq1, seq2 = sequences[i], sequences[j]
rliterman@0 585 actg_mask1 = np.isin(seq1, list('ACTGactg'))
rliterman@0 586 actg_mask2 = np.isin(seq2, list('ACTGactg'))
rliterman@0 587 cocalled_mask = actg_mask1 & actg_mask2
rliterman@0 588
rliterman@0 589 snps_cocalled = np.sum(cocalled_mask)
rliterman@0 590 snp_distance = np.sum((seq1 != seq2) & cocalled_mask)
rliterman@0 591
rliterman@0 592 results.append([ids[i], ids[j], snp_distance, snps_cocalled])
rliterman@0 593
rliterman@0 594 return results
rliterman@0 595
rliterman@0 596 def parallelAlignment(alignment, chunk_size=5000):
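# Split all pairwise isolate combinations into chunks and score them in parallel with a
# process pool; returns rows of [ID_1, ID_2, SNP_distance, cocalled_sites].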
rliterman@0 597 sequences = [np.array(list(record.seq)) for record in alignment]
rliterman@0 598 ids = [record.id for record in alignment]
rliterman@0 599 pairwise_combinations = list(combinations(range(len(sequences)), 2))
rliterman@0 600
rliterman@0 601 # Create chunks of pairwise combinations
rliterman@0 602 chunks = [pairwise_combinations[i:i + chunk_size] for i in range(0, len(pairwise_combinations), chunk_size)]
rliterman@0 603
rliterman@0 604 results = []
rliterman@0 605 with concurrent.futures.ProcessPoolExecutor() as executor:
rliterman@0 606 future_to_chunk = {executor.submit(getPairwise, chunk, sequences, ids): chunk for chunk in chunks}
rliterman@0 607 for future in concurrent.futures.as_completed(future_to_chunk):
rliterman@0 608 chunk_results = future.result()
rliterman@0 609 results.extend(chunk_results)
rliterman@0 610 return results
rliterman@0 611
rliterman@0 612 def getFinalPurge(df):
rliterman@0 613 # Returns the purge category from the latest filtering stage recorded for a given Ref_Loc/Query_ID group
rliterman@0 614 if "Purged_Density" in df['Cat'].values:
rliterman@0 615 return "Purged_Density"
rliterman@0 616 elif "Purged_Heterozygous" in df['Cat'].values:
rliterman@0 617 return "Purged_Heterozygous"
rliterman@0 618 elif "Purged_Indel" in df['Cat'].values:
rliterman@0 619 return "Purged_Indel"
rliterman@0 620 elif "Purged_Invalid" in df['Cat'].values:
rliterman@0 621 return "Purged_Invalid"
rliterman@0 622 elif "Purged_Length" in df['Cat'].values:
rliterman@0 623 return "Purged_Length"
rliterman@0 624 else:
rliterman@0 625 return "Purged_Identity"
rliterman@0 626
rliterman@0 627
rliterman@0 628 # Read in arguments
rliterman@0 629 global run_failed
rliterman@0 630 run_failed = False
rliterman@0 631
rliterman@0 632 start_time = time.time()
rliterman@0 633
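# Example invocation (file names and isolate IDs below are illustrative only):
#   runSNPPipeline.py --snpdiffs_file snpdiffs_list.txt --reference_id Reference_Isolate_01 \
#     --output_directory ./SNP_Analysis --log_directory ./SNP_Analysis/logs \
#     --min_cov 85 --min_len 500 --min_iden 99 --rescue norescue
# where --snpdiffs_file points to a text file listing one .snpdiffs path per line.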
rliterman@0 634 parser = argparse.ArgumentParser(description='CSP2 SNP Pipeline Analysis')
rliterman@0 635 parser.add_argument('--reference_id', type=str, help='Reference Isolate')
rliterman@0 636 parser.add_argument('--output_directory', type=str, help='Output Directory')
rliterman@0 637 parser.add_argument('--log_directory', type=str, help='Log Directory')
rliterman@0 638 parser.add_argument('--snpdiffs_file', type=str, help='Path to SNPdiffs file')
rliterman@27 639 parser.add_argument('--min_cov', default=85,type=float, help='Minimum coverage')
rliterman@27 640 parser.add_argument('--min_len', default=500,type=int, help='Minimum length')
rliterman@27 641 parser.add_argument('--min_iden', default=99,type=float, help='Minimum identity')
rliterman@27 642 parser.add_argument('--ref_edge', default=150,type=int, help='Reference edge')
rliterman@27 643 parser.add_argument('--query_edge', default=150,type=int, help='Query edge')
rliterman@27 644 parser.add_argument('--density_windows', default="1000,125,15",type=str, help='Density windows')
rliterman@27 645 parser.add_argument('--max_snps', default="3,2,1", type=str, help='Maximum SNPs')
rliterman@28 646 parser.add_argument('--trim_name', nargs='?', const="", default="", type=str, help='Trim name')
rliterman@27 647 parser.add_argument('--max_missing',default=50, type=float, help='Maximum missing')
rliterman@27 648 parser.add_argument('--tmp_dir',default="", type=str, help='Temporary directory')
rliterman@27 649 parser.add_argument('--rescue', default="norescue",type=str, help='Rescue edge SNPs (rescue/norescue)')
rliterman@0 650 args = parser.parse_args()
rliterman@0 651
rliterman@0 652 reference_id = args.reference_id
rliterman@0 653 output_directory = os.path.abspath(args.output_directory)
rliterman@0 654 log_directory = os.path.abspath(args.log_directory)
rliterman@0 655 log_file = f"{output_directory}/CSP2_SNP_Pipeline.log"
rliterman@0 656 snpdiffs_file = args.snpdiffs_file
rliterman@0 657 min_cov = args.min_cov
rliterman@0 658 min_len = args.min_len
rliterman@0 659 min_iden = args.min_iden
rliterman@0 660 ref_edge = args.ref_edge
rliterman@0 661 query_edge = args.query_edge
rliterman@0 662 density_windows = [int(x) for x in args.density_windows.split(",")]
rliterman@0 663 max_snps = [int(x) for x in args.max_snps.split(",")]
rliterman@0 664 trim_name = args.trim_name
rliterman@0 665 max_missing = args.max_missing
rliterman@0 666
rliterman@0 667 # Establish log file
rliterman@0 668 with open(log_file,"w+") as log:
rliterman@0 669 log.write("CSP2 SNP Pipeline Analysis\n")
rliterman@0 670 log.write(f"Reference Isolate: {reference_id}\n")
rliterman@0 671 log.write(str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))+"\n")
rliterman@0 672 log.write("-------------------------------------------------------\n\n")
rliterman@0 673 log.write("Reading in SNPDiffs files...")
rliterman@0 674
rliterman@0 675
rliterman@0 676 # Read in all lines and ensure each file exists
rliterman@0 677 snpdiffs_list = [line.strip() for line in open(snpdiffs_file, 'r')]
rliterman@0 678 snpdiffs_list = [line for line in snpdiffs_list if line]
rliterman@0 679 for snpdiffs_file in snpdiffs_list:
rliterman@0 680 if not os.path.exists(snpdiffs_file):
rliterman@0 681 run_failed = True
rliterman@0 682 sys.exit("Error: File does not exist: " + snpdiffs_file)
rliterman@0 683
rliterman@0 684 snpdiffs_list = list(set(snpdiffs_list))
rliterman@0 685
rliterman@0 686 if len(snpdiffs_list) == 0:
rliterman@0 687 run_failed = True
rliterman@0 688 sys.exit("No SNPdiffs files provided...")
rliterman@0 689
rliterman@0 690 with open(log_file, "a+") as log:
rliterman@0 691 log.write("Done!\n")
rliterman@0 692 log.write(f"\t- Read in {len(snpdiffs_list)} SNPdiffs files\n")
rliterman@0 693 log.write("-------------------------------------------------------\n\n")
rliterman@0 694
rliterman@0 695 global temp_dir
rliterman@0 696 if args.tmp_dir != "":
rliterman@0 697 random_temp_id = str(uuid.uuid4())
rliterman@0 698 temp_dir = f"{os.path.normpath(os.path.abspath(args.tmp_dir))}/{random_temp_id}"
rliterman@0 699 try:
rliterman@0 700 os.mkdir(temp_dir)
rliterman@0 701 helpers.set_tempdir(temp_dir)
rliterman@0 702 except OSError as e:
rliterman@0 703 run_failed = True
rliterman@0 704 print(f"Error: Failed to create directory '{temp_dir}': {e}")
rliterman@0 705 else:
rliterman@0 706 temp_dir = ""
rliterman@0 707
rliterman@0 708 rescue_edge = str(args.rescue)
rliterman@0 709 if rescue_edge not in ["rescue","norescue"]:
rliterman@0 710 with open(log_file,"a+") as log:
rliterman@0 711 log.write(f"\t- Unexpected rescue_edge variable ('{rescue_edge}'). Not performing SNP edge rescuing (any SNPs found within {query_edge}bp of a query contig edge will be purged)...\n")
rliterman@0 712 log.write("-------------------------------------------------------\n\n")
rliterman@0 713 rescue_edge = "norescue"
rliterman@0 714 elif rescue_edge == "rescue":
rliterman@0 715 with open(log_file,"a+") as log:
rliterman@0 716 log.write(f"\t- Rescuing edge SNPs within {query_edge}bp of query contig edges if found more centrally in another query...\n")
rliterman@0 717 log.write("-------------------------------------------------------\n\n")
rliterman@0 718 else:
rliterman@0 719 with open(log_file,"a+") as log:
rliterman@0 720 log.write(f"\t- Not performing SNP edge rescuing (any SNPs found within {query_edge}bp of a query contig edge will be purged)...\n")
rliterman@0 721 log.write("-------------------------------------------------------\n\n")
rliterman@0 722
rliterman@0 723 try:
rliterman@0 724 # Establish output files
rliterman@0 725 reference_screening_file = f"{output_directory}/Reference_Screening.tsv"
rliterman@0 726 locus_category_file = f"{output_directory}/Locus_Categories.tsv"
rliterman@0 727 query_coverage_file = f"{output_directory}/Query_Coverage.tsv"
rliterman@0 728 raw_loclist = f"{output_directory}/snplist.txt"
rliterman@0 729 raw_alignment = f"{output_directory}/snpma.fasta"
rliterman@0 730 preserved_loclist = f"{output_directory}/snplist_preserved.txt"
rliterman@0 731 preserved_alignment_file = f"{output_directory}/snpma_preserved.fasta"
rliterman@0 732 raw_pairwise = f"{output_directory}/snp_distance_pairwise.tsv"
rliterman@0 733 raw_matrix = f"{output_directory}/snp_distance_matrix.tsv"
rliterman@0 734 preserved_pairwise = f"{output_directory}/snp_distance_pairwise_preserved.tsv"
rliterman@0 735 preserved_matrix = f"{output_directory}/snp_distance_matrix_preserved.tsv"
rliterman@0 736
rliterman@0 737 with open(log_file,"a+") as log:
rliterman@0 738 log.write("Screening all queries against reference...")
rliterman@0 739 with concurrent.futures.ProcessPoolExecutor() as executor:
rliterman@0 740 results = [executor.submit(screenSNPDiffs,snp_diff_file,trim_name, min_cov, min_len, min_iden, ref_edge, query_edge, density_windows, max_snps,reference_id,log_directory) for snp_diff_file in snpdiffs_list]
rliterman@0 741
rliterman@0 742 # Combine results into a dataframe
rliterman@0 743 output_columns = ['Query_ID','Reference_ID','Screen_Category','CSP2_Screen_SNPs',
rliterman@0 744 'Query_Percent_Aligned','Reference_Percent_Aligned',
rliterman@0 745 'Query_Contigs','Query_Bases','Reference_Contigs','Reference_Bases',
rliterman@0 746 'Raw_SNPs','Purged_Length','Purged_Identity','Purged_LengthIdentity','Purged_Invalid','Purged_Indel','Purged_Duplicate','Purged_Het','Purged_Density',
rliterman@0 747 'Filtered_Query_Edge','Filtered_Ref_Edge','Filtered_Both_Edge',
rliterman@0 748 'Kmer_Similarity','Shared_Kmers','Query_Unique_Kmers','Reference_Unique_Kmers',
rliterman@0 749 'MUMmer_gSNPs','MUMmer_gIndels']
rliterman@0 750
rliterman@0 751 # Save reference screening
rliterman@0 752 results_df = pd.DataFrame([item.result()[0] for item in results], columns = output_columns)
rliterman@0 753 results_df.to_csv(reference_screening_file, sep="\t", index=False)
rliterman@0 754
rliterman@0 755 # Get reference bed dfs
rliterman@0 756 covered_df = pd.concat([item.result()[1] for item in results])
rliterman@0 757
rliterman@0 758 # Get snp dfs
rliterman@0 759 filtered_snp_df = pd.concat([item.result()[2] for item in results])
rliterman@0 760
rliterman@0 761 # Separate isolates that pass QC
rliterman@0 762 pass_qc_isolates = list(set(results_df[results_df['Screen_Category'] == "Pass"]['Query_ID']))
rliterman@0 763 fail_qc_isolates = list(set(results_df[results_df['Screen_Category'] != "Pass"]['Query_ID']))
rliterman@0 764
rliterman@0 765 if len(pass_qc_isolates) == 0:
rliterman@0 766 with open(log_file,"a+") as log:
rliterman@0 767 log.write("Done!\n")
rliterman@0 768 log.write(f"\t- Reference screening data saved to {reference_screening_file}\n")
rliterman@0 769 log.write(f"\t- Of {len(snpdiffs_list)} comparisons, no isolates passed QC. Pipeline cannot continue.\n")
rliterman@0 770 log.write(f"\t- {len(fail_qc_isolates)} comparisons failed QC\n")
rliterman@0 771 for isolate in fail_qc_isolates:
rliterman@0 772 isolate_category = results_df[results_df['Query_ID'] == isolate]['Screen_Category'].values[0]
rliterman@0 773 log.write(f"\t\t- {isolate}: {isolate_category}\n")
rliterman@0 774 log.write("-------------------------------------------------------\n\n")
rliterman@0 775 sys.exit(0)
rliterman@0 776 else:
rliterman@0 777 with open(log_file,"a+") as log:
rliterman@0 778 log.write("Done!\n")
rliterman@0 779 log.write(f"\t- Reference screening data saved to {reference_screening_file}\n")
rliterman@0 780 log.write(f"\t- Of {len(snpdiffs_list)} comparisons, {len(pass_qc_isolates)} covered at least {min_cov}% of the reference genome after removing poor alignments\n")
rliterman@0 781 if len(fail_qc_isolates) > 0:
rliterman@0 782 log.write(f"\t- {len(fail_qc_isolates)} comparisons failed QC\n")
rliterman@0 783 for isolate in fail_qc_isolates:
rliterman@0 784 isolate_category = results_df[results_df['Query_ID'] == isolate]['Screen_Category'].values[0]
rliterman@0 785 log.write(f"\t\t- {isolate}: {isolate_category}\n")
rliterman@0 786 log.write("-------------------------------------------------------\n\n")
rliterman@0 787
rliterman@0 788 with open(log_file,"a+") as log:
rliterman@0 789 log.write(f"Compiling SNPs across {len(pass_qc_isolates)} samples...\n")
rliterman@0 790
rliterman@0 791 if filtered_snp_df.shape[0] == 0:
rliterman@0 792 snp_count = 0
rliterman@0 793 with open(log_file,"a+") as log:
rliterman@0 794 log.write("\t- No SNPs detected across samples...Skipping to output...\n")
rliterman@0 795 else:
rliterman@0 796
rliterman@0 797 # Remove samples that failed QC
rliterman@0 798 pass_filter_snps = filtered_snp_df[filtered_snp_df['Query_ID'].isin(pass_qc_isolates)].copy()
rliterman@0 799 pass_filter_snp_list = list(set(pass_filter_snps['Ref_Loc']))
rliterman@0 800 pass_filter_snp_count = len(pass_filter_snp_list)
rliterman@0 801
rliterman@0 802 global pass_filter_coverage_df
rliterman@0 803 pass_filter_coverage_df = covered_df[covered_df['Query_ID'].isin(pass_qc_isolates)].copy()
rliterman@0 804
rliterman@0 805 # Get SNP counts
rliterman@0 806 snp_df = pass_filter_snps[pass_filter_snps['Cat'] == "SNP"].copy()
rliterman@0 807
rliterman@0 808 if snp_df.shape[0] == 0:
rliterman@0 809 snp_count = 0
rliterman@0 810 with open(log_file,"a+") as log:
rliterman@0 811 log.write(f"\t- {pass_filter_snp_count} total SNPs detected across all samples...\n")
rliterman@0 812 log.write("\t- No SNPs passed QC filtering in any sample...Skipping to output...\n")
rliterman@0 813
rliterman@0 814 else:
rliterman@0 815 snp_list = list(set(snp_df['Ref_Loc']))
rliterman@0 816 snp_count = len(snp_list)
rliterman@0 817
rliterman@0 818 with open(log_file,"a+") as log:
rliterman@0 819 log.write(f"\t- {pass_filter_snp_count} total SNPs detected across all samples...\n")
rliterman@0 820 log.write(f"\t- {snp_count} unique SNPs passed QC filtering in at least one sample...\n")
rliterman@0 821
rliterman@0 822 # Note SNPs lost irrevocably to reference edge trimming
rliterman@0 823 ref_edge_df = pass_filter_snps[pass_filter_snps['Cat'].isin(["Filtered_Ref_Edge",'Filtered_Both_Edge'])].copy()
rliterman@0 824 if ref_edge_df.shape[0] > 0:
rliterman@0 825 with open(log_file,"a+") as log:
rliterman@0 826 log.write(f"\t- {ref_edge_df['Ref_Loc'].nunique()} unique SNPs were within {ref_edge}bp of a reference contig end and were not considered in any query...\n")
rliterman@0 827
rliterman@0 828 # Create Ref_Base df
rliterman@0 829 ref_base_df = snp_df[['Ref_Loc', 'Ref_Base']].copy().drop_duplicates().rename(columns = {'Ref_Base':'Query_Base'})
rliterman@0 830 ref_base_df.loc[:,'Query_ID'] = reference_id
rliterman@0 831 ref_base_df.loc[:,'Cat'] = "Reference_Isolate"
rliterman@0 832 ref_base_df = ref_base_df.loc[:,['Ref_Loc','Query_ID','Query_Base','Cat']]
rliterman@0 833
rliterman@0 834 # Rescue SNPs that are near the edge if they are valid SNPs in other samples
rliterman@0 835 rescued_edge_df = pass_filter_snps[(pass_filter_snps['Cat'] == "Filtered_Query_Edge") & (pass_filter_snps['Ref_Loc'].isin(snp_list))].copy()
rliterman@0 836
rliterman@0 837 if rescue_edge == "norescue":
rliterman@0 838 with open(log_file,"a+") as log:
rliterman@0 839 log.write("\t- Skipping edge rescuing...\n")
rliterman@0 840
rliterman@0 841 elif rescued_edge_df.shape[0] > 0:
rliterman@0 842
rliterman@0 843 # Remove rescued sites from pass_filter_snps
rliterman@0 844 rescue_merge = pass_filter_snps.merge(rescued_edge_df, indicator=True, how='outer')
rliterman@0 845 pass_filter_snps = rescue_merge[rescue_merge['_merge'] == 'left_only'].drop(columns=['_merge']).copy()
rliterman@0 846
rliterman@0 847 # Add rescued SNPs to snp_df
rliterman@0 848 rescued_edge_df.loc[:,'Cat'] = "Rescued_SNP"
rliterman@0 849 snp_df = pd.concat([snp_df,rescued_edge_df]).reset_index(drop=True)
rliterman@0 850 with open(log_file,"a+") as log:
rliterman@0 851 log.write(f"\t- Rescued {rescued_edge_df.shape[0]} query SNPs that fell within {query_edge}bp of the query contig edge...\n")
rliterman@0 852
rliterman@0 853 else:
rliterman@0 854 with open(log_file,"a+") as log:
rliterman@0 855 log.write(f"\t- No query SNPs that fell within {query_edge}bp of the query contig edge were rescued...\n")
rliterman@0 856
rliterman@0 857 # Gather base data for all valid SNPs
rliterman@0 858 snp_base_df = snp_df[['Ref_Loc','Query_ID','Query_Base','Cat']].copy()
rliterman@0 859
rliterman@0 860 # Process purged sites
rliterman@0 861 purged_snp_df = pd.DataFrame(columns=['Ref_Loc','Query_ID','Query_Base','Cat'])
rliterman@0 862 purged_df = pass_filter_snps[pass_filter_snps['Cat'] != "SNP"].copy()
rliterman@0 863
rliterman@0 864 if purged_df.shape[0] > 0:
rliterman@0 865
rliterman@0 866 # Remove rows from purged_df if the Ref_Loc/Query_ID pair is already in snp_base_df
rliterman@0 867 purged_df = purged_df[~purged_df[['Ref_Loc','Query_ID']].apply(tuple,1).isin(snp_base_df[['Ref_Loc','Query_ID']].apply(tuple,1))].copy()
rliterman@0 868
rliterman@0 869 if purged_df.shape[0] > 0:
rliterman@0 870
rliterman@0 871 # Get purged SNPs where no query has a valid SNP
rliterman@0 872 non_snp_df = purged_df[~purged_df['Ref_Loc'].isin(snp_list)].copy()
rliterman@0 873 if non_snp_df.shape[0] > 0:
rliterman@0 874 non_snp_merge = purged_df.merge(non_snp_df, indicator=True, how='outer')
rliterman@0 875 purged_df = non_snp_merge[non_snp_merge['_merge'] == 'left_only'].drop(columns=['_merge']).copy()
rliterman@0 876
rliterman@0 877 with open(log_file,"a+") as log:
rliterman@0 878 log.write(f"\t- {len(list(set(non_snp_df['Ref_Loc'])))} unique SNPs were purged in all queries they were found in, and were not considered in the final dataset...\n")
rliterman@0 879
rliterman@0 880 if purged_df.shape[0] > 0:
rliterman@0 881
rliterman@0 882 purged_snp_df = purged_df[['Ref_Loc','Query_ID']].copy().drop_duplicates()
rliterman@0 883 purged_snp_df.loc[:, 'Query_Base'] = "N"
rliterman@0 884 final_purge_df = purged_df.groupby(['Ref_Loc','Query_ID']).apply(getFinalPurge).reset_index().rename(columns={0:'Cat'})
rliterman@0 885 purged_snp_df = purged_snp_df.merge(final_purge_df, on=['Ref_Loc','Query_ID'], how='inner')
rliterman@0 886
rliterman@0 887 # Genomic positions that do not occur in the SNP data are either uncovered or match the reference base
rliterman@0 888 missing_df = pd.DataFrame(columns=['Ref_Loc','Query_ID','Query_Base','Cat'])
rliterman@0 889
rliterman@0 890 covered_snps = pd.concat([snp_base_df,purged_snp_df]).copy()
rliterman@0 891 ref_loc_sets = covered_snps.groupby('Query_ID')['Ref_Loc'].agg(set).to_dict()
rliterman@0 892 isolates_with_missing = [isolate for isolate in pass_qc_isolates if len(set(snp_list) - ref_loc_sets.get(isolate, set())) > 0]
rliterman@0 893
rliterman@0 894 uncovered_df = pd.DataFrame()
rliterman@0 895
rliterman@0 896 if isolates_with_missing:
rliterman@0 897 isolate_data = [(isolate, list(set(snp_list) - ref_loc_sets.get(isolate, set()))) for isolate in isolates_with_missing]
rliterman@0 898
rliterman@0 899 with concurrent.futures.ProcessPoolExecutor() as executor:
rliterman@0 900 results = [executor.submit(assessCoverage, query, sites) for query, sites in isolate_data]
rliterman@0 901 coverage_dfs = [result.result() for result in concurrent.futures.as_completed(results)]
rliterman@0 902
rliterman@0 903 coverage_df = pd.concat(coverage_dfs)
rliterman@0 904 covered_df = coverage_df[coverage_df['Cat'] == 'Ref_Base']
rliterman@0 905 uncovered_df = coverage_df[coverage_df['Cat'] == 'Uncovered']
rliterman@0 906
rliterman@0 907 if not uncovered_df.empty:
rliterman@0 908 uncovered_df.loc[:, 'Query_Base'] = "?"
rliterman@0 909 missing_df = pd.concat([missing_df, uncovered_df[['Ref_Loc', 'Query_ID', 'Query_Base', 'Cat']]])
rliterman@0 910
rliterman@0 911 if not covered_df.empty:
rliterman@0 912 ref_base_snp_df = covered_df.merge(ref_base_df[['Ref_Loc', 'Query_Base']], on='Ref_Loc', how='left')
rliterman@0 913 missing_df = pd.concat([missing_df, ref_base_snp_df[['Ref_Loc', 'Query_ID', 'Query_Base', 'Cat']]])
rliterman@0 914
rliterman@0 915 with open(log_file,"a+") as log:
rliterman@0 916 log.write("\t- Processed coverage information...\n")
rliterman@0 917
rliterman@0 918 final_snp_df = pd.concat([snp_base_df,purged_snp_df,missing_df,ref_base_df]).sort_values(by=['Ref_Loc','Query_ID']).reset_index(drop=True)
rliterman@0 919 snp_counts = final_snp_df.groupby('Query_ID')['Ref_Loc'].count().reset_index().rename(columns={'Ref_Loc':'SNP_Count'})
rliterman@0 920
rliterman@0 921 # Assert that all snp_counts == snp_count
rliterman@0 922 assert snp_counts['SNP_Count'].nunique() == 1
rliterman@0 923 assert snp_counts['SNP_Count'].values[0] == snp_count
rliterman@0 924
rliterman@0 925 # Get locus coverage stats
rliterman@0 926 snp_coverage_df = final_snp_df[final_snp_df['Cat'].isin(['SNP','Rescued_SNP'])].groupby('Ref_Loc')['Query_ID'].count().reset_index().rename(columns={'Query_ID':'SNP_Count'})
rliterman@0 927 ref_base_coverage_df = final_snp_df[final_snp_df['Cat'].isin(["Ref_Base","Reference_Isolate"])].groupby('Ref_Loc')['Query_ID'].count().reset_index().rename(columns={'Query_ID':'Ref_Base_Count'})
rliterman@0 928
rliterman@0 929 if uncovered_df.shape[0] > 0:
rliterman@0 930 uncovered_count_df = final_snp_df[final_snp_df['Cat'] == "Uncovered"].groupby('Ref_Loc')['Query_ID'].count().reset_index().rename(columns={'Query_ID':'Uncovered_Count'}).copy()
rliterman@0 931 else:
rliterman@0 932 uncovered_count_df = pd.DataFrame(columns=['Ref_Loc','Uncovered_Count'])
rliterman@0 933
rliterman@0 934 possible_purged_cols = ['Purged_Length','Purged_Identity','Purged_Invalid','Purged_Indel','Purged_Heterozygous','Purged_Density']
rliterman@0 935 if purged_snp_df.shape[0] > 0:
rliterman@0 936 purged_count_df = final_snp_df[final_snp_df['Cat'].isin(possible_purged_cols)].groupby('Ref_Loc')['Query_ID'].count().reset_index().rename(columns={'Query_ID':'Purged_Count'}).copy()
rliterman@0 937 else:
rliterman@0 938 purged_count_df = pd.DataFrame(columns=['Ref_Loc','Purged_Count'])
rliterman@0 939
rliterman@0 940 locus_coverage_df = snp_coverage_df.merge(ref_base_coverage_df, how='outer', on='Ref_Loc').merge(uncovered_count_df, how='outer', on='Ref_Loc').merge(purged_count_df, how='outer', on='Ref_Loc').fillna(0)
rliterman@0 941 locus_coverage_df.loc[:, ['SNP_Count','Ref_Base_Count','Uncovered_Count','Purged_Count']] = locus_coverage_df.loc[:, ['SNP_Count','Ref_Base_Count','Uncovered_Count','Purged_Count']].astype(int)
rliterman@0 942 locus_coverage_df['Missing_Ratio'] = ((locus_coverage_df['Uncovered_Count'] + locus_coverage_df['Purged_Count']) / (1+len(pass_qc_isolates))) * 100
rliterman@0 943 locus_coverage_df.to_csv(locus_category_file, sep="\t", index=False)
rliterman@0 944
rliterman@0 945 # Get isolate coverage stats
rliterman@0 946 min_isolate_cols = ['Query_ID','SNP','Ref_Base','Percent_Missing','Purged','Uncovered','Rescued_SNP','Purged_Ref_Edge']
rliterman@0 947 isolate_coverage_df = final_snp_df.groupby('Query_ID')['Cat'].value_counts().unstack().fillna(0).astype(float).astype(int).reset_index().drop(columns=['Reference_Isolate'])
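rliterman@0 947 # The reference's own rows carry the 'Reference_Isolate' category (dropped above), so credit it with a Ref_Base call at every SNP locus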
rliterman@0 948 isolate_coverage_df.loc[isolate_coverage_df['Query_ID'] == reference_id, 'Ref_Base'] = snp_count
rliterman@0 949
rliterman@0 950 if "Rescued_SNP" not in isolate_coverage_df.columns.tolist():
rliterman@0 951 isolate_coverage_df.loc[:,'Rescued_SNP'] = 0
rliterman@0 952 isolate_coverage_df['SNP'] = isolate_coverage_df['SNP'] + isolate_coverage_df['Rescued_SNP']
rliterman@0 953
rliterman@0 954 for col in ['Uncovered'] + possible_purged_cols:
rliterman@0 955 if col not in isolate_coverage_df.columns.tolist():
rliterman@0 956 isolate_coverage_df.loc[:,col] = 0
rliterman@0 957
rliterman@0 958 isolate_coverage_df['Purged'] = isolate_coverage_df[possible_purged_cols].sum(axis=1)
rliterman@0 959
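rliterman@0 959 # Percent_Missing: share of an isolate's assessed loci that are uncovered or purged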
rliterman@0 960 isolate_coverage_df['Percent_Missing'] = (isolate_coverage_df['Uncovered'] + isolate_coverage_df['Purged'])/(isolate_coverage_df['Uncovered'] + isolate_coverage_df['Purged'] + isolate_coverage_df['Ref_Base'] + isolate_coverage_df['SNP']) * 100
rliterman@0 961
rliterman@0 962 isolate_coverage_df.loc[:,'Purged_Ref_Edge'] = 0
rliterman@0 963 if ref_edge_df.shape[0] > 0:
rliterman@0 964 isolate_coverage_df.loc[isolate_coverage_df['Query_ID'] == reference_id, 'Purged_Ref_Edge'] = ref_edge_df['Ref_Loc'].nunique()
rliterman@0 965
rliterman@0 966 isolate_coverage_df = isolate_coverage_df[min_isolate_cols + possible_purged_cols].sort_values(by = 'Percent_Missing',ascending = False).reset_index(drop=True)
rliterman@0 967 isolate_coverage_df.to_csv(query_coverage_file, sep="\t", index=False)
rliterman@0 968
rliterman@0 969 with open(log_file,"a+") as log:
rliterman@0 970 log.write(f"\t- SNP coverage information: {locus_category_file}\n")
rliterman@0 971 log.write(f"\t- Query coverage information: {query_coverage_file}\n")
rliterman@0 972 log.write("-------------------------------------------------------\n\n")
rliterman@0 973
rliterman@0 974 with open(log_file,"a+") as log:
rliterman@0 975 log.write("Processing alignment data...")
rliterman@0 976
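rliterman@0 976 # Pivot per-isolate base calls into a Query_ID x Ref_Loc table; each row becomes one alignment record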
rliterman@0 977 alignment_df = final_snp_df[['Query_ID','Ref_Loc','Query_Base']].copy().rename(columns={'Query_Base':'Base'}).pivot(index='Query_ID', columns='Ref_Loc', values='Base')
rliterman@0 978 csp2_ordered = alignment_df.columns
rliterman@0 979
rliterman@0 980 with open(raw_loclist,"w+") as loclist:
rliterman@0 981 loclist.write("\n".join(csp2_ordered)+"\n")
rliterman@0 982
rliterman@0 983 seq_records = [SeqRecord(Seq(''.join(row)), id=query,description='') for query,row in alignment_df.iterrows()]
rliterman@0 984
rliterman@0 985 alignment = MultipleSeqAlignment(seq_records)
rliterman@0 986 AlignIO.write(alignment,raw_alignment,"fasta")
rliterman@0 987
rliterman@0 988 with open(log_file,"a+") as log:
rliterman@0 989 log.write("Done!\n")
rliterman@0 990 log.write(f"\t- Saved alignment of {snp_count} SNPs to {raw_alignment}\n")
rliterman@0 991 log.write(f"\t- Saved ordered loc list to {raw_loclist}\n")
rliterman@0 992
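rliterman@0 992 # With max_missing == 100 no loci are filtered, so the preserved alignment duplicates the raw alignment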
rliterman@0 993 if max_missing == float(100):
rliterman@0 994 locs_pass_missing = csp2_ordered
rliterman@0 995 preserved_alignment = alignment
rliterman@0 996
rliterman@0 997 AlignIO.write(preserved_alignment,preserved_alignment_file,"fasta")
rliterman@0 998 with open(preserved_loclist,"w+") as loclist:
rliterman@0 999 loclist.write("\n".join(csp2_ordered)+"\n")
rliterman@0 1000
rliterman@0 1001 with open(log_file,"a+") as log:
rliterman@0 1002 log.write("Skipping SNP preservation step...\n")
rliterman@0 1003 log.write(f"\t- Saved duplicate alignment to {preserved_alignment_file}\n")
rliterman@0 1004 log.write(f"\t- Saved duplicate ordered loc list to {preserved_loclist}\n")
rliterman@0 1005 else:
rliterman@0 1006 with open(log_file,"a+") as log:
rliterman@0 1007 log.write(f"\t- Preserving SNPs with at most {max_missing}% missing data...\n")
rliterman@0 1008
rliterman@0 1009 # Parse missing data
rliterman@0 1010 locs_pass_missing = list(set(locus_coverage_df[locus_coverage_df['Missing_Ratio'] <= max_missing]['Ref_Loc']))
rliterman@0 1011
rliterman@0 1012 if len(locs_pass_missing) == 0:
rliterman@0 1013 with open(log_file,"a+") as log:
rliterman@0 1014 log.write(f"\t- Of {snp_count} SNPs, no SNPs pass the {max_missing}% missing data threshold...\n")
rliterman@0 1015 log.write("-------------------------------------------------------\n\n")
rliterman@0 1016 else:
rliterman@0 1017 preserved_alignment_df = alignment_df[locs_pass_missing].copy()
rliterman@0 1018
rliterman@0 1019 preserved_ordered = preserved_alignment_df.columns
rliterman@0 1020 with open(preserved_loclist,"w+") as loclist:
rliterman@0 1021 loclist.write("\n".join(preserved_ordered)+"\n")
rliterman@0 1022
rliterman@0 1023 seq_records = [SeqRecord(Seq(''.join(row)), id=query,description='') for query,row in preserved_alignment_df.iterrows()]
rliterman@0 1024 preserved_alignment = MultipleSeqAlignment(seq_records)
rliterman@0 1025 AlignIO.write(preserved_alignment,preserved_alignment_file,"fasta")
rliterman@0 1026 with open(log_file,"a+") as log:
rliterman@0 1027 log.write(f"\t- Of {snp_count} SNPs, {len(locs_pass_missing)} SNPs pass the {max_missing}% missing data threshold...\n")
rliterman@0 1028 log.write(f"\t- Saved preserved alignment to {preserved_alignment_file}\n")
rliterman@0 1029 log.write(f"\t- Saved preserved ordered loc list to {preserved_loclist}\n")
rliterman@0 1030 log.write("-------------------------------------------------------\n\n")
rliterman@0 1031
rliterman@0 1032 with open(log_file,"a+") as log:
rliterman@0 1033 log.write("Processing pairwise comparisons files...")
rliterman@0 1034
rliterman@0 1035 # Get pairwise comparisons between all pass_qc_isolates and reference_id
rliterman@0 1036 pairwise_combinations = [sorted(x) for x in list(combinations([reference_id] + pass_qc_isolates, 2))]
rliterman@0 1037
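rliterman@0 1037 # With no SNPs, write zeroed pairwise distance files and matrices so downstream outputs still exist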
rliterman@0 1038 if snp_count == 0:
rliterman@0 1039 pairwise_df = pd.DataFrame([(pairwise[0], pairwise[1], 0,np.nan) for pairwise in pairwise_combinations],columns = ['Query_1','Query_2','SNP_Distance','SNPs_Cocalled'])
rliterman@0 1040 preserved_pairwise_df = pairwise_df.copy()
rliterman@0 1041
rliterman@0 1042 pairwise_df.to_csv(raw_pairwise, sep="\t", index=False)
rliterman@0 1043 preserved_pairwise_df.to_csv(preserved_pairwise, sep="\t", index=False)
rliterman@0 1044
rliterman@0 1045 # Create symmetric distance matrix: the pivot fills one triangle; adding its transpose mirrors it
rliterman@0 1046 idx = sorted(set(pairwise_df['Query_1']).union(pairwise_df['Query_2']))
rliterman@0 1047 mirrored_distance_df = pairwise_df.pivot(index='Query_1', columns='Query_2', values='SNP_Distance').reindex(index=idx, columns=idx).fillna(0, downcast='infer').pipe(lambda x: x+x.values.T).applymap(lambda x: format(x, '.0f'))
rliterman@0 1048 mirrored_distance_df.index.name = ''
rliterman@0 1049 mirrored_distance_df.to_csv(raw_matrix,sep="\t")
rliterman@0 1050 mirrored_distance_df.to_csv(preserved_matrix,sep="\t")
rliterman@0 1051
rliterman@0 1052 else:
rliterman@0 1053 raw_distance_results = parallelAlignment(alignment)
rliterman@0 1054 raw_pairwise_df = pd.DataFrame(raw_distance_results, columns=['Query_1', 'Query_2', 'SNP_Distance', 'SNPs_Cocalled'])
rliterman@0 1055 raw_pairwise_df.to_csv(raw_pairwise, sep="\t", index=False)
rliterman@0 1056
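rliterman@0 1056 # Preserved distances: reuse the raw results if every SNP passed the missing-data filter, zero them if none did, otherwise recompute from the preserved alignment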
rliterman@0 1057 if len(locs_pass_missing) == snp_count:
rliterman@0 1058 preserved_pairwise_df = raw_pairwise_df.copy()
rliterman@0 1059 preserved_pairwise_df.to_csv(preserved_pairwise, sep="\t", index=False)
rliterman@0 1060 elif len(locs_pass_missing) == 0:
rliterman@0 1061 preserved_pairwise_df = pd.DataFrame([(pairwise[0], pairwise[1], 0,np.nan) for pairwise in pairwise_combinations],columns = ['Query_1','Query_2','SNP_Distance','SNPs_Cocalled'])
rliterman@0 1062 preserved_pairwise_df.to_csv(preserved_pairwise, sep="\t", index=False)
rliterman@0 1063 else:
rliterman@0 1064 preserved_distance_results = parallelAlignment(preserved_alignment)
rliterman@0 1065 preserved_pairwise_df = pd.DataFrame(preserved_distance_results, columns=['Query_1', 'Query_2', 'SNP_Distance', 'SNPs_Cocalled'])
rliterman@0 1066 preserved_pairwise_df.to_csv(preserved_pairwise, sep="\t", index=False)
rliterman@0 1067
rliterman@0 1068 # Create symmetric distance matrices for the raw and preserved SNP sets
rliterman@0 1069 idx = sorted(set(raw_pairwise_df['Query_1']).union(raw_pairwise_df['Query_2']))
rliterman@0 1070 mirrored_distance_df = raw_pairwise_df.pivot(index='Query_1', columns='Query_2', values='SNP_Distance').reindex(index=idx, columns=idx).fillna(0, downcast='infer').pipe(lambda x: x+x.values.T).applymap(lambda x: format(x, '.0f'))
rliterman@0 1071 mirrored_distance_df.index.name = ''
rliterman@0 1072 mirrored_distance_df.to_csv(raw_matrix,sep="\t")
rliterman@0 1073
rliterman@0 1074 idx = sorted(set(preserved_pairwise_df['Query_1']).union(preserved_pairwise_df['Query_2']))
rliterman@0 1075 mirrored_distance_df = preserved_pairwise_df.pivot(index='Query_1', columns='Query_2', values='SNP_Distance').reindex(index=idx, columns=idx).fillna(0, downcast='infer').pipe(lambda x: x+x.values.T).applymap(lambda x: format(x, '.0f'))
rliterman@0 1076 mirrored_distance_df.index.name = ''
rliterman@0 1077 mirrored_distance_df.to_csv(preserved_matrix,sep="\t")
rliterman@0 1078
rliterman@0 1079 # Clean up pybedtools temp
rliterman@0 1080 helpers.cleanup(verbose=False,remove_all = False)
rliterman@0 1081
rliterman@0 1082 end_time = time.time()
rliterman@0 1083 with open(log_file,"a+") as log:
rliterman@0 1084 log.write("Done!\n")
rliterman@0 1085 if snp_count == 0:
rliterman@0 1086 log.write(f"\t- No SNPs detected, zeroed pairwise distance files saved to {raw_pairwise}/{preserved_pairwise}/{raw_matrix}/{preserved_matrix}\n")
rliterman@0 1087 else:
rliterman@0 1088 log.write(f"\t- Saved raw pairwise distances to {raw_pairwise}\n")
rliterman@0 1089 log.write(f"\t- Saved raw pairwise matrix to {raw_matrix}\n")
rliterman@0 1090
rliterman@0 1091 if max_missing == float(100):
rliterman@0 1092 log.write("Skipped SNP preservation step...\n")
rliterman@0 1093 log.write(f"\t- Saved duplicated preserved pairwise distances to {preserved_pairwise}\n")
rliterman@0 1094 log.write(f"\t- Saved duplicated preserved pairwise matrix to {preserved_matrix}\n")
rliterman@0 1095 elif len(locs_pass_missing) == 0:
rliterman@0 1096 log.write(f"\t- No SNPs passed the {max_missing}% missing data threshold, zeroed pairwise distance files saved to {preserved_pairwise}/{preserved_matrix}\n")
rliterman@0 1097 else:
rliterman@0 1098 log.write(f"\t- Saved preserved pairwise distances to {preserved_pairwise}\n")
rliterman@0 1099 log.write(f"\t- Saved preserved pairwise matrix to {preserved_matrix}\n")
rliterman@0 1100 log.write(f"Total Time: {end_time - start_time:.2f} seconds\n")
rliterman@0 1101 log.write("-------------------------------------------------------\n\n")
rliterman@0 1102
rliterman@0 1103 except Exception:
rliterman@0 1104 run_failed = True
rliterman@0 1105 print("Exception occurred:\n", traceback.format_exc())
rliterman@0 1106 finally:
rliterman@0 1107 helpers.cleanup(verbose=False, remove_all=False)
rliterman@0 1108 if temp_dir != "":
rliterman@0 1109 shutil.rmtree(temp_dir)
rliterman@0 1110 if run_failed:
rliterman@0 1111 sys.exit(1)