kkonganti@1: #!/usr/bin/env python3
kkonganti@1:
kkonganti@1: # Kranti Konganti
kkonganti@1:
kkonganti@1: import os
kkonganti@1: import argparse
kkonganti@1: import inspect
kkonganti@1: import logging
kkonganti@1: import re
kkonganti@1: import pickle
kkonganti@1: import pprint
kkonganti@1: import json
kkonganti@1: from collections import defaultdict
kkonganti@1:
# Set logging.
# Configure the root logger once for the whole script: each message is framed
# by a 55-char "=" banner so it stands out inside Nextflow task logs, and the
# DEBUG level keeps every message visible.
logging.basicConfig(
    format="\n" + "=" * 55 + "\n%(asctime)s - %(levelname)s\n" + "=" * 55 + "\n%(message)s\n\n",
    level=logging.DEBUG,
)

# Debug print.
# Pretty-printer kept for ad-hoc debugging; not used elsewhere in this view.
ppp = pprint.PrettyPrinter(width=50, indent=4)
kkonganti@1:
# Multiple inheritance for pretty printing of help text.
class MultiArgFormatClasses(argparse.RawTextHelpFormatter, argparse.ArgumentDefaultsHelpFormatter):
    """Help formatter that keeps raw text layout and also shows argument defaults."""

    pass
kkonganti@1:
kkonganti@1:
def main() -> None:
    """
    This script works only in the context of `bettercallsal` Nextflow workflow.
    It takes:
        1. A CSV file containing a similarity matrix or dissimilarity matrix where
            the header row contains the names.
        2. Indexed NCBI Pathogen metadata in pickle format and converts
            accessions to serotype names in the final distance matrix output.
    """

    prog_name = os.path.basename(inspect.stack()[0].filename)

    parser = argparse.ArgumentParser(
        prog=prog_name, description=main.__doc__, formatter_class=MultiArgFormatClasses
    )

    required = parser.add_argument_group("required arguments")

    required.add_argument(
        "-csv",
        dest="mat",
        default=False,
        required=True,
        help="Absolute UNIX path to .csv file containing similarity\n"
        + "or dissimilarity matrix from `sourmash compare`.",
    )
    required.add_argument(
        "-pickle",
        dest="acc2sero",
        default=False,
        required=True,
        help="Absolute UNIX Path to the *ACC2SERO.pickle\n"
        + "metadata file. On raven2, these are located at\n"
        + "/hpc/db/bettercallsal/PDGXXXXXXXXXX.XXXXX/",
    )
    required.add_argument(
        "-labels",
        dest="labels",
        default=False,
        required=True,
        help="Absolute UNIX Path to the *.labels.txt\n"
        + "file from `sourmash compare`. The accessions\n"
        + "will be renamed to serotype names.",
    )

    args = parser.parse_args()
    csv = args.mat
    labels = args.labels
    pickled_sero = args.acc2sero
    row_names = list()
    distance_mat = defaultdict(defaultdict)
    # Outputs land in the process working directory (Nextflow task dir).
    out_csv = os.path.join(os.getcwd(), "bcs_sourmash_matrix.tblsum.txt")
    out_json = os.path.join(os.getcwd(), "bcs_sourmash_matrix_mqc.json")

    # Prepare dictionary to be dumped as JSON (MultiQC custom-content heatmap).
    distance_mat["id"] = "BETTERCALLSAL_CONTAINMENT_INDEX"
    distance_mat["section_name"] = "Containment index"
    # NOTE(review): this string was a broken/unterminated literal in the
    # original; reconstructed from the surrounding fragments.
    distance_mat["description"] = (
        "This section shows the containment index between a sample and the genomes "
        + "by running `sourmash gather` using the `--containment` option."
    )
    distance_mat["plot_type"] = "heatmap"
    distance_mat["pconfig"]["id"] = "bettercallsal_containment_index_heatmap"
    distance_mat["pconfig"]["title"] = "Sourmash: containment index"
    distance_mat["pconfig"]["xTitle"] = "Samples"
    distance_mat["pconfig"]["yTitle"] = "Isolates (Genome assemblies)"
    distance_mat["pconfig"]["ycats_samples"] = "False"
    distance_mat["pconfig"]["xcats_samples"] = "False"
    distance_mat["pconfig"]["square"] = "False"
    distance_mat["pconfig"]["min"] = "0.0"
    distance_mat["pconfig"]["max"] = "1.0"
    distance_mat["data"]["data"] = list()

    # A missing/empty pickle is fatal (exit 1): without it accessions cannot
    # be translated to serotype names.
    if pickled_sero and (not os.path.exists(pickled_sero) or not os.path.getsize(pickled_sero)):
        logging.error(
            "The pickle file,\n" + f"{os.path.basename(pickled_sero)} does not exist or is empty!"
        )
        exit(1)
    else:
        # NOTE(review): pickle.load is only safe on trusted input; this file
        # is produced by the bettercallsal workflow itself.
        with open(pickled_sero, "rb") as pickle_fh:
            acc2sero = pickle.load(file=pickle_fh)

    # A missing matrix or labels file exits 0 on purpose so the workflow can
    # proceed without this optional summary — TODO confirm intent.
    if csv and (not os.path.exists(csv) or not os.path.getsize(csv) > 0):
        logging.error("File,\n" + f"{csv}\ndoes not exist " + "or is empty!")
        exit(0)

    if labels and (not os.path.exists(labels) or not os.path.getsize(labels) > 0):
        logging.error("File,\n" + f"{labels}\ndoes not exist " + "or is empty!")
        exit(0)

    # Collect sample names: any label that is NOT a known accession is a
    # query sample. Order follows the labels file, which mirrors the order
    # of rows/columns in the `sourmash compare` matrix.
    with open(labels, "r") as labels_fh:
        for line in labels_fh:
            line = line.strip()
            if line not in acc2sero.keys():
                row_names.append(line)

    with open(out_csv, "w") as csv_out_fh:
        with open(csv, "r") as csv_in_fh:
            header = csv_in_fh.readline().strip().split(",")
            # Column indices of genome accessions vs. query samples.
            acc_cols = [idx for idx, col in enumerate(header) if col in acc2sero.keys()]
            sample_cols = [idx for idx, col in enumerate(header) if col not in acc2sero.keys()]

            # Turn "serotype=NAME,antigen_formula=...|..." metadata into
            # "NAME| | ACCESSION" display labels.
            col_names = [
                re.sub(r"serotype=|\,antigen_formula=.*?\|", "", s)
                for s in [acc2sero[col] + f"| | {col}" for col in header if col in acc2sero.keys()]
            ]

            distance_mat["xcats"] = col_names
            csv_out_fh.write("\t".join(["Sample"] + col_names) + "\n")

            # BUGFIX: the original used one counter that was never advanced on
            # skipped rows (`continue` bypassed the increment), which broke
            # whenever an accession row preceded a sample row. Track the real
            # row index with enumerate and count emitted sample rows separately.
            sample_num = 0
            for row_idx, line in enumerate(csv_in_fh):
                if row_idx not in sample_cols:
                    continue
                # Keep only sample-vs-genome cells, rounded to 5 decimals.
                heatmap_rows = [
                    str(round(float(line.strip().split(",")[col]), 5)) for col in acc_cols
                ]
                distance_mat["data"]["data"].append(heatmap_rows)
                csv_out_fh.write("\t".join([row_names[sample_num]] + heatmap_rows) + "\n")
                sample_num += 1

    distance_mat["ycats"] = row_names
    with open(out_json, "w") as json_fh:
        json.dump(distance_mat, json_fh)
kkonganti@1:
kkonganti@1:
# Invoke the CLI entry point only when executed as a script (not on import).
if __name__ == "__main__":
    main()