kkonganti@0: """Pipeline script""" kkonganti@0: kkonganti@0: import csv, datetime, logging, re, sqlite3, sys kkonganti@0: import lexmapr.create_databases as cdkp kkonganti@0: import lexmapr.ontology_reasoner as ontr kkonganti@0: import lexmapr.pipeline_helpers as helpers kkonganti@0: import lexmapr.pipeline_resources as pipeline_resources kkonganti@0: import lexmapr.run_summary as summarize kkonganti@0: from itertools import permutations kkonganti@0: from collections import OrderedDict kkonganti@0: from nltk.tokenize import word_tokenize kkonganti@0: from lexmapr.definitions import not_provided, arg_bins, ontol_db, ontol_interest kkonganti@0: kkonganti@0: kkonganti@0: # TODO: deal with ambiguous terms: nut milk, dairy (building ENVO_00003862 v dairy food product) kkonganti@0: # TODO: combine ScorLex.csv and misspellings.csv and edit _add_predefined_resources function kkonganti@0: # TODO: decide what to do with same synonym, ex custom-customize vs custom-tradition kkonganti@0: # TODO: make web on database instead of pulling relationships from API? kkonganti@0: # TODO: remove synonyms over time from SynLex and resource_synonyms.csv; edit get_synonyms kkonganti@0: kkonganti@0: def run(run_args): kkonganti@0: '''Main text processing and mapping pipeline''' kkonganti@0: kkonganti@0: # Add information from EMBL and predefined_resources folder kkonganti@0: t0 = datetime.datetime.now() kkonganti@0: global ontol_interest kkonganti@0: if run_args.embl_ontol: kkonganti@0: ontol_interest = run_args.embl_ontol kkonganti@0: kkonganti@0: print('\nBuilding databases...') kkonganti@0: cdkp.get_synonyms(run_args.remake_cache, ontol_interest) kkonganti@0: cdkp.get_resource_ids(run_args.remake_cache, ontol_interest) kkonganti@0: lookup_table = pipeline_resources.get_predefined_resources() kkonganti@0: t1 = datetime.datetime.now() kkonganti@0: print(f'\tDone! 
    # Apply other arguments and initiate mapping cache
    term_cache = {'':'\t\t\t\t',}
    output_fields = ['Sample_Id',
                     'Sample_Desc',
                     'Processed_Sample',
                     'Annotated_Sample',
                     'Matched_Components']
    if run_args.full:
        output_fields += ['Match_Status (Macro Level)',
                          'Match_Status (Micro Level)',
                          'Sample_Transformations']
        term_cache[''] += '\t\t'
    else:
        output_fields += ['Match_Status (Macro Level)']

    if run_args.bin:
        global arg_bins
        if run_args.user_bin is not None:
            arg_bins = run_args.user_bin
        for x in arg_bins:
            arg_bins[x] = ontr.Ontology_package(x, arg_bins[x])
        term_cache[''] += '\t'*len(arg_bins)
        output_fields += list(arg_bins.keys())
    else:
        arg_bins = {}

    OUT_file = open(run_args.output, 'w') if run_args.output else sys.stdout
    if OUT_file is sys.stdout:
        OUT_file.write('\n')
    OUT_file.write('\t'.join(output_fields))

    IN_file = open(run_args.input, 'r')
    if run_args.input[-4:] == '.csv':
        fr_reader = csv.reader(IN_file, delimiter=',')
    elif run_args.input[-4:] == '.tsv':
        fr_reader = csv.reader(IN_file, delimiter='\t')
    next(fr_reader)

    # Connect to primary database
    conn = sqlite3.connect(ontol_db)
    c = conn.cursor()

    # Iterate over samples in input file
    for sample_row in fr_reader:
        sample_id = '\n' + sample_row[0].strip() + '\t' + ','.join(sample_row[1:])
        original_sample = ' '.join(sample_row[1:]).strip()
        cleaned_sample = ''
        cleaned_annotated = ''
        macro_status = 'No Match'
        matched_components = []
        synonym_match = []
        micro_status = []
        bin_class = {x:[] for x in arg_bins}
        ancestors = set()
        sample_conversion_status = {}
        synonym_map = {}
        treated_sample = helpers.punctuation_treatment(original_sample)

        # Determine if sample in predefined list of null values
        if treated_sample in not_provided:
            write_line = '\t' + treated_sample + '\tNot annotated\t\t' + macro_status
            OUT_file.write(sample_id + write_line)
            if run_args.full:
                OUT_file.write('\t' + str(micro_status) + '\t' + str(sample_conversion_status))
            if run_args.bin:
                OUT_file.write('\t'*len(bin_class))
            continue

        # Remove negated words and some compound words, apply corrections
        proc_sample = helpers.further_cleanup(treated_sample)
        proc_sample, micro_status = helpers.process_sample(proc_sample,lookup_table,micro_status)

        # Try finding processed sample in cache
        try:
            OUT_file.write(sample_id+term_cache[proc_sample])
            continue
        except(KeyError):
            pass
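
        # Matching proceeds in stages below: full-term lookups on the treated and processed
        # sample, then on a token-wise cleaned sample, then permuted n-gram components (so a
        # chunk like 'beef ground' is also tried as 'ground beef'), and finally annotated
        # synonyms; each later stage only runs if the earlier ones found nothing.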
        # Attempt full term matches with and without suffixes
        if OUT_file is not sys.stdout:
            print('\tMatching '+sample_row[0].strip()+' '+ \
                  '{:<40}'.format(proc_sample[:40]).ljust(60), end='\r')

        full_term_match = helpers.map_term(treated_sample, lookup_table, c)
        if full_term_match == []:
            full_term_match = helpers.map_term(proc_sample, lookup_table, c)
            if full_term_match != []:
                micro_status.insert(0, 'Used Processed Sample')
        if full_term_match == [] and 'FOODON' in ontol_interest:
            full_term_match = helpers.map_term(proc_sample, lookup_table, c, True)
            if full_term_match != []:
                micro_status.insert(0, 'Used Processed Sample')

        # Attempt full term match with cleaned sample using suffixes
        if full_term_match == []:
            for sw_token in word_tokenize(proc_sample):
                if helpers.is_date(sw_token) or helpers.is_number(sw_token):
                    continue
                lemma, micro_status = helpers.singularize_token(sw_token, lookup_table,
                                                                micro_status, c)
                if not sw_token == lemma:
                    sample_conversion_status[sw_token] = lemma
                cleaned_sample = helpers.get_cleaned_sample(cleaned_sample, lemma, lookup_table)
                # Not de-duplicating tokens because can't account for all legitimate double names

            full_term_match = helpers.map_term(cleaned_sample, lookup_table, c)
            if full_term_match == [] and 'FOODON' in ontol_interest:
                full_term_match = helpers.map_term(cleaned_sample, lookup_table, c, True)
            if full_term_match != []:
                micro_status.insert(0, 'Used Cleaned Sample')

        # Combine the matched terms
        if full_term_match != []:
            for x in full_term_match:
                matched_components.append(x['term'] + ':' + x['id'])
                macro_status = 'Full Term Match'
                micro_status += x['status']

        # Try matching permutations if full term match fails
        # Functions mostly retained from v 0.7
        if macro_status == 'No Match':
            covered_tokens = set()
            for i in range(5, 0, -1):
                for gram_chunk in helpers.get_gram_chunks(cleaned_sample, i):
                    concat_gram_chunk = ' '.join(gram_chunk)
                    gram_permutations =\
                        list(OrderedDict.fromkeys(permutations(concat_gram_chunk.split())))
                    if set(gram_chunk) <= covered_tokens:
                        continue
                    for gram_permutation in gram_permutations:
                        gram_permutation_str = ' '.join(gram_permutation)
                        component_match = helpers.map_term(gram_permutation_str, lookup_table, c)
                        if not component_match and 'FOODON' in ontol_interest:
                            component_match = helpers.map_term(gram_permutation_str,
                                                               lookup_table, c, True)
                        if component_match:
                            for x in component_match:
                                matched_components.append(x['term'] + ':' + x['id'])
                                macro_status = 'Component Match'
                                micro_status += x['status']
                            covered_tokens.update(gram_chunk)
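
        # Last resort: annotate each cleaned token with its known synonyms and run the expanded
        # synonym strings through the same lookups used above.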
        # Try matching annotated synonyms if component match fails
        if macro_status == 'No Match':
            for clean_token in cleaned_sample.split():
                cleaned_annotated,s_m=helpers.get_annotated_sample(cleaned_annotated,clean_token)
                synonym_map.update(s_m)
            cleaned_annotated = helpers.annotation_reduce(cleaned_annotated, synonym_map)

            for x in helpers.get_annotated_synonyms(cleaned_annotated):
                synonym_match.extend(helpers.map_term(x, lookup_table, c))
            if synonym_match == [] and 'FOODON' in ontol_interest:
                for x in helpers.get_annotated_synonyms(cleaned_annotated):
                    synonym_match.extend(helpers.map_term(x, lookup_table, c, True))
            if synonym_match != []:
                macro_status = 'Synonym Match'
            for x in synonym_match:
                matched_components.append(x['term'] + ':' + x['id'])
                micro_status += x['status']

        # Remove matches that are ancestral to other matches
        if run_args.no_ancestors:
            for match_term in matched_components:
                match_term = match_term.replace('NCBITAXON','NCBITaxon')
                ontol_acc = ontr.Ontology_accession.make_instance(match_term)
                ontol_anc = ontol_acc.get_family('ancestors')
                try:
                    ontol_anc.remove('none found')
                except(ValueError):
                    pass
                ancestors |= set([x.id for x in ontol_anc])

            final_matches = []
            for match_term in matched_components:
                if match_term.split(':')[-1].replace('NCBITAXON','NCBITaxon') not in ancestors:
                    final_matches.append(match_term)
            matched_components = final_matches

        # Bin matches
        for x in arg_bins:
            for y in matched_components:
                ontol_y = ontr.Ontology_accession.make_instance(y)
                bin_class[x].extend(ontol_y.bin_term(arg_bins[x]))

        # Write to output
        if cleaned_annotated == '':
            cleaned_annotated = 'Not annotated'
        write_line = '\t' + cleaned_sample + '\t' + cleaned_annotated +\
                     '\t' + '|'.join(sorted(set(matched_components))) + '\t' + macro_status
        while re.search('  ', write_line):
            write_line = write_line.replace('  ',' ')
        term_cache[proc_sample] = write_line
        OUT_file.write(sample_id + write_line)
        if run_args.full:
            OUT_file.write('\t' + str(micro_status) + '\t' + str(sample_conversion_status))
            term_cache[proc_sample] += '\t'+str(micro_status)+'\t'+str(sample_conversion_status)
        if run_args.bin:
            for x in list(bin_class):
                OUT_file.write('\t' + '|'.join(sorted(set(bin_class[x]))).replace('[]',''))
                term_cache[proc_sample] += '\t' + '|'.join(sorted(set(bin_class[x])))


    IN_file.close()
    conn.close()
    if OUT_file is not sys.stdout:
        OUT_file.close()
    else:
        OUT_file.write('\n\n')
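
    # Summaries come from the written output file when run_args.output was given, otherwise
    # from the in-memory term cache; figures are only generated when run_args.graph is set.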
    # Report results to log and generate graphs
    t2 = datetime.datetime.now()
    print(f'\tDone! {t2-t1} passed'.ljust(60) + '\nReporting results...')
    if run_args.output:
        summarize.report_results(run_args.output, list(arg_bins.keys()))
        if run_args.graph == True:
            summarize.figure_folder()
            summarize.visualize_results(run_args.output, list(arg_bins.keys()))
    else:
        match_counts = summarize.report_cache(term_cache)
        if run_args.graph == True:
            summarize.figure_folder()
            summarize.visualize_cache(match_counts)

    print('\t'+f'Done! {datetime.datetime.now()-t2} passed'.ljust(60)+'\n')
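

# A minimal invocation sketch, assuming run() is handed an argparse-style namespace carrying the
# run_args attributes read above; the file paths and flag values here are hypothetical examples.
#
#   from argparse import Namespace
#   run(Namespace(input='samples.csv', output='results.tsv', full=False, bin=False,
#                 user_bin=None, graph=False, no_ancestors=False, remake_cache=False,
#                 embl_ontol=None))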