annotate lexmapr/pipeline_helpers.py @ 0:f298f3e5c515

"planemo upload"
author cstrittmatter
date Mon, 27 Jun 2022 17:48:55 -0400
cstrittmatter@0 1 """Helper functions for main pipeline"""
cstrittmatter@0 2
cstrittmatter@0 3 import inflection, re, unicodedata, sqlite3
cstrittmatter@0 4 from collections import OrderedDict
cstrittmatter@0 5 from itertools import combinations
cstrittmatter@0 6 from dateutil.parser import parse
cstrittmatter@0 7 from lexmapr.definitions import synonym_db
cstrittmatter@0 8 from nltk import pos_tag
cstrittmatter@0 9 from nltk.tokenize import word_tokenize
cstrittmatter@0 10 from nltk.tokenize.treebank import TreebankWordDetokenizer
cstrittmatter@0 11
cstrittmatter@0 12
def _lookup_correction(sample, lookup_table, lookup_x, micro_status, status_title):
    '''Apply corrections, if available in resource'''
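    # Pad the sample with spaces so lookup keys match only whole tokens,
    # never substrings of longer words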
    sample = ' ' + sample + ' '
    for x in lookup_table[lookup_x]:
        find_x = re.findall(' ' + re.escape(x) + ' ', sample)
        if find_x != []:
            micro_status.append(status_title + x)
            sample = sample.replace(' '+x+' ', ' '+lookup_table[lookup_x][x]+' ')
    return(' '.join(sample.split()), micro_status)


def _remove_annotated_synonyms(input_annotations):
    '''Remove annotations to see original phrase'''
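    # Annotations are embedded in curly braces, e.g. 'chicken {poultry meat}';
    # characters between braces are skipped when rebuilding the phrase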
    output_sample = ''
    copy_char = True
    for x in input_annotations:
        if x == '{':
            copy_char = False
        elif x == '}':
            copy_char = True
        else:
            if copy_char:
                output_sample += x
    while re.search('  ', output_sample):
        output_sample = output_sample.replace('  ', ' ')
    return(output_sample)


def _retrieve_map_id(search_results, c):
    '''Get resource id from database'''
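    # x[1] is the resource id taken from the label/permutation lookup; fetch
    # its record(s) and skip duplicates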
    return_list = []
    for x in search_results:
        c.execute('SELECT * FROM non_standard_resource_ids WHERE key=:key', {'key':x[1]})
        for y in c.fetchall():
            result_dic = {'term':y[1], 'id':y[0], 'status':[]}
            if result_dic not in return_list:
                return_list.append(result_dic)
    return(return_list)


def _map_term_helper(term, c):
    '''Maps term to resource or resource permutation'''
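    # Try an exact label match first; if none is found, fall back to the
    # table of precomputed label permutations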
    c.execute('SELECT * FROM standard_resource_labels WHERE key=:key', {'key':term})
    search_results = c.fetchall()
    if len(search_results) == 0:
        c.execute('SELECT * FROM standard_resource_permutations WHERE key=:key', {'key':term})
        search_results = c.fetchall()
        if len(search_results) != 0:
            return(_retrieve_map_id(search_results, c))
    else:
        return(_retrieve_map_id(search_results, c))
    return(None)


def _ngrams(input_phrase, gram_value):
    '''Get ngrams with a given value of gram_value'''
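    # e.g. _ngrams('chicken breast meat', 2) -> [['chicken', 'breast'], ['breast', 'meat']]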
    input_phrase = input_phrase.split()
    output = []
    for i in range(len(input_phrase) - gram_value + 1):
        output.append(input_phrase[i:i + gram_value])
    return(output)


def process_sample(sample, lookup_table, micro_status):
    '''Apply corrections to input sample'''
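    # Corrections are applied in a fixed order: spelling mistakes, then
    # abbreviations and acronyms, then non-English words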
    sample, micro_status = _lookup_correction(sample, lookup_table, 'spelling_mistakes',
                                              micro_status, 'Spelling Correction Treatment: ')
    sample, micro_status = _lookup_correction(sample, lookup_table, 'abbreviations',
                                              micro_status, 'Abbreviation-Acronym Treatment: ')
    sample, micro_status = _lookup_correction(sample, lookup_table, 'non_english_words',
                                              micro_status, 'Non English Language Words Treatment: ')
    return(sample, micro_status)


def punctuation_treatment(untreated_term):
    '''Remove punctuation from term'''
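    # e.g. 'FISH & CHIPS!' -> 'fish and chips'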
    punctuations_regex_char_class = r'[~`!@#$%^*()_|/{}:;,.<>?]'
    ret_term = ''
    for word_token in untreated_term.split():
        if word_token.count('-') > 1:
            ret_term += word_token.replace('-', ' ') + ' '
        else:
            ret_term += word_token + ' '
    ret_term = ret_term.lower().replace('\"','').replace('\'ve','').replace('\'m','')
    ret_term = ret_term.replace('\'s','').replace('\'t','').replace('\'ll','').replace('\'re','')
    ret_term = ret_term.replace('\'','').replace('-','').replace('[','').replace(']','')
    ret_term = ret_term.replace('&',' and ').replace('+',' and ').replace('=',' is ')
    ret_term = re.sub(punctuations_regex_char_class, ' ', ret_term).lower()
    return(' '.join(ret_term.split()))


def further_cleanup(sample_text):
    '''Remove terms indicated to be irrelevant and split some compound words'''
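    # Compound words beginning with a word in stt_words or ending with a word
    # in end_words are split in two, unless whitelisted in not_replace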
    new_text = []
    neg_words = ['no', 'non', 'not', 'neither', 'nor', 'without']
    stt_words = ['animal','cb','chicken','environmental','food','human','large','medium','necropsy',
                 'organic','other','poultry','product','sausage','small','stool','swab','wild',]
    end_words = ['aspirate','culture','environmental','fluid','food','intestine','large','meal','medium',
                 'mixed','necropsy','other','poultry','product','research','sample','sausage','slaughter',
                 'small','swab','water','wild',]
    not_replace = ['agriculture','apiculture','aquaculture','aquiculture','aviculture',
                   'coculture','hemoculture','mariculture','monoculture','sericulture',
                   'subculture','viniculture','viticulture',
                   'semifluid','subfluid','superfluid',
                   'superlarge','reenlarge','enlarge','overlarge','largemouth','larges',
                   'bonemeal','cornmeal','fishmeal','inchmeal','oatmeal','piecemeal','premeal',
                   'wholemeal','biosample','ensample','resample','subsample','backwater',
                   'another','bother','brother','foremother','frother','godmother','grandmother',
                   'housemother','mother','otherguess','otherness','othernesse','otherwhere',
                   'otherwhile','otherworld','pother','soother','smoother','smother','stepbrother',
                   'stepmother','tother',
                   'byproduct','coproduct','production','productive','subproduct',
                   'ultrasmall','smaller','smallmouth','smalltime','smallpox','smallpoxe',
                   'smallsword','smallsholder','mediumship',
                   'bathwater','bilgewater','blackwater','breakwater','cutwater','deepwater',
                   'dewater','dishwater','eyewater','firewater','floodwater','freshwater',
                   'graywater','groundwater','headwater','jerkwater','limewater','meltwater',
                   'overwater','polywater','rainwater','rosewater','saltwater','seawater',
                   'shearwater','springwater','tailwater','tidewater','underwater','wastewater',
                   'semiwild','wildcard','wildcat','wildcatter','wildcatted','wildebeest','wilded',
                   'wilder','wilderment','wilderness','wildernesse','wildest','wildfire','wildflower',
                   'wildfowl','wildfowler','wildish','wildland','wildling','wildlife','wildwood',
                   ]

    found_comp = []
    for comp_word in stt_words:
        found_comp.extend(re.findall(rf'({comp_word})(\w+)', sample_text))
    for comp_word in end_words:
        found_comp.extend(re.findall(rf'(\w+)({comp_word})', sample_text))
    for x in found_comp:
        if x[0]+x[1] not in not_replace and x[0]+x[1]+'s' not in not_replace:
            sample_text = sample_text.replace(x[0]+x[1], x[0]+' '+x[1])

    for sample_word in sample_text.split():
        if len(sample_word) > 1:
            new_text.append(sample_word.strip())

    if 'nor' in new_text:
        if 'neither' not in new_text:
            word_ind = new_text.index('nor')
            new_text.insert(max(0, word_ind-2), 'neither')

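    # Drop each negation word together with the word that follows it
    # (first occurrence only)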
    for neg_word in neg_words:
        if neg_word in new_text:
            word_ind = new_text.index(neg_word)
            del(new_text[word_ind:word_ind+2])
    return(' '.join(new_text))


def is_number(input_string):
    '''Determine whether a string is a number'''
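    # float() handles multi-character strings like '12' and '3.5';
    # unicodedata.numeric() handles single numeric characters like '½',
    # but raises TypeError on anything longer than one character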
    try:
        float(input_string)
        return(True)
    except (TypeError, ValueError):
        pass
    try:
        unicodedata.numeric(input_string)
        return(True)
    except (TypeError, ValueError):
        return(False)


def is_date(input_string):
    '''Determine whether a string is a date or day'''
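    # dateutil's parser accepts full dates as well as day or month names,
    # e.g. '2022-06-27' or 'Monday'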
    try:
        parse(input_string)
        return(True)
    except (ValueError, OverflowError):
        return(False)


def singularize_token(token, lookup_table, micro_status, c):
    '''Singularize the string token, if applicable'''
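    # A token is left as-is if it is a known inflection exception, ends in
    # characters typical of Latinate singulars, or appears as the final word
    # of a two-word NCBITaxon label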
    if token in lookup_table['inflection_exceptions']:
        return(token, micro_status)

    exception_tail_chars_list = ['us', 'ia', 'ta', 'ss']  # TODO: add as, is?
    for char in exception_tail_chars_list:
        if token.endswith(char):
            return(token, micro_status)

    taxon_names = c.execute('''SELECT * FROM standard_resource_labels WHERE key LIKE :key AND
                               value LIKE :value''',
                            {'key':'% '+token, 'value':'NCBITaxon%'}).fetchall()
    remove_finds = []
    for x in taxon_names:
        if len(x[0].split()) > 2:
            remove_finds.append(x)
    for x in remove_finds:
        taxon_names.remove(x)
    if taxon_names != []:
        return(token, micro_status)

    lemma = inflection.singularize(token)
    micro_status.append('Inflection (Plural) Treatment: ' + token)
    return(lemma, micro_status)


def get_cleaned_sample(input_sample, token, lookup_table):
    '''Prepare the cleaned sample phrase using the input token'''
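    # Stop words are dropped; all other tokens are appended to the phrase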
    if input_sample == '' and token not in lookup_table['stop_words']:
        return(token)
    elif token not in lookup_table['stop_words']:
        return(input_sample + ' ' + token)
    else:
        return(input_sample)


def get_annotated_sample(annotated_sample, lemma):
    '''Embed synonyms in the sample, if available'''
    # TODO: able to annotate permutations instead of just left to right?
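    # Synonyms found in the synonym database are appended to the sample text
    # in curly braces, and synonym_map records which phrase produced which
    # synonym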
    synonym_map = {}
    if not annotated_sample:
        annotated_sample = lemma
    else:
        annotated_sample = f'{annotated_sample} {lemma}'

    conn_syn = sqlite3.connect(synonym_db)
    d = conn_syn.cursor()
    for y in [lemma, _remove_annotated_synonyms(annotated_sample)]:
        d.execute('SELECT * FROM label_synonyms WHERE key=:key', {'key':y})
        for x in d.fetchall():
            if not re.search(re.escape(x[1]), annotated_sample):
                annotated_sample = annotated_sample+' {'+x[1]+'}'
                synonym_map[y] = x[1]
    conn_syn.close()
    return(annotated_sample, synonym_map)


def map_term(term, lookup_table, c, consider_suffixes=False):
    '''Map term to some resource in database'''
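    # With consider_suffixes=True, each suffix in the lookup table is appended
    # to the term in turn, and the first suffixed form that maps is returned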
    if consider_suffixes:
        for suffix in lookup_table['suffixes']:
            mapping = _map_term_helper(term+' '+suffix, c)
            if mapping:
                for x in mapping:
                    x['status'].insert(-2, 'Suffix Addition')
                return(mapping)
    else:
        mapping = _map_term_helper(term, c)
        if mapping:
            return(mapping)
    return([])


def annotation_reduce(annotated_sample, synonym_map):
    '''Remove annotations on shorter phrases included in longer phrases with annotations'''
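    # e.g. if both 'chicken breast' and 'breast' carry annotations, the
    # annotation for 'breast' is dropped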
    remove_list = []
    for x in list(synonym_map.keys()):
        for y in list(synonym_map.keys()):
            if x != y:
                if x.startswith(y) or x.endswith(y):
                    remove_list.append(y)
    for x in remove_list:
        annotated_sample = annotated_sample.replace('{'+synonym_map[x]+'}',' ')
    return(' '.join(annotated_sample.split()))


def get_annotated_synonyms(input_annotations):
    '''Get list of the annotations'''
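    # e.g. 'chicken {poultry} {meat}' -> ['poultry', 'meat']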
    synonym_list = []
    for x in input_annotations.split('{')[1:]:
        synonym_list.append(x.split('}')[0])
    return(synonym_list)


def get_gram_chunks(input_phrase, num):
    '''Make num-gram chunks from input'''
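    # Short phrases use all num-token combinations; phrases of 15 or more
    # tokens fall back to contiguous n-grams to avoid combinatorial blowup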
    input_tokens = input_phrase.split()
    if len(input_tokens) < 15:
        return(list(combinations(input_tokens, num)))
    else:
        return(_ngrams(input_phrase, num))