# Copyright 2001 by Jeffrey Chang. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Maximum Entropy code.

Uses Improved Iterative Scaling.
"""
# TODO Define terminology

from functools import reduce
import warnings

try:
    import numpy as np
except ImportError:
    from Bio import MissingPythonDependencyError

    raise MissingPythonDependencyError(
        "Please install NumPy if you want to use Bio.MaxEntropy. "
        "See http://www.numpy.org/"
    ) from None


from Bio import BiopythonDeprecationWarning

warnings.warn(
    "The 'Bio.MaxEntropy' module is deprecated and will be removed in a future "
    "release of Biopython. Consider using scikit-learn instead.",
    BiopythonDeprecationWarning,
)

class MaxEntropy:
    """Hold information for a Maximum Entropy classifier.

    Members:
    classes      List of the possible classes of data.
    alphas       List of the weights for each feature.
    feature_fns  List of the feature functions.

    Car data from the example Naive Bayes classifier by Eric Meisner,
    November 22, 2003:
    http://www.inf.u-szeged.hu/~ormandi/teaching

    >>> from Bio.MaxEntropy import train, classify
    >>> xcar = [
    ...     ['Red', 'Sports', 'Domestic'],
    ...     ['Red', 'Sports', 'Domestic'],
    ...     ['Red', 'Sports', 'Domestic'],
    ...     ['Yellow', 'Sports', 'Domestic'],
    ...     ['Yellow', 'Sports', 'Imported'],
    ...     ['Yellow', 'SUV', 'Imported'],
    ...     ['Yellow', 'SUV', 'Imported'],
    ...     ['Yellow', 'SUV', 'Domestic'],
    ...     ['Red', 'SUV', 'Imported'],
    ...     ['Red', 'Sports', 'Imported']]
    >>> ycar = ['Yes', 'No', 'Yes', 'No', 'Yes', 'No', 'Yes', 'No', 'No', 'Yes']

    Training requires some rules or features:

    >>> def udf1(ts, cl):
    ...     return ts[0] != 'Red'
    ...
    >>> def udf2(ts, cl):
    ...     return ts[1] != 'Sports'
    ...
    >>> def udf3(ts, cl):
    ...     return ts[2] != 'Domestic'
    ...
    >>> user_functions = [udf1, udf2, udf3]  # must be an iterable type
    >>> xe = train(xcar, ycar, user_functions)
    >>> for xv, yv in zip(xcar, ycar):
    ...     xc = classify(xe, xv)
    ...     print('Pred: %s gives %s y is %s' % (xv, xc, yv))
    ...
    Pred: ['Red', 'Sports', 'Domestic'] gives No y is Yes
    Pred: ['Red', 'Sports', 'Domestic'] gives No y is No
    Pred: ['Red', 'Sports', 'Domestic'] gives No y is Yes
    Pred: ['Yellow', 'Sports', 'Domestic'] gives No y is No
    Pred: ['Yellow', 'Sports', 'Imported'] gives No y is Yes
    Pred: ['Yellow', 'SUV', 'Imported'] gives No y is No
    Pred: ['Yellow', 'SUV', 'Imported'] gives No y is Yes
    Pred: ['Yellow', 'SUV', 'Domestic'] gives No y is No
    Pred: ['Red', 'SUV', 'Imported'] gives No y is No
    Pred: ['Red', 'Sports', 'Imported'] gives No y is Yes
    """

    def __init__(self):
        """Initialize the class."""
        self.classes = []
        self.alphas = []
        self.feature_fns = []

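# A rough equivalent with scikit-learn (a hypothetical sketch following the
# deprecation notice above; assumes scikit-learn is installed and is not
# expected to reproduce the doctest predictions exactly).  A maximum entropy
# classifier over indicator features corresponds to multinomial logistic
# regression on one-hot encoded inputs:
#
#     from sklearn.linear_model import LogisticRegression
#     from sklearn.preprocessing import OneHotEncoder
#
#     encoder = OneHotEncoder()
#     X = encoder.fit_transform(xcar)  # xcar/ycar as in the docstring above
#     model = LogisticRegression().fit(X, ycar)
#     predictions = model.predict(X)
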
def calculate(me, observation):
    """Calculate the log of the probability for each class.

    me is a MaxEntropy object that has been trained. observation is a vector
    representing the observed data. The return value is a list of
    unnormalized log probabilities for each class.
    """
    scores = []
    assert len(me.feature_fns) == len(me.alphas)
    for klass in me.classes:
        lprob = 0.0
        for fn, alpha in zip(me.feature_fns, me.alphas):
            lprob += fn(observation, klass) * alpha
        scores.append(lprob)
    return scores


def classify(me, observation):
    """Classify an observation into a class."""
    scores = calculate(me, observation)
    max_score, klass = scores[0], me.classes[0]
    for i in range(1, len(scores)):
        if scores[i] > max_score:
            max_score, klass = scores[i], me.classes[i]
    return klass

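# A minimal sketch (not part of the original Bio.MaxEntropy API): because
# calculate() returns unnormalized log probabilities, a softmax over the
# scores recovers normalized class probabilities for one observation.
def _example_class_probabilities(me, observation):
    """Return a {class: probability} dict for one observation (sketch)."""
    scores = np.array(calculate(me, observation), dtype=float)
    scores -= scores.max()  # shift by the max score for numerical stability
    probs = np.exp(scores)
    probs /= probs.sum()
    return dict(zip(me.classes, probs))
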
def _eval_feature_fn(fn, xs, classes):
    """Evaluate a feature function on every (training instance, class) pair (PRIVATE).

    fn is a callback function that takes two parameters: a
    training instance and a class. Return a dictionary of (training
    set index, class index) -> non-zero value. Values of 0 are not
    stored in the dictionary.
    """
    values = {}
    for i in range(len(xs)):
        for j in range(len(classes)):
            f = fn(xs[i], classes[j])
            if f != 0:
                values[(i, j)] = f
    return values

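# Illustrative example: with xs = [['Red'], ['Yellow']], classes = ['No', 'Yes']
# and fn = lambda ts, cl: ts[0] == 'Red', the returned sparse dictionary is
# {(0, 0): True, (0, 1): True}; the zero values for xs[1] are omitted.
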
def _calc_empirical_expects(xs, ys, classes, features):
    """Calculate the expectation of each feature from the data (PRIVATE).

    This is the constraint for the maximum entropy distribution. Return a
    list of expectations, parallel to the list of features.
    """
    # E[f_i] = SUM_x,y P(x, y) f_i(x, y)
    #        = 1/N SUM_j f_i(x_j, y_j)
    class2index = {}
    for index, key in enumerate(classes):
        class2index[key] = index
    ys_i = [class2index[y] for y in ys]

    expect = []
    N = len(xs)
    for feature in features:
        s = 0
        for i in range(N):
            s += feature.get((i, ys_i[i]), 0)
        expect.append(s / N)
    return expect

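# Worked example (continuing the sketch above): with classes = ['No', 'Yes'],
# ys = ['Yes', 'No'] gives ys_i = [1, 0], so for the feature
# {(0, 0): True, (0, 1): True} the sum picks up only feature[(0, 1)] and the
# empirical expectation is 1 / 2 = 0.5.
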
def _calc_model_expects(xs, classes, features, alphas):
    """Calculate the expectation of each feature from the model (PRIVATE).

    This is not used in maximum entropy training, but is useful
    for debugging.
    """
    # E_model[f_i] = SUM_x P(x) SUM_y P(y|x) f_i(x, y)
    #              = 1/N SUM_x SUM_y P(y|x) f_i(x, y)
    p_yx = _calc_p_class_given_x(xs, classes, features, alphas)

    expects = []
    for feature in features:
        total = 0.0
        for (i, j), f in feature.items():
            total += p_yx[i][j] * f
        expects.append(total / len(xs))
    return expects

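# Debugging sketch: at convergence of IIS the model expectations should match
# the empirical ones, so a check along these lines can flag training problems
# (hypothetical helper usage, not part of the original module):
#
#     model_e = _calc_model_expects(xs, classes, features, alphas)
#     empirical_e = _calc_empirical_expects(xs, ys, classes, features)
#     assert np.allclose(model_e, empirical_e, atol=1e-3)
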
def _calc_p_class_given_x(xs, classes, features, alphas):
    """Calculate conditional probability P(y|x) (PRIVATE).

    y is the class and x is an instance from the training set.
    Return a len(xs) x len(classes) matrix of probabilities.
    """
    prob_yx = np.zeros((len(xs), len(classes)))

    # Calculate log P(y, x).
    assert len(features) == len(alphas)
    for feature, alpha in zip(features, alphas):
        for (x, y), f in feature.items():
            prob_yx[x][y] += alpha * f
    # Take an exponent to get P(y, x)
    prob_yx = np.exp(prob_yx)
    # Divide out the probability over each class, so we get P(y|x).
    for i in range(len(xs)):
        z = sum(prob_yx[i])
        prob_yx[i] = prob_yx[i] / z
    return prob_yx

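# Note: the normalization above is a row-wise softmax over unnormalized log
# probabilities. With large alpha * f sums, np.exp can overflow; a common
# hardening (an assumption, not in the original code) is to shift each row by
# its maximum before exponentiating:
#
#     prob_yx -= prob_yx.max(axis=1, keepdims=True)  # before np.exp
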
def _calc_f_sharp(N, nclasses, features):
    """Calculate a matrix of f sharp values (PRIVATE)."""
    # f#(x, y) = SUM_i f_i(x, y)
    f_sharp = np.zeros((N, nclasses))
    for feature in features:
        for (i, j), f in feature.items():
            f_sharp[i][j] += f
    return f_sharp

def _iis_solve_delta(
    N, feature, f_sharp, empirical, prob_yx, max_newton_iterations, newton_converge
):
    """Solve for delta using Newton's method (PRIVATE)."""
    # Solve for delta_i in:
    # empirical_i - 1/N SUM_x SUM_y P(y|x) f_i(x, y) e^[delta_i * f#(x, y)] = 0
    delta = 0.0
    iters = 0
    while iters < max_newton_iterations:  # iterate for Newton's method
        f_newton = df_newton = 0.0  # evaluate the function and derivative
        for (i, j), f in feature.items():
            prod = prob_yx[i][j] * f * np.exp(delta * f_sharp[i][j])
            f_newton += prod
            df_newton += prod * f_sharp[i][j]
        f_newton, df_newton = empirical - f_newton / N, -df_newton / N

        ratio = f_newton / df_newton
        delta -= ratio
        if np.fabs(ratio) < newton_converge:  # converged
            break
        iters += 1
    else:
        raise RuntimeError("Newton's method did not converge")
    return delta

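# Aside: when f#(x, y) equals the same constant M for every (x, y) pair, the
# Newton solve above has a closed form -- the classic GIS update
# delta_i = (1/M) * log(empirical_i / model_expectation_i) -- because the
# root equation reduces to empirical_i = e^(M * delta_i) * model_expectation_i.
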
def _train_iis(
    xs,
    classes,
    features,
    f_sharp,
    alphas,
    e_empirical,
    max_newton_iterations,
    newton_converge,
):
    """Do one iteration of hill climbing to find better alphas (PRIVATE)."""
    # This is a good function to parallelize.

    # Pre-calculate P(y|x)
    p_yx = _calc_p_class_given_x(xs, classes, features, alphas)

    N = len(xs)
    newalphas = alphas[:]
    for i in range(len(alphas)):
        delta = _iis_solve_delta(
            N,
            features[i],
            f_sharp,
            e_empirical[i],
            p_yx,
            max_newton_iterations,
            newton_converge,
        )
        newalphas[i] += delta
    return newalphas

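# A possible parallelization sketch (hypothetical, not part of the original
# code): each delta_i depends only on features[i], e_empirical[i] and the
# shared p_yx, so the per-feature Newton solves are independent, e.g.:
#
#     from concurrent.futures import ProcessPoolExecutor
#
#     n = len(features)
#     with ProcessPoolExecutor() as pool:
#         deltas = list(pool.map(
#             _iis_solve_delta,
#             [N] * n, features, [f_sharp] * n, e_empirical, [p_yx] * n,
#             [max_newton_iterations] * n, [newton_converge] * n,
#         ))
#
# (Worthwhile only for many features; process start-up and pickling of the
# shared arrays carry real overhead.)
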
def train(
    training_set,
    results,
    feature_fns,
    update_fn=None,
    max_iis_iterations=10000,
    iis_converge=1.0e-5,
    max_newton_iterations=100,
    newton_converge=1.0e-10,
):
    """Train a maximum entropy classifier, returning a MaxEntropy object.

    Train a maximum entropy classifier on a training set.
    training_set is a list of observations. results is a list of the
    class assignments for each observation. feature_fns is a list of
    the features. These are callback functions that take an
    observation and class and return a 1 or 0. update_fn is a
    callback function that is called at each training iteration. It is
    passed a MaxEntropy object that encapsulates the current state of
    the training.

    The maximum number of iterations and the convergence criterion for IIS
    are given by max_iis_iterations and iis_converge, respectively, while
    max_newton_iterations and newton_converge are the maximum number
    of iterations and the convergence criterion for Newton's method.
    """
    if not training_set:
        raise ValueError("No data in the training set.")
    if len(training_set) != len(results):
        raise ValueError("training_set and results should be parallel lists.")

    # Rename variables for convenience.
    xs, ys = training_set, results

    # Get a list of all the classes that need to be trained.
    classes = sorted(set(results))

    # Cache values for all features.
    features = [_eval_feature_fn(fn, training_set, classes) for fn in feature_fns]
    # Cache values for f#.
    f_sharp = _calc_f_sharp(len(training_set), len(classes), features)

    # Pre-calculate the empirical expectations of the features.
    e_empirical = _calc_empirical_expects(xs, ys, classes, features)

    # Now train the alpha parameters to weigh each feature.
    alphas = [0.0] * len(features)
    iters = 0
    while iters < max_iis_iterations:
        nalphas = _train_iis(
            xs,
            classes,
            features,
            f_sharp,
            alphas,
            e_empirical,
            max_newton_iterations,
            newton_converge,
        )
        diff = [np.fabs(x - y) for x, y in zip(alphas, nalphas)]
        diff = reduce(np.add, diff, 0)
        alphas = nalphas

        me = MaxEntropy()
        me.alphas, me.classes, me.feature_fns = alphas, classes, feature_fns
        if update_fn is not None:
            update_fn(me)

        if diff < iis_converge:  # converged
            break
        iters += 1  # count the iteration so max_iis_iterations is honored
    else:
        raise RuntimeError("IIS did not converge")

    return me

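# Usage sketch: the update_fn hook receives the partially trained classifier
# after each IIS iteration, which is handy for monitoring convergence
# (illustrative, using the xcar/ycar data from the class docstring):
#
#     def show_progress(me):
#         print(me.alphas)
#
#     xe = train(xcar, ycar, user_functions, update_fn=show_progress)
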
if __name__ == "__main__":
    from Bio._utils import run_doctest

    run_doctest(verbose=0)