# Copyright 2001 by Jeffrey Chang. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Maximum Entropy code.

Uses Improved Iterative Scaling.
"""
# TODO Define terminology

from functools import reduce
import warnings

try:
    import numpy as np
except ImportError:
    from Bio import MissingPythonDependencyError

    raise MissingPythonDependencyError(
        "Please install NumPy if you want to use Bio.MaxEntropy. "
        "See http://www.numpy.org/"
    ) from None


from Bio import BiopythonDeprecationWarning

warnings.warn(
    "The 'Bio.MaxEntropy' module is deprecated and will be removed in a future "
    "release of Biopython. Consider using scikit-learn instead.",
    BiopythonDeprecationWarning,
)


class MaxEntropy:
    """Hold information for a Maximum Entropy classifier.

    Members:
    classes      List of the possible classes of data.
    alphas       List of the weights for each feature.
    feature_fns  List of the feature functions.

    Car data from the Naive Bayes Classifier example by Eric Meisner,
    November 22, 2003: http://www.inf.u-szeged.hu/~ormandi/teaching

    >>> from Bio.MaxEntropy import train, classify
    >>> xcar = [
    ...     ['Red', 'Sports', 'Domestic'],
    ...     ['Red', 'Sports', 'Domestic'],
    ...     ['Red', 'Sports', 'Domestic'],
    ...     ['Yellow', 'Sports', 'Domestic'],
    ...     ['Yellow', 'Sports', 'Imported'],
    ...     ['Yellow', 'SUV', 'Imported'],
    ...     ['Yellow', 'SUV', 'Imported'],
    ...     ['Yellow', 'SUV', 'Domestic'],
    ...     ['Red', 'SUV', 'Imported'],
    ...     ['Red', 'Sports', 'Imported']]
    >>> ycar = ['Yes', 'No', 'Yes', 'No', 'Yes', 'No', 'Yes', 'No', 'No', 'Yes']

    Requires some rules or features:

    >>> def udf1(ts, cl):
    ...     return ts[0] != 'Red'
    ...
    >>> def udf2(ts, cl):
    ...     return ts[1] != 'Sports'
    ...
    >>> def udf3(ts, cl):
    ...     return ts[2] != 'Domestic'
    ...
    >>> user_functions = [udf1, udf2, udf3]  # must be an iterable type
    >>> xe = train(xcar, ycar, user_functions)
    >>> for xv, yv in zip(xcar, ycar):
    ...     xc = classify(xe, xv)
    ...     print('Pred: %s gives %s y is %s' % (xv, xc, yv))
    ...
    Pred: ['Red', 'Sports', 'Domestic'] gives No y is Yes
    Pred: ['Red', 'Sports', 'Domestic'] gives No y is No
    Pred: ['Red', 'Sports', 'Domestic'] gives No y is Yes
    Pred: ['Yellow', 'Sports', 'Domestic'] gives No y is No
    Pred: ['Yellow', 'Sports', 'Imported'] gives No y is Yes
    Pred: ['Yellow', 'SUV', 'Imported'] gives No y is No
    Pred: ['Yellow', 'SUV', 'Imported'] gives No y is Yes
    Pred: ['Yellow', 'SUV', 'Domestic'] gives No y is No
    Pred: ['Red', 'SUV', 'Imported'] gives No y is No
    Pred: ['Red', 'Sports', 'Imported'] gives No y is Yes
    """

    def __init__(self):
        """Initialize the class."""
        self.classes = []
        self.alphas = []
        self.feature_fns = []


def calculate(me, observation):
    """Calculate the log of the probability for each class.

    me is a MaxEntropy object that has been trained. observation is a vector
    representing the observed data. The return value is a list of
    unnormalized log probabilities for each class.
    """
    scores = []
    assert len(me.feature_fns) == len(me.alphas)
    for klass in me.classes:
        lprob = 0.0
        for fn, alpha in zip(me.feature_fns, me.alphas):
            lprob += fn(observation, klass) * alpha
        scores.append(lprob)
    return scores


def classify(me, observation):
    """Classify an observation into a class."""
    scores = calculate(me, observation)
    max_score, klass = scores[0], me.classes[0]
    for i in range(1, len(scores)):
        if scores[i] > max_score:
            max_score, klass = scores[i], me.classes[i]
    return klass


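# ``calculate`` and ``classify`` are often used together; the helper below is a
# hypothetical sketch (not part of the original module API) that makes their
# relationship explicit: classify() simply takes the argmax of calculate().
def _classify_with_scores(me, observation):
    """Return (predicted class, unnormalized log scores) in one call.

    Illustrative only; equivalent to calling classify() and calculate()
    separately on the same observation.
    """
    scores = calculate(me, observation)
    best = max(range(len(scores)), key=scores.__getitem__)  # index of max score
    return me.classes[best], scores

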
def _eval_feature_fn(fn, xs, classes):
    """Evaluate a feature function on every instance of the training set and class (PRIVATE).

    fn is a callback function that takes two parameters: a
    training instance and a class. Return a dictionary of (training
    set index, class index) -> non-zero value. Values of 0 are not
    stored in the dictionary.
    """
    values = {}
    for i in range(len(xs)):
        for j in range(len(classes)):
            f = fn(xs[i], classes[j])
            if f != 0:
                values[(i, j)] = f
    return values


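# As a concrete illustration (hypothetical inputs, not taken from the module):
# with xs = [['Red'], ['Yellow']], classes = ['No', 'Yes'] and a feature
# fn = lambda ts, cl: ts[0] == 'Red' and cl == 'Yes', the returned cache is
# {(0, 1): True} -- the three zero-valued (instance, class) pairs are omitted.

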
def _calc_empirical_expects(xs, ys, classes, features):
    """Calculate the expectation of each function from the data (PRIVATE).

    This is the constraint for the maximum entropy distribution. Return a
    list of expectations, parallel to the list of features.
    """
    # E~[f_i] = SUM_{x,y} P~(x, y) f_i(x, y)
    #         = 1/N SUM_j f_i(x_j, y_j)
    # where P~ is the empirical distribution, uniform over the N training pairs.
    class2index = {key: index for index, key in enumerate(classes)}
    ys_i = [class2index[y] for y in ys]

    expect = []
    N = len(xs)
    for feature in features:
        s = 0
        for i in range(N):
            s += feature.get((i, ys_i[i]), 0)
        expect.append(s / N)
    return expect


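# Continuing the illustration above: with ys = ['Yes', 'No'] (so ys_i = [1, 0]),
# N = 2 and the single cached feature {(0, 1): True}, the loop picks up
# feature.get((0, 1)) for instance 0 and feature.get((1, 0)) for instance 1,
# giving expect = [(1 + 0) / 2] = [0.5].

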
def _calc_model_expects(xs, classes, features, alphas):
    """Calculate the expectation of each feature from the model (PRIVATE).

    This is not used in maximum entropy training, but provides a good function
    for debugging.
    """
    # SUM_X P(X) SUM_Y P(Y|X) F(X, Y)
    #   = 1/N SUM_X SUM_Y P(Y|X) F(X, Y)
    p_yx = _calc_p_class_given_x(xs, classes, features, alphas)

    expects = []
    for feature in features:
        total = 0.0  # renamed from ``sum`` to avoid shadowing the builtin
        for (i, j), f in feature.items():
            total += p_yx[i][j] * f
        expects.append(total / len(xs))
    return expects


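# A debugging sketch (hypothetical usage; the tolerance is illustrative): after
# training has converged, the model expectations should approximately match the
# empirical expectations that constrain them, e.g.
#
#     assert np.allclose(
#         _calc_model_expects(xs, classes, features, me.alphas),
#         _calc_empirical_expects(xs, ys, classes, features),
#         atol=1e-3,
#     )

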
def _calc_p_class_given_x(xs, classes, features, alphas):
    """Calculate conditional probability P(y|x) (PRIVATE).

    y is the class and x is an instance from the training set.
    Return a len(xs) x len(classes) matrix of probabilities.
    """
    prob_yx = np.zeros((len(xs), len(classes)))

    # Calculate log P(y, x).
    assert len(features) == len(alphas)
    for feature, alpha in zip(features, alphas):
        for (x, y), f in feature.items():
            prob_yx[x][y] += alpha * f
    # Take an exponent to get P(y, x).
    prob_yx = np.exp(prob_yx)
    # Divide out the probability over each class, so we get P(y|x).
    for i in range(len(xs)):
        z = sum(prob_yx[i])
        prob_yx[i] = prob_yx[i] / z
    return prob_yx


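# Note on numerical range: np.exp can overflow when the summed alpha * f scores
# grow large. A common remedy (a sketch, not what this module does) is to shift
# each row by its maximum before exponentiating, which leaves P(y|x) unchanged:
#
#     prob_yx -= prob_yx.max(axis=1, keepdims=True)
#     prob_yx = np.exp(prob_yx)
#     prob_yx /= prob_yx.sum(axis=1, keepdims=True)

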
def _calc_f_sharp(N, nclasses, features):
    """Calculate a matrix of f sharp values (PRIVATE)."""
    # f#(x, y) = SUM_i f_i(x, y)
    f_sharp = np.zeros((N, nclasses))
    for feature in features:
        for (i, j), f in feature.items():
            f_sharp[i][j] += f
    return f_sharp


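# For example (hypothetical caches): given feature dictionaries {(0, 1): 1} and
# {(0, 1): 1, (1, 0): 1}, f_sharp[0][1] would be 2 and f_sharp[1][0] would be 1;
# f# simply totals all feature values in each (instance, class) cell.

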
def _iis_solve_delta(
    N, feature, f_sharp, empirical, prob_yx, max_newton_iterations, newton_converge
):
    """Solve for delta using Newton's method (PRIVATE)."""
    # Solve for delta_i in:
    #   E~[f_i] - 1/N SUM_x SUM_c P(c|x) f_i(x, c) e^(delta_i * f#(x, c)) = 0
    delta = 0.0
    iters = 0
    while iters < max_newton_iterations:  # iterate for Newton's method
        f_newton = df_newton = 0.0  # evaluate the function and derivative
        for (i, j), f in feature.items():
            prod = prob_yx[i][j] * f * np.exp(delta * f_sharp[i][j])
            f_newton += prod
            df_newton += prod * f_sharp[i][j]
        f_newton, df_newton = empirical - f_newton / N, -df_newton / N

        ratio = f_newton / df_newton
        delta -= ratio
        if np.fabs(ratio) < newton_converge:  # converged
            break
        iters += 1
    else:
        raise RuntimeError("Newton's method did not converge")
    return delta


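# When f#(x, y) happens to equal a constant M for every (x, y) pair -- the
# Generalized Iterative Scaling special case -- the equation above has the
# closed-form solution (a sketch under that assumption):
#
#     delta = np.log(empirical / e_model) / M
#
# where e_model = 1/N SUM_x SUM_c P(c|x) f_i(x, c). Newton's method is needed
# here because f# generally varies across pairs.

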
def _train_iis(
    xs,
    classes,
    features,
    f_sharp,
    alphas,
    e_empirical,
    max_newton_iterations,
    newton_converge,
):
    """Do one iteration of hill climbing to find better alphas (PRIVATE)."""
    # This is a good function to parallelize.

    # Pre-calculate P(y|x).
    p_yx = _calc_p_class_given_x(xs, classes, features, alphas)

    N = len(xs)
    newalphas = alphas[:]
    for i in range(len(alphas)):
        delta = _iis_solve_delta(
            N,
            features[i],
            f_sharp,
            e_empirical[i],
            p_yx,
            max_newton_iterations,
            newton_converge,
        )
        newalphas[i] += delta
    return newalphas


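# The per-feature loop above is embarrassingly parallel once p_yx is known. A
# hedged sketch with the standard library (illustrative only; for small training
# sets the pickling overhead may outweigh any speedup):
#
#     from concurrent.futures import ProcessPoolExecutor
#     from itertools import repeat
#
#     with ProcessPoolExecutor() as pool:
#         deltas = pool.map(
#             _iis_solve_delta,
#             repeat(N), features, repeat(f_sharp), e_empirical,
#             repeat(p_yx), repeat(max_newton_iterations), repeat(newton_converge),
#         )
#     newalphas = [a + d for a, d in zip(alphas, deltas)]

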
def train(
    training_set,
    results,
    feature_fns,
    update_fn=None,
    max_iis_iterations=10000,
    iis_converge=1.0e-5,
    max_newton_iterations=100,
    newton_converge=1.0e-10,
):
    """Train a maximum entropy classifier and return a MaxEntropy object.

    training_set is a list of observations. results is a list of the
    class assignments for each observation. feature_fns is a list of
    the features. These are callback functions that take an
    observation and a class and return 1 or 0. update_fn is a
    callback function that is called at each training iteration. It is
    passed a MaxEntropy object that encapsulates the current state of
    the training.

    The maximum number of iterations and the convergence criterion for IIS
    are given by max_iis_iterations and iis_converge, respectively, while
    max_newton_iterations and newton_converge are the maximum number
    of iterations and the convergence criterion for Newton's method.
    """
    if not training_set:
        raise ValueError("No data in the training set.")
    if len(training_set) != len(results):
        raise ValueError("training_set and results should be parallel lists.")

    # Rename variables for convenience.
    xs, ys = training_set, results

    # Get a list of all the classes that need to be trained.
    classes = sorted(set(results))

    # Cache values for all features.
    features = [_eval_feature_fn(fn, training_set, classes) for fn in feature_fns]
    # Cache values for f#.
    f_sharp = _calc_f_sharp(len(training_set), len(classes), features)

    # Pre-calculate the empirical expectations of the features.
    e_empirical = _calc_empirical_expects(xs, ys, classes, features)

    # Now train the alpha parameters to weigh each feature.
    alphas = [0.0] * len(features)
    iters = 0
    while iters < max_iis_iterations:
        nalphas = _train_iis(
            xs,
            classes,
            features,
            f_sharp,
            alphas,
            e_empirical,
            max_newton_iterations,
            newton_converge,
        )
        diff = [np.fabs(x - y) for x, y in zip(alphas, nalphas)]
        diff = reduce(np.add, diff, 0)
        alphas = nalphas

        me = MaxEntropy()
        me.alphas, me.classes, me.feature_fns = alphas, classes, feature_fns
        if update_fn is not None:
            update_fn(me)

        if diff < iis_converge:  # converged
            break
        iters += 1  # without this increment the loop could never terminate
    else:
        raise RuntimeError("IIS did not converge")

    return me


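# A sketch of monitoring training via update_fn (the callback body is
# illustrative; xcar, ycar and user_functions are the doctest data defined in
# the MaxEntropy class docstring above):
#
#     def report(me):
#         print("alphas:", ["%.3f" % a for a in me.alphas])
#
#     xe = train(xcar, ycar, user_functions, update_fn=report)

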
if __name__ == "__main__":
    from Bio._utils import run_doctest

    run_doctest(verbose=0)