# Copyright 2001 by Jeffrey Chang. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Maximum Entropy code.

Uses Improved Iterative Scaling.
"""
# TODO Define terminology

from functools import reduce
import warnings

try:
    import numpy as np
except ImportError:
    from Bio import MissingPythonDependencyError

    raise MissingPythonDependencyError(
        "Please install NumPy if you want to use Bio.MaxEntropy. "
        "See http://www.numpy.org/"
    ) from None


from Bio import BiopythonDeprecationWarning

warnings.warn(
    "The 'Bio.MaxEntropy' module is deprecated and will be removed in a future "
    "release of Biopython. Consider using scikit-learn instead.",
    BiopythonDeprecationWarning,
)


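# Bio.MaxEntropy implements a conditional log-linear (maximum entropy) model.
# Each feature function f_i(x, y) scores an observation/class pair, and each
# feature carries a weight alpha_i.  A class is scored by
# SUM_i alpha_i * f_i(x, y); exponentiating and normalizing over the classes
# gives P(y|x).  The weights are fitted with Improved Iterative Scaling (IIS),
# with a one-dimensional Newton solve for each weight update.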
class MaxEntropy:
    """Hold information for a Maximum Entropy classifier.

    Members:
    classes      List of the possible classes of data.
    alphas       List of the weights for each feature.
    feature_fns  List of the feature functions.

    Car data from the Naive Bayes Classifier example by Eric Meisner, November 22, 2003:
    http://www.inf.u-szeged.hu/~ormandi/teaching

    >>> from Bio.MaxEntropy import train, classify
    >>> xcar = [
    ...     ['Red', 'Sports', 'Domestic'],
    ...     ['Red', 'Sports', 'Domestic'],
    ...     ['Red', 'Sports', 'Domestic'],
    ...     ['Yellow', 'Sports', 'Domestic'],
    ...     ['Yellow', 'Sports', 'Imported'],
    ...     ['Yellow', 'SUV', 'Imported'],
    ...     ['Yellow', 'SUV', 'Imported'],
    ...     ['Yellow', 'SUV', 'Domestic'],
    ...     ['Red', 'SUV', 'Imported'],
    ...     ['Red', 'Sports', 'Imported']]
    >>> ycar = ['Yes', 'No', 'Yes', 'No', 'Yes', 'No', 'Yes', 'No', 'No', 'Yes']

    Requires some rules or features:

    >>> def udf1(ts, cl):
    ...     return ts[0] != 'Red'
    ...
    >>> def udf2(ts, cl):
    ...     return ts[1] != 'Sports'
    ...
    >>> def udf3(ts, cl):
    ...     return ts[2] != 'Domestic'
    ...
    >>> user_functions = [udf1, udf2, udf3]  # must be an iterable type
    >>> xe = train(xcar, ycar, user_functions)
    >>> for xv, yv in zip(xcar, ycar):
    ...     xc = classify(xe, xv)
    ...     print('Pred: %s gives %s y is %s' % (xv, xc, yv))
    ...
    Pred: ['Red', 'Sports', 'Domestic'] gives No y is Yes
    Pred: ['Red', 'Sports', 'Domestic'] gives No y is No
    Pred: ['Red', 'Sports', 'Domestic'] gives No y is Yes
    Pred: ['Yellow', 'Sports', 'Domestic'] gives No y is No
    Pred: ['Yellow', 'Sports', 'Imported'] gives No y is Yes
    Pred: ['Yellow', 'SUV', 'Imported'] gives No y is No
    Pred: ['Yellow', 'SUV', 'Imported'] gives No y is Yes
    Pred: ['Yellow', 'SUV', 'Domestic'] gives No y is No
    Pred: ['Red', 'SUV', 'Imported'] gives No y is No
    Pred: ['Red', 'Sports', 'Imported'] gives No y is Yes
    """

    def __init__(self):
        """Initialize the class."""
        self.classes = []
        self.alphas = []
        self.feature_fns = []


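# The score that calculate() assigns to class y for an observation x is the
# linear combination SUM_i alphas[i] * feature_fns[i](x, y), i.e. the
# unnormalized log probability of y.  Exponentiating the scores and dividing
# by their sum (as _calc_p_class_given_x does for the training set) recovers
# the conditional probabilities P(y|x).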
def calculate(me, observation):
    """Calculate the log of the probability for each class.

    me is a MaxEntropy object that has been trained. observation is a vector
    representing the observed data. The return value is a list of
    unnormalized log probabilities for each class.
    """
    scores = []
    assert len(me.feature_fns) == len(me.alphas)
    for klass in me.classes:
        lprob = 0.0
        for fn, alpha in zip(me.feature_fns, me.alphas):
            lprob += fn(observation, klass) * alpha
        scores.append(lprob)
    return scores


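# classify() returns the class whose score from calculate() is highest; when
# several classes tie, the one listed first in me.classes wins.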
def classify(me, observation):
    """Classify an observation into a class."""
    scores = calculate(me, observation)
    max_score, klass = scores[0], me.classes[0]
    for i in range(1, len(scores)):
        if scores[i] > max_score:
            max_score, klass = scores[i], me.classes[i]
    return klass


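# Feature values are cached sparsely: each feature becomes a dictionary that
# maps (training-instance index, class index) to the feature's value, with
# zero-valued entries left out.  The training helpers below all work on these
# dictionaries instead of calling the feature functions repeatedly.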
def _eval_feature_fn(fn, xs, classes):
    """Evaluate a feature function on every instance of the training set and class (PRIVATE).

    fn is a callback function that takes two parameters: a
    training instance and a class. Return a dictionary of (training
    set index, class index) -> non-zero value. Values of 0 are not
    stored in the dictionary.
    """
    values = {}
    for i in range(len(xs)):
        for j in range(len(classes)):
            f = fn(xs[i], classes[j])
            if f != 0:
                values[(i, j)] = f
    return values


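# The empirical expectation of feature f_i is its average value over the
# observed (x, y) pairs: E_empirical[f_i] = (1/N) * SUM_n f_i(x_n, y_n).
# At the maximum entropy solution the model expectations (see
# _calc_model_expects) match these empirical expectations; that is the
# constraint IIS iterates towards.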
def _calc_empirical_expects(xs, ys, classes, features):
    """Calculate the expectation of each function from the data (PRIVATE).

    This is the constraint for the maximum entropy distribution. Return a
    list of expectations, parallel to the list of features.
    """
    # E[f_i] = SUM_x,y P(x, y) f(x, y)
    #        = 1/N SUM_n f(x_n, y_n)   (empirical distribution over the data)
    class2index = {}
    for index, key in enumerate(classes):
        class2index[key] = index
    ys_i = [class2index[y] for y in ys]

    expect = []
    N = len(xs)
    for feature in features:
        s = 0
        for i in range(N):
            s += feature.get((i, ys_i[i]), 0)
        expect.append(s / N)
    return expect


def _calc_model_expects(xs, classes, features, alphas):
    """Calculate the expectation of each feature from the model (PRIVATE).

    This is not used in maximum entropy training, but provides a good function
    for debugging.
    """
    # SUM_X P(x) SUM_Y P(Y|X) F(X, Y)
    # = 1/N SUM_X SUM_Y P(Y|X) F(X, Y)
    p_yx = _calc_p_class_given_x(xs, classes, features, alphas)

    expects = []
    for feature in features:
        total = 0.0
        for (i, j), f in feature.items():
            total += p_yx[i][j] * f
        expects.append(total / len(xs))
    return expects


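# _calc_p_class_given_x turns the cached feature values and the current alphas
# into a matrix of conditional probabilities: row i holds P(y|x_i) for every
# class, obtained by exponentiating the linear scores and normalizing each row
# (a softmax over classes).  It is recomputed at the start of every IIS
# iteration and also drives _calc_model_expects.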
def _calc_p_class_given_x(xs, classes, features, alphas):
    """Calculate conditional probability P(y|x) (PRIVATE).

    y is the class and x is an instance from the training set.
    Return an XS x CLASSES matrix of probabilities.
    """
    prob_yx = np.zeros((len(xs), len(classes)))

    # Calculate log P(y, x).
    assert len(features) == len(alphas)
    for feature, alpha in zip(features, alphas):
        for (x, y), f in feature.items():
            prob_yx[x][y] += alpha * f
    # Exponentiate to get P(y, x).
    prob_yx = np.exp(prob_yx)
    # Divide out the probability over each class, so we get P(y|x).
    for i in range(len(xs)):
        z = sum(prob_yx[i])
        prob_yx[i] = prob_yx[i] / z
    return prob_yx


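# f# ("f sharp") is the total feature value for an (instance, class) pair:
# f#(x, y) = SUM_i f_i(x, y).  IIS needs it because each weight update solves
# an equation in exp(delta_i * f#(x, y)); when f# is constant the update has a
# closed form, but here it is solved numerically with Newton's method.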
def _calc_f_sharp(N, nclasses, features):
    """Calculate a matrix of f sharp values (PRIVATE)."""
    # f#(x, y) = SUM_i feature(x, y)
    f_sharp = np.zeros((N, nclasses))
    for feature in features:
        for (i, j), f in feature.items():
            f_sharp[i][j] += f
    return f_sharp


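# Each IIS step updates one weight alpha_i by alpha_i += delta_i, where
# delta_i is the root of
#   g(delta) = E_empirical[f_i]
#              - (1/N) SUM_x SUM_y P(y|x) f_i(x, y) exp(delta * f#(x, y)) = 0.
# _iis_solve_delta finds that root with Newton's method, using
#   g'(delta) = -(1/N) SUM_x SUM_y P(y|x) f_i(x, y) f#(x, y) exp(delta * f#(x, y)).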
def _iis_solve_delta(
    N, feature, f_sharp, empirical, prob_yx, max_newton_iterations, newton_converge
):
    """Solve delta using Newton's method (PRIVATE)."""
    # Find delta such that
    #   empirical - (1/N) SUM_x SUM_c P(c|x) f_i(x, c) e^[delta * f#(x, c)] = 0
    delta = 0.0
    iters = 0
    while iters < max_newton_iterations:  # iterate for Newton's method
        f_newton = df_newton = 0.0  # evaluate the function and derivative
        for (i, j), f in feature.items():
            prod = prob_yx[i][j] * f * np.exp(delta * f_sharp[i][j])
            f_newton += prod
            df_newton += prod * f_sharp[i][j]
        f_newton, df_newton = empirical - f_newton / N, -df_newton / N

        ratio = f_newton / df_newton
        delta -= ratio
        if np.fabs(ratio) < newton_converge:  # converged
            break
        iters += 1
    else:
        raise RuntimeError("Newton's method did not converge")
    return delta


def _train_iis(
    xs,
    classes,
    features,
    f_sharp,
    alphas,
    e_empirical,
    max_newton_iterations,
    newton_converge,
):
    """Do one iteration of hill climbing to find better alphas (PRIVATE)."""
    # This is a good function to parallelize.

    # Pre-calculate P(y|x)
    p_yx = _calc_p_class_given_x(xs, classes, features, alphas)

    N = len(xs)
    newalphas = alphas[:]
    for i in range(len(alphas)):
        delta = _iis_solve_delta(
            N,
            features[i],
            f_sharp,
            e_empirical[i],
            p_yx,
            max_newton_iterations,
            newton_converge,
        )
        newalphas[i] += delta
    return newalphas


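# train() repeats _train_iis until the alphas stop moving: after each pass the
# convergence test is the summed absolute change SUM_i |new_alpha_i - alpha_i|,
# which must fall below iis_converge.  The alphas start at 0.0, i.e. the
# initial model treats every class as equally likely.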
def train(
    training_set,
    results,
    feature_fns,
    update_fn=None,
    max_iis_iterations=10000,
    iis_converge=1.0e-5,
    max_newton_iterations=100,
    newton_converge=1.0e-10,
):
    """Train a maximum entropy classifier, returning a MaxEntropy object.

    Train a maximum entropy classifier on a training set.
    training_set is a list of observations. results is a list of the
    class assignments for each observation. feature_fns is a list of
    the features. These are callback functions that take an
    observation and class and return a 1 or 0. update_fn is a
    callback function that is called at each training iteration. It is
    passed a MaxEntropy object that encapsulates the current state of
    the training.

    The maximum number of iterations and the convergence criterion for IIS
    are given by max_iis_iterations and iis_converge, respectively, while
    max_newton_iterations and newton_converge are the maximum number
    of iterations and the convergence criterion for Newton's method.
    """
    if not training_set:
        raise ValueError("No data in the training set.")
    if len(training_set) != len(results):
        raise ValueError("training_set and results should be parallel lists.")

    # Rename variables for convenience.
    xs, ys = training_set, results

    # Get a list of all the classes that need to be trained.
    classes = sorted(set(results))

    # Cache values for all features.
    features = [_eval_feature_fn(fn, training_set, classes) for fn in feature_fns]
    # Cache values for f#.
    f_sharp = _calc_f_sharp(len(training_set), len(classes), features)

    # Pre-calculate the empirical expectations of the features.
    e_empirical = _calc_empirical_expects(xs, ys, classes, features)

    # Now train the alpha parameters to weigh each feature.
    alphas = [0.0] * len(features)
    iters = 0
    while iters < max_iis_iterations:
        nalphas = _train_iis(
            xs,
            classes,
            features,
            f_sharp,
            alphas,
            e_empirical,
            max_newton_iterations,
            newton_converge,
        )
        diff = [np.fabs(x - y) for x, y in zip(alphas, nalphas)]
        diff = reduce(np.add, diff, 0)
        alphas = nalphas

        me = MaxEntropy()
        me.alphas, me.classes, me.feature_fns = alphas, classes, feature_fns
        if update_fn is not None:
            update_fn(me)

        if diff < iis_converge:  # converged
            break
        iters += 1  # count this IIS pass so max_iis_iterations is enforced
    else:
        raise RuntimeError("IIS did not converge")

    return me


if __name__ == "__main__":
    from Bio._utils import run_doctest

    run_doctest(verbose=0)