# Copyright 2002 by Jeffrey Chang.
# All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""A state-emitting MarkovModel.

Note that terminology similar to Manning and Schutze is used.


Functions:
train_bw        Train a Markov model using the Baum-Welch algorithm.
train_visible   Train a visible Markov model using MLE.
find_states     Find a state sequence that explains some observations.

load            Load a MarkovModel.
save            Save a MarkovModel.

Classes:
MarkovModel     Holds the description of a Markov model.
"""

import warnings
from Bio import BiopythonDeprecationWarning

warnings.warn(
    "The 'Bio.MarkovModel' module is deprecated and will be removed in a "
    "future release of Biopython. Consider using the hmmlearn package instead.",
    BiopythonDeprecationWarning,
)


try:
    import numpy as np
except ImportError:
    from Bio import MissingPythonDependencyError

    raise MissingPythonDependencyError(
        "Please install NumPy if you want to use Bio.MarkovModel. "
        "See http://www.numpy.org/"
    ) from None

logaddexp = np.logaddexp


def itemindex(values):
    """Return a dictionary mapping each value to its offset in the sequence.

    If a value appears more than once, the offset of its first
    occurrence is kept (the sequence is scanned in reverse, so earlier
    entries overwrite later ones).
    """
    d = {}
    entries = enumerate(values[::-1])
    n = len(values) - 1
    for index, key in entries:
        d[key] = n - index
    return d

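# A quick illustrative check (not part of the original module): when a
# value is duplicated, itemindex keeps the offset of its first occurrence.
assert itemindex(["a", "b", "a", "c"]) == {"a": 0, "b": 1, "c": 3}
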

# Reseed NumPy's global RNG from system entropy at import time (it is
# used by _random_norm below when no starting parameters are given).
np.random.seed()

VERY_SMALL_NUMBER = 1e-300
LOG0 = np.log(VERY_SMALL_NUMBER)


class MarkovModel:
    """A state-emitting Markov model, described by its states, alphabet, and probability matrices."""

    def __init__(
        self, states, alphabet, p_initial=None, p_transition=None, p_emission=None
    ):
        """Initialize the class."""
        self.states = states
        self.alphabet = alphabet
        self.p_initial = p_initial
        self.p_transition = p_transition
        self.p_emission = p_emission

    def __str__(self):
        """Create a string representation of the MarkovModel object."""
        from io import StringIO

        handle = StringIO()
        save(self, handle)
        handle.seek(0)
        return handle.read()

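
# A minimal sketch (not part of the original module) of assembling a model
# by hand; the two states, DNA alphabet, and probabilities below are
# invented for illustration. Rows of each probability matrix must sum to 1.
def _example_build_model():
    states = ["coding", "noncoding"]
    alphabet = ["A", "C", "G", "T"]
    p_initial = np.array([0.5, 0.5])
    p_transition = np.array([[0.9, 0.1], [0.2, 0.8]])
    p_emission = np.array([[0.3, 0.2, 0.2, 0.3], [0.25, 0.25, 0.25, 0.25]])
    return MarkovModel(states, alphabet, p_initial, p_transition, p_emission)
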

def _readline_and_check_start(handle, start):
    """Read the next line and check that it begins with the expected start (PRIVATE)."""
    line = handle.readline()
    if not line.startswith(start):
        raise ValueError(f"I expected {start!r} but got {line!r}")
    return line


def load(handle):
    """Parse a file handle into a MarkovModel object."""
    # Load the states.
    line = _readline_and_check_start(handle, "STATES:")
    states = line.split()[1:]

    # Load the alphabet.
    line = _readline_and_check_start(handle, "ALPHABET:")
    alphabet = line.split()[1:]

    mm = MarkovModel(states, alphabet)
    N, M = len(states), len(alphabet)

    # Load the initial probabilities.
    mm.p_initial = np.zeros(N)
    line = _readline_and_check_start(handle, "INITIAL:")
    for i in range(len(states)):
        line = _readline_and_check_start(handle, f"  {states[i]}:")
        mm.p_initial[i] = float(line.split()[-1])

    # Load the transition.
    mm.p_transition = np.zeros((N, N))
    line = _readline_and_check_start(handle, "TRANSITION:")
    for i in range(len(states)):
        line = _readline_and_check_start(handle, f"  {states[i]}:")
        mm.p_transition[i, :] = [float(v) for v in line.split()[1:]]

    # Load the emission.
    mm.p_emission = np.zeros((N, M))
    line = _readline_and_check_start(handle, "EMISSION:")
    for i in range(len(states)):
        line = _readline_and_check_start(handle, f"  {states[i]}:")
        mm.p_emission[i, :] = [float(v) for v in line.split()[1:]]

    return mm


def save(mm, handle):
    """Save MarkovModel object into handle."""
    # This will fail if there are spaces in the states or alphabet.
    w = handle.write
    w(f"STATES: {' '.join(mm.states)}\n")
    w(f"ALPHABET: {' '.join(mm.alphabet)}\n")
    w("INITIAL:\n")
    for i in range(len(mm.p_initial)):
        w(f"  {mm.states[i]}: {mm.p_initial[i]:g}\n")
    w("TRANSITION:\n")
    for i in range(len(mm.p_transition)):
        w(f"  {mm.states[i]}: {' '.join(str(x) for x in mm.p_transition[i])}\n")
    w("EMISSION:\n")
    for i in range(len(mm.p_emission)):
        w(f"  {mm.states[i]}: {' '.join(str(x) for x in mm.p_emission[i])}\n")

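
# A small round-trip sketch (not part of the original module), reusing the
# illustrative _example_build_model helper defined above. For that model,
# save() writes text of this shape:
#
#   STATES: coding noncoding
#   ALPHABET: A C G T
#   INITIAL:
#     coding: 0.5
#     noncoding: 0.5
#   TRANSITION: ...
def _example_round_trip():
    from io import StringIO

    handle = StringIO()
    save(_example_build_model(), handle)
    handle.seek(0)
    return load(handle)
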

# XXX allow them to specify starting points
def train_bw(
    states,
    alphabet,
    training_data,
    pseudo_initial=None,
    pseudo_transition=None,
    pseudo_emission=None,
    update_fn=None,
):
    """Train a MarkovModel using the Baum-Welch algorithm.

    states is a list of strings that describe the names of each state.
    alphabet is a list of objects that indicate the allowed outputs.
    training_data is a list of observations.  Each observation is a
    list of objects from the alphabet.

    pseudo_initial, pseudo_transition, and pseudo_emission are
    optional parameters that you can use to assign pseudo-counts to
    the corresponding matrices.  They should be arrays of the
    appropriate size that contain numbers to add to each parameter
    matrix, before normalization.

    update_fn is an optional callback that takes parameters
    (iteration, log_likelihood).  It is called once per iteration.
    """
    N, M = len(states), len(alphabet)
    if not training_data:
        raise ValueError("No training data given.")
    if pseudo_initial is not None:
        pseudo_initial = np.asarray(pseudo_initial)
        if pseudo_initial.shape != (N,):
            raise ValueError("pseudo_initial not shape len(states)")
    if pseudo_transition is not None:
        pseudo_transition = np.asarray(pseudo_transition)
        if pseudo_transition.shape != (N, N):
            raise ValueError("pseudo_transition not shape len(states) X len(states)")
    if pseudo_emission is not None:
        pseudo_emission = np.asarray(pseudo_emission)
        if pseudo_emission.shape != (N, M):
            raise ValueError("pseudo_emission not shape len(states) X len(alphabet)")

    # Training data is given as a list of members of the alphabet.
    # Replace those with indexes into the alphabet list for easier
    # computation.
    training_outputs = []
    indexes = itemindex(alphabet)
    for outputs in training_data:
        training_outputs.append([indexes[x] for x in outputs])

    # Do some sanity checking on the outputs.
    lengths = [len(x) for x in training_outputs]
    if min(lengths) == 0:
        raise ValueError("I got training data with outputs of length 0")

    # Do the training with Baum-Welch.
    x = _baum_welch(
        N,
        M,
        training_outputs,
        pseudo_initial=pseudo_initial,
        pseudo_transition=pseudo_transition,
        pseudo_emission=pseudo_emission,
        update_fn=update_fn,
    )
    p_initial, p_transition, p_emission = x
    return MarkovModel(states, alphabet, p_initial, p_transition, p_emission)

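
# A minimal sketch (not part of the original module) of Baum-Welch training
# on unlabeled observations; the dice-like alphabet and the _print_progress
# callback are invented for illustration. Note that train_bw starts from
# random parameters, so repeated runs can converge to different models.
def _example_train_bw():
    states = ["hot", "cold"]
    alphabet = ["1", "2", "3"]
    training_data = [list("1233321"), list("111223")]

    def _print_progress(iteration, log_likelihood):
        print(f"iteration {iteration}: log likelihood = {log_likelihood:.3f}")

    return train_bw(states, alphabet, training_data, update_fn=_print_progress)
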

MAX_ITERATIONS = 1000


def _baum_welch(
    N,
    M,
    training_outputs,
    p_initial=None,
    p_transition=None,
    p_emission=None,
    pseudo_initial=None,
    pseudo_transition=None,
    pseudo_emission=None,
    update_fn=None,
):
    """Implement the Baum-Welch algorithm to estimate the unknown parameters of the MarkovModel object (PRIVATE)."""
    if p_initial is None:
        p_initial = _random_norm(N)
    else:
        p_initial = _copy_and_check(p_initial, (N,))

    if p_transition is None:
        p_transition = _random_norm((N, N))
    else:
        p_transition = _copy_and_check(p_transition, (N, N))
    if p_emission is None:
        p_emission = _random_norm((N, M))
    else:
        p_emission = _copy_and_check(p_emission, (N, M))

    # Do all the calculations in log space to avoid underflows.
    lp_initial = np.log(p_initial)
    lp_transition = np.log(p_transition)
    lp_emission = np.log(p_emission)
    if pseudo_initial is not None:
        lpseudo_initial = np.log(pseudo_initial)
    else:
        lpseudo_initial = None
    if pseudo_transition is not None:
        lpseudo_transition = np.log(pseudo_transition)
    else:
        lpseudo_transition = None
    if pseudo_emission is not None:
        lpseudo_emission = np.log(pseudo_emission)
    else:
        lpseudo_emission = None

    # Iterate through each sequence of outputs, updating the parameters
    # of the HMM.  Stop when the log likelihoods of the sequences
    # stop changing.
    prev_llik = None
    for i in range(MAX_ITERATIONS):
        llik = LOG0
        for outputs in training_outputs:
            llik += _baum_welch_one(
                N,
                M,
                outputs,
                lp_initial,
                lp_transition,
                lp_emission,
                lpseudo_initial,
                lpseudo_transition,
                lpseudo_emission,
            )
        if update_fn is not None:
            update_fn(i, llik)
        if prev_llik is not None and np.fabs(prev_llik - llik) < 0.1:
            break
        prev_llik = llik
    else:
        raise RuntimeError("HMM did not converge in %d iterations" % MAX_ITERATIONS)

    # Return everything back in normal space.
    return [np.exp(x) for x in (lp_initial, lp_transition, lp_emission)]


def _baum_welch_one(
    N,
    M,
    outputs,
    lp_initial,
    lp_transition,
    lp_emission,
    lpseudo_initial,
    lpseudo_transition,
    lpseudo_emission,
):
    """Execute one step of the Baum-Welch algorithm (PRIVATE).

    Do one iteration of Baum-Welch based on a sequence of outputs.
    Changes the values of lp_initial, lp_transition and lp_emission in place.
    """
    T = len(outputs)
    fmat = _forward(N, T, lp_initial, lp_transition, lp_emission, outputs)
    bmat = _backward(N, T, lp_transition, lp_emission, outputs)

    # Calculate the probability of traversing each arc for any given
    # transition.
    lp_arc = np.zeros((N, N, T))
    for t in range(T):
        k = outputs[t]
        lp_traverse = np.zeros((N, N))  # P of going over one arc.
        for i in range(N):
            for j in range(N):
                # P(getting to this arc)
                # P(making this transition)
                # P(emitting this character)
                # P(going to the end)
                lp = (
                    fmat[i][t]
                    + lp_transition[i][j]
                    + lp_emission[i][k]
                    + bmat[j][t + 1]
                )
                lp_traverse[i][j] = lp
        # Normalize the probability for this time step.
        lp_arc[:, :, t] = lp_traverse - _logsum(lp_traverse)

    # Sum of all the transitions out of state i at time t.
    lp_arcout_t = np.zeros((N, T))
    for t in range(T):
        for i in range(N):
            lp_arcout_t[i][t] = _logsum(lp_arc[i, :, t])

    # Sum of all the transitions out of state i.
    lp_arcout = np.zeros(N)
    for i in range(N):
        lp_arcout[i] = _logsum(lp_arcout_t[i, :])

    # UPDATE P_INITIAL.  Assign into the array so the caller sees the update.
    temp = lp_arcout_t[:, 0]
    if lpseudo_initial is not None:
        temp = _logvecadd(temp, lpseudo_initial)
        temp = temp - _logsum(temp)
    lp_initial[:] = temp

    # UPDATE P_TRANSITION.  p_transition[i][j] is the sum of all the
    # transitions from i to j, normalized by the sum of the
    # transitions out of i.
    for i in range(N):
        for j in range(N):
            lp_transition[i][j] = _logsum(lp_arc[i, j, :]) - lp_arcout[i]
        if lpseudo_transition is not None:
            lp_transition[i] = _logvecadd(lp_transition[i], lpseudo_transition[i])
            lp_transition[i] = lp_transition[i] - _logsum(lp_transition[i])

    # UPDATE P_EMISSION.  lp_emission[i][k] is the sum of all the
    # transitions out of i when k is observed, divided by the sum of
    # the transitions out of i.
    for i in range(N):
        ksum = np.zeros(M) + LOG0  # ksum[k] is the sum of all i with k.
        for t in range(T):
            k = outputs[t]
            for j in range(N):
                ksum[k] = logaddexp(ksum[k], lp_arc[i, j, t])
        ksum = ksum - _logsum(ksum)  # Normalize.
        if lpseudo_emission is not None:
            ksum = _logvecadd(ksum, lpseudo_emission[i])
            ksum = ksum - _logsum(ksum)  # Renormalize.
        lp_emission[i, :] = ksum

    # Calculate the log likelihood of the output based on the forward
    # matrix.  Since the parameters of the HMM have changed, the log
    # likelihoods are going to be a step behind, and we might be doing
    # one extra iteration of training.  The alternative is to rerun
    # the _forward algorithm and calculate from the clean one, but
    # that may be more expensive than overshooting the training by one
    # step.
    return _logsum(fmat[:, T])



def _forward(N, T, lp_initial, lp_transition, lp_emission, outputs):
    """Implement the forward algorithm (PRIVATE).

    Calculate an Nx(T+1) matrix, where the last column is the total
    probability of the output.
    """
    matrix = np.zeros((N, T + 1))

    # Initialize the first column to be the initial values.
    matrix[:, 0] = lp_initial
    for t in range(1, T + 1):
        k = outputs[t - 1]
        for j in range(N):
            # The probability of the state is the sum of the
            # transitions from all the states from time t-1.
            # Note that the emission is attached to the source state i
            # of each transition.
            lprob = LOG0
            for i in range(N):
                lp = matrix[i][t - 1] + lp_transition[i][j] + lp_emission[i][k]
                lprob = logaddexp(lprob, lp)
            matrix[j][t] = lprob
    return matrix


def _backward(N, T, lp_transition, lp_emission, outputs):
    """Implement the backward algorithm (PRIVATE)."""
    matrix = np.zeros((N, T + 1))
    for t in range(T - 1, -1, -1):
        k = outputs[t]
        for i in range(N):
            # The probability of the state is the sum of the
            # transitions from all the states from time t+1.
            lprob = LOG0
            for j in range(N):
                lp = matrix[j][t + 1] + lp_transition[i][j] + lp_emission[i][k]
                lprob = logaddexp(lprob, lp)
            matrix[i][t] = lprob
    return matrix

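
# For reference (not part of the original module): the inner loops of
# _forward can be collapsed with np.logaddexp.reduce. This sketch assumes
# the same convention as above, where the emission at time t is attached
# to the source state i of each transition.
def _forward_vectorized(N, T, lp_initial, lp_transition, lp_emission, outputs):
    matrix = np.zeros((N, T + 1))
    matrix[:, 0] = lp_initial
    for t in range(1, T + 1):
        k = outputs[t - 1]
        # terms[i, j] = matrix[i, t-1] + lp_transition[i, j] + lp_emission[i, k]
        terms = matrix[:, t - 1, None] + lp_transition + lp_emission[:, k, None]
        # Log-sum over the source state i for every destination state j.
        matrix[:, t] = np.logaddexp.reduce(terms, axis=0)
    return matrix
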

def train_visible(
    states,
    alphabet,
    training_data,
    pseudo_initial=None,
    pseudo_transition=None,
    pseudo_emission=None,
):
    """Train a visible MarkovModel using maximum likelihood estimates for each of the parameters.

    states is a list of strings that describe the names of each state.
    alphabet is a list of objects that indicate the allowed outputs.
    training_data is a list of (outputs, observed states) tuples, where
    outputs is a list of emissions from the alphabet and observed
    states is a list of states from states.

    pseudo_initial, pseudo_transition, and pseudo_emission are
    optional parameters that you can use to assign pseudo-counts to
    the corresponding matrices.  They should be arrays of the
    appropriate size that contain numbers to add to each parameter
    matrix.
    """
    N, M = len(states), len(alphabet)
    if pseudo_initial is not None:
        pseudo_initial = np.asarray(pseudo_initial)
        if pseudo_initial.shape != (N,):
            raise ValueError("pseudo_initial not shape len(states)")
    if pseudo_transition is not None:
        pseudo_transition = np.asarray(pseudo_transition)
        if pseudo_transition.shape != (N, N):
            raise ValueError("pseudo_transition not shape len(states) X len(states)")
    if pseudo_emission is not None:
        pseudo_emission = np.asarray(pseudo_emission)
        if pseudo_emission.shape != (N, M):
            raise ValueError("pseudo_emission not shape len(states) X len(alphabet)")

    # Training data is given as a list of members of the alphabet.
    # Replace those with indexes into the alphabet list for easier
    # computation.
    training_states, training_outputs = [], []
    states_indexes = itemindex(states)
    outputs_indexes = itemindex(alphabet)
    for toutputs, tstates in training_data:
        if len(tstates) != len(toutputs):
            raise ValueError("states and outputs not aligned")
        training_states.append([states_indexes[x] for x in tstates])
        training_outputs.append([outputs_indexes[x] for x in toutputs])

    x = _mle(
        N,
        M,
        training_outputs,
        training_states,
        pseudo_initial,
        pseudo_transition,
        pseudo_emission,
    )
    p_initial, p_transition, p_emission = x

    return MarkovModel(states, alphabet, p_initial, p_transition, p_emission)

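
# A minimal sketch (not part of the original module) of MLE training from
# fully labeled data; the weather-style states and activities are invented
# for illustration. Each training pair is (outputs, observed states).
def _example_train_visible():
    states = ["rainy", "sunny"]
    alphabet = ["walk", "shop", "clean"]
    training_data = [
        (["walk", "shop", "clean"], ["sunny", "sunny", "rainy"]),
        (["clean", "walk"], ["rainy", "sunny"]),
    ]
    return train_visible(states, alphabet, training_data)
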

def _mle(
    N,
    M,
    training_outputs,
    training_states,
    pseudo_initial,
    pseudo_transition,
    pseudo_emission,
):
    """Implement the maximum likelihood estimation algorithm (PRIVATE)."""
    # p_initial is the probability that a sequence of states starts
    # off with a particular one.
    p_initial = np.zeros(N)
    if pseudo_initial is not None:
        p_initial = p_initial + pseudo_initial
    for states in training_states:
        p_initial[states[0]] += 1
    p_initial = _normalize(p_initial)

    # p_transition is the probability that a state leads to the next
    # one.  C(i,j)/C(i) where i and j are states.
    p_transition = np.zeros((N, N))
    if pseudo_transition is not None:
        p_transition = p_transition + pseudo_transition
    for states in training_states:
        for n in range(len(states) - 1):
            i, j = states[n], states[n + 1]
            p_transition[i, j] += 1
    for i in range(len(p_transition)):
        p_transition[i, :] = p_transition[i, :] / sum(p_transition[i, :])

    # p_emission is the probability of an output given a state.
    # C(s,o)/C(s) where o is an output and s is a state.
    # Start from ones (add-one smoothing) so unseen emissions do not get
    # zero probability, then add any pseudo-counts on top.
    p_emission = np.ones((N, M))
    if pseudo_emission is not None:
        p_emission = p_emission + pseudo_emission
    for outputs, states in zip(training_outputs, training_states):
        for o, s in zip(outputs, states):
            p_emission[s, o] += 1
    for i in range(len(p_emission)):
        p_emission[i, :] = p_emission[i, :] / sum(p_emission[i, :])

    return p_initial, p_transition, p_emission


def _argmaxes(vector, allowance=None):
    """Return the indices of the maximum values in the vector (PRIVATE).

    Only the single best index is returned; the allowance parameter is
    currently ignored.
    """
    return [np.argmax(vector)]


def find_states(markov_model, output):
    """Find the most likely states that generated the given output.

    Returns a list of (states, score) tuples, where states is the
    decoded state path and score is its probability.
    """
    mm = markov_model
    N = len(mm.states)

    # _viterbi does calculations in log space.  Add a tiny bit to the
    # matrices so that the logs will not break.
    lp_initial = np.log(mm.p_initial + VERY_SMALL_NUMBER)
    lp_transition = np.log(mm.p_transition + VERY_SMALL_NUMBER)
    lp_emission = np.log(mm.p_emission + VERY_SMALL_NUMBER)
    # Change output into a list of indexes into the alphabet.
    indexes = itemindex(mm.alphabet)
    output = [indexes[x] for x in output]

    # Run the Viterbi algorithm.
    results = _viterbi(N, lp_initial, lp_transition, lp_emission, output)

    for i in range(len(results)):
        states, score = results[i]
        results[i] = [mm.states[x] for x in states], np.exp(score)
    return results

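
# A minimal sketch (not part of the original module) of Viterbi decoding,
# reusing the illustrative _example_train_visible helper defined above.
# With the current single-path _argmaxes, find_states returns exactly one
# (state_path, score) tuple.
def _example_find_states():
    mm = _example_train_visible()
    [(state_path, score)] = find_states(mm, ["walk", "clean", "shop"])
    return state_path, score
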

def _viterbi(N, lp_initial, lp_transition, lp_emission, output):
    """Implement the Viterbi algorithm to find the most likely states for a given output (PRIVATE)."""
    T = len(output)
    # Store the backtrace in an NxT matrix.
    backtrace = []  # list of indexes of states in previous timestep.
    for i in range(N):
        backtrace.append([None] * T)

    # Store the best scores.
    scores = np.zeros((N, T))
    scores[:, 0] = lp_initial + lp_emission[:, output[0]]
    for t in range(1, T):
        k = output[t]
        for j in range(N):
            # Find the most likely place it came from.
            i_scores = scores[:, t - 1] + lp_transition[:, j] + lp_emission[j, k]
            indexes = _argmaxes(i_scores)
            scores[j, t] = i_scores[indexes[0]]
            backtrace[j][t] = indexes

    # Do the backtrace.  First, find a good place to start.  Then,
    # we'll follow the backtrace matrix to find the list of states.
    # In the event of ties, there may be multiple paths back through
    # the matrix, which implies a recursive solution.  We'll simulate
    # it by keeping our own stack.
    in_process = []  # list of (t, states, score)
    results = []  # return values.  list of (states, score)
    indexes = _argmaxes(scores[:, T - 1])  # pick the first place
    for i in indexes:
        in_process.append((T - 1, [i], scores[i][T - 1]))
    while in_process:
        t, states, score = in_process.pop()
        if t == 0:
            results.append((states, score))
        else:
            indexes = backtrace[states[0]][t]
            for i in indexes:
                in_process.append((t - 1, [i] + states, score))
    return results



def _normalize(matrix):
    """Normalize matrix object (PRIVATE)."""
    if len(matrix.shape) == 1:
        matrix = matrix / sum(matrix)
    elif len(matrix.shape) == 2:
        # Normalize by rows.
        for i in range(len(matrix)):
            matrix[i, :] = matrix[i, :] / sum(matrix[i, :])
    else:
        raise ValueError("I cannot handle matrices of that shape")
    return matrix


def _uniform_norm(shape):
    """Return a uniform matrix, normalized by rows (PRIVATE)."""
    matrix = np.ones(shape)
    return _normalize(matrix)


def _random_norm(shape):
    """Return a random matrix, normalized by rows (PRIVATE)."""
    matrix = np.random.random(shape)
    return _normalize(matrix)


def _copy_and_check(matrix, desired_shape):
    """Copy a matrix, check its dimensions, and make sure it is normalized (PRIVATE)."""
    # Copy the matrix.
    matrix = np.array(matrix, copy=True)
    # Check the dimensions.
    if matrix.shape != desired_shape:
        raise ValueError("Incorrect dimension")
    # Make sure it's normalized.
    if len(matrix.shape) == 1:
        if np.fabs(sum(matrix) - 1.0) > 0.01:
            raise ValueError("matrix not normalized to 1.0")
    elif len(matrix.shape) == 2:
        for i in range(len(matrix)):
            if np.fabs(sum(matrix[i]) - 1.0) > 0.01:
                raise ValueError("matrix %d not normalized to 1.0" % i)
    else:
        raise ValueError("I don't handle matrices > 2 dimensions")
    return matrix



def _logsum(matrix):
    """Implement logsum for a matrix object (PRIVATE)."""
    if len(matrix.shape) > 1:
        vec = np.reshape(matrix, (np.prod(matrix.shape),))
    else:
        vec = matrix
    total = LOG0
    for num in vec:
        total = logaddexp(total, num)
    return total

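
# For reference (not part of the original module): on modern NumPy the loop
# in _logsum amounts to a single ufunc reduction over all elements (up to
# the negligible LOG0 seed).
def _logsum_vectorized(matrix):
    return np.logaddexp.reduce(matrix, axis=None)
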

def _logvecadd(logvec1, logvec2):
    """Implement a log sum for two vector objects (PRIVATE)."""
    assert len(logvec1) == len(logvec2), "vectors aren't the same length"
    sumvec = np.zeros(len(logvec1))
    for i in range(len(logvec1)):
        sumvec[i] = logaddexp(logvec1[i], logvec2[i])
    return sumvec


def _exp_logsum(numbers):
    """Return the exponential of a logsum (PRIVATE)."""
    total = _logsum(numbers)
    return np.exp(total)