annotate CSP2/CSP2_env/env-d9b9114564458d9d-741b3de822f2aaca6c6caa4325c4afce/lib/python3.8/site-packages/Bio/NaiveBayes.py @ 68:5028fdace37b

planemo upload commit 2e9511a184a1ca667c7be0c6321a36dc4e3d116d
author jpayne
date Tue, 18 Mar 2025 16:23:26 -0400
parents
children
rev   line source
jpayne@68 1 # Copyright 2000 by Jeffrey Chang. All rights reserved.
jpayne@68 2 #
jpayne@68 3 # This file is part of the Biopython distribution and governed by your
jpayne@68 4 # choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
jpayne@68 5 # Please see the LICENSE file that should have been included as part of this
jpayne@68 6 # package.
jpayne@68 7
jpayne@68 8 """General Naive Bayes learner (DEPRECATED).
jpayne@68 9
jpayne@68 10 Naive Bayes is a supervised classification algorithm that uses Bayes
jpayne@68 11 rule to compute the fit between a new observation and some previously
jpayne@68 12 observed data. The observations are discrete feature vectors, with
jpayne@68 13 the Bayes assumption that the features are independent. Although this
jpayne@68 14 is hardly ever true, the classifier works well enough in practice.
jpayne@68 15
jpayne@68 16 Glossary:
jpayne@68 17 - observation - A feature vector of discrete data.
jpayne@68 18 - class - A possible classification for an observation.
jpayne@68 19
jpayne@68 20 Classes:
jpayne@68 21 - NaiveBayes - Holds information for a naive Bayes classifier.
jpayne@68 22
jpayne@68 23 Functions:
jpayne@68 24 - train - Train a new naive Bayes classifier.
jpayne@68 25 - calculate - Calculate the probabilities of each class,
jpayne@68 26 given an observation.
jpayne@68 27 - classify - Classify an observation into a class.
jpayne@68 28
jpayne@68 29 """
jpayne@68 30
jpayne@68 31
jpayne@68 32 import warnings
jpayne@68 33 from Bio import BiopythonDeprecationWarning
jpayne@68 34
jpayne@68 35 warnings.warn(
jpayne@68 36 "The 'Bio.NaiveBayes' module is deprecated and will be removed in a future "
jpayne@68 37 "release of Biopython. Consider using scikit-learn instead.",
jpayne@68 38 BiopythonDeprecationWarning,
jpayne@68 39 )
jpayne@68 40
jpayne@68 41
jpayne@68 42 try:
jpayne@68 43 import numpy as np
jpayne@68 44 except ImportError:
jpayne@68 45 from Bio import MissingPythonDependencyError
jpayne@68 46
jpayne@68 47 raise MissingPythonDependencyError(
jpayne@68 48 "Please install NumPy if you want to use Bio.NaiveBayes. "
jpayne@68 49 "See http://www.numpy.org/"
jpayne@68 50 ) from None
jpayne@68 51
jpayne@68 52
jpayne@68 53 def _contents(items):
jpayne@68 54 """Return a dictionary where the key is the item and the value is the probablity associated (PRIVATE)."""
jpayne@68 55 term = 1.0 / len(items)
jpayne@68 56 counts = {}
jpayne@68 57 for item in items:
jpayne@68 58 counts[item] = counts.get(item, 0) + term
jpayne@68 59 return counts
jpayne@68 60
jpayne@68 61
class NaiveBayes:
    """Container for the parameters of a naive Bayes classifier.

    Attributes:
     - classes - List of the possible classes of data.
     - p_conditional - CLASS x DIM array of dicts of value -> ``P(value|class,dim)``
     - p_prior - List of the prior probabilities for every class.
     - dimensionality - Dimensionality of the data.

    """

    def __init__(self):
        """Create an empty, untrained classifier."""
        # All four attributes are populated by ``train``.
        self.classes = []
        self.p_conditional = None
        self.p_prior = []
        self.dimensionality = None
jpayne@68 79
jpayne@68 80
def calculate(nb, observation, scale=False):
    """Compute the log probability of each class given an observation.

    Arguments:
     - nb - A trained NaiveBayes classifier.
     - observation - A list with one discrete value per dimension.
     - scale - If true, also subtract ``log P(observation)`` so the
       probabilities are normalized.  No scaling is done by default.

    Returns a dictionary mapping each class to its log probability.
    """
    # Bayes rule, in log space:
    #   log P(class|obs) = log P(obs|class) + log P(class) - log P(obs)

    # Reject observations with the wrong number of dimensions.
    if len(observation) != nb.dimensionality:
        raise ValueError(
            f"observation in {len(observation)} dimension,"
            f" but classifier in {nb.dimensionality}"
        )

    # log P(obs|class) = SUM over dimensions of log P(value|class,dim).
    n_classes = len(nb.classes)
    lp_obs_given_class = np.zeros(n_classes)
    for idx in range(n_classes):
        conditionals = nb.p_conditional[idx]
        raw = [
            conditionals[dim].get(observation[dim], 0)
            for dim in range(len(observation))
        ]
        # Clip so unseen values (probability 0) don't make log() blow up.
        lp_obs_given_class[idx] = sum(np.log(np.clip(raw, 1.0e-300, 1.0e300)))

    # log P(class) from the stored priors.
    lp_prior = np.log(nb.p_prior)

    # log P(obs) = log SUM_i P(obs|class_i) P(class_i); computed only on
    # request since it requires leaving log space.
    lp_obs = 0.0
    if scale:
        clipped = np.clip(lp_prior + lp_obs_given_class, -700, +700)
        lp_obs = np.log(sum(np.exp(clipped)))

    # Assemble log P(class|obs) per class.
    return {
        klass: lp_obs_given_class[idx] + lp_prior[idx] - lp_obs
        for idx, klass in enumerate(nb.classes)
    }
jpayne@68 133
jpayne@68 134
def classify(nb, observation):
    """Return the class with the highest probability for the observation."""
    # Unscaled log probabilities are enough to pick the maximum; ties go
    # to the class that appears first in nb.classes, as before.
    log_probs = calculate(nb, observation, scale=False)
    return max(nb.classes, key=lambda klass: log_probs[klass])
jpayne@68 144
jpayne@68 145
def train(training_set, results, priors=None, typecode=None):
    """Train a NaiveBayes classifier on a training set.

    Arguments:
     - training_set - List of observations (feature vectors).
     - results - Class assignment for each observation; must be a list
       parallel to training_set.
     - priors - Optional dictionary mapping each class to its prior
       probability.  When omitted, the priors are estimated from the
       class frequencies in ``results``.
     - typecode - Optional dtype handed to NumPy when converting the
       observations to arrays.

    Returns a trained ``NaiveBayes`` instance.
    """
    if not len(training_set):
        raise ValueError("No data in the training set.")
    if len(training_set) != len(results):
        raise ValueError("training_set and results should be parallel lists.")

    # If no typecode is specified, try to pick a reasonable one. If
    # training_set is a Numeric array, then use that typecode.
    # Otherwise, choose a reasonable default.
    # XXX NOT IMPLEMENTED

    # All observations must share the same dimensionality.
    dimensions = [len(x) for x in training_set]
    if min(dimensions) != max(dimensions):
        raise ValueError("observations have different dimensionality")

    nb = NaiveBayes()
    nb.dimensionality = dimensions[0]

    # Determine the classes and their prior probabilities; classes are
    # kept sorted so the ordering is deterministic.
    if priors is None:
        percs = _contents(results)
        nb.classes = sorted(percs)
    else:
        percs = priors
        nb.classes = sorted(set(results))
    # dtype=float matches the old np.zeros + element assignment.
    nb.p_prior = np.array([percs[klass] for klass in nb.classes], dtype=float)

    # Group the observations by class, then turn each group into an
    # (instances x dimensions) NumPy matrix.  The grouping stays in
    # plain lists because the caller may pass any sequence type.
    index_of = {klass: i for i, klass in enumerate(nb.classes)}
    grouped = [[] for _ in nb.classes]
    for klass, observation in zip(results, training_set):
        grouped[index_of[klass]].append(observation)
    # XXX typecode must be specified!
    matrices = [np.asarray(group, typecode) for group in grouped]

    # Estimate P(value|class,dim) from the column of values observed in
    # each dimension, per class.
    nb.p_conditional = [
        [_contents(matrix[:, dim]) for dim in range(nb.dimensionality)]
        for matrix in matrices
    ]
    return nb