annotate CSP2/CSP2_env/env-d9b9114564458d9d-741b3de822f2aaca6c6caa4325c4afce/lib/python3.8/logging/handlers.py @ 69:33d812a61356

planemo upload commit 2e9511a184a1ca667c7be0c6321a36dc4e3d116d
author jpayne
date Tue, 18 Mar 2025 17:55:14 -0400
parents
children
rev   line source
jpayne@69 1 # Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.
jpayne@69 2 #
jpayne@69 3 # Permission to use, copy, modify, and distribute this software and its
jpayne@69 4 # documentation for any purpose and without fee is hereby granted,
jpayne@69 5 # provided that the above copyright notice appear in all copies and that
jpayne@69 6 # both that copyright notice and this permission notice appear in
jpayne@69 7 # supporting documentation, and that the name of Vinay Sajip
jpayne@69 8 # not be used in advertising or publicity pertaining to distribution
jpayne@69 9 # of the software without specific, written prior permission.
jpayne@69 10 # VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
jpayne@69 11 # ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
jpayne@69 12 # VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
jpayne@69 13 # ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
jpayne@69 14 # IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
jpayne@69 15 # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
jpayne@69 16
jpayne@69 17 """
jpayne@69 18 Additional handlers for the logging package for Python. The core package is
jpayne@69 19 based on PEP 282 and comments thereto in comp.lang.python.
jpayne@69 20
jpayne@69 21 Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
jpayne@69 22
jpayne@69 23 To use, simply 'import logging.handlers' and log away!
jpayne@69 24 """
jpayne@69 25
jpayne@69 26 import logging, socket, os, pickle, struct, time, re
jpayne@69 27 from stat import ST_DEV, ST_INO, ST_MTIME
jpayne@69 28 import queue
jpayne@69 29 import threading
jpayne@69 30 import copy
jpayne@69 31
#
# Some constants...
#

# Default ports for the network logging handlers in this module
# (presumably the IANA-registered Python logging ports — TODO confirm).
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
# Standard syslog port, identical for UDP and TCP transports.
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514

_MIDNIGHT = 24 * 60 * 60  # number of seconds in a day
jpayne@69 44
class BaseRotatingHandler(logging.FileHandler):
    """
    Shared machinery for file handlers that rotate their log file.

    Not intended to be instantiated directly; use RotatingFileHandler or
    TimedRotatingFileHandler instead.  Subclasses supply shouldRollover()
    and doRollover(); this class wires them into emit() and exposes the
    'namer'/'rotator' customization hooks.
    """
    def __init__(self, filename, mode, encoding=None, delay=False):
        """Open *filename* for streamed logging."""
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.mode = mode
        self.encoding = encoding
        # Optional user-supplied callables that customize rotation;
        # None means "use the default behavior".
        self.namer = None
        self.rotator = None

    def emit(self, record):
        """
        Emit a record.

        Rolls the file over first when the subclass's shouldRollover()
        reports that rotation is due, then writes the record.
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except Exception:
            self.handleError(record)

    def rotation_filename(self, default_name):
        """
        Return the filename to use for a rotated log file.

        Delegates to the 'namer' attribute when it is callable; otherwise
        *default_name* is returned untouched (the default, since 'namer'
        starts out as None).

        :param default_name: The default name for the log file.
        """
        namer = self.namer
        if callable(namer):
            return namer(default_name)
        return default_name

    def rotate(self, source, dest):
        """
        Perform the actual rotation of *source* to *dest*.

        Delegates to the 'rotator' attribute when it is callable; otherwise
        the source file is simply renamed to the destination (the default,
        since 'rotator' starts out as None).

        :param source: The source filename, normally the base filename,
                       e.g. 'test.log'.
        :param dest: The destination filename, normally what the source
                     rotates to, e.g. 'test.log.1'.
        """
        rotator = self.rotator
        if callable(rotator):
            rotator(source, dest)
        # Issue 18940: with delay=True the file may never have been created,
        # in which case there is nothing to rename.
        elif os.path.exists(source):
            os.rename(source, dest)
jpayne@69 114
class RotatingFileHandler(BaseRotatingHandler):
    """
    Log to a file, switching to a fresh file once the current one is
    about to exceed a size limit.

    With maxBytes > 0 and backupCount >= 1, the live file is always the
    base name (e.g. 'app.log'); on rollover it becomes 'app.log.1' and
    existing numbered backups shift up ('app.log.1' -> 'app.log.2', ...),
    with at most backupCount of them kept.  With maxBytes == 0, rollover
    never happens and the file grows without bound.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
                 encoding=None, delay=False):
        """
        Open *filename* as the logging stream.

        :param mode: File open mode; forced to 'a' when rotation is
                     enabled (see comment below).
        :param maxBytes: Size threshold that triggers rollover; 0 disables
                         rotation.
        :param backupCount: How many numbered backup files to retain.
        """
        # Rotation only makes sense in append mode: honoring e.g. 'w'
        # would truncate the log (losing previous runs' records) every
        # time the application starts.
        if maxBytes > 0:
            mode = 'a'
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            # Shift app.log.1 -> app.log.2, ..., newest first, dropping
            # whatever would exceed backupCount.
            for index in range(self.backupCount - 1, 0, -1):
                newer = self.rotation_filename("%s.%d" % (self.baseFilename,
                                                          index))
                older = self.rotation_filename("%s.%d" % (self.baseFilename,
                                                          index + 1))
                if os.path.exists(newer):
                    if os.path.exists(older):
                        os.remove(older)
                    os.rename(newer, older)
            # The live file becomes backup number 1.
            first_backup = self.rotation_filename(self.baseFilename + ".1")
            if os.path.exists(first_backup):
                os.remove(first_backup)
            self.rotate(self.baseFilename, first_backup)
        if not self.delay:
            self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Returns 1 when writing *record* would push the file to maxBytes
        or beyond, else 0.
        """
        if self.stream is None:
            # Opening was deferred (delay=True); open now so we can
            # measure the file size.
            self.stream = self._open()
        if self.maxBytes <= 0:
            return 0
        pending = "%s\n" % self.format(record)
        # Seek to EOF explicitly before tell(): needed due to
        # non-POSIX-compliant Windows behavior.
        self.stream.seek(0, 2)
        return 1 if self.stream.tell() + len(pending) >= self.maxBytes else 0
jpayne@69 190
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False, atTime=None):
        """
        Open the specified file for timed rotation.

        'when' selects the rollover unit (S, M, H, D, W0-W6 or 'midnight',
        case-insensitive), 'interval' multiplies that unit, 'backupCount'
        bounds how many rotated files are kept, 'utc' selects UTC instead
        of local time, and 'atTime' (a datetime.time-like object with
        hour/minute/second — TODO confirm against callers) fixes the time
        of day for midnight/weekly rollovers.  Raises ValueError for an
        unrecognised 'when' or an invalid weekly day spec.
        """
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        self.atTime = atTime
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers. Also set the filename suffix used when
        # a rollover occurs. Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        # extMatch recognises the date/time suffix (plus an optional extra
        # extension such as '.gz') on rotated files; used by
        # getFilesToDelete() below.
        self.extMatch = re.compile(self.extMatch, re.ASCII)
        self.interval = self.interval * interval # multiply by units requested
        # The following line added because the filename passed in could be a
        # path object (see Issue #27493), but self.baseFilename will be a string
        filename = self.baseFilename
        if os.path.exists(filename):
            # Base the first rollover on the existing file's mtime so that a
            # restart does not reset the rotation schedule.
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.

        Returns an epoch timestamp (seconds) for the next rollover.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is. In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now. So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time. After that, the regular interval will take care of
        # the rest. Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            currentDay = t[6]
            # r is the number of seconds left between now and the next rotation
            if self.atTime is None:
                rotate_ts = _MIDNIGHT
            else:
                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
                    self.atTime.second)

            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
                currentSecond)
            if r < 0:
                # Rotate time is before the current time (for example when
                # self.rotateAt is 13:45 and it now 14:15), rotation is
                # tomorrow.
                r += _MIDNIGHT
                currentDay = (currentDay + 1) % 7
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts. There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = currentDay # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                addend = -3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                addend = 3600
                            newRolloverAt += addend
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        return 0

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        Only files in the log's directory whose names are the base name
        plus a suffix matching extMatch are candidates; the newest
        backupCount of them are kept.
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        prefix = baseName + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        if len(result) < self.backupCount:
            result = []
        else:
            # Lexicographic sort works because the suffixes are
            # fixed-width date stamps.
            result.sort()
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens. However, you want the file to be named for the
        start of the interval, not the current time. If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        currentTime = int(time.time())
        dstNow = time.localtime(currentTime)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
            dstThen = timeTuple[-1]
            # If DST changed between the interval start and now, shift by
            # an hour so the suffix names the interval start correctly.
            if dstNow != dstThen:
                if dstNow:
                    addend = 3600
                else:
                    addend = -3600
                timeTuple = time.localtime(t + addend)
        dfn = self.rotation_filename(self.baseFilename + "." +
                                     time.strftime(self.suffix, timeTuple))
        if os.path.exists(dfn):
            os.remove(dfn)
        self.rotate(self.baseFilename, dfn)
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    addend = -3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    addend = 3600
                newRolloverAt += addend
        self.rolloverAt = newRolloverAt
jpayne@69 413
class WatchedFileHandler(logging.FileHandler):
    """
    A FileHandler that notices when its file has been replaced underneath
    it — as done by external rotation tools such as newsyslog and
    logrotate — and transparently reopens the file.

    "Replaced" is detected by comparing the (device, inode) pair of the
    path against that of the open stream; when they differ (or the path
    has vanished), the old stream is closed and a new one opened.

    Intended for Unix.  It is not appropriate under Windows, where open
    log files hold exclusive locks and cannot be moved or renamed, and
    where stat's ST_INO is always zero anyway.

    Based on a suggestion and patch by Chad J. Schroeder.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=False):
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.dev, self.ino = -1, -1
        self._statstream()

    def _statstream(self):
        # Record the device/inode of the stream we currently have open
        # (no-op when delay=True and nothing is open yet).
        if self.stream:
            st = os.fstat(self.stream.fileno())
            self.dev, self.ino = st[ST_DEV], st[ST_INO]

    def reopenIfNeeded(self):
        """
        Reopen log file if needed.

        Compares the file currently at our path with the stream we hold
        open; when they differ, closes the old stream and opens a fresh
        one on the path.
        """
        # Reduce the chance of race conditions by stat'ing by path only
        # once and then fstat'ing our new fd if we opened a new log stream.
        # See issue #14632: Thanks to John Mulligan for the problem report
        # and patch.
        try:
            st = os.stat(self.baseFilename)
        except FileNotFoundError:
            st = None
        changed = (not st or
                   st[ST_DEV] != self.dev or
                   st[ST_INO] != self.ino)
        if not changed:
            return
        if self.stream is not None:
            # Clean up the stale handle before reopening.
            self.stream.flush()
            self.stream.close()
            self.stream = None  # See Issue #21742: _open () might fail.
        # Open a new file handle and refresh dev/ino from its fd.
        self.stream = self._open()
        self._statstream()

    def emit(self, record):
        """
        Emit a record, reopening the underlying file first if it has been
        rotated away since the last emit.
        """
        self.reopenIfNeeded()
        logging.FileHandler.emit(self, record)
jpayne@69 481
jpayne@69 482
class SocketHandler(logging.Handler):
    """
    Sends logging records over a stream socket as length-prefixed pickles
    of each LogRecord's attribute dictionary (__dict__).  Because only a
    plain dict is transmitted, the receiver does not need the logging
    package installed to decode events; use makeLogRecord at the receiving
    end to turn an unpickled dict back into a LogRecord.

    The socket is kept open across calls; after a failure it is
    re-established lazily with exponential backoff.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        When the attribute *closeOnError* is set to True - if a socket error
        occurs, the socket is silently closed and then reopened on the next
        logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        # port=None selects a Unix domain socket addressed by `host`.
        self.address = host if port is None else (host, port)
        self.sock = None
        self.closeOnError = False
        self.retryTime = None
        # Exponential backoff parameters.
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        if self.port is None:
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.settimeout(timeout)
            try:
                sock.connect(self.address)
            except OSError:
                sock.close()  # Issue 19182
                raise
            return sock
        return socket.create_connection(self.address, timeout=timeout)

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # retryTime is None the first time back after a disconnect;
        # otherwise only retry once the backoff window has elapsed.
        if self.retryTime is not None and now < self.retryTime:
            return
        try:
            self.sock = self.makeSocket()
            self.retryTime = None  # next time, no delay before trying
        except OSError:
            # Creation failed: grow the backoff (capped) and schedule
            # the next attempt.
            if self.retryTime is None:
                self.retryPeriod = self.retryStart
            else:
                self.retryPeriod = min(self.retryPeriod * self.retryFactor,
                                       self.retryMax)
            self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled byte string to the socket, allowing for partial
        sends which can happen when the network is busy.
        """
        if self.sock is None:
            self.createSocket()
        # self.sock stays None while we are inside the retry backoff
        # window, or when the reconnect attempt itself just failed.
        if self.sock:
            try:
                self.sock.sendall(s)
            except OSError:  # pragma: no cover
                self.sock.close()
                self.sock = None  # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        if record.exc_info:
            # Formatting has the side effect of populating record.exc_text
            # with the traceback text.
            self.format(record)
        # See issue #14436: If msg or args are objects, they may not be
        # available on the receiving end. So we convert the msg % args
        # to a string, save it as msg and zap the args.
        d = dict(record.__dict__)
        d['msg'] = record.getMessage()
        d['args'] = None
        d['exc_info'] = None
        # Issue #25685: delete 'message' if present: redundant with 'msg'
        d.pop('message', None)
        payload = pickle.dumps(d, 1)
        return struct.pack(">L", len(payload)) + payload

    def handleError(self, record):
        """
        Handle an error during logging.

        The most likely cause is a lost connection: with closeOnError set,
        drop the socket so the next event triggers a reconnect; otherwise
        defer to the base class.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None  # try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Pickle the record and write it to the socket in binary format.
        Any error (including socket trouble) is routed to handleError,
        which silently drops the packet or reports it as configured; the
        socket is re-established on a later call.
        """
        try:
            self.send(self.makePickle(record))
        except Exception:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            sock, self.sock = self.sock, None
            if sock:
                sock.close()
            logging.Handler.close(self)
        finally:
            self.release()
jpayne@69 649
class DatagramHandler(SocketHandler):
    """
    Variant of SocketHandler that ships each pickled LogRecord attribute
    dictionary (__dict__) as a single UDP datagram rather than over a
    stream connection.  As with SocketHandler, the receiver only needs
    makeLogRecord — not the logging package's handler — to reconstruct
    a LogRecord from the unpickled dict.
    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = False

    def makeSocket(self):
        """
        Override SocketHandler's factory to produce a UDP socket
        (SOCK_DGRAM) instead of a stream socket.
        """
        family = socket.AF_UNIX if self.port is None else socket.AF_INET
        return socket.socket(family, socket.SOCK_DGRAM)

    def send(self, s):
        """
        Send a pickled byte string to a socket as one datagram.

        Unlike the stream version there is no partial-send handling:
        UDP neither guarantees delivery nor ordering, so a single
        sendto is all that is useful.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, self.address)
jpayne@69 691
jpayne@69 692 class SysLogHandler(logging.Handler):
jpayne@69 693 """
jpayne@69 694 A handler class which sends formatted logging records to a syslog
jpayne@69 695 server. Based on Sam Rushing's syslog module:
jpayne@69 696 http://www.nightmare.com/squirl/python-ext/misc/syslog.py
jpayne@69 697 Contributed by Nicolas Untz (after which minor refactoring changes
jpayne@69 698 have been made).
jpayne@69 699 """
jpayne@69 700
jpayne@69 701 # from <linux/sys/syslog.h>:
jpayne@69 702 # ======================================================================
jpayne@69 703 # priorities/facilities are encoded into a single 32-bit quantity, where
jpayne@69 704 # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
jpayne@69 705 # facility (0-big number). Both the priorities and the facilities map
jpayne@69 706 # roughly one-to-one to strings in the syslogd(8) source code. This
jpayne@69 707 # mapping is included in this file.
jpayne@69 708 #
jpayne@69 709 # priorities (these are ordered)
jpayne@69 710
jpayne@69 711 LOG_EMERG = 0 # system is unusable
jpayne@69 712 LOG_ALERT = 1 # action must be taken immediately
jpayne@69 713 LOG_CRIT = 2 # critical conditions
jpayne@69 714 LOG_ERR = 3 # error conditions
jpayne@69 715 LOG_WARNING = 4 # warning conditions
jpayne@69 716 LOG_NOTICE = 5 # normal but significant condition
jpayne@69 717 LOG_INFO = 6 # informational
jpayne@69 718 LOG_DEBUG = 7 # debug-level messages
jpayne@69 719
jpayne@69 720 # facility codes
jpayne@69 721 LOG_KERN = 0 # kernel messages
jpayne@69 722 LOG_USER = 1 # random user-level messages
jpayne@69 723 LOG_MAIL = 2 # mail system
jpayne@69 724 LOG_DAEMON = 3 # system daemons
jpayne@69 725 LOG_AUTH = 4 # security/authorization messages
jpayne@69 726 LOG_SYSLOG = 5 # messages generated internally by syslogd
jpayne@69 727 LOG_LPR = 6 # line printer subsystem
jpayne@69 728 LOG_NEWS = 7 # network news subsystem
jpayne@69 729 LOG_UUCP = 8 # UUCP subsystem
jpayne@69 730 LOG_CRON = 9 # clock daemon
jpayne@69 731 LOG_AUTHPRIV = 10 # security/authorization messages (private)
jpayne@69 732 LOG_FTP = 11 # FTP daemon
jpayne@69 733
jpayne@69 734 # other codes through 15 reserved for system use
jpayne@69 735 LOG_LOCAL0 = 16 # reserved for local use
jpayne@69 736 LOG_LOCAL1 = 17 # reserved for local use
jpayne@69 737 LOG_LOCAL2 = 18 # reserved for local use
jpayne@69 738 LOG_LOCAL3 = 19 # reserved for local use
jpayne@69 739 LOG_LOCAL4 = 20 # reserved for local use
jpayne@69 740 LOG_LOCAL5 = 21 # reserved for local use
jpayne@69 741 LOG_LOCAL6 = 22 # reserved for local use
jpayne@69 742 LOG_LOCAL7 = 23 # reserved for local use
jpayne@69 743
jpayne@69 744 priority_names = {
jpayne@69 745 "alert": LOG_ALERT,
jpayne@69 746 "crit": LOG_CRIT,
jpayne@69 747 "critical": LOG_CRIT,
jpayne@69 748 "debug": LOG_DEBUG,
jpayne@69 749 "emerg": LOG_EMERG,
jpayne@69 750 "err": LOG_ERR,
jpayne@69 751 "error": LOG_ERR, # DEPRECATED
jpayne@69 752 "info": LOG_INFO,
jpayne@69 753 "notice": LOG_NOTICE,
jpayne@69 754 "panic": LOG_EMERG, # DEPRECATED
jpayne@69 755 "warn": LOG_WARNING, # DEPRECATED
jpayne@69 756 "warning": LOG_WARNING,
jpayne@69 757 }
jpayne@69 758
jpayne@69 759 facility_names = {
jpayne@69 760 "auth": LOG_AUTH,
jpayne@69 761 "authpriv": LOG_AUTHPRIV,
jpayne@69 762 "cron": LOG_CRON,
jpayne@69 763 "daemon": LOG_DAEMON,
jpayne@69 764 "ftp": LOG_FTP,
jpayne@69 765 "kern": LOG_KERN,
jpayne@69 766 "lpr": LOG_LPR,
jpayne@69 767 "mail": LOG_MAIL,
jpayne@69 768 "news": LOG_NEWS,
jpayne@69 769 "security": LOG_AUTH, # DEPRECATED
jpayne@69 770 "syslog": LOG_SYSLOG,
jpayne@69 771 "user": LOG_USER,
jpayne@69 772 "uucp": LOG_UUCP,
jpayne@69 773 "local0": LOG_LOCAL0,
jpayne@69 774 "local1": LOG_LOCAL1,
jpayne@69 775 "local2": LOG_LOCAL2,
jpayne@69 776 "local3": LOG_LOCAL3,
jpayne@69 777 "local4": LOG_LOCAL4,
jpayne@69 778 "local5": LOG_LOCAL5,
jpayne@69 779 "local6": LOG_LOCAL6,
jpayne@69 780 "local7": LOG_LOCAL7,
jpayne@69 781 }
jpayne@69 782
jpayne@69 783 #The map below appears to be trivially lowercasing the key. However,
jpayne@69 784 #there's more to it than meets the eye - in some locales, lowercasing
jpayne@69 785 #gives unexpected results. See SF #1524081: in the Turkish locale,
jpayne@69 786 #"INFO".lower() != "info"
jpayne@69 787 priority_map = {
jpayne@69 788 "DEBUG" : "debug",
jpayne@69 789 "INFO" : "info",
jpayne@69 790 "WARNING" : "warning",
jpayne@69 791 "ERROR" : "error",
jpayne@69 792 "CRITICAL" : "critical"
jpayne@69 793 }
jpayne@69 794
jpayne@69 795 def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
jpayne@69 796 facility=LOG_USER, socktype=None):
jpayne@69 797 """
jpayne@69 798 Initialize a handler.
jpayne@69 799
jpayne@69 800 If address is specified as a string, a UNIX socket is used. To log to a
jpayne@69 801 local syslogd, "SysLogHandler(address="/dev/log")" can be used.
jpayne@69 802 If facility is not specified, LOG_USER is used. If socktype is
jpayne@69 803 specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
jpayne@69 804 socket type will be used. For Unix sockets, you can also specify a
jpayne@69 805 socktype of None, in which case socket.SOCK_DGRAM will be used, falling
jpayne@69 806 back to socket.SOCK_STREAM.
jpayne@69 807 """
jpayne@69 808 logging.Handler.__init__(self)
jpayne@69 809
jpayne@69 810 self.address = address
jpayne@69 811 self.facility = facility
jpayne@69 812 self.socktype = socktype
jpayne@69 813
jpayne@69 814 if isinstance(address, str):
jpayne@69 815 self.unixsocket = True
jpayne@69 816 # Syslog server may be unavailable during handler initialisation.
jpayne@69 817 # C's openlog() function also ignores connection errors.
jpayne@69 818 # Moreover, we ignore these errors while logging, so it not worse
jpayne@69 819 # to ignore it also here.
jpayne@69 820 try:
jpayne@69 821 self._connect_unixsocket(address)
jpayne@69 822 except OSError:
jpayne@69 823 pass
jpayne@69 824 else:
jpayne@69 825 self.unixsocket = False
jpayne@69 826 if socktype is None:
jpayne@69 827 socktype = socket.SOCK_DGRAM
jpayne@69 828 host, port = address
jpayne@69 829 ress = socket.getaddrinfo(host, port, 0, socktype)
jpayne@69 830 if not ress:
jpayne@69 831 raise OSError("getaddrinfo returns an empty list")
jpayne@69 832 for res in ress:
jpayne@69 833 af, socktype, proto, _, sa = res
jpayne@69 834 err = sock = None
jpayne@69 835 try:
jpayne@69 836 sock = socket.socket(af, socktype, proto)
jpayne@69 837 if socktype == socket.SOCK_STREAM:
jpayne@69 838 sock.connect(sa)
jpayne@69 839 break
jpayne@69 840 except OSError as exc:
jpayne@69 841 err = exc
jpayne@69 842 if sock is not None:
jpayne@69 843 sock.close()
jpayne@69 844 if err is not None:
jpayne@69 845 raise err
jpayne@69 846 self.socket = sock
jpayne@69 847 self.socktype = socktype
jpayne@69 848
jpayne@69 849 def _connect_unixsocket(self, address):
jpayne@69 850 use_socktype = self.socktype
jpayne@69 851 if use_socktype is None:
jpayne@69 852 use_socktype = socket.SOCK_DGRAM
jpayne@69 853 self.socket = socket.socket(socket.AF_UNIX, use_socktype)
jpayne@69 854 try:
jpayne@69 855 self.socket.connect(address)
jpayne@69 856 # it worked, so set self.socktype to the used type
jpayne@69 857 self.socktype = use_socktype
jpayne@69 858 except OSError:
jpayne@69 859 self.socket.close()
jpayne@69 860 if self.socktype is not None:
jpayne@69 861 # user didn't specify falling back, so fail
jpayne@69 862 raise
jpayne@69 863 use_socktype = socket.SOCK_STREAM
jpayne@69 864 self.socket = socket.socket(socket.AF_UNIX, use_socktype)
jpayne@69 865 try:
jpayne@69 866 self.socket.connect(address)
jpayne@69 867 # it worked, so set self.socktype to the used type
jpayne@69 868 self.socktype = use_socktype
jpayne@69 869 except OSError:
jpayne@69 870 self.socket.close()
jpayne@69 871 raise
jpayne@69 872
jpayne@69 873 def encodePriority(self, facility, priority):
jpayne@69 874 """
jpayne@69 875 Encode the facility and priority. You can pass in strings or
jpayne@69 876 integers - if strings are passed, the facility_names and
jpayne@69 877 priority_names mapping dictionaries are used to convert them to
jpayne@69 878 integers.
jpayne@69 879 """
jpayne@69 880 if isinstance(facility, str):
jpayne@69 881 facility = self.facility_names[facility]
jpayne@69 882 if isinstance(priority, str):
jpayne@69 883 priority = self.priority_names[priority]
jpayne@69 884 return (facility << 3) | priority
jpayne@69 885
jpayne@69 886 def close(self):
jpayne@69 887 """
jpayne@69 888 Closes the socket.
jpayne@69 889 """
jpayne@69 890 self.acquire()
jpayne@69 891 try:
jpayne@69 892 self.socket.close()
jpayne@69 893 logging.Handler.close(self)
jpayne@69 894 finally:
jpayne@69 895 self.release()
jpayne@69 896
jpayne@69 897 def mapPriority(self, levelName):
jpayne@69 898 """
jpayne@69 899 Map a logging level name to a key in the priority_names map.
jpayne@69 900 This is useful in two scenarios: when custom levels are being
jpayne@69 901 used, and in the case where you can't do a straightforward
jpayne@69 902 mapping by lowercasing the logging level name because of locale-
jpayne@69 903 specific issues (see SF #1524081).
jpayne@69 904 """
jpayne@69 905 return self.priority_map.get(levelName, "warning")
jpayne@69 906
jpayne@69 907 ident = '' # prepended to all messages
jpayne@69 908 append_nul = True # some old syslog daemons expect a NUL terminator
jpayne@69 909
jpayne@69 910 def emit(self, record):
jpayne@69 911 """
jpayne@69 912 Emit a record.
jpayne@69 913
jpayne@69 914 The record is formatted, and then sent to the syslog server. If
jpayne@69 915 exception information is present, it is NOT sent to the server.
jpayne@69 916 """
jpayne@69 917 try:
jpayne@69 918 msg = self.format(record)
jpayne@69 919 if self.ident:
jpayne@69 920 msg = self.ident + msg
jpayne@69 921 if self.append_nul:
jpayne@69 922 msg += '\000'
jpayne@69 923
jpayne@69 924 # We need to convert record level to lowercase, maybe this will
jpayne@69 925 # change in the future.
jpayne@69 926 prio = '<%d>' % self.encodePriority(self.facility,
jpayne@69 927 self.mapPriority(record.levelname))
jpayne@69 928 prio = prio.encode('utf-8')
jpayne@69 929 # Message is a string. Convert to bytes as required by RFC 5424
jpayne@69 930 msg = msg.encode('utf-8')
jpayne@69 931 msg = prio + msg
jpayne@69 932 if self.unixsocket:
jpayne@69 933 try:
jpayne@69 934 self.socket.send(msg)
jpayne@69 935 except OSError:
jpayne@69 936 self.socket.close()
jpayne@69 937 self._connect_unixsocket(self.address)
jpayne@69 938 self.socket.send(msg)
jpayne@69 939 elif self.socktype == socket.SOCK_DGRAM:
jpayne@69 940 self.socket.sendto(msg, self.address)
jpayne@69 941 else:
jpayne@69 942 self.socket.sendall(msg)
jpayne@69 943 except Exception:
jpayne@69 944 self.handleError(record)
jpayne@69 945
jpayne@69 946 class SMTPHandler(logging.Handler):
jpayne@69 947 """
jpayne@69 948 A handler class which sends an SMTP email for each logging event.
jpayne@69 949 """
jpayne@69 950 def __init__(self, mailhost, fromaddr, toaddrs, subject,
jpayne@69 951 credentials=None, secure=None, timeout=5.0):
jpayne@69 952 """
jpayne@69 953 Initialize the handler.
jpayne@69 954
jpayne@69 955 Initialize the instance with the from and to addresses and subject
jpayne@69 956 line of the email. To specify a non-standard SMTP port, use the
jpayne@69 957 (host, port) tuple format for the mailhost argument. To specify
jpayne@69 958 authentication credentials, supply a (username, password) tuple
jpayne@69 959 for the credentials argument. To specify the use of a secure
jpayne@69 960 protocol (TLS), pass in a tuple for the secure argument. This will
jpayne@69 961 only be used when authentication credentials are supplied. The tuple
jpayne@69 962 will be either an empty tuple, or a single-value tuple with the name
jpayne@69 963 of a keyfile, or a 2-value tuple with the names of the keyfile and
jpayne@69 964 certificate file. (This tuple is passed to the `starttls` method).
jpayne@69 965 A timeout in seconds can be specified for the SMTP connection (the
jpayne@69 966 default is one second).
jpayne@69 967 """
jpayne@69 968 logging.Handler.__init__(self)
jpayne@69 969 if isinstance(mailhost, (list, tuple)):
jpayne@69 970 self.mailhost, self.mailport = mailhost
jpayne@69 971 else:
jpayne@69 972 self.mailhost, self.mailport = mailhost, None
jpayne@69 973 if isinstance(credentials, (list, tuple)):
jpayne@69 974 self.username, self.password = credentials
jpayne@69 975 else:
jpayne@69 976 self.username = None
jpayne@69 977 self.fromaddr = fromaddr
jpayne@69 978 if isinstance(toaddrs, str):
jpayne@69 979 toaddrs = [toaddrs]
jpayne@69 980 self.toaddrs = toaddrs
jpayne@69 981 self.subject = subject
jpayne@69 982 self.secure = secure
jpayne@69 983 self.timeout = timeout
jpayne@69 984
jpayne@69 985 def getSubject(self, record):
jpayne@69 986 """
jpayne@69 987 Determine the subject for the email.
jpayne@69 988
jpayne@69 989 If you want to specify a subject line which is record-dependent,
jpayne@69 990 override this method.
jpayne@69 991 """
jpayne@69 992 return self.subject
jpayne@69 993
jpayne@69 994 def emit(self, record):
jpayne@69 995 """
jpayne@69 996 Emit a record.
jpayne@69 997
jpayne@69 998 Format the record and send it to the specified addressees.
jpayne@69 999 """
jpayne@69 1000 try:
jpayne@69 1001 import smtplib
jpayne@69 1002 from email.message import EmailMessage
jpayne@69 1003 import email.utils
jpayne@69 1004
jpayne@69 1005 port = self.mailport
jpayne@69 1006 if not port:
jpayne@69 1007 port = smtplib.SMTP_PORT
jpayne@69 1008 smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
jpayne@69 1009 msg = EmailMessage()
jpayne@69 1010 msg['From'] = self.fromaddr
jpayne@69 1011 msg['To'] = ','.join(self.toaddrs)
jpayne@69 1012 msg['Subject'] = self.getSubject(record)
jpayne@69 1013 msg['Date'] = email.utils.localtime()
jpayne@69 1014 msg.set_content(self.format(record))
jpayne@69 1015 if self.username:
jpayne@69 1016 if self.secure is not None:
jpayne@69 1017 smtp.ehlo()
jpayne@69 1018 smtp.starttls(*self.secure)
jpayne@69 1019 smtp.ehlo()
jpayne@69 1020 smtp.login(self.username, self.password)
jpayne@69 1021 smtp.send_message(msg)
jpayne@69 1022 smtp.quit()
jpayne@69 1023 except Exception:
jpayne@69 1024 self.handleError(record)
jpayne@69 1025
jpayne@69 1026 class NTEventLogHandler(logging.Handler):
jpayne@69 1027 """
jpayne@69 1028 A handler class which sends events to the NT Event Log. Adds a
jpayne@69 1029 registry entry for the specified application name. If no dllname is
jpayne@69 1030 provided, win32service.pyd (which contains some basic message
jpayne@69 1031 placeholders) is used. Note that use of these placeholders will make
jpayne@69 1032 your event logs big, as the entire message source is held in the log.
jpayne@69 1033 If you want slimmer logs, you have to pass in the name of your own DLL
jpayne@69 1034 which contains the message definitions you want to use in the event log.
jpayne@69 1035 """
jpayne@69 1036 def __init__(self, appname, dllname=None, logtype="Application"):
jpayne@69 1037 logging.Handler.__init__(self)
jpayne@69 1038 try:
jpayne@69 1039 import win32evtlogutil, win32evtlog
jpayne@69 1040 self.appname = appname
jpayne@69 1041 self._welu = win32evtlogutil
jpayne@69 1042 if not dllname:
jpayne@69 1043 dllname = os.path.split(self._welu.__file__)
jpayne@69 1044 dllname = os.path.split(dllname[0])
jpayne@69 1045 dllname = os.path.join(dllname[0], r'win32service.pyd')
jpayne@69 1046 self.dllname = dllname
jpayne@69 1047 self.logtype = logtype
jpayne@69 1048 self._welu.AddSourceToRegistry(appname, dllname, logtype)
jpayne@69 1049 self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
jpayne@69 1050 self.typemap = {
jpayne@69 1051 logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
jpayne@69 1052 logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
jpayne@69 1053 logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
jpayne@69 1054 logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
jpayne@69 1055 logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
jpayne@69 1056 }
jpayne@69 1057 except ImportError:
jpayne@69 1058 print("The Python Win32 extensions for NT (service, event "\
jpayne@69 1059 "logging) appear not to be available.")
jpayne@69 1060 self._welu = None
jpayne@69 1061
jpayne@69 1062 def getMessageID(self, record):
jpayne@69 1063 """
jpayne@69 1064 Return the message ID for the event record. If you are using your
jpayne@69 1065 own messages, you could do this by having the msg passed to the
jpayne@69 1066 logger being an ID rather than a formatting string. Then, in here,
jpayne@69 1067 you could use a dictionary lookup to get the message ID. This
jpayne@69 1068 version returns 1, which is the base message ID in win32service.pyd.
jpayne@69 1069 """
jpayne@69 1070 return 1
jpayne@69 1071
jpayne@69 1072 def getEventCategory(self, record):
jpayne@69 1073 """
jpayne@69 1074 Return the event category for the record.
jpayne@69 1075
jpayne@69 1076 Override this if you want to specify your own categories. This version
jpayne@69 1077 returns 0.
jpayne@69 1078 """
jpayne@69 1079 return 0
jpayne@69 1080
jpayne@69 1081 def getEventType(self, record):
jpayne@69 1082 """
jpayne@69 1083 Return the event type for the record.
jpayne@69 1084
jpayne@69 1085 Override this if you want to specify your own types. This version does
jpayne@69 1086 a mapping using the handler's typemap attribute, which is set up in
jpayne@69 1087 __init__() to a dictionary which contains mappings for DEBUG, INFO,
jpayne@69 1088 WARNING, ERROR and CRITICAL. If you are using your own levels you will
jpayne@69 1089 either need to override this method or place a suitable dictionary in
jpayne@69 1090 the handler's typemap attribute.
jpayne@69 1091 """
jpayne@69 1092 return self.typemap.get(record.levelno, self.deftype)
jpayne@69 1093
jpayne@69 1094 def emit(self, record):
jpayne@69 1095 """
jpayne@69 1096 Emit a record.
jpayne@69 1097
jpayne@69 1098 Determine the message ID, event category and event type. Then
jpayne@69 1099 log the message in the NT event log.
jpayne@69 1100 """
jpayne@69 1101 if self._welu:
jpayne@69 1102 try:
jpayne@69 1103 id = self.getMessageID(record)
jpayne@69 1104 cat = self.getEventCategory(record)
jpayne@69 1105 type = self.getEventType(record)
jpayne@69 1106 msg = self.format(record)
jpayne@69 1107 self._welu.ReportEvent(self.appname, id, cat, type, [msg])
jpayne@69 1108 except Exception:
jpayne@69 1109 self.handleError(record)
jpayne@69 1110
jpayne@69 1111 def close(self):
jpayne@69 1112 """
jpayne@69 1113 Clean up this handler.
jpayne@69 1114
jpayne@69 1115 You can remove the application name from the registry as a
jpayne@69 1116 source of event log entries. However, if you do this, you will
jpayne@69 1117 not be able to see the events as you intended in the Event Log
jpayne@69 1118 Viewer - it needs to be able to access the registry to get the
jpayne@69 1119 DLL name.
jpayne@69 1120 """
jpayne@69 1121 #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
jpayne@69 1122 logging.Handler.close(self)
jpayne@69 1123
jpayne@69 1124 class HTTPHandler(logging.Handler):
jpayne@69 1125 """
jpayne@69 1126 A class which sends records to a Web server, using either GET or
jpayne@69 1127 POST semantics.
jpayne@69 1128 """
jpayne@69 1129 def __init__(self, host, url, method="GET", secure=False, credentials=None,
jpayne@69 1130 context=None):
jpayne@69 1131 """
jpayne@69 1132 Initialize the instance with the host, the request URL, and the method
jpayne@69 1133 ("GET" or "POST")
jpayne@69 1134 """
jpayne@69 1135 logging.Handler.__init__(self)
jpayne@69 1136 method = method.upper()
jpayne@69 1137 if method not in ["GET", "POST"]:
jpayne@69 1138 raise ValueError("method must be GET or POST")
jpayne@69 1139 if not secure and context is not None:
jpayne@69 1140 raise ValueError("context parameter only makes sense "
jpayne@69 1141 "with secure=True")
jpayne@69 1142 self.host = host
jpayne@69 1143 self.url = url
jpayne@69 1144 self.method = method
jpayne@69 1145 self.secure = secure
jpayne@69 1146 self.credentials = credentials
jpayne@69 1147 self.context = context
jpayne@69 1148
jpayne@69 1149 def mapLogRecord(self, record):
jpayne@69 1150 """
jpayne@69 1151 Default implementation of mapping the log record into a dict
jpayne@69 1152 that is sent as the CGI data. Overwrite in your class.
jpayne@69 1153 Contributed by Franz Glasner.
jpayne@69 1154 """
jpayne@69 1155 return record.__dict__
jpayne@69 1156
jpayne@69 1157 def emit(self, record):
jpayne@69 1158 """
jpayne@69 1159 Emit a record.
jpayne@69 1160
jpayne@69 1161 Send the record to the Web server as a percent-encoded dictionary
jpayne@69 1162 """
jpayne@69 1163 try:
jpayne@69 1164 import http.client, urllib.parse
jpayne@69 1165 host = self.host
jpayne@69 1166 if self.secure:
jpayne@69 1167 h = http.client.HTTPSConnection(host, context=self.context)
jpayne@69 1168 else:
jpayne@69 1169 h = http.client.HTTPConnection(host)
jpayne@69 1170 url = self.url
jpayne@69 1171 data = urllib.parse.urlencode(self.mapLogRecord(record))
jpayne@69 1172 if self.method == "GET":
jpayne@69 1173 if (url.find('?') >= 0):
jpayne@69 1174 sep = '&'
jpayne@69 1175 else:
jpayne@69 1176 sep = '?'
jpayne@69 1177 url = url + "%c%s" % (sep, data)
jpayne@69 1178 h.putrequest(self.method, url)
jpayne@69 1179 # support multiple hosts on one IP address...
jpayne@69 1180 # need to strip optional :port from host, if present
jpayne@69 1181 i = host.find(":")
jpayne@69 1182 if i >= 0:
jpayne@69 1183 host = host[:i]
jpayne@69 1184 # See issue #30904: putrequest call above already adds this header
jpayne@69 1185 # on Python 3.x.
jpayne@69 1186 # h.putheader("Host", host)
jpayne@69 1187 if self.method == "POST":
jpayne@69 1188 h.putheader("Content-type",
jpayne@69 1189 "application/x-www-form-urlencoded")
jpayne@69 1190 h.putheader("Content-length", str(len(data)))
jpayne@69 1191 if self.credentials:
jpayne@69 1192 import base64
jpayne@69 1193 s = ('%s:%s' % self.credentials).encode('utf-8')
jpayne@69 1194 s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
jpayne@69 1195 h.putheader('Authorization', s)
jpayne@69 1196 h.endheaders()
jpayne@69 1197 if self.method == "POST":
jpayne@69 1198 h.send(data.encode('utf-8'))
jpayne@69 1199 h.getresponse() #can't do anything with the result
jpayne@69 1200 except Exception:
jpayne@69 1201 self.handleError(record)
jpayne@69 1202
jpayne@69 1203 class BufferingHandler(logging.Handler):
jpayne@69 1204 """
jpayne@69 1205 A handler class which buffers logging records in memory. Whenever each
jpayne@69 1206 record is added to the buffer, a check is made to see if the buffer should
jpayne@69 1207 be flushed. If it should, then flush() is expected to do what's needed.
jpayne@69 1208 """
jpayne@69 1209 def __init__(self, capacity):
jpayne@69 1210 """
jpayne@69 1211 Initialize the handler with the buffer size.
jpayne@69 1212 """
jpayne@69 1213 logging.Handler.__init__(self)
jpayne@69 1214 self.capacity = capacity
jpayne@69 1215 self.buffer = []
jpayne@69 1216
jpayne@69 1217 def shouldFlush(self, record):
jpayne@69 1218 """
jpayne@69 1219 Should the handler flush its buffer?
jpayne@69 1220
jpayne@69 1221 Returns true if the buffer is up to capacity. This method can be
jpayne@69 1222 overridden to implement custom flushing strategies.
jpayne@69 1223 """
jpayne@69 1224 return (len(self.buffer) >= self.capacity)
jpayne@69 1225
jpayne@69 1226 def emit(self, record):
jpayne@69 1227 """
jpayne@69 1228 Emit a record.
jpayne@69 1229
jpayne@69 1230 Append the record. If shouldFlush() tells us to, call flush() to process
jpayne@69 1231 the buffer.
jpayne@69 1232 """
jpayne@69 1233 self.buffer.append(record)
jpayne@69 1234 if self.shouldFlush(record):
jpayne@69 1235 self.flush()
jpayne@69 1236
jpayne@69 1237 def flush(self):
jpayne@69 1238 """
jpayne@69 1239 Override to implement custom flushing behaviour.
jpayne@69 1240
jpayne@69 1241 This version just zaps the buffer to empty.
jpayne@69 1242 """
jpayne@69 1243 self.acquire()
jpayne@69 1244 try:
jpayne@69 1245 self.buffer = []
jpayne@69 1246 finally:
jpayne@69 1247 self.release()
jpayne@69 1248
jpayne@69 1249 def close(self):
jpayne@69 1250 """
jpayne@69 1251 Close the handler.
jpayne@69 1252
jpayne@69 1253 This version just flushes and chains to the parent class' close().
jpayne@69 1254 """
jpayne@69 1255 try:
jpayne@69 1256 self.flush()
jpayne@69 1257 finally:
jpayne@69 1258 logging.Handler.close(self)
jpayne@69 1259
jpayne@69 1260 class MemoryHandler(BufferingHandler):
jpayne@69 1261 """
jpayne@69 1262 A handler class which buffers logging records in memory, periodically
jpayne@69 1263 flushing them to a target handler. Flushing occurs whenever the buffer
jpayne@69 1264 is full, or when an event of a certain severity or greater is seen.
jpayne@69 1265 """
jpayne@69 1266 def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
jpayne@69 1267 flushOnClose=True):
jpayne@69 1268 """
jpayne@69 1269 Initialize the handler with the buffer size, the level at which
jpayne@69 1270 flushing should occur and an optional target.
jpayne@69 1271
jpayne@69 1272 Note that without a target being set either here or via setTarget(),
jpayne@69 1273 a MemoryHandler is no use to anyone!
jpayne@69 1274
jpayne@69 1275 The ``flushOnClose`` argument is ``True`` for backward compatibility
jpayne@69 1276 reasons - the old behaviour is that when the handler is closed, the
jpayne@69 1277 buffer is flushed, even if the flush level hasn't been exceeded nor the
jpayne@69 1278 capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``.
jpayne@69 1279 """
jpayne@69 1280 BufferingHandler.__init__(self, capacity)
jpayne@69 1281 self.flushLevel = flushLevel
jpayne@69 1282 self.target = target
jpayne@69 1283 # See Issue #26559 for why this has been added
jpayne@69 1284 self.flushOnClose = flushOnClose
jpayne@69 1285
jpayne@69 1286 def shouldFlush(self, record):
jpayne@69 1287 """
jpayne@69 1288 Check for buffer full or a record at the flushLevel or higher.
jpayne@69 1289 """
jpayne@69 1290 return (len(self.buffer) >= self.capacity) or \
jpayne@69 1291 (record.levelno >= self.flushLevel)
jpayne@69 1292
jpayne@69 1293 def setTarget(self, target):
jpayne@69 1294 """
jpayne@69 1295 Set the target handler for this handler.
jpayne@69 1296 """
jpayne@69 1297 self.target = target
jpayne@69 1298
jpayne@69 1299 def flush(self):
jpayne@69 1300 """
jpayne@69 1301 For a MemoryHandler, flushing means just sending the buffered
jpayne@69 1302 records to the target, if there is one. Override if you want
jpayne@69 1303 different behaviour.
jpayne@69 1304
jpayne@69 1305 The record buffer is also cleared by this operation.
jpayne@69 1306 """
jpayne@69 1307 self.acquire()
jpayne@69 1308 try:
jpayne@69 1309 if self.target:
jpayne@69 1310 for record in self.buffer:
jpayne@69 1311 self.target.handle(record)
jpayne@69 1312 self.buffer = []
jpayne@69 1313 finally:
jpayne@69 1314 self.release()
jpayne@69 1315
jpayne@69 1316 def close(self):
jpayne@69 1317 """
jpayne@69 1318 Flush, if appropriately configured, set the target to None and lose the
jpayne@69 1319 buffer.
jpayne@69 1320 """
jpayne@69 1321 try:
jpayne@69 1322 if self.flushOnClose:
jpayne@69 1323 self.flush()
jpayne@69 1324 finally:
jpayne@69 1325 self.acquire()
jpayne@69 1326 try:
jpayne@69 1327 self.target = None
jpayne@69 1328 BufferingHandler.close(self)
jpayne@69 1329 finally:
jpayne@69 1330 self.release()
jpayne@69 1331
jpayne@69 1332
jpayne@69 1333 class QueueHandler(logging.Handler):
jpayne@69 1334 """
jpayne@69 1335 This handler sends events to a queue. Typically, it would be used together
jpayne@69 1336 with a multiprocessing Queue to centralise logging to file in one process
jpayne@69 1337 (in a multi-process application), so as to avoid file write contention
jpayne@69 1338 between processes.
jpayne@69 1339
jpayne@69 1340 This code is new in Python 3.2, but this class can be copy pasted into
jpayne@69 1341 user code for use with earlier Python versions.
jpayne@69 1342 """
jpayne@69 1343
jpayne@69 1344 def __init__(self, queue):
jpayne@69 1345 """
jpayne@69 1346 Initialise an instance, using the passed queue.
jpayne@69 1347 """
jpayne@69 1348 logging.Handler.__init__(self)
jpayne@69 1349 self.queue = queue
jpayne@69 1350
jpayne@69 1351 def enqueue(self, record):
jpayne@69 1352 """
jpayne@69 1353 Enqueue a record.
jpayne@69 1354
jpayne@69 1355 The base implementation uses put_nowait. You may want to override
jpayne@69 1356 this method if you want to use blocking, timeouts or custom queue
jpayne@69 1357 implementations.
jpayne@69 1358 """
jpayne@69 1359 self.queue.put_nowait(record)
jpayne@69 1360
jpayne@69 1361 def prepare(self, record):
jpayne@69 1362 """
jpayne@69 1363 Prepares a record for queuing. The object returned by this method is
jpayne@69 1364 enqueued.
jpayne@69 1365
jpayne@69 1366 The base implementation formats the record to merge the message
jpayne@69 1367 and arguments, and removes unpickleable items from the record
jpayne@69 1368 in-place.
jpayne@69 1369
jpayne@69 1370 You might want to override this method if you want to convert
jpayne@69 1371 the record to a dict or JSON string, or send a modified copy
jpayne@69 1372 of the record while leaving the original intact.
jpayne@69 1373 """
jpayne@69 1374 # The format operation gets traceback text into record.exc_text
jpayne@69 1375 # (if there's exception data), and also returns the formatted
jpayne@69 1376 # message. We can then use this to replace the original
jpayne@69 1377 # msg + args, as these might be unpickleable. We also zap the
jpayne@69 1378 # exc_info and exc_text attributes, as they are no longer
jpayne@69 1379 # needed and, if not None, will typically not be pickleable.
jpayne@69 1380 msg = self.format(record)
jpayne@69 1381 # bpo-35726: make copy of record to avoid affecting other handlers in the chain.
jpayne@69 1382 record = copy.copy(record)
jpayne@69 1383 record.message = msg
jpayne@69 1384 record.msg = msg
jpayne@69 1385 record.args = None
jpayne@69 1386 record.exc_info = None
jpayne@69 1387 record.exc_text = None
jpayne@69 1388 return record
jpayne@69 1389
jpayne@69 1390 def emit(self, record):
jpayne@69 1391 """
jpayne@69 1392 Emit a record.
jpayne@69 1393
jpayne@69 1394 Writes the LogRecord to the queue, preparing it for pickling first.
jpayne@69 1395 """
jpayne@69 1396 try:
jpayne@69 1397 self.enqueue(self.prepare(record))
jpayne@69 1398 except Exception:
jpayne@69 1399 self.handleError(record)
jpayne@69 1400
jpayne@69 1401
jpayne@69 1402 class QueueListener(object):
jpayne@69 1403 """
jpayne@69 1404 This class implements an internal threaded listener which watches for
jpayne@69 1405 LogRecords being added to a queue, removes them and passes them to a
jpayne@69 1406 list of handlers for processing.
jpayne@69 1407 """
jpayne@69 1408 _sentinel = None
jpayne@69 1409
jpayne@69 1410 def __init__(self, queue, *handlers, respect_handler_level=False):
jpayne@69 1411 """
jpayne@69 1412 Initialise an instance with the specified queue and
jpayne@69 1413 handlers.
jpayne@69 1414 """
jpayne@69 1415 self.queue = queue
jpayne@69 1416 self.handlers = handlers
jpayne@69 1417 self._thread = None
jpayne@69 1418 self.respect_handler_level = respect_handler_level
jpayne@69 1419
jpayne@69 1420 def dequeue(self, block):
jpayne@69 1421 """
jpayne@69 1422 Dequeue a record and return it, optionally blocking.
jpayne@69 1423
jpayne@69 1424 The base implementation uses get. You may want to override this method
jpayne@69 1425 if you want to use timeouts or work with custom queue implementations.
jpayne@69 1426 """
jpayne@69 1427 return self.queue.get(block)
jpayne@69 1428
jpayne@69 1429 def start(self):
jpayne@69 1430 """
jpayne@69 1431 Start the listener.
jpayne@69 1432
jpayne@69 1433 This starts up a background thread to monitor the queue for
jpayne@69 1434 LogRecords to process.
jpayne@69 1435 """
jpayne@69 1436 self._thread = t = threading.Thread(target=self._monitor)
jpayne@69 1437 t.daemon = True
jpayne@69 1438 t.start()
jpayne@69 1439
jpayne@69 1440 def prepare(self, record):
jpayne@69 1441 """
jpayne@69 1442 Prepare a record for handling.
jpayne@69 1443
jpayne@69 1444 This method just returns the passed-in record. You may want to
jpayne@69 1445 override this method if you need to do any custom marshalling or
jpayne@69 1446 manipulation of the record before passing it to the handlers.
jpayne@69 1447 """
jpayne@69 1448 return record
jpayne@69 1449
jpayne@69 1450 def handle(self, record):
jpayne@69 1451 """
jpayne@69 1452 Handle a record.
jpayne@69 1453
jpayne@69 1454 This just loops through the handlers offering them the record
jpayne@69 1455 to handle.
jpayne@69 1456 """
jpayne@69 1457 record = self.prepare(record)
jpayne@69 1458 for handler in self.handlers:
jpayne@69 1459 if not self.respect_handler_level:
jpayne@69 1460 process = True
jpayne@69 1461 else:
jpayne@69 1462 process = record.levelno >= handler.level
jpayne@69 1463 if process:
jpayne@69 1464 handler.handle(record)
jpayne@69 1465
jpayne@69 1466 def _monitor(self):
jpayne@69 1467 """
jpayne@69 1468 Monitor the queue for records, and ask the handler
jpayne@69 1469 to deal with them.
jpayne@69 1470
jpayne@69 1471 This method runs on a separate, internal thread.
jpayne@69 1472 The thread will terminate if it sees a sentinel object in the queue.
jpayne@69 1473 """
jpayne@69 1474 q = self.queue
jpayne@69 1475 has_task_done = hasattr(q, 'task_done')
jpayne@69 1476 while True:
jpayne@69 1477 try:
jpayne@69 1478 record = self.dequeue(True)
jpayne@69 1479 if record is self._sentinel:
jpayne@69 1480 if has_task_done:
jpayne@69 1481 q.task_done()
jpayne@69 1482 break
jpayne@69 1483 self.handle(record)
jpayne@69 1484 if has_task_done:
jpayne@69 1485 q.task_done()
jpayne@69 1486 except queue.Empty:
jpayne@69 1487 break
jpayne@69 1488
jpayne@69 1489 def enqueue_sentinel(self):
jpayne@69 1490 """
jpayne@69 1491 This is used to enqueue the sentinel record.
jpayne@69 1492
jpayne@69 1493 The base implementation uses put_nowait. You may want to override this
jpayne@69 1494 method if you want to use timeouts or work with custom queue
jpayne@69 1495 implementations.
jpayne@69 1496 """
jpayne@69 1497 self.queue.put_nowait(self._sentinel)
jpayne@69 1498
jpayne@69 1499 def stop(self):
jpayne@69 1500 """
jpayne@69 1501 Stop the listener.
jpayne@69 1502
jpayne@69 1503 This asks the thread to terminate, and then waits for it to do so.
jpayne@69 1504 Note that if you don't call this before your application exits, there
jpayne@69 1505 may be some records still left on the queue, which won't be processed.
jpayne@69 1506 """
jpayne@69 1507 self.enqueue_sentinel()
jpayne@69 1508 self._thread.join()
jpayne@69 1509 self._thread = None