jpayne@68
|
1 # Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.
|
jpayne@68
|
2 #
|
jpayne@68
|
3 # Permission to use, copy, modify, and distribute this software and its
|
jpayne@68
|
4 # documentation for any purpose and without fee is hereby granted,
|
jpayne@68
|
5 # provided that the above copyright notice appear in all copies and that
|
jpayne@68
|
6 # both that copyright notice and this permission notice appear in
|
jpayne@68
|
7 # supporting documentation, and that the name of Vinay Sajip
|
jpayne@68
|
8 # not be used in advertising or publicity pertaining to distribution
|
jpayne@68
|
9 # of the software without specific, written prior permission.
|
jpayne@68
|
10 # VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
|
jpayne@68
|
11 # ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
|
jpayne@68
|
12 # VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
|
jpayne@68
|
13 # ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
|
jpayne@68
|
14 # IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
|
jpayne@68
|
15 # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
jpayne@68
|
16
|
jpayne@68
|
17 """
|
jpayne@68
|
18 Additional handlers for the logging package for Python. The core package is
|
jpayne@68
|
19 based on PEP 282 and comments thereto in comp.lang.python.
|
jpayne@68
|
20
|
jpayne@68
|
21 Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
|
jpayne@68
|
22
|
jpayne@68
|
23 To use, simply 'import logging.handlers' and log away!
|
jpayne@68
|
24 """
|
jpayne@68
|
25
|
jpayne@68
|
26 import logging, socket, os, pickle, struct, time, re
|
jpayne@68
|
27 from stat import ST_DEV, ST_INO, ST_MTIME
|
jpayne@68
|
28 import queue
|
jpayne@68
|
29 import threading
|
jpayne@68
|
30 import copy
|
jpayne@68
|
31
|
jpayne@68
|
#
# Some constants...
#

# Default ports used by the stdlib socket-based log server examples.
DEFAULT_TCP_LOGGING_PORT    = 9020
DEFAULT_UDP_LOGGING_PORT    = 9021
DEFAULT_HTTP_LOGGING_PORT   = 9022
DEFAULT_SOAP_LOGGING_PORT   = 9023
# Standard syslog ports (RFC 5424 uses 514 for both transports).
SYSLOG_UDP_PORT             = 514
SYSLOG_TCP_PORT             = 514

_MIDNIGHT = 24 * 60 * 60  # number of seconds in a day
|
jpayne@68
|
class BaseRotatingHandler(logging.FileHandler):
    """
    Base class for file handlers that roll their log file over at some
    trigger point.

    Not meant to be instantiated directly.  Instead, use
    RotatingFileHandler or TimedRotatingFileHandler.
    """
    def __init__(self, filename, mode, encoding=None, delay=False):
        """
        Use the specified filename for streamed logging.
        """
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.mode = mode
        self.encoding = encoding
        # Optional user hooks: assign a callable to 'namer' to customise
        # rotated file names, and/or to 'rotator' to customise the
        # rotation action itself.
        self.namer = None
        self.rotator = None

    def emit(self, record):
        """
        Emit a record.

        Rolls the file over first when the subclass's shouldRollover()
        says so, then delegates the actual write to FileHandler.
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except Exception:
            self.handleError(record)

    def rotation_filename(self, default_name):
        """
        Return the filename to use for a rotated log file.

        Delegates to the 'namer' attribute when it is callable; otherwise
        *default_name* is returned unchanged.

        :param default_name: The default name for the log file.
        """
        if callable(self.namer):
            return self.namer(default_name)
        return default_name

    def rotate(self, source, dest):
        """
        Perform the rotation of *source* to *dest*.

        Delegates to the 'rotator' attribute when it is callable; otherwise
        the source is simply renamed to the destination.

        :param source: The source filename. This is normally the base
                       filename, e.g. 'test.log'
        :param dest:   The destination filename. This is normally
                       what the source is rotated to, e.g. 'test.log.1'.
        """
        if callable(self.rotator):
            self.rotator(source, dest)
        else:
            # Issue 18940: A file may not have been created if delay is True.
            if os.path.exists(source):
                os.rename(source, dest)
|
jpayne@68
|
class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.
        """
        # If rotation/rollover is wanted, it doesn't make sense to use another
        # mode. If for example 'w' were specified, then if there were multiple
        # runs of the calling application, the logs from previous runs would be
        # lost if the 'w' is respected, because the log file would be truncated
        # on each run.
        if maxBytes > 0:
            mode = 'a'
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            # Shift the existing backups up by one (.1 -> .2, ...), working
            # from the highest index down so nothing is overwritten; the
            # oldest backup falls off the end.
            for i in range(self.backupCount - 1, 0, -1):
                sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
                dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
                                                        i + 1))
                if os.path.exists(sfn):
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.rotation_filename(self.baseFilename + ".1")
            if os.path.exists(dfn):
                os.remove(dfn)
            self.rotate(self.baseFilename, dfn)
        if not self.delay:
            self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.

        :return: True if rollover should occur, False otherwise.
        """
        if self.stream is None:                 # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:                   # are we rolling over?
            msg = "%s\n" % self.format(record)
            # Seek explicitly to EOF: on Windows the position of an
            # append-mode stream is not guaranteed (non-POSIX behaviour),
            # so tell() alone could under-report the file size.
            self.stream.seek(0, 2)
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return True
        return False
|
jpayne@68
|
190
|
jpayne@68
|
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False, atTime=None):
        """
        Open *filename* for appending and configure the timed rotation
        schedule.

        :param when: unit for the rotation interval - 'S', 'M', 'H', 'D',
                     'W0'-'W6' (weekly, 0 is Monday) or 'midnight';
                     case-insensitive.
        :param interval: number of *when* units between rollovers.
        :param backupCount: number of rotated files to keep; 0 keeps all.
        :param utc: if true, compute rollover times in UTC instead of
                    local time.
        :param atTime: optional datetime.time at which midnight/weekly
                       rollover occurs (defaults to actual midnight).
        :raises ValueError: for an unrecognised or malformed *when* value.
        """
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        self.atTime = atTime
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers.  Also set the filename suffix used when
        # a rollover occurs.  Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        # extMatch recognises the date suffix of rotated files (an optional
        # trailing ".\w+" allows for an extra extension added by a rotator,
        # e.g. ".gz").  re.ASCII keeps \d/\w byte-oriented.
        self.extMatch = re.compile(self.extMatch, re.ASCII)
        self.interval = self.interval * interval # multiply by units requested
        # The following line added because the filename passed in could be a
        # path object (see Issue #27493), but self.baseFilename will be a string
        filename = self.baseFilename
        if os.path.exists(filename):
            # Base the first rollover on the existing file's mtime so an
            # application restart doesn't reset the schedule.
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.

        :param currentTime: epoch seconds to compute from.
        :return: epoch seconds of the next scheduled rollover.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is.  In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now.  So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time.  After that, the regular interval will take care of
        # the rest.  Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            currentDay = t[6]
            # r is the number of seconds left between now and the next rotation
            if self.atTime is None:
                rotate_ts = _MIDNIGHT
            else:
                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
                    self.atTime.second)

            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
                currentSecond)
            if r < 0:
                # Rotate time is before the current time (for example when
                # self.rotateAt is 13:45 and it now 14:15), rotation is
                # tomorrow.
                r += _MIDNIGHT
                currentDay = (currentDay + 1) % 7
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts.  There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = currentDay # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        # Correct for a DST transition between now and the
                        # computed rollover (wall-clock times shift by 1h).
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                addend = -3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                addend = 3600
                            newRolloverAt += addend
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        return 0

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().

        :return: list of full paths of rotated files beyond backupCount,
                 oldest first (empty if within the limit).
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        # Only files named "<base>.<date-suffix>" are rotation candidates.
        prefix = baseName + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        if len(result) < self.backupCount:
            result = []
        else:
            # Date-stamped suffixes sort lexicographically in time order,
            # so the oldest files are at the front of the sorted list.
            result.sort()
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        currentTime = int(time.time())
        dstNow = time.localtime(currentTime)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
            dstThen = timeTuple[-1]
            if dstNow != dstThen:
                # A DST transition happened during the interval: shift the
                # stamp so the rotated name reflects the interval's start.
                if dstNow:
                    addend = 3600
                else:
                    addend = -3600
                timeTuple = time.localtime(t + addend)
        dfn = self.rotation_filename(self.baseFilename + "." +
                                     time.strftime(self.suffix, timeTuple))
        if os.path.exists(dfn):
            os.remove(dfn)
        self.rotate(self.baseFilename, dfn)
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    addend = -3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    addend = 3600
                newRolloverAt += addend
        self.rolloverAt = newRolloverAt
|
jpayne@68
|
413
|
jpayne@68
|
class WatchedFileHandler(logging.FileHandler):
    """
    A handler for logging to a file, which watches the file
    to see if it has changed while in use. This can happen because of
    usage of programs such as newsyslog and logrotate which perform
    log file rotation. This handler, intended for use under Unix,
    watches the file to see if it has changed since the last emit.
    (A file has changed if its device or inode have changed.)
    If it has changed, the old file stream is closed, and the file
    opened to get a new stream.

    This handler is not appropriate for use under Windows, because
    under Windows open files cannot be moved or renamed - logging
    opens the files with exclusive locks - and so there is no need
    for such a handler. Furthermore, ST_INO is not supported under
    Windows; stat always returns zero for this value.

    This handler is based on a suggestion and patch by Chad J.
    Schroeder.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=False):
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        # Sentinel values: no stream has been stat'ed yet.
        self.dev, self.ino = -1, -1
        self._statstream()

    def _statstream(self):
        # Record the (device, inode) identity of the currently open stream,
        # so later emits can detect an external rename/rotation.
        if self.stream:
            sres = os.fstat(self.stream.fileno())
            self.dev = sres[ST_DEV]
            self.ino = sres[ST_INO]

    def reopenIfNeeded(self):
        """
        Reopen log file if needed.

        Checks if the underlying file has changed, and if it
        has, close the old stream and reopen the file to get the
        current stream.
        """
        # Reduce the chance of race conditions by stat'ing by path only
        # once and then fstat'ing our new fd if we opened a new log stream.
        # See issue #14632: Thanks to John Mulligan for the problem report
        # and patch.
        try:
            # stat the file by path, checking for existence
            sres = os.stat(self.baseFilename)
        except FileNotFoundError:
            sres = None
        # The file has "changed" when it is gone or its identity differs
        # from the stream we hold.
        changed = (not sres or sres[ST_DEV] != self.dev
                   or sres[ST_INO] != self.ino)
        if changed and self.stream is not None:
            # we have an open file handle, clean it up
            self.stream.flush()
            self.stream.close()
            self.stream = None  # See Issue #21742: _open () might fail.
            # open a new file handle and get new stat info from that fd
            self.stream = self._open()
            self._statstream()

    def emit(self, record):
        """
        Emit a record.

        If underlying file has changed, reopen the file before emitting the
        record to it.
        """
        self.reopenIfNeeded()
        logging.FileHandler.emit(self, record)
|
jpayne@68
|
481
|
jpayne@68
|
482
|
jpayne@68
|
483 class SocketHandler(logging.Handler):
|
jpayne@68
|
484 """
|
jpayne@68
|
485 A handler class which writes logging records, in pickle format, to
|
jpayne@68
|
486 a streaming socket. The socket is kept open across logging calls.
|
jpayne@68
|
487 If the peer resets it, an attempt is made to reconnect on the next call.
|
jpayne@68
|
488 The pickle which is sent is that of the LogRecord's attribute dictionary
|
jpayne@68
|
489 (__dict__), so that the receiver does not need to have the logging module
|
jpayne@68
|
490 installed in order to process the logging event.
|
jpayne@68
|
491
|
jpayne@68
|
492 To unpickle the record at the receiving end into a LogRecord, use the
|
jpayne@68
|
493 makeLogRecord function.
|
jpayne@68
|
494 """
|
jpayne@68
|
495
|
jpayne@68
|
496 def __init__(self, host, port):
|
jpayne@68
|
497 """
|
jpayne@68
|
498 Initializes the handler with a specific host address and port.
|
jpayne@68
|
499
|
jpayne@68
|
500 When the attribute *closeOnError* is set to True - if a socket error
|
jpayne@68
|
501 occurs, the socket is silently closed and then reopened on the next
|
jpayne@68
|
502 logging call.
|
jpayne@68
|
503 """
|
jpayne@68
|
504 logging.Handler.__init__(self)
|
jpayne@68
|
505 self.host = host
|
jpayne@68
|
506 self.port = port
|
jpayne@68
|
507 if port is None:
|
jpayne@68
|
508 self.address = host
|
jpayne@68
|
509 else:
|
jpayne@68
|
510 self.address = (host, port)
|
jpayne@68
|
511 self.sock = None
|
jpayne@68
|
512 self.closeOnError = False
|
jpayne@68
|
513 self.retryTime = None
|
jpayne@68
|
514 #
|
jpayne@68
|
515 # Exponential backoff parameters.
|
jpayne@68
|
516 #
|
jpayne@68
|
517 self.retryStart = 1.0
|
jpayne@68
|
518 self.retryMax = 30.0
|
jpayne@68
|
519 self.retryFactor = 2.0
|
jpayne@68
|
520
|
jpayne@68
|
521 def makeSocket(self, timeout=1):
|
jpayne@68
|
522 """
|
jpayne@68
|
523 A factory method which allows subclasses to define the precise
|
jpayne@68
|
524 type of socket they want.
|
jpayne@68
|
525 """
|
jpayne@68
|
526 if self.port is not None:
|
jpayne@68
|
527 result = socket.create_connection(self.address, timeout=timeout)
|
jpayne@68
|
528 else:
|
jpayne@68
|
529 result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
|
jpayne@68
|
530 result.settimeout(timeout)
|
jpayne@68
|
531 try:
|
jpayne@68
|
532 result.connect(self.address)
|
jpayne@68
|
533 except OSError:
|
jpayne@68
|
534 result.close() # Issue 19182
|
jpayne@68
|
535 raise
|
jpayne@68
|
536 return result
|
jpayne@68
|
537
|
jpayne@68
|
538 def createSocket(self):
|
jpayne@68
|
539 """
|
jpayne@68
|
540 Try to create a socket, using an exponential backoff with
|
jpayne@68
|
541 a max retry time. Thanks to Robert Olson for the original patch
|
jpayne@68
|
542 (SF #815911) which has been slightly refactored.
|
jpayne@68
|
543 """
|
jpayne@68
|
544 now = time.time()
|
jpayne@68
|
545 # Either retryTime is None, in which case this
|
jpayne@68
|
546 # is the first time back after a disconnect, or
|
jpayne@68
|
547 # we've waited long enough.
|
jpayne@68
|
548 if self.retryTime is None:
|
jpayne@68
|
549 attempt = True
|
jpayne@68
|
550 else:
|
jpayne@68
|
551 attempt = (now >= self.retryTime)
|
jpayne@68
|
552 if attempt:
|
jpayne@68
|
553 try:
|
jpayne@68
|
554 self.sock = self.makeSocket()
|
jpayne@68
|
555 self.retryTime = None # next time, no delay before trying
|
jpayne@68
|
556 except OSError:
|
jpayne@68
|
557 #Creation failed, so set the retry time and return.
|
jpayne@68
|
558 if self.retryTime is None:
|
jpayne@68
|
559 self.retryPeriod = self.retryStart
|
jpayne@68
|
560 else:
|
jpayne@68
|
561 self.retryPeriod = self.retryPeriod * self.retryFactor
|
jpayne@68
|
562 if self.retryPeriod > self.retryMax:
|
jpayne@68
|
563 self.retryPeriod = self.retryMax
|
jpayne@68
|
564 self.retryTime = now + self.retryPeriod
|
jpayne@68
|
565
|
jpayne@68
|
566 def send(self, s):
|
jpayne@68
|
567 """
|
jpayne@68
|
568 Send a pickled string to the socket.
|
jpayne@68
|
569
|
jpayne@68
|
570 This function allows for partial sends which can happen when the
|
jpayne@68
|
571 network is busy.
|
jpayne@68
|
572 """
|
jpayne@68
|
573 if self.sock is None:
|
jpayne@68
|
574 self.createSocket()
|
jpayne@68
|
575 #self.sock can be None either because we haven't reached the retry
|
jpayne@68
|
576 #time yet, or because we have reached the retry time and retried,
|
jpayne@68
|
577 #but are still unable to connect.
|
jpayne@68
|
578 if self.sock:
|
jpayne@68
|
579 try:
|
jpayne@68
|
580 self.sock.sendall(s)
|
jpayne@68
|
581 except OSError: #pragma: no cover
|
jpayne@68
|
582 self.sock.close()
|
jpayne@68
|
583 self.sock = None # so we can call createSocket next time
|
jpayne@68
|
584
|
jpayne@68
|
585 def makePickle(self, record):
|
jpayne@68
|
586 """
|
jpayne@68
|
587 Pickles the record in binary format with a length prefix, and
|
jpayne@68
|
588 returns it ready for transmission across the socket.
|
jpayne@68
|
589 """
|
jpayne@68
|
590 ei = record.exc_info
|
jpayne@68
|
591 if ei:
|
jpayne@68
|
592 # just to get traceback text into record.exc_text ...
|
jpayne@68
|
593 dummy = self.format(record)
|
jpayne@68
|
594 # See issue #14436: If msg or args are objects, they may not be
|
jpayne@68
|
595 # available on the receiving end. So we convert the msg % args
|
jpayne@68
|
596 # to a string, save it as msg and zap the args.
|
jpayne@68
|
597 d = dict(record.__dict__)
|
jpayne@68
|
598 d['msg'] = record.getMessage()
|
jpayne@68
|
599 d['args'] = None
|
jpayne@68
|
600 d['exc_info'] = None
|
jpayne@68
|
601 # Issue #25685: delete 'message' if present: redundant with 'msg'
|
jpayne@68
|
602 d.pop('message', None)
|
jpayne@68
|
603 s = pickle.dumps(d, 1)
|
jpayne@68
|
604 slen = struct.pack(">L", len(s))
|
jpayne@68
|
605 return slen + s
|
jpayne@68
|
606
|
jpayne@68
|
607 def handleError(self, record):
|
jpayne@68
|
608 """
|
jpayne@68
|
609 Handle an error during logging.
|
jpayne@68
|
610
|
jpayne@68
|
611 An error has occurred during logging. Most likely cause -
|
jpayne@68
|
612 connection lost. Close the socket so that we can retry on the
|
jpayne@68
|
613 next event.
|
jpayne@68
|
614 """
|
jpayne@68
|
615 if self.closeOnError and self.sock:
|
jpayne@68
|
616 self.sock.close()
|
jpayne@68
|
617 self.sock = None #try to reconnect next time
|
jpayne@68
|
618 else:
|
jpayne@68
|
619 logging.Handler.handleError(self, record)
|
jpayne@68
|
620
|
jpayne@68
|
def emit(self, record):
    """
    Emit a record.

    Pickles the record and writes it to the socket in binary format.
    If there is an error with the socket, silently drop the packet.
    If there was a problem with the socket, re-establishes the
    socket.
    """
    try:
        payload = self.makePickle(record)
        self.send(payload)
    except Exception:
        # Any failure (pickling or transmission) is routed through the
        # handler's error hook rather than propagating to the caller.
        self.handleError(record)
|
jpayne@68
|
635
|
jpayne@68
|
def close(self):
    """
    Closes the socket.
    """
    self.acquire()
    try:
        existing = self.sock
        if existing:
            # Forget the socket before closing it so a concurrent emit
            # cannot pick up a half-closed object.
            self.sock = None
            existing.close()
        logging.Handler.close(self)
    finally:
        self.release()
|
jpayne@68
|
649
|
jpayne@68
|
class DatagramHandler(SocketHandler):
    """
    A handler class which writes logging records, in pickle format, to
    a datagram socket.  The pickle which is sent is that of the LogRecord's
    attribute dictionary (__dict__), so that the receiver does not need to
    have the logging module installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        # Unlike TCP, a UDP "connection" cannot be lost, so there is
        # never a reason to close the socket on error.
        self.closeOnError = False

    def makeSocket(self):
        """
        The factory method of SocketHandler is here overridden to create
        a UDP socket (SOCK_DGRAM).
        """
        # A port of None means the address is a Unix domain socket path.
        family = socket.AF_UNIX if self.port is None else socket.AF_INET
        return socket.socket(family, socket.SOCK_DGRAM)

    def send(self, s):
        """
        Send a pickled string to a socket.

        This function no longer allows for partial sends which can happen
        when the network is busy - UDP does not guarantee delivery and
        can deliver packets out of sequence.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, self.address)
|
jpayne@68
|
691
|
jpayne@68
|
class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """

    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code. This
    # mapping is included in this file.
    #
    # priorities (these are ordered)

    LOG_EMERG = 0       # system is unusable
    LOG_ALERT = 1       # action must be taken immediately
    LOG_CRIT = 2        # critical conditions
    LOG_ERR = 3         # error conditions
    LOG_WARNING = 4     # warning conditions
    LOG_NOTICE = 5      # normal but significant condition
    LOG_INFO = 6        # informational
    LOG_DEBUG = 7       # debug-level messages

    # facility codes
    LOG_KERN = 0        # kernel messages
    LOG_USER = 1        # random user-level messages
    LOG_MAIL = 2        # mail system
    LOG_DAEMON = 3      # system daemons
    LOG_AUTH = 4        # security/authorization messages
    LOG_SYSLOG = 5      # messages generated internally by syslogd
    LOG_LPR = 6         # line printer subsystem
    LOG_NEWS = 7        # network news subsystem
    LOG_UUCP = 8        # UUCP subsystem
    LOG_CRON = 9        # clock daemon
    LOG_AUTHPRIV = 10   # security/authorization messages (private)
    LOG_FTP = 11        # FTP daemon

    # other codes through 15 reserved for system use
    LOG_LOCAL0 = 16     # reserved for local use
    LOG_LOCAL1 = 17     # reserved for local use
    LOG_LOCAL2 = 18     # reserved for local use
    LOG_LOCAL3 = 19     # reserved for local use
    LOG_LOCAL4 = 20     # reserved for local use
    LOG_LOCAL5 = 21     # reserved for local use
    LOG_LOCAL6 = 22     # reserved for local use
    LOG_LOCAL7 = 23     # reserved for local use

    # Maps symbolic priority names (including some deprecated aliases)
    # to the numeric codes above; used by encodePriority().
    priority_names = {
        "alert": LOG_ALERT,
        "crit": LOG_CRIT,
        "critical": LOG_CRIT,
        "debug": LOG_DEBUG,
        "emerg": LOG_EMERG,
        "err": LOG_ERR,
        "error": LOG_ERR,        # DEPRECATED
        "info": LOG_INFO,
        "notice": LOG_NOTICE,
        "panic": LOG_EMERG,      # DEPRECATED
        "warn": LOG_WARNING,     # DEPRECATED
        "warning": LOG_WARNING,
        }

    # Maps symbolic facility names to the numeric codes above; used by
    # encodePriority().
    facility_names = {
        "auth": LOG_AUTH,
        "authpriv": LOG_AUTHPRIV,
        "cron": LOG_CRON,
        "daemon": LOG_DAEMON,
        "ftp": LOG_FTP,
        "kern": LOG_KERN,
        "lpr": LOG_LPR,
        "mail": LOG_MAIL,
        "news": LOG_NEWS,
        "security": LOG_AUTH,    # DEPRECATED
        "syslog": LOG_SYSLOG,
        "user": LOG_USER,
        "uucp": LOG_UUCP,
        "local0": LOG_LOCAL0,
        "local1": LOG_LOCAL1,
        "local2": LOG_LOCAL2,
        "local3": LOG_LOCAL3,
        "local4": LOG_LOCAL4,
        "local5": LOG_LOCAL5,
        "local6": LOG_LOCAL6,
        "local7": LOG_LOCAL7,
        }

    #The map below appears to be trivially lowercasing the key. However,
    #there's more to it than meets the eye - in some locales, lowercasing
    #gives unexpected results. See SF #1524081: in the Turkish locale,
    #"INFO".lower() != "info"
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }

    def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
                 facility=LOG_USER, socktype=None):
        """
        Initialize a handler.

        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used. If socktype is
        specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
        socket type will be used. For Unix sockets, you can also specify a
        socktype of None, in which case socket.SOCK_DGRAM will be used, falling
        back to socket.SOCK_STREAM.
        """
        logging.Handler.__init__(self)

        self.address = address
        self.facility = facility
        self.socktype = socktype

        if isinstance(address, str):
            # String address => Unix domain socket path.
            self.unixsocket = True
            # Syslog server may be unavailable during handler initialisation.
            # C's openlog() function also ignores connection errors.
            # Moreover, we ignore these errors while logging, so it not worse
            # to ignore it also here.
            try:
                self._connect_unixsocket(address)
            except OSError:
                pass
        else:
            # Tuple address => (host, port) over the network.
            self.unixsocket = False
            if socktype is None:
                socktype = socket.SOCK_DGRAM
            host, port = address
            ress = socket.getaddrinfo(host, port, 0, socktype)
            if not ress:
                raise OSError("getaddrinfo returns an empty list")
            # Try each resolved address in turn and keep the first socket
            # that can be created (and, for TCP, connected); re-raise the
            # last error if all candidates fail.
            for res in ress:
                # Note: the loop deliberately rebinds 'socktype' to the
                # concrete type reported by getaddrinfo for this candidate.
                af, socktype, proto, _, sa = res
                err = sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if socktype == socket.SOCK_STREAM:
                        sock.connect(sa)
                    break
                except OSError as exc:
                    err = exc
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            self.socket = sock
            self.socktype = socktype

    def _connect_unixsocket(self, address):
        # Connect to a Unix domain syslog socket, trying the configured
        # socket type first (defaulting to SOCK_DGRAM) and falling back to
        # SOCK_STREAM only when the user did not pin an explicit type.
        use_socktype = self.socktype
        if use_socktype is None:
            use_socktype = socket.SOCK_DGRAM
        self.socket = socket.socket(socket.AF_UNIX, use_socktype)
        try:
            self.socket.connect(address)
            # it worked, so set self.socktype to the used type
            self.socktype = use_socktype
        except OSError:
            self.socket.close()
            if self.socktype is not None:
                # user didn't specify falling back, so fail
                raise
            use_socktype = socket.SOCK_STREAM
            self.socket = socket.socket(socket.AF_UNIX, use_socktype)
            try:
                self.socket.connect(address)
                # it worked, so set self.socktype to the used type
                self.socktype = use_socktype
            except OSError:
                self.socket.close()
                raise

    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.
        """
        if isinstance(facility, str):
            facility = self.facility_names[facility]
        if isinstance(priority, str):
            priority = self.priority_names[priority]
        # Per syslog convention: facility in the high bits, priority in
        # the low 3 bits.
        return (facility << 3) | priority

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            self.socket.close()
            logging.Handler.close(self)
        finally:
            self.release()

    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).
        """
        # Unknown (e.g. custom) level names fall back to "warning".
        return self.priority_map.get(levelName, "warning")

    ident = ''          # prepended to all messages
    append_nul = True   # some old syslog daemons expect a NUL terminator

    def emit(self, record):
        """
        Emit a record.

        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        try:
            msg = self.format(record)
            if self.ident:
                msg = self.ident + msg
            if self.append_nul:
                msg += '\000'

            # We need to convert record level to lowercase, maybe this will
            # change in the future.
            prio = '<%d>' % self.encodePriority(self.facility,
                                                self.mapPriority(record.levelname))
            prio = prio.encode('utf-8')
            # Message is a string. Convert to bytes as required by RFC 5424
            msg = msg.encode('utf-8')
            msg = prio + msg
            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except OSError:
                    # The connection may have been dropped (e.g. syslogd
                    # restarted); reconnect once and retry the send.
                    self.socket.close()
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            elif self.socktype == socket.SOCK_DGRAM:
                self.socket.sendto(msg, self.address)
            else:
                self.socket.sendall(msg)
        except Exception:
            self.handleError(record)
|
jpayne@68
|
945
|
jpayne@68
|
class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None, timeout=5.0):
        """
        Initialize the handler.

        Initialize the instance with the from and to addresses and subject
        line of the email. To specify a non-standard SMTP port, use the
        (host, port) tuple format for the mailhost argument. To specify
        authentication credentials, supply a (username, password) tuple
        for the credentials argument. To specify the use of a secure
        protocol (TLS), pass in a tuple for the secure argument. This will
        only be used when authentication credentials are supplied. The tuple
        will be either an empty tuple, or a single-value tuple with the name
        of a keyfile, or a 2-value tuple with the names of the keyfile and
        certificate file. (This tuple is passed to the `starttls` method).
        A timeout in seconds can be specified for the SMTP connection (the
        default is 5.0 seconds).
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, (list, tuple)):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if isinstance(credentials, (list, tuple)):
            self.username, self.password = credentials
        else:
            # Fix: also initialize password so both credential attributes
            # always exist, regardless of whether credentials were given.
            self.username = None
            self.password = None
        self.fromaddr = fromaddr
        if isinstance(toaddrs, str):
            # Allow a single address to be passed as a bare string.
            toaddrs = [toaddrs]
        self.toaddrs = toaddrs
        self.subject = subject
        self.secure = secure
        self.timeout = timeout

    def getSubject(self, record):
        """
        Determine the subject for the email.

        If you want to specify a subject line which is record-dependent,
        override this method.
        """
        return self.subject

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        """
        try:
            import smtplib
            from email.message import EmailMessage
            import email.utils

            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
            msg = EmailMessage()
            msg['From'] = self.fromaddr
            msg['To'] = ','.join(self.toaddrs)
            msg['Subject'] = self.getSubject(record)
            msg['Date'] = email.utils.localtime()
            msg.set_content(self.format(record))
            if self.username:
                # TLS is only negotiated when credentials are supplied.
                if self.secure is not None:
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.send_message(msg)
            smtp.quit()
        except Exception:
            self.handleError(record)
|
jpayne@68
|
1025
|
jpayne@68
|
class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log. Adds a
    registry entry for the specified application name. If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used. Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                # Derive the default DLL path: win32service.pyd lives in
                # the parent directory of win32evtlogutil's package dir.
                dllname = os.path.split(self._welu.__file__)
                dllname = os.path.split(dllname[0])
                dllname = os.path.join(dllname[0], r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            # Maps logging levels to NT event types; used by getEventType().
            self.typemap = {
                logging.DEBUG   : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO    : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR   : win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
         }
        except ImportError:
            # pywin32 is not installed; the handler degrades to a no-op
            # (emit() checks self._welu before doing anything).
            print("The Python Win32 extensions for NT (service, event "\
                "logging) appear not to be available.")
            self._welu = None

    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string. Then, in here,
        you could use a dictionary lookup to get the message ID. This
        version returns 1, which is the base message ID in win32service.pyd.
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record.

        Override this if you want to specify your own categories. This version
        returns 0.
        """
        return 0

    def getEventType(self, record):
        """
        Return the event type for the record.

        Override this if you want to specify your own types. This version does
        a mapping using the handler's typemap attribute, which is set up in
        __init__() to a dictionary which contains mappings for DEBUG, INFO,
        WARNING, ERROR and CRITICAL. If you are using your own levels you will
        either need to override this method or place a suitable dictionary in
        the handler's typemap attribute.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Emit a record.

        Determine the message ID, event category and event type. Then
        log the message in the NT event log.
        """
        if self._welu:
            try:
                # Locals renamed from 'id'/'type' so the builtins are not
                # shadowed.
                msg_id = self.getMessageID(record)
                cat = self.getEventCategory(record)
                event_type = self.getEventType(record)
                msg = self.format(record)
                self._welu.ReportEvent(self.appname, msg_id, cat, event_type,
                                       [msg])
            except Exception:
                self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)
|
jpayne@68
|
1123
|
jpayne@68
|
class HTTPHandler(logging.Handler):
    """
    A class which sends records to a Web server, using either GET or
    POST semantics.
    """
    def __init__(self, host, url, method="GET", secure=False, credentials=None,
                 context=None):
        """
        Initialize the instance with the host, the request URL, and the method
        ("GET" or "POST")
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ["GET", "POST"]:
            raise ValueError("method must be GET or POST")
        if not secure and context is not None:
            raise ValueError("context parameter only makes sense "
                             "with secure=True")
        self.host = host
        self.url = url
        self.method = method
        self.secure = secure
        self.credentials = credentials
        self.context = context

    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.
        """
        return record.__dict__

    def emit(self, record):
        """
        Emit a record.

        Send the record to the Web server as a percent-encoded dictionary
        """
        try:
            import http.client, urllib.parse
            host = self.host
            if self.secure:
                conn = http.client.HTTPSConnection(host, context=self.context)
            else:
                conn = http.client.HTTPConnection(host)
            data = urllib.parse.urlencode(self.mapLogRecord(record))
            url = self.url
            if self.method == "GET":
                # Append the data to the query string, respecting any
                # query already present in the URL.
                sep = '&' if url.find('?') >= 0 else '?'
                url = url + "%c%s" % (sep, data)
            conn.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            colon = host.find(":")
            if colon >= 0:
                host = host[:colon]
            # See issue #30904: the putrequest call above already adds the
            # Host header on Python 3.x, so it is not added again here.
            if self.method == "POST":
                conn.putheader("Content-type",
                               "application/x-www-form-urlencoded")
                conn.putheader("Content-length", str(len(data)))
            if self.credentials:
                import base64
                token = ('%s:%s' % self.credentials).encode('utf-8')
                auth = 'Basic ' + base64.b64encode(token).strip().decode('ascii')
                conn.putheader('Authorization', auth)
            conn.endheaders()
            if self.method == "POST":
                conn.send(data.encode('utf-8'))
            conn.getresponse()    # can't do anything with the result
        except Exception:
            self.handleError(record)
|
jpayne@68
|
1202
|
jpayne@68
|
class BufferingHandler(logging.Handler):
    """
    A handler class which buffers logging records in memory. Whenever each
    record is added to the buffer, a check is made to see if the buffer should
    be flushed. If it should, then flush() is expected to do what's needed.
    """
    def __init__(self, capacity):
        """
        Initialize the handler with the buffer size.
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Should the handler flush its buffer?

        Returns true if the buffer is up to capacity. This method can be
        overridden to implement custom flushing strategies.
        """
        return len(self.buffer) >= self.capacity

    def emit(self, record):
        """
        Emit a record.

        Append the record; if shouldFlush() says the buffer is due,
        call flush() to process it.
        """
        self.buffer.append(record)
        if self.shouldFlush(record):
            self.flush()

    def flush(self):
        """
        Override to implement custom flushing behaviour.

        This version just zaps the buffer to empty.
        """
        self.acquire()
        try:
            # Rebind rather than clear() so subclass flushes that iterate a
            # snapshot of the old list remain unaffected.
            self.buffer = []
        finally:
            self.release()

    def close(self):
        """
        Close the handler.

        This version just flushes and chains to the parent class' close().
        """
        try:
            self.flush()
        finally:
            logging.Handler.close(self)
|
jpayne@68
|
1259
|
jpayne@68
|
class MemoryHandler(BufferingHandler):
    """
    A handler class which buffers logging records in memory, periodically
    flushing them to a target handler. Flushing occurs whenever the buffer
    is full, or when an event of a certain severity or greater is seen.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
                 flushOnClose=True):
        """
        Initialize the handler with the buffer size, the level at which
        flushing should occur and an optional target.

        Note that without a target being set either here or via setTarget(),
        a MemoryHandler is no use to anyone!

        The ``flushOnClose`` argument is ``True`` for backward compatibility
        reasons - the old behaviour is that when the handler is closed, the
        buffer is flushed, even if the flush level hasn't been exceeded nor the
        capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``.
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target
        # See Issue #26559 for why this has been added
        self.flushOnClose = flushOnClose

    def shouldFlush(self, record):
        """
        Check for buffer full or a record at the flushLevel or higher.
        """
        if len(self.buffer) >= self.capacity:
            return True
        return record.levelno >= self.flushLevel

    def setTarget(self, target):
        """
        Set the target handler for this handler.
        """
        self.target = target

    def flush(self):
        """
        For a MemoryHandler, flushing means just sending the buffered
        records to the target, if there is one. Override if you want
        different behaviour.

        The record buffer is also cleared by this operation.
        """
        self.acquire()
        try:
            target = self.target
            if target:
                for buffered in self.buffer:
                    target.handle(buffered)
                self.buffer = []
        finally:
            self.release()

    def close(self):
        """
        Flush, if appropriately configured, set the target to None and lose the
        buffer.
        """
        try:
            if self.flushOnClose:
                self.flush()
        finally:
            self.acquire()
            try:
                self.target = None
                BufferingHandler.close(self)
            finally:
                self.release()
|
jpayne@68
|
1331
|
jpayne@68
|
1332
|
jpayne@68
|
class QueueHandler(logging.Handler):
    """
    A handler that forwards each logging event to a queue.

    The usual deployment pairs this with a multiprocessing Queue so that
    a single process does all the file writing for a multi-process
    application, avoiding write contention between processes.

    New in Python 3.2, but self-contained enough to be copied into user
    code that must run on earlier versions.
    """

    def __init__(self, queue):
        """
        Initialise an instance, using the passed queue.
        """
        logging.Handler.__init__(self)
        self.queue = queue

    def enqueue(self, record):
        """
        Place a record on the queue.

        Uses put_nowait; override to block, use timeouts, or talk to a
        custom queue implementation.
        """
        self.queue.put_nowait(record)

    def prepare(self, record):
        """
        Prepare a record for queuing; whatever this returns is what gets
        enqueued.

        The default behaviour renders the message (merging msg and args)
        and strips the attributes that commonly fail to pickle.  Override
        if you would rather ship a dict, a JSON string, or a differently
        modified copy while leaving the original intact.
        """
        # format() folds any traceback into exc_text and returns the
        # fully merged message, so msg/args/exc_info/exc_text - the
        # parts that are typically unpickleable - can all be dropped.
        rendered = self.format(record)
        # bpo-35726: copy first, so other handlers in the chain still
        # see the original record unchanged.
        clone = copy.copy(record)
        clone.message = rendered
        clone.msg = rendered
        clone.args = None
        clone.exc_info = None
        clone.exc_text = None
        return clone

    def emit(self, record):
        """
        Emit a record.

        Prepares the LogRecord for pickling, then writes it to the queue.
        """
        try:
            prepared = self.prepare(record)
            self.enqueue(prepared)
        except Exception:
            self.handleError(record)
|
jpayne@68
|
1400
|
jpayne@68
|
1401
|
jpayne@68
|
class QueueListener(object):
    """
    Runs an internal thread which watches a queue for LogRecords,
    removes them, and dispatches each one to a list of handlers.
    """
    # Marker object that tells the monitor thread to shut down.
    _sentinel = None

    def __init__(self, queue, *handlers, respect_handler_level=False):
        """
        Initialise an instance with the specified queue and handlers.
        """
        self.queue = queue
        self.handlers = handlers
        self._thread = None
        self.respect_handler_level = respect_handler_level

    def dequeue(self, block):
        """
        Remove and return the next record, optionally blocking.

        Uses get; override to apply timeouts or to work with custom
        queue implementations.
        """
        return self.queue.get(block)

    def start(self):
        """
        Start the listener.

        Spawns a daemon thread that monitors the queue for LogRecords
        to process.
        """
        worker = threading.Thread(target=self._monitor)
        worker.daemon = True
        self._thread = worker
        worker.start()

    def prepare(self, record):
        """
        Hook for marshalling or otherwise altering a record before it is
        handed to the handlers.  The default is the identity.
        """
        return record

    def handle(self, record):
        """
        Offer a record to every handler, honouring each handler's own
        level when respect_handler_level is set.
        """
        record = self.prepare(record)
        for handler in self.handlers:
            wanted = (not self.respect_handler_level
                      or record.levelno >= handler.level)
            if wanted:
                handler.handle(record)

    def _monitor(self):
        """
        Thread body: pull records off the queue and pass them to
        handle() until the sentinel is seen.

        Runs on the separate, internal thread started by start().
        """
        src = self.queue
        can_mark_done = hasattr(src, 'task_done')
        while True:
            try:
                record = self.dequeue(True)
                if record is self._sentinel:
                    if can_mark_done:
                        src.task_done()
                    break
                self.handle(record)
                if can_mark_done:
                    src.task_done()
            except queue.Empty:
                break

    def enqueue_sentinel(self):
        """
        Put the sentinel on the queue to tell the monitor to exit.

        Uses put_nowait; override to apply timeouts or to work with
        custom queue implementations.
        """
        self.queue.put_nowait(self._sentinel)

    def stop(self):
        """
        Stop the listener.

        Asks the monitor thread to terminate and waits for it.  Records
        still on the queue when the application exits without calling
        this will not be processed.
        """
        self.enqueue_sentinel()
        self._thread.join()
        self._thread = None
|