|
1 # Copyright 2001-2007 by Vinay Sajip. All Rights Reserved. |
|
2 # |
|
3 # Permission to use, copy, modify, and distribute this software and its |
|
4 # documentation for any purpose and without fee is hereby granted, |
|
5 # provided that the above copyright notice appear in all copies and that |
|
6 # both that copyright notice and this permission notice appear in |
|
7 # supporting documentation, and that the name of Vinay Sajip |
|
8 # not be used in advertising or publicity pertaining to distribution |
|
9 # of the software without specific, written prior permission. |
|
10 # VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING |
|
11 # ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL |
|
12 # VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR |
|
13 # ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER |
|
14 # IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT |
|
15 # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
|
16 |
|
17 """ |
|
18 Additional handlers for the logging package for Python. The core package is |
|
19 based on PEP 282 and comments thereto in comp.lang.python, and influenced by |
|
20 Apache's log4j system. |
|
21 |
|
22 Should work under Python versions >= 1.5.2, except that source line |
|
23 information is not available unless 'sys._getframe()' is. |
|
24 |
|
25 Copyright (C) 2001-2007 Vinay Sajip. All Rights Reserved. |
|
26 |
|
27 To use, simply 'import logging' and log away! |
|
28 """ |
|
29 |
|
30 import sys, logging, socket, types, os, string, cPickle, struct, time, glob |
|
31 |
|
32 try: |
|
33 import codecs |
|
34 except ImportError: |
|
35 codecs = None |
|
36 |
|
37 # |
|
38 # Some constants... |
|
39 # |
|
40 |
|
# Default port numbers for the socket-based handlers below.  The TCP and
# UDP ports are used as documentation/defaults by SocketHandler and
# DatagramHandler deployments; the HTTP and SOAP ports are presumably
# reserved for the corresponding handlers (HTTPHandler does not read
# DEFAULT_HTTP_LOGGING_PORT in this module -- TODO confirm intended use).
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
# Standard syslog UDP port; used as the default address port in
# SysLogHandler.__init__.
SYSLOG_UDP_PORT = 514

_MIDNIGHT = 24 * 60 * 60  # number of seconds in a day
|
48 |
|
class BaseRotatingHandler(logging.FileHandler):
    """
    Abstract base for file handlers that roll their log file over when
    some condition is met.

    Subclasses must supply shouldRollover() and doRollover(); callers
    should use RotatingFileHandler or TimedRotatingFileHandler rather
    than instantiating this class directly.
    """
    def __init__(self, filename, mode, encoding=None):
        """
        Use the specified filename for streamed logging.

        The requested encoding is honoured only when the codecs module
        could be imported; otherwise it is silently dropped.
        """
        if codecs is None:
            encoding = None
        logging.FileHandler.__init__(self, filename, mode, encoding)
        self.mode = mode
        self.encoding = encoding

    def emit(self, record):
        """
        Emit a record.

        Ask the subclass whether a rollover is due, perform it if so,
        then delegate the actual write to FileHandler.emit().
        """
        try:
            rollover_due = self.shouldRollover(record)
            if rollover_due:
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
|
80 |
|
class RotatingFileHandler(BaseRotatingHandler):
    """
    Log to a file, switching to a fresh file whenever the current one
    would grow past a given size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None):
        """
        Open the specified file and use it as the stream for logging.

        By default the file grows indefinitely.  With maxBytes > 0 the
        file rolls over when it is nearly maxBytes long: the base file is
        closed and renamed to "<name>.1", with existing numbered backups
        shifted up ("<name>.1" -> "<name>.2", ...) up to backupCount.
        The file written to is always the base filename.

        With maxBytes == 0, rollover never occurs.
        """
        if maxBytes > 0:
            mode = 'a'  # rollover only makes sense when appending
        BaseRotatingHandler.__init__(self, filename, mode, encoding)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        self.stream.close()
        if self.backupCount > 0:
            # Shift existing backups up by one, highest index first.
            index = self.backupCount - 1
            while index > 0:
                src = "%s.%d" % (self.baseFilename, index)
                dst = "%s.%d" % (self.baseFilename, index + 1)
                if os.path.exists(src):
                    if os.path.exists(dst):
                        os.remove(dst)
                    os.rename(src, dst)
                index = index - 1
            dst = self.baseFilename + ".1"
            if os.path.exists(dst):
                os.remove(dst)
            os.rename(self.baseFilename, dst)
        # Reopen the base file for writing, honouring the encoding.
        if self.encoding:
            self.stream = codecs.open(self.baseFilename, 'w', self.encoding)
        else:
            self.stream = open(self.baseFilename, 'w')

    def shouldRollover(self, record):
        """
        Return 1 if writing 'record' would push the file past maxBytes,
        0 otherwise (always 0 when rollover is disabled).
        """
        if self.maxBytes <= 0:
            return 0
        msg = "%s\n" % self.format(record)
        self.stream.seek(0, 2)  # due to non-posix-compliant Windows feature
        if self.stream.tell() + len(msg) >= self.maxBytes:
            return 1
        return 0
|
151 |
|
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None):
        """
        Open 'filename' for appending and roll it over every 'interval'
        units of 'when'.

        'when' selects the unit of the rollover interval; case is not
        significant (it is upper-cased on entry):
            S - Seconds
            M - Minutes
            H - Hours
            D - Days
            MIDNIGHT - roll over at midnight
            W{0-6} - roll over on a certain day; 0 - Monday

        Raises ValueError if 'when' is unrecognized, or if a weekly
        specifier lacks a single day digit in the range 0-6.
        """
        BaseRotatingHandler.__init__(self, filename, 'a', encoding)
        self.when = string.upper(when)
        self.backupCount = backupCount
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers.  Also set the filename suffix used when
        # a rollover occurs.
        currentTime = int(time.time())
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        self.interval = self.interval * interval # multiply by units requested
        self.rolloverAt = currentTime + self.interval

        # If we are rolling over at midnight or weekly, then the interval is
        # already known.  What we need to figure out is WHEN the next interval
        # is: the one-day (or one-week) clock should start at midnight, not
        # now.  So fudge rolloverAt to trigger the first rollover at the right
        # time; after that the regular interval takes care of the rest.  Note
        # that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            # r is the number of seconds left between now and midnight
            r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
                    currentSecond)
            self.rolloverAt = currentTime + r
            # For weekly rollover, add the number of whole days until the
            # requested weekday, offset by the day boundary just computed:
            #  - the rollover day is today: nothing to add;
            #  - the rollover day is later this week (today is day 2, rollover
            #    day 6): wait dayOfWeek - day days;
            #  - the rollover day has already passed (today is day 5, rollover
            #    day 3): wait the days left this week plus the days into next
            #    week up to the rollover day (6 - day + dayOfWeek + 1).
            #
            # Bug fix: this must test the normalized self.when.  Testing the
            # raw 'when' argument meant a lower-case specifier such as 'w3'
            # silently skipped this adjustment and rolled over at the next
            # midnight instead of on the requested weekday.
            if self.when.startswith('W'):
                day = t[6] # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    self.rolloverAt = self.rolloverAt + (daysToWait * (60 * 60 * 24))

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed
        so the method signatures are the same.
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        return 0

    def doRollover(self):
        """
        Do a rollover; in this case, a date/time stamp is appended to the
        filename when the rollover happens.  However, you want the file to
        be named for the start of the interval, not the current time.  If
        there is a backup count, then we have to get a list of matching
        filenames, sort them and remove the one with the oldest suffix.
        """
        self.stream.close()
        # get the time that this sequence started at and make it a TimeTuple
        t = self.rolloverAt - self.interval
        timeTuple = time.localtime(t)
        dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
        if os.path.exists(dfn):
            os.remove(dfn)
        os.rename(self.baseFilename, dfn)
        if self.backupCount > 0:
            # find the oldest log file and delete it
            # NOTE: the pattern only matches suffixes starting with "20",
            # i.e. years 2000-2099 -- adequate for the date suffixes
            # generated above, but not fully general.
            s = glob.glob(self.baseFilename + ".20*")
            if len(s) > self.backupCount:
                s.sort()
                os.remove(s[0])
        if self.encoding:
            self.stream = codecs.open(self.baseFilename, 'w', self.encoding)
        else:
            self.stream = open(self.baseFilename, 'w')
        self.rolloverAt = self.rolloverAt + self.interval
|
287 |
|
class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.
    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        The attribute 'closeOnError' defaults to 0; set it to 1 to have a
        socket error silently close the socket so that it is reopened on
        the next logging call.  (The previous docstring incorrectly said
        the default was 1.)
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        self.sock = None
        self.closeOnError = 0
        self.retryTime = None
        #
        # Exponential backoff parameters.
        #
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((self.host, self.port))
        return s

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Either retryTime is None, in which case this
        # is the first time back after a disconnect, or
        # we've waited long enough.
        if self.retryTime is None:
            attempt = 1
        else:
            attempt = (now >= self.retryTime)
        if attempt:
            try:
                self.sock = self.makeSocket()
                self.retryTime = None # next time, no delay before trying
            except (KeyboardInterrupt, SystemExit):
                # Bug fix: the bare except below used to swallow these as
                # well; re-raise them, consistent with emit().
                raise
            except:
                # Creation failed, so set the retry time and return.
                if self.retryTime is None:
                    self.retryPeriod = self.retryStart
                else:
                    self.retryPeriod = self.retryPeriod * self.retryFactor
                    if self.retryPeriod > self.retryMax:
                        self.retryPeriod = self.retryMax
                self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        #self.sock can be None either because we haven't reached the retry
        #time yet, or because we have reached the retry time and retried,
        #but are still unable to connect.
        if self.sock:
            try:
                if hasattr(self.sock, "sendall"):
                    self.sock.sendall(s)
                else:
                    # Fallback for sockets without sendall: loop until the
                    # whole payload has gone out.
                    sentsofar = 0
                    left = len(s)
                    while left > 0:
                        sent = self.sock.send(s[sentsofar:])
                        sentsofar = sentsofar + sent
                        left = left - sent
            except socket.error:
                self.sock.close()
                self.sock = None # so we can call createSocket next time
|
440 |
|
class DatagramHandler(SocketHandler):
    """
    A handler class which writes logging records, in pickle format, to
    a datagram socket.  The pickle which is sent is that of the LogRecord's
    attribute dictionary (__dict__), so that the receiver does not need to
    have the logging module installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = 0

    def makeSocket(self):
        """
        Override the parent factory to build a UDP (SOCK_DGRAM) socket
        instead of a TCP one.  No connect is performed; sendto is used.
        """
        return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    def send(self, s):
        """
        Send a pickled string to a socket.

        Unlike the stream version, no partial-send handling is done:
        UDP neither guarantees delivery nor ordering, so the payload is
        handed over in a single sendto call.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, (self.host, self.port))
|
class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """

    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code. This
    # mapping is included in this file.
    #
    # priorities (these are ordered)

    LOG_EMERG     = 0       #  system is unusable
    LOG_ALERT     = 1       #  action must be taken immediately
    LOG_CRIT      = 2       #  critical conditions
    LOG_ERR       = 3       #  error conditions
    LOG_WARNING   = 4       #  warning conditions
    LOG_NOTICE    = 5       #  normal but significant condition
    LOG_INFO      = 6       #  informational
    LOG_DEBUG     = 7       #  debug-level messages

    #  facility codes
    LOG_KERN      = 0       #  kernel messages
    LOG_USER      = 1       #  random user-level messages
    LOG_MAIL      = 2       #  mail system
    LOG_DAEMON    = 3       #  system daemons
    LOG_AUTH      = 4       #  security/authorization messages
    LOG_SYSLOG    = 5       #  messages generated internally by syslogd
    LOG_LPR       = 6       #  line printer subsystem
    LOG_NEWS      = 7       #  network news subsystem
    LOG_UUCP      = 8       #  UUCP subsystem
    LOG_CRON      = 9       #  clock daemon
    LOG_AUTHPRIV  = 10      #  security/authorization messages (private)

    #  other codes through 15 reserved for system use
    LOG_LOCAL0    = 16      #  reserved for local use
    LOG_LOCAL1    = 17      #  reserved for local use
    LOG_LOCAL2    = 18      #  reserved for local use
    LOG_LOCAL3    = 19      #  reserved for local use
    LOG_LOCAL4    = 20      #  reserved for local use
    LOG_LOCAL5    = 21      #  reserved for local use
    LOG_LOCAL6    = 22      #  reserved for local use
    LOG_LOCAL7    = 23      #  reserved for local use

    priority_names = {
        "alert":    LOG_ALERT,
        "crit":     LOG_CRIT,
        "critical": LOG_CRIT,
        "debug":    LOG_DEBUG,
        "emerg":    LOG_EMERG,
        "err":      LOG_ERR,
        "error":    LOG_ERR,        #  DEPRECATED
        "info":     LOG_INFO,
        "notice":   LOG_NOTICE,
        "panic":    LOG_EMERG,      #  DEPRECATED
        "warn":     LOG_WARNING,    #  DEPRECATED
        "warning":  LOG_WARNING,
        }

    facility_names = {
        "auth":     LOG_AUTH,
        "authpriv": LOG_AUTHPRIV,
        "cron":     LOG_CRON,
        "daemon":   LOG_DAEMON,
        "kern":     LOG_KERN,
        "lpr":      LOG_LPR,
        "mail":     LOG_MAIL,
        "news":     LOG_NEWS,
        "security": LOG_AUTH,       #  DEPRECATED
        "syslog":   LOG_SYSLOG,
        "user":     LOG_USER,
        "uucp":     LOG_UUCP,
        "local0":   LOG_LOCAL0,
        "local1":   LOG_LOCAL1,
        "local2":   LOG_LOCAL2,
        "local3":   LOG_LOCAL3,
        "local4":   LOG_LOCAL4,
        "local5":   LOG_LOCAL5,
        "local6":   LOG_LOCAL6,
        "local7":   LOG_LOCAL7,
        }

    #The map below appears to be trivially lowercasing the key. However,
    #there's more to it than meets the eye - in some locales, lowercasing
    #gives unexpected results. See SF #1524081: in the Turkish locale,
    #"INFO".lower() != "info"
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }

    def __init__(self, address=('localhost', SYSLOG_UDP_PORT), facility=LOG_USER):
        """
        Initialize a handler.

        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used.
        """
        logging.Handler.__init__(self)

        self.address = address
        self.facility = facility
        if type(address) == types.StringType:
            self.unixsocket = 1
            self._connect_unixsocket(address)
        else:
            self.unixsocket = 0
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        self.formatter = None

    def _connect_unixsocket(self, address):
        # syslog may require either DGRAM or STREAM sockets; try DGRAM
        # first and fall back to STREAM on failure.
        self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        try:
            self.socket.connect(address)
        except socket.error:
            self.socket.close()
            self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.socket.connect(address)

    # curious: when talking to the unix-domain '/dev/log' socket, a
    # zero-terminator seems to be required.  this string is placed
    # into a class variable so that it can be overridden if
    # necessary.
    log_format_string = '<%d>%s\000'

    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.
        """
        if type(facility) == types.StringType:
            facility = self.facility_names[facility]
        if type(priority) == types.StringType:
            priority = self.priority_names[priority]
        return (facility << 3) | priority

    def close (self):
        """
        Closes the socket.
        """
        # Bug fix: close the socket unconditionally.  Previously only
        # UNIX-domain sockets were closed here, which leaked the UDP
        # socket created for (host, port) addresses.
        self.socket.close()
        logging.Handler.close(self)

    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).
        """
        return self.priority_map.get(levelName, "warning")

    def emit(self, record):
        """
        Emit a record.

        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        msg = self.format(record)
        # We need to convert the record level to lowercase; maybe this
        # will change in the future.  (This note used to be a stray
        # string literal inside the function body.)
        msg = self.log_format_string % (
            self.encodePriority(self.facility,
                                self.mapPriority(record.levelname)),
            msg)
        try:
            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except socket.error:
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            else:
                self.socket.sendto(msg, self.address)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
|
677 |
|
class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject):
        """
        Initialize the instance with the from and to addresses and subject
        line of the email.  To specify a non-standard SMTP port, pass the
        mailhost as a (host, port) tuple.
        """
        logging.Handler.__init__(self)
        if type(mailhost) == types.TupleType:
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost = mailhost
            self.mailport = None
        self.fromaddr = fromaddr
        if type(toaddrs) == types.StringType:
            toaddrs = [toaddrs]
        self.toaddrs = toaddrs
        self.subject = subject

    def getSubject(self, record):
        """
        Determine the subject for the email.

        Override this method if you want a record-dependent subject line.
        """
        return self.subject

    weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']

    monthname = [None,
                 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

    def date_time(self):
        """
        Return the current date and time formatted for a MIME header.
        Needed for Python 1.5.2 (no email package available)
        """
        gmt = time.gmtime(time.time())
        year = gmt[0]
        month = gmt[1]
        day = gmt[2]
        hh = gmt[3]
        mm = gmt[4]
        ss = gmt[5]
        wd = gmt[6]
        return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
                self.weekdayname[wd],
                day, self.monthname[month], year,
                hh, mm, ss)

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        """
        try:
            import smtplib
            try:
                from email.Utils import formatdate
            except:
                # No email package (e.g. Python 1.5.2); use our fallback.
                formatdate = self.date_time
            port = self.mailport or smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port)
            body = self.format(record)
            body = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
                            self.fromaddr,
                            ",".join(self.toaddrs),
                            self.getSubject(record),
                            formatdate(), body)
            smtp.sendmail(self.fromaddr, self.toaddrs, body)
            smtp.quit()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
|
759 |
|
class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log. Adds a
    registry entry for the specified application name. If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used. Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        """
        Register 'appname' as an event source and build the level ->
        event-type map.  If the Win32 extensions are not importable, a
        warning is printed and the handler becomes a no-op (_welu is
        None, which emit() checks).
        """
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                # Default to win32service.pyd, which lives one directory
                # above the directory containing win32evtlogutil.
                welu_dir = os.path.split(self._welu.__file__)[0]
                dllname = os.path.join(os.path.split(welu_dir)[0],
                                       r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            self.typemap = {
                logging.DEBUG   : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO    : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR   : win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
            }
        except ImportError:
            # Bug fix (portability): use the parenthesized single-argument
            # print form, which behaves identically on Python 2 but is
            # also valid Python 3 syntax, unlike the bare print statement.
            print("The Python Win32 extensions for NT (service, event "
                  "logging) appear not to be available.")
            self._welu = None

    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string. Then, in here,
        you could use a dictionary lookup to get the message ID. This
        version returns 1, which is the base message ID in win32service.pyd.
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record.

        Override this if you want to specify your own categories. This version
        returns 0.
        """
        return 0

    def getEventType(self, record):
        """
        Return the event type for the record.

        Override this if you want to specify your own types. This version does
        a mapping using the handler's typemap attribute, which is set up in
        __init__() to a dictionary which contains mappings for DEBUG, INFO,
        WARNING, ERROR and CRITICAL. If you are using your own levels you will
        either need to override this method or place a suitable dictionary in
        the handler's typemap attribute.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Emit a record.

        Determine the message ID, event category and event type, then log
        the message in the NT event log.  Does nothing when the Win32
        extensions were not importable.  (Local names renamed so as not to
        shadow the 'id' and 'type' builtins.)
        """
        if self._welu:
            try:
                msg_id = self.getMessageID(record)
                cat = self.getEventCategory(record)
                etype = self.getEventType(record)
                msg = self.format(record)
                self._welu.ReportEvent(self.appname, msg_id, cat, etype, [msg])
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)
|
859 |
|
class HTTPHandler(logging.Handler):
    """
    A handler which ships each log record to a Web server, using either
    GET or POST semantics.
    """
    def __init__(self, host, url, method="GET"):
        """
        Set up the handler with the target host, the request URL and the
        HTTP method to use ("GET" or "POST", case-insensitive).

        Raises ValueError for any other method.
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ["GET", "POST"]:
            raise ValueError("method must be GET or POST")
        self.host = host
        self.url = url
        self.method = method

    def mapLogRecord(self, record):
        """
        Turn a log record into the dict that is sent as the CGI data.

        The default just uses the record's attribute dictionary;
        overwrite in your class to customise.  Contributed by Franz
        Glasner.
        """
        return record.__dict__

    def emit(self, record):
        """
        Emit a record.

        Sends the record to the Web server as an URL-encoded dictionary;
        any failure (other than keyboard interrupt / system exit) is
        routed through handleError().
        """
        try:
            import httplib, urllib
            host = self.host
            conn = httplib.HTTP(host)
            url = self.url
            data = urllib.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                # append the data to the query string, creating one if absent
                if '?' in url:
                    url = url + '&' + data
                else:
                    url = url + '?' + data
            conn.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            colon = host.find(":")
            if colon >= 0:
                host = host[:colon]
            conn.putheader("Host", host)
            if self.method == "POST":
                conn.putheader("Content-type",
                               "application/x-www-form-urlencoded")
                conn.putheader("Content-length", str(len(data)))
            conn.endheaders()
            if self.method == "POST":
                conn.send(data)
            conn.getreply()    #can't do anything with the result
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
|
923 |
|
class BufferingHandler(logging.Handler):
    """
    A handler that collects logging records in an in-memory list.

    After each record is appended, shouldFlush() is consulted; when it
    answers true, flush() is invoked and is expected to do whatever is
    needed with the accumulated records.
    """
    def __init__(self, capacity):
        """
        Create the handler with an empty buffer and the given capacity.
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Decide whether the buffer ought to be flushed.

        The default policy is simply "the buffer has reached capacity";
        override to implement custom flushing strategies.
        """
        return len(self.buffer) >= self.capacity

    def emit(self, record):
        """
        Emit a record: buffer it, then flush if shouldFlush() says so.
        """
        self.buffer.append(record)
        if not self.shouldFlush(record):
            return
        self.flush()

    def flush(self):
        """
        Discard the buffered records.

        Override to implement custom flushing behaviour; this version
        just resets the buffer to empty.
        """
        self.buffer = []

    def close(self):
        """
        Close the handler: flush pending records, then chain to the
        parent class' close().
        """
        self.flush()
        logging.Handler.close(self)
|
974 |
|
class MemoryHandler(BufferingHandler):
    """
    A buffering handler that periodically forwards its records to a
    target handler.  Flushing happens when the buffer fills up, or when
    a record of a certain severity or greater is seen.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
        """
        Create the handler.

        capacity   -- buffer size (see BufferingHandler).
        flushLevel -- records at this level or above force a flush.
        target     -- handler the records are forwarded to.  Without a
                      target, set either here or via setTarget(), a
                      MemoryHandler is no use to anyone!
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target

    def shouldFlush(self, record):
        """
        Flush on a full buffer, or on a record at or above flushLevel.
        """
        if len(self.buffer) >= self.capacity:
            return True
        return record.levelno >= self.flushLevel

    def setTarget(self, target):
        """
        Install the handler that flush() forwards records to.
        """
        self.target = target

    def flush(self):
        """
        Send the buffered records to the target handler, if there is
        one, and empty the buffer.  Override if you want different
        behaviour.
        """
        target = self.target
        if target:
            for rec in self.buffer:
                target.handle(rec)
            self.buffer = []

    def close(self):
        """
        Flush, drop the target reference, then chain to the parent
        class' close() (which loses the buffer).
        """
        self.flush()
        self.target = None
        BufferingHandler.close(self)