|
1 # Copyright 2001-2007 by Vinay Sajip. All Rights Reserved. |
|
2 # |
|
3 # Permission to use, copy, modify, and distribute this software and its |
|
4 # documentation for any purpose and without fee is hereby granted, |
|
5 # provided that the above copyright notice appear in all copies and that |
|
6 # both that copyright notice and this permission notice appear in |
|
7 # supporting documentation, and that the name of Vinay Sajip |
|
8 # not be used in advertising or publicity pertaining to distribution |
|
9 # of the software without specific, written prior permission. |
|
10 # VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING |
|
11 # ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL |
|
12 # VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR |
|
13 # ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER |
|
14 # IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT |
|
15 # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
|
16 |
|
17 """ |
|
18 Additional handlers for the logging package for Python. The core package is |
|
19 based on PEP 282 and comments thereto in comp.lang.python, and influenced by |
|
20 Apache's log4j system. |
|
21 |
|
22 Should work under Python versions >= 1.5.2, except that source line |
|
23 information is not available unless 'sys._getframe()' is. |
|
24 |
|
25 Copyright (C) 2001-2008 Vinay Sajip. All Rights Reserved. |
|
26 |
|
27 To use, simply 'import logging' and log away! |
|
28 """ |
|
29 |
|
import logging, socket, types, os, string, cPickle, struct, time, re
import errno
from stat import ST_DEV, ST_INO
|
32 |
|
33 try: |
|
34 import codecs |
|
35 except ImportError: |
|
36 codecs = None |
|
37 |
|
38 # |
|
39 # Some constants... |
|
40 # |
|
41 |
|
42 DEFAULT_TCP_LOGGING_PORT = 9020 |
|
43 DEFAULT_UDP_LOGGING_PORT = 9021 |
|
44 DEFAULT_HTTP_LOGGING_PORT = 9022 |
|
45 DEFAULT_SOAP_LOGGING_PORT = 9023 |
|
46 SYSLOG_UDP_PORT = 514 |
|
47 |
|
48 _MIDNIGHT = 24 * 60 * 60 # number of seconds in a day |
|
49 |
|
class BaseRotatingHandler(logging.FileHandler):
    """
    Abstract base for file handlers that roll their log file over at some
    trigger point.

    Subclasses supply shouldRollover() and doRollover(); this class is not
    meant to be instantiated directly - use RotatingFileHandler or
    TimedRotatingFileHandler instead.
    """
    def __init__(self, filename, mode, encoding=None, delay=0):
        """
        Open the named file for streamed logging.

        The encoding is honoured only when the codecs module could be
        imported; otherwise it is forced to None.
        """
        if codecs is None:
            encoding = None
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.mode = mode
        self.encoding = encoding

    def emit(self, record):
        """
        Emit a record.

        Asks the subclass whether a rollover is due, performs it if so,
        then delegates the actual write to FileHandler.emit().  Any error
        other than KeyboardInterrupt/SystemExit is routed to handleError().
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
|
81 |
|
class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.
        """
        if maxBytes > 0:
            mode = 'a' # doesn't make sense otherwise!
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().

        Shifts each backup file up by one ("app.log.1" -> "app.log.2", ...),
        renames the live file to ".1", then reopens a fresh live file.
        """
        # Guard against a still-unopened stream (possible when 'delay' was
        # used); the original unconditionally called close() on None.
        if self.stream:
            self.stream.close()
        if self.backupCount > 0:
            # Shift existing backups from the highest index downwards so
            # nothing is overwritten before it has been renamed.
            for i in range(self.backupCount - 1, 0, -1):
                sfn = "%s.%d" % (self.baseFilename, i)
                dfn = "%s.%d" % (self.baseFilename, i + 1)
                if os.path.exists(sfn):
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.baseFilename + ".1"
            if os.path.exists(dfn):
                os.remove(dfn)
            os.rename(self.baseFilename, dfn)
        self.mode = 'w'
        self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.
        """
        # Bug fix: when the handler was constructed with delay=1 the stream
        # is not opened until the first emit, but this method runs *before*
        # FileHandler.emit() - the original code then crashed on
        # self.stream.seek with self.stream == None.  Open it here.
        if self.stream is None:
            self.stream = self._open()
        if self.maxBytes > 0:                   # are we rolling over?
            msg = "%s\n" % self.format(record)
            self.stream.seek(0, 2)  #due to non-posix-compliant Windows feature
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return 1
        return 0
|
150 |
|
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=0, utc=0):
        """
        Open 'filename' for logging, rolling over every 'interval' units of
        'when' (see the table below).  If 'utc' is true, rollover points are
        computed in UTC rather than local time.
        """
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = string.upper(when)
        self.backupCount = backupCount
        self.utc = utc
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers. Also set the filename suffix used when
        # a rollover occurs. Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        currentTime = int(time.time())
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        self.extMatch = re.compile(self.extMatch)
        self.interval = self.interval * interval # multiply by units requested
        self.rolloverAt = currentTime + self.interval

        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is. In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now. So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time. After that, the regular interval will take care of
        # the rest. Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            # r is the number of seconds left between now and midnight
            r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
                    currentSecond)
            self.rolloverAt = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts. There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            #
            # Bug fix: test the case-normalized self.when here, not the raw
            # 'when' argument - a lowercase spec such as 'w3' previously
            # skipped this whole adjustment and rolled at the next midnight
            # instead of the requested weekday.
            if self.when.startswith('W'):
                day = t[6] # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = self.rolloverAt + (daysToWait * (60 * 60 * 24))
                    if not utc:
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                newRolloverAt = newRolloverAt - 3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                newRolloverAt = newRolloverAt + 3600
                    self.rolloverAt = newRolloverAt

        #print "Will rollover at %d, %d seconds from now" % (self.rolloverAt, self.rolloverAt - currentTime)

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        #print "No need to rollover: %d, %d" % (t, self.rolloverAt)
        return 0

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        Only files whose suffix matches this handler's date pattern are
        candidates; everything except the newest backupCount is returned.
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        prefix = baseName + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        result.sort()
        if len(result) < self.backupCount:
            result = []
        else:
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens. However, you want the file to be named for the
        start of the interval, not the current time. If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        self.stream.close()
        # get the time that this sequence started at and make it a TimeTuple
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
        dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
        if os.path.exists(dfn):
            os.remove(dfn)
        os.rename(self.baseFilename, dfn)
        if self.backupCount > 0:
            # prune the oldest backups beyond backupCount
            for s in self.getFilesToDelete():
                os.remove(s)
        #print "%s -> %s" % (self.baseFilename, dfn)
        self.mode = 'w'
        self.stream = self._open()
        newRolloverAt = self.rolloverAt + self.interval
        currentTime = int(time.time())
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstNow = time.localtime(currentTime)[-1]
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    newRolloverAt = newRolloverAt - 3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    newRolloverAt = newRolloverAt + 3600
        self.rolloverAt = newRolloverAt
|
344 |
|
345 class WatchedFileHandler(logging.FileHandler): |
|
346 """ |
|
347 A handler for logging to a file, which watches the file |
|
348 to see if it has changed while in use. This can happen because of |
|
349 usage of programs such as newsyslog and logrotate which perform |
|
350 log file rotation. This handler, intended for use under Unix, |
|
351 watches the file to see if it has changed since the last emit. |
|
352 (A file has changed if its device or inode have changed.) |
|
353 If it has changed, the old file stream is closed, and the file |
|
354 opened to get a new stream. |
|
355 |
|
356 This handler is not appropriate for use under Windows, because |
|
357 under Windows open files cannot be moved or renamed - logging |
|
358 opens the files with exclusive locks - and so there is no need |
|
359 for such a handler. Furthermore, ST_INO is not supported under |
|
360 Windows; stat always returns zero for this value. |
|
361 |
|
362 This handler is based on a suggestion and patch by Chad J. |
|
363 Schroeder. |
|
364 """ |
|
365 def __init__(self, filename, mode='a', encoding=None, delay=0): |
|
366 logging.FileHandler.__init__(self, filename, mode, encoding, delay) |
|
367 if not os.path.exists(self.baseFilename): |
|
368 self.dev, self.ino = -1, -1 |
|
369 else: |
|
370 stat = os.stat(self.baseFilename) |
|
371 self.dev, self.ino = stat[ST_DEV], stat[ST_INO] |
|
372 |
|
373 def emit(self, record): |
|
374 """ |
|
375 Emit a record. |
|
376 |
|
377 First check if the underlying file has changed, and if it |
|
378 has, close the old stream and reopen the file to get the |
|
379 current stream. |
|
380 """ |
|
381 if not os.path.exists(self.baseFilename): |
|
382 stat = None |
|
383 changed = 1 |
|
384 else: |
|
385 stat = os.stat(self.baseFilename) |
|
386 changed = (stat[ST_DEV] != self.dev) or (stat[ST_INO] != self.ino) |
|
387 if changed and self.stream is not None: |
|
388 self.stream.flush() |
|
389 self.stream.close() |
|
390 self.stream = self._open() |
|
391 if stat is None: |
|
392 stat = os.stat(self.baseFilename) |
|
393 self.dev, self.ino = stat[ST_DEV], stat[ST_INO] |
|
394 logging.FileHandler.emit(self, record) |
|
395 |
|
class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.
    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        The attribute 'closeOnError' is initialized to 0 (false), so socket
        errors go through the normal handleError() path; set it to a true
        value to have the socket silently closed on error and then reopened
        on the next logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        self.sock = None            # created lazily by createSocket()
        self.closeOnError = 0
        self.retryTime = None       # earliest time at which to retry a connect
        #
        # Exponential backoff parameters.
        #
        self.retryStart = 1.0       # first retry delay, in seconds
        self.retryMax = 30.0        # upper bound on the retry delay
        self.retryFactor = 2.0      # delay multiplier per failed attempt

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.

        Returns a connected TCP socket.  The timeout (in seconds) is only
        applied when the socket object supports settimeout.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if hasattr(s, 'settimeout'):    # guard: some socket objects lack settimeout
            s.settimeout(timeout)
        s.connect((self.host, self.port))
        return s

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.

        On failure, self.sock stays None and self.retryTime is advanced so
        subsequent calls are throttled.
        """
        now = time.time()
        # Either retryTime is None, in which case this
        # is the first time back after a disconnect, or
        # we've waited long enough.
        if self.retryTime is None:
            attempt = 1
        else:
            attempt = (now >= self.retryTime)
        if attempt:
            try:
                self.sock = self.makeSocket()
                self.retryTime = None # next time, no delay before trying
            except socket.error:
                #Creation failed, so set the retry time and return.
                if self.retryTime is None:
                    # first failure: start from the base delay
                    self.retryPeriod = self.retryStart
                else:
                    # subsequent failures: grow geometrically, capped at retryMax
                    self.retryPeriod = self.retryPeriod * self.retryFactor
                    if self.retryPeriod > self.retryMax:
                        self.retryPeriod = self.retryMax
                self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        #self.sock can be None either because we haven't reached the retry
        #time yet, or because we have reached the retry time and retried,
        #but are still unable to connect.
        if self.sock:
            try:
                if hasattr(self.sock, "sendall"):
                    self.sock.sendall(s)
                else:
                    # manual partial-send loop for sockets without sendall
                    sentsofar = 0
                    left = len(s)
                    while left > 0:
                        sent = self.sock.send(s[sentsofar:])
                        sentsofar = sentsofar + sent
                        left = left - sent
            except socket.error:
                self.sock.close()
                self.sock = None # so we can call createSocket next time
        # if self.sock is still None the record is silently dropped

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.

        NOTE: the receiving end must trust the sender - unpickling data
        from an untrusted source can execute arbitrary code.
        """
        ei = record.exc_info
        if ei:
            dummy = self.format(record) # just to get traceback text into record.exc_text
            record.exc_info = None # to avoid Unpickleable error
        s = cPickle.dumps(record.__dict__, 1)
        if ei:
            record.exc_info = ei # for next handler
        slen = struct.pack(">L", len(s))    # 4-byte big-endian length prefix
        return slen + s

    def handleError(self, record):
        """
        Handle an error during logging.

        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None        #try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            s = self.makePickle(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        if self.sock:
            self.sock.close()
            self.sock = None
        logging.Handler.close(self)
|
550 |
|
class DatagramHandler(SocketHandler):
    """
    A handler class which writes logging records, in pickle format, to
    a datagram socket.  The pickle which is sent is that of the LogRecord's
    attribute dictionary (__dict__), so that the receiver does not need to
    have the logging module installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.

    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = 0

    def makeSocket(self):
        """
        The factory method of SocketHandler is here overridden to create
        a UDP socket (SOCK_DGRAM).
        """
        return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    def send(self, s):
        """
        Send a pickled string to a socket.

        Unlike the streaming variant there is no partial-send handling:
        UDP guarantees neither delivery nor ordering, so the whole
        datagram is handed to the kernel in one sendto call.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, (self.host, self.port))
|
588 |
|
class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """

    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code. This
    # mapping is included in this file.
    #
    # priorities (these are ordered)

    LOG_EMERG = 0       # system is unusable
    LOG_ALERT = 1       # action must be taken immediately
    LOG_CRIT = 2        # critical conditions
    LOG_ERR = 3         # error conditions
    LOG_WARNING = 4     # warning conditions
    LOG_NOTICE = 5      # normal but significant condition
    LOG_INFO = 6        # informational
    LOG_DEBUG = 7       # debug-level messages

    # facility codes
    LOG_KERN = 0        # kernel messages
    LOG_USER = 1        # random user-level messages
    LOG_MAIL = 2        # mail system
    LOG_DAEMON = 3      # system daemons
    LOG_AUTH = 4        # security/authorization messages
    LOG_SYSLOG = 5      # messages generated internally by syslogd
    LOG_LPR = 6         # line printer subsystem
    LOG_NEWS = 7        # network news subsystem
    LOG_UUCP = 8        # UUCP subsystem
    LOG_CRON = 9        # clock daemon
    LOG_AUTHPRIV = 10   # security/authorization messages (private)

    # other codes through 15 reserved for system use
    LOG_LOCAL0 = 16     # reserved for local use
    LOG_LOCAL1 = 17     # reserved for local use
    LOG_LOCAL2 = 18     # reserved for local use
    LOG_LOCAL3 = 19     # reserved for local use
    LOG_LOCAL4 = 20     # reserved for local use
    LOG_LOCAL5 = 21     # reserved for local use
    LOG_LOCAL6 = 22     # reserved for local use
    LOG_LOCAL7 = 23     # reserved for local use

    priority_names = {
        "alert":    LOG_ALERT,
        "crit":     LOG_CRIT,
        "critical": LOG_CRIT,
        "debug":    LOG_DEBUG,
        "emerg":    LOG_EMERG,
        "err":      LOG_ERR,
        "error":    LOG_ERR,        #  DEPRECATED
        "info":     LOG_INFO,
        "notice":   LOG_NOTICE,
        "panic":    LOG_EMERG,      #  DEPRECATED
        "warn":     LOG_WARNING,    #  DEPRECATED
        "warning":  LOG_WARNING,
        }

    facility_names = {
        "auth":     LOG_AUTH,
        "authpriv": LOG_AUTHPRIV,
        "cron":     LOG_CRON,
        "daemon":   LOG_DAEMON,
        "kern":     LOG_KERN,
        "lpr":      LOG_LPR,
        "mail":     LOG_MAIL,
        "news":     LOG_NEWS,
        "security": LOG_AUTH,       #  DEPRECATED
        "syslog":   LOG_SYSLOG,
        "user":     LOG_USER,
        "uucp":     LOG_UUCP,
        "local0":   LOG_LOCAL0,
        "local1":   LOG_LOCAL1,
        "local2":   LOG_LOCAL2,
        "local3":   LOG_LOCAL3,
        "local4":   LOG_LOCAL4,
        "local5":   LOG_LOCAL5,
        "local6":   LOG_LOCAL6,
        "local7":   LOG_LOCAL7,
        }

    #The map below appears to be trivially lowercasing the key. However,
    #there's more to it than meets the eye - in some locales, lowercasing
    #gives unexpected results. See SF #1524081: in the Turkish locale,
    #"INFO".lower() != "info"
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }

    def __init__(self, address=('localhost', SYSLOG_UDP_PORT), facility=LOG_USER):
        """
        Initialize a handler.

        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used.
        """
        logging.Handler.__init__(self)

        self.address = address
        self.facility = facility
        if type(address) == types.StringType:
            self.unixsocket = 1
            self._connect_unixsocket(address)
        else:
            self.unixsocket = 0
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        self.formatter = None

    def _connect_unixsocket(self, address):
        """
        Connect to the given UNIX-domain syslog socket, trying a datagram
        socket first and falling back to a stream socket.
        """
        self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        # syslog may require either DGRAM or STREAM sockets
        try:
            self.socket.connect(address)
        except socket.error:
            self.socket.close()
            self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.socket.connect(address)

    # curious: when talking to the unix-domain '/dev/log' socket, a
    # zero-terminator seems to be required.  this string is placed
    # into a class variable so that it can be overridden if
    # necessary.
    log_format_string = '<%d>%s\000'

    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.
        """
        if type(facility) == types.StringType:
            facility = self.facility_names[facility]
        if type(priority) == types.StringType:
            priority = self.priority_names[priority]
        return (facility << 3) | priority

    def close(self):
        """
        Closes the socket.
        """
        # Bug fix: the original only closed UNIX-domain sockets, leaking
        # the UDP socket's file descriptor for INET-addressed handlers.
        # __init__ always creates self.socket, so close unconditionally.
        self.socket.close()
        logging.Handler.close(self)

    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).

        Unknown level names map to "warning".
        """
        return self.priority_map.get(levelName, "warning")

    def emit(self, record):
        """
        Emit a record.

        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        msg = self.format(record)
        # The record level is mapped via priority_map (not lower()) to a
        # priority_names key; maybe this will change in the future.
        # (The original used a stray string literal here as a comment -
        # a pointless runtime statement.)
        msg = self.log_format_string % (
            self.encodePriority(self.facility,
                                self.mapPriority(record.levelname)),
            msg)
        try:
            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except socket.error:
                    # syslogd may have been restarted; reconnect and retry once
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            else:
                self.socket.sendto(msg, self.address)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
|
787 |
|
class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject, credentials=None):
        """
        Initialize the handler.

        mailhost is either a host name or a (host, port) tuple when a
        non-standard SMTP port is needed.  credentials, when supplied, is a
        (username, password) tuple used to authenticate with the server.
        toaddrs may be a single address or a list of addresses; subject is
        the subject line used for every message.
        """
        logging.Handler.__init__(self)
        if type(mailhost) == types.TupleType:
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost = mailhost
            self.mailport = None
        if type(credentials) == types.TupleType:
            self.username, self.password = credentials
        else:
            self.username = None
        self.fromaddr = fromaddr
        if type(toaddrs) == types.StringType:
            toaddrs = [toaddrs]
        self.toaddrs = toaddrs
        self.subject = subject

    def getSubject(self, record):
        """
        Determine the subject for the email.

        The default is the fixed subject given to the constructor;
        override this method for a record-dependent subject line.
        """
        return self.subject

    weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']

    monthname = [None,
                 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

    def date_time(self):
        """
        Return the current date and time formatted for a MIME header.
        Needed for Python 1.5.2 (no email package available)
        """
        now = time.gmtime(time.time())
        year, month, day, hh, mm, ss, wd = now[:7]
        return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
            self.weekdayname[wd], day, self.monthname[month], year,
            hh, mm, ss)

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        """
        try:
            import smtplib
            try:
                from email.utils import formatdate
            except ImportError:
                # very old Pythons: fall back to the local implementation
                formatdate = self.date_time
            port = self.mailport or smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port)
            body = self.format(record)
            msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
                            self.fromaddr,
                            string.join(self.toaddrs, ","),
                            self.getSubject(record),
                            formatdate(), body)
            if self.username:
                smtp.login(self.username, self.password)
            smtp.sendmail(self.fromaddr, self.toaddrs, msg)
            smtp.quit()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
|
874 |
|
class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log. Adds a
    registry entry for the specified application name. If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used. Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        # Initialize the handler and register appname as an event source.
        # The win32 extensions are imported lazily so that the module stays
        # importable on non-Windows platforms; on ImportError the handler
        # degrades to a no-op (emit() checks self._welu).
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                # Default to win32service.pyd, located two directory levels
                # up from win32evtlogutil's own module file.
                dllname = os.path.split(self._welu.__file__)
                dllname = os.path.split(dllname[0])
                dllname = os.path.join(dllname[0], r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            # Map logging levels to NT event types; levels not listed here
            # fall back to self.deftype (see getEventType()).
            self.typemap = {
                logging.DEBUG   : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO    : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR   : win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
         }
        except ImportError:
            print "The Python Win32 extensions for NT (service, event "\
                        "logging) appear not to be available."
            self._welu = None

    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string. Then, in here,
        you could use a dictionary lookup to get the message ID. This
        version returns 1, which is the base message ID in win32service.pyd.
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record.

        Override this if you want to specify your own categories. This version
        returns 0.
        """
        return 0

    def getEventType(self, record):
        """
        Return the event type for the record.

        Override this if you want to specify your own types. This version does
        a mapping using the handler's typemap attribute, which is set up in
        __init__() to a dictionary which contains mappings for DEBUG, INFO,
        WARNING, ERROR and CRITICAL. If you are using your own levels you will
        either need to override this method or place a suitable dictionary in
        the handler's typemap attribute.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Emit a record.

        Determine the message ID, event category and event type. Then
        log the message in the NT event log.
        """
        # Silently does nothing if the win32 extensions were unavailable
        # at construction time.
        if self._welu:
            try:
                id = self.getMessageID(record)
                cat = self.getEventCategory(record)
                type = self.getEventType(record)
                msg = self.format(record)
                self._welu.ReportEvent(self.appname, id, cat, type, [msg])
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)
|
974 |
|
975 class HTTPHandler(logging.Handler): |
|
976 """ |
|
977 A class which sends records to a Web server, using either GET or |
|
978 POST semantics. |
|
979 """ |
|
980 def __init__(self, host, url, method="GET"): |
|
981 """ |
|
982 Initialize the instance with the host, the request URL, and the method |
|
983 ("GET" or "POST") |
|
984 """ |
|
985 logging.Handler.__init__(self) |
|
986 method = string.upper(method) |
|
987 if method not in ["GET", "POST"]: |
|
988 raise ValueError, "method must be GET or POST" |
|
989 self.host = host |
|
990 self.url = url |
|
991 self.method = method |
|
992 |
|
993 def mapLogRecord(self, record): |
|
994 """ |
|
995 Default implementation of mapping the log record into a dict |
|
996 that is sent as the CGI data. Overwrite in your class. |
|
997 Contributed by Franz Glasner. |
|
998 """ |
|
999 return record.__dict__ |
|
1000 |
|
1001 def emit(self, record): |
|
1002 """ |
|
1003 Emit a record. |
|
1004 |
|
1005 Send the record to the Web server as an URL-encoded dictionary |
|
1006 """ |
|
1007 try: |
|
1008 import httplib, urllib |
|
1009 host = self.host |
|
1010 h = httplib.HTTP(host) |
|
1011 url = self.url |
|
1012 data = urllib.urlencode(self.mapLogRecord(record)) |
|
1013 if self.method == "GET": |
|
1014 if (string.find(url, '?') >= 0): |
|
1015 sep = '&' |
|
1016 else: |
|
1017 sep = '?' |
|
1018 url = url + "%c%s" % (sep, data) |
|
1019 h.putrequest(self.method, url) |
|
1020 # support multiple hosts on one IP address... |
|
1021 # need to strip optional :port from host, if present |
|
1022 i = string.find(host, ":") |
|
1023 if i >= 0: |
|
1024 host = host[:i] |
|
1025 h.putheader("Host", host) |
|
1026 if self.method == "POST": |
|
1027 h.putheader("Content-type", |
|
1028 "application/x-www-form-urlencoded") |
|
1029 h.putheader("Content-length", str(len(data))) |
|
1030 h.endheaders() |
|
1031 if self.method == "POST": |
|
1032 h.send(data) |
|
1033 h.getreply() #can't do anything with the result |
|
1034 except (KeyboardInterrupt, SystemExit): |
|
1035 raise |
|
1036 except: |
|
1037 self.handleError(record) |
|
1038 |
|
class BufferingHandler(logging.Handler):
    """
    A handler that accumulates logging records in an in-memory buffer.

    Each time a record arrives, shouldFlush() is consulted; when it
    answers true, flush() is invoked to dispose of the buffered records.
    """
    def __init__(self, capacity):
        """
        Set up the handler with the given buffer capacity.
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Decide whether the buffer ought to be flushed.

        The default policy flushes once the buffer has reached capacity;
        subclasses may override for custom strategies.
        """
        return self.capacity <= len(self.buffer)

    def emit(self, record):
        """
        Buffer the record, flushing first if the policy demands it.
        """
        self.buffer.append(record)
        if not self.shouldFlush(record):
            return
        self.flush()

    def flush(self):
        """
        Discard the buffered records.

        Subclasses override this to actually do something with them.
        """
        self.buffer = []

    def close(self):
        """
        Flush any pending records, then chain to the parent close().
        """
        self.flush()
        logging.Handler.close(self)
|
1089 |
|
class MemoryHandler(BufferingHandler):
    """
    A buffering handler that periodically forwards its records to a
    target handler. A flush is triggered either by the buffer filling
    up or by a record at (or above) a configured severity.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
        """
        Set up the handler with a buffer capacity, the severity that
        forces a flush, and an optional target handler.

        Note that without a target being set either here or via setTarget(),
        a MemoryHandler is no use to anyone!
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target

    def shouldFlush(self, record):
        """
        Flush when a record reaches flushLevel, or the buffer is full.
        """
        return (record.levelno >= self.flushLevel) or \
               (len(self.buffer) >= self.capacity)

    def setTarget(self, target):
        """
        Install the handler that flushed records are forwarded to.
        """
        self.target = target

    def flush(self):
        """
        Forward every buffered record to the target, if one is set.
        Override for different behaviour.
        """
        destination = self.target
        if not destination:
            # No target: keep the records buffered (matches historical
            # behaviour - they are only drained when a target exists).
            return
        for buffered in self.buffer:
            destination.handle(buffered)
        self.buffer = []

    def close(self):
        """
        Flush, drop the target reference, and chain to the parent close().
        """
        self.flush()
        self.target = None
        BufferingHandler.close(self)