# This contains most of the executable examples from Guido's descr
# tutorial, once at
#
#     http://www.python.org/2.2/descrintro.html
#
# A few examples left implicit in the writeup were fleshed out, a few were
# skipped due to lack of interest (e.g., faking super() by hand isn't
# of much interest anymore), and a few were fiddled to make the output
# deterministic.

from test.test_support import sortdict
import pprint

class defaultdict(dict):
    def __init__(self, default=None):
        dict.__init__(self)
        self.default = default

    def __getitem__(self, key):
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            return self.default

    def get(self, key, *args):
        if not args:
            args = (self.default,)
        return dict.get(self, key, *args)

    def merge(self, other):
        for key in other:
            if key not in self:
                self[key] = other[key]

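# A quick sanity sketch of the fallback behaviour defined above (illustrative
# only; the key and default value are arbitrary and are not exercised by the
# doctests below):
_sketch = defaultdict(default=0)
assert _sketch["missing"] == 0      # missing keys fall back to self.default
assert _sketch.get("missing") == 0  # get() falls back the same way
del _sketch
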
test_1 = """

Here's the new type at work:

>>> print defaultdict               # show our type
<class 'test.test_descrtut.defaultdict'>
>>> print type(defaultdict)         # its metatype
<type 'type'>
>>> a = defaultdict(default=0.0)    # create an instance
>>> print a                         # show the instance
{}
>>> print type(a)                   # show its type
<class 'test.test_descrtut.defaultdict'>
>>> print a.__class__               # show its class
<class 'test.test_descrtut.defaultdict'>
>>> print type(a) is a.__class__    # its type is its class
True
>>> a[1] = 3.25                     # modify the instance
>>> print a                         # show the new value
{1: 3.25}
>>> print a[1]                      # show the new item
3.25
>>> print a[0]                      # a non-existent item
0.0
>>> a.merge({1:100, 2:200})         # use a dict method
>>> print sortdict(a)               # show the result
{1: 3.25, 2: 200}
>>>

We can also use the new type in contexts where classic Python only allows
"real" dictionaries, such as the locals/globals dictionaries for the exec
statement or the built-in function eval():

>>> def sorted(seq):
...     seq.sort()
...     return seq
>>> print sorted(a.keys())
[1, 2]
>>> exec "x = 3; print x" in a
3
>>> print sorted(a.keys())
[1, 2, '__builtins__', 'x']
>>> print a['x']
3
>>>

Now I'll show that defaultdict instances have dynamic instance variables,
just like classic classes:

>>> a.default = -1
>>> print a["noway"]
-1
>>> a.default = -1000
>>> print a["noway"]
-1000
>>> 'default' in dir(a)
True
>>> a.x1 = 100
>>> a.x2 = 200
>>> print a.x1
100
>>> d = dir(a)
>>> 'default' in d and 'x1' in d and 'x2' in d
True
>>> print sortdict(a.__dict__)
{'default': -1000, 'x1': 100, 'x2': 200}
>>>
"""

class defaultdict2(dict):
    __slots__ = ['default']

    def __init__(self, default=None):
        dict.__init__(self)
        self.default = default

    def __getitem__(self, key):
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            return self.default

    def get(self, key, *args):
        if not args:
            args = (self.default,)
        return dict.get(self, key, *args)

    def merge(self, other):
        for key in other:
            if key not in self:
                self[key] = other[key]

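# Quick sanity sketch (illustrative only, not exercised by the doctests):
# __slots__ reserves room for 'default' alone, so any other attribute
# assignment on a defaultdict2 instance fails, as test_2 demonstrates.
_d2 = defaultdict2(default=0)
try:
    _d2.x1 = 1
except AttributeError:
    pass
else:
    raise AssertionError("expected AttributeError for a non-slot attribute")
del _d2
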
test_2 = """

The __slots__ declaration takes a list of instance variables, and reserves
space for exactly these in the instance. When __slots__ is used, other
instance variables cannot be assigned to:

>>> a = defaultdict2(default=0.0)
>>> a[1]
0.0
>>> a.default = -1
>>> a[1]
-1
>>> a.x1 = 1
Traceback (most recent call last):
  File "<stdin>", line 1, in ?
AttributeError: 'defaultdict2' object has no attribute 'x1'
>>>

"""

test_3 = """

Introspecting instances of built-in types

For instances of built-in types, x.__class__ is now the same as type(x):

>>> type([])
<type 'list'>
>>> [].__class__
<type 'list'>
>>> list
<type 'list'>
>>> isinstance([], list)
True
>>> isinstance([], dict)
False
>>> isinstance([], object)
True
>>>

Under the new proposal, the __methods__ attribute no longer exists:

>>> [].__methods__
Traceback (most recent call last):
  File "<stdin>", line 1, in ?
AttributeError: 'list' object has no attribute '__methods__'
>>>

Instead, you can get the same information from the list type:

>>> pprint.pprint(dir(list))    # like list.__dict__.keys(), but sorted
['__add__',
 '__class__',
 '__contains__',
 '__delattr__',
 '__delitem__',
 '__delslice__',
 '__doc__',
 '__eq__',
 '__ge__',
 '__getattribute__',
 '__getitem__',
 '__getslice__',
 '__gt__',
 '__hash__',
 '__iadd__',
 '__imul__',
 '__init__',
 '__iter__',
 '__le__',
 '__len__',
 '__lt__',
 '__mul__',
 '__ne__',
 '__new__',
 '__reduce__',
 '__reduce_ex__',
 '__repr__',
 '__reversed__',
 '__rmul__',
 '__setattr__',
 '__setitem__',
 '__setslice__',
 '__str__',
 'append',
 'count',
 'extend',
 'index',
 'insert',
 'pop',
 'remove',
 'reverse',
 'sort']

The new introspection API gives more information than the old one: in
addition to the regular methods, it also shows the methods that are
normally invoked through special notations, e.g. __iadd__ (+=), __len__
(len), __ne__ (!=). You can invoke any method from this list directly:

>>> a = ['tic', 'tac']
>>> list.__len__(a)          # same as len(a)
2
>>> a.__len__()              # ditto
2
>>> list.append(a, 'toe')    # same as a.append('toe')
>>> a
['tic', 'tac', 'toe']
>>>

This is just like it is for user-defined classes.
"""

test_4 = """

Static methods and class methods

The new introspection API makes it possible to add static methods and class
methods. Static methods are easy to describe: they behave pretty much like
static methods in C++ or Java. Here's an example:

>>> class C:
...
...     @staticmethod
...     def foo(x, y):
...         print "staticmethod", x, y

>>> C.foo(1, 2)
staticmethod 1 2
>>> c = C()
>>> c.foo(1, 2)
staticmethod 1 2

Class methods use a similar pattern to declare methods that receive an
implicit first argument that is the *class* for which they are invoked.

>>> class C:
...     @classmethod
...     def foo(cls, y):
...         print "classmethod", cls, y

>>> C.foo(1)
classmethod test.test_descrtut.C 1
>>> c = C()
>>> c.foo(1)
classmethod test.test_descrtut.C 1

>>> class D(C):
...     pass

>>> D.foo(1)
classmethod test.test_descrtut.D 1
>>> d = D()
>>> d.foo(1)
classmethod test.test_descrtut.D 1

This prints "classmethod test.test_descrtut.D 1" both times; in other words,
the class passed as the first argument of foo() is the class involved in the
call, not the class involved in the definition of foo().

But notice this:

>>> class E(C):
...     @classmethod
...     def foo(cls, y): # override C.foo
...         print "E.foo() called"
...         C.foo(y)

>>> E.foo(1)
E.foo() called
classmethod test.test_descrtut.C 1
>>> e = E()
>>> e.foo(1)
E.foo() called
classmethod test.test_descrtut.C 1

In this example, the call to C.foo() from E.foo() will see class C as its
first argument, not class E. This is to be expected, since the call
specifies the class C. But it stresses the difference between these class
methods and methods defined in metaclasses (where an upcall to a metamethod
would pass the target class as an explicit first argument).
"""

test_5 = """

Attributes defined by get/set methods


>>> class property(object):
...
...     def __init__(self, get, set=None):
...         self.__get = get
...         self.__set = set
...
...     def __get__(self, inst, type=None):
...         return self.__get(inst)
...
...     def __set__(self, inst, value):
...         if self.__set is None:
...             raise AttributeError, "this attribute is read-only"
...         return self.__set(inst, value)

Now let's define a class with an attribute x defined by a pair of methods,
getx() and setx():

>>> class C(object):
...
...     def __init__(self):
...         self.__x = 0
...
...     def getx(self):
...         return self.__x
...
...     def setx(self, x):
...         if x < 0: x = 0
...         self.__x = x
...
...     x = property(getx, setx)

Here's a small demonstration:

>>> a = C()
>>> a.x = 10
>>> print a.x
10
>>> a.x = -10
>>> print a.x
0
>>>

Hmm -- property is builtin now, so let's try it that way too.

>>> del property  # unmask the builtin
>>> property
<type 'property'>

>>> class C(object):
...     def __init__(self):
...         self.__x = 0
...     def getx(self):
...         return self.__x
...     def setx(self, x):
...         if x < 0: x = 0
...         self.__x = x
...     x = property(getx, setx)


>>> a = C()
>>> a.x = 10
>>> print a.x
10
>>> a.x = -10
>>> print a.x
0
>>>
"""

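# Quick sketch (illustrative only, not exercised by the doctests): the
# hand-rolled `property` class in test_5 works because any class attribute
# whose type defines __get__/__set__ is a descriptor, and instance attribute
# access is routed through those hooks.  The names below are made up for the
# sketch.
class _ConstantDescr(object):
    def __get__(self, inst, type=None):
        return 42

class _Holder(object):
    answer = _ConstantDescr()

assert _Holder().answer == 42
del _ConstantDescr, _Holder
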
test_6 = """

Method resolution order

This example is implicit in the writeup.

>>> class A:    # classic class
...     def save(self):
...         print "called A.save()"
>>> class B(A):
...     pass
>>> class C(A):
...     def save(self):
...         print "called C.save()"
>>> class D(B, C):
...     pass

>>> D().save()
called A.save()

>>> class A(object):  # new class
...     def save(self):
...         print "called A.save()"
>>> class B(A):
...     pass
>>> class C(A):
...     def save(self):
...         print "called C.save()"
>>> class D(B, C):
...     pass

>>> D().save()
called C.save()
"""

class A(object):
    def m(self):
        return "A"

class B(A):
    def m(self):
        return "B" + super(B, self).m()

class C(A):
    def m(self):
        return "C" + super(C, self).m()

class D(C, B):
    def m(self):
        return "D" + super(D, self).m()

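# Quick sanity sketch (illustrative only, not exercised by the doctests): the
# MRO of D is D, C, B, A, object, so the cooperative super() chain in D.m()
# visits C, then B, then A, which is what produces "DCBA" in test_7.
assert [klass.__name__ for klass in D.__mro__] == ['D', 'C', 'B', 'A', 'object']
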
test_7 = """

Cooperative methods and "super"

>>> print D().m() # "DCBA"
DCBA
"""

test_8 = """

Backwards incompatibilities

>>> class A:
...     def foo(self):
...         print "called A.foo()"

>>> class B(A):
...     pass

>>> class C(A):
...     def foo(self):
...         B.foo(self)

>>> C().foo()
Traceback (most recent call last):
 ...
TypeError: unbound method foo() must be called with B instance as first argument (got C instance instead)

>>> class C(A):
...     def foo(self):
...         A.foo(self)
>>> C().foo()
called A.foo()
"""

__test__ = {"tut1": test_1,
            "tut2": test_2,
            "tut3": test_3,
            "tut4": test_4,
            "tut5": test_5,
            "tut6": test_6,
            "tut7": test_7,
            "tut8": test_8}

# Magic test name that regrtest.py invokes *after* importing this module.
# This worms around a bootstrap problem.
# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
# so this works as expected in both ways of running regrtest.
def test_main(verbose=None):
    # Obscure: import this module as test.test_descrtut instead of as
    # plain test_descrtut because the name of this module works its way
    # into the doctest examples, and unless the full test.test_descrtut
    # business is used the name can change depending on how the test is
    # invoked.
    from test import test_support, test_descrtut
    test_support.run_doctest(test_descrtut, verbose)

# This part isn't needed for regrtest, but for running the test directly.
if __name__ == "__main__":
    test_main(1)