/* Frame object implementation */

#include "Python.h"

#include "code.h"
#include "frameobject.h"
#include "opcode.h"
#include "structmember.h"

#undef MIN
#undef MAX
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

#define OFF(x) offsetof(PyFrameObject, x)

static PyMemberDef frame_memberlist[] = {
    {"f_back",          T_OBJECT,   OFF(f_back),          RO},
    {"f_code",          T_OBJECT,   OFF(f_code),          RO},
    {"f_builtins",      T_OBJECT,   OFF(f_builtins),      RO},
    {"f_globals",       T_OBJECT,   OFF(f_globals),       RO},
    {"f_lasti",         T_INT,      OFF(f_lasti),         RO},
    {"f_exc_type",      T_OBJECT,   OFF(f_exc_type)},
    {"f_exc_value",     T_OBJECT,   OFF(f_exc_value)},
    {"f_exc_traceback", T_OBJECT,   OFF(f_exc_traceback)},
    {NULL}      /* Sentinel */
};

static PyObject *
frame_getlocals(PyFrameObject *f, void *closure)
{
    PyFrame_FastToLocals(f);
    Py_INCREF(f->f_locals);
    return f->f_locals;
}

static PyObject *
frame_getlineno(PyFrameObject *f, void *closure)
{
    int lineno;

    if (f->f_trace)
        lineno = f->f_lineno;
    else
        lineno = PyCode_Addr2Line(f->f_code, f->f_lasti);

    return PyInt_FromLong(lineno);
}

/* Setter for f_lineno - you can set f_lineno from within a trace function in
 * order to jump to a given line of code, subject to some restrictions.  Most
 * lines are OK to jump to because they don't make any assumptions about the
 * state of the stack (obvious because you could remove the line and the code
 * would still work without any stack errors), but there are some constructs
 * that limit jumping:
 *
 *  o Lines with an 'except' statement on them can't be jumped to, because
 *    they expect an exception to be on the top of the stack.
 *  o Lines that live in a 'finally' block can't be jumped from or to, since
 *    the END_FINALLY expects to clean up the stack after the 'try' block.
 *  o 'try'/'for'/'while' blocks can't be jumped into because the blockstack
 *    needs to be set up before their code runs, and for 'for' loops the
 *    iterator needs to be on the stack.
 */
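
/* An illustrative sketch (not part of this file) of how the setter below is
 * reached from Python.  The trace-function name and the line numbers are
 * hypothetical; the fixed pieces are sys.settrace() and frame.f_lineno:
 *
 *     import sys
 *
 *     def jumping_trace(frame, event, arg):
 *         if event == 'line' and frame.f_lineno == 3:
 *             frame.f_lineno = 7      # jump, subject to the rules above
 *         return jumping_trace
 *
 *     sys.settrace(jumping_trace)
 *
 * An illegal jump (for example, into the body of a 'for' loop) makes the
 * assignment raise ValueError, as implemented below.
 */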
static int
frame_setlineno(PyFrameObject *f, PyObject* p_new_lineno)
{
    int new_lineno = 0;             /* The new value of f_lineno */
    int new_lasti = 0;              /* The new value of f_lasti */
    int new_iblock = 0;             /* The new value of f_iblock */
    unsigned char *code = NULL;     /* The bytecode for the frame... */
    Py_ssize_t code_len = 0;        /* ...and its length */
    char *lnotab = NULL;            /* Iterating over co_lnotab */
    Py_ssize_t lnotab_len = 0;      /* (ditto) */
    int offset = 0;                 /* (ditto) */
    int line = 0;                   /* (ditto) */
    int addr = 0;                   /* (ditto) */
    int min_addr = 0;               /* Scanning the SETUPs and POPs */
    int max_addr = 0;               /* (ditto) */
    int delta_iblock = 0;           /* (ditto) */
    int min_delta_iblock = 0;       /* (ditto) */
    int min_iblock = 0;             /* (ditto) */
    int f_lasti_setup_addr = 0;     /* Policing no-jump-into-finally */
    int new_lasti_setup_addr = 0;   /* (ditto) */
    int blockstack[CO_MAXBLOCKS];   /* Walking the 'finally' blocks */
    int in_finally[CO_MAXBLOCKS];   /* (ditto) */
    int blockstack_top = 0;         /* (ditto) */
    unsigned char setup_op = 0;     /* (ditto) */

    /* f_lineno must be an integer. */
    if (!PyInt_Check(p_new_lineno)) {
        PyErr_SetString(PyExc_ValueError,
                        "lineno must be an integer");
        return -1;
    }

    /* You can only do this from within a trace function, not via
     * _getframe or similar hackery. */
    if (!f->f_trace) {
        PyErr_Format(PyExc_ValueError,
                     "f_lineno can only be set by a trace function");
        return -1;
    }

    /* Fail if the line comes before the start of the code block. */
    new_lineno = (int) PyInt_AsLong(p_new_lineno);
    if (new_lineno < f->f_code->co_firstlineno) {
        PyErr_Format(PyExc_ValueError,
                     "line %d comes before the current code block",
                     new_lineno);
        return -1;
    }

    /* Find the bytecode offset for the start of the given line, or the
     * first code-owning line after it. */
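    /* A rough sketch of the co_lnotab encoding that the loop below relies
     * on: co_lnotab is a string of (bytecode-offset delta, line-number
     * delta) byte pairs, accumulated starting from offset 0 and line
     * co_firstlineno.  For example, the pairs (6, 1), (8, 2) mean that
     * line co_firstlineno+1 starts at bytecode offset 6 and line
     * co_firstlineno+3 starts at offset 14. */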
    PyString_AsStringAndSize(f->f_code->co_lnotab, &lnotab, &lnotab_len);
    addr = 0;
    line = f->f_code->co_firstlineno;
    new_lasti = -1;
    for (offset = 0; offset < lnotab_len; offset += 2) {
        addr += lnotab[offset];
        line += lnotab[offset+1];
        if (line >= new_lineno) {
            new_lasti = addr;
            new_lineno = line;
            break;
        }
    }

    /* If we didn't reach the requested line, return an error. */
    if (new_lasti == -1) {
        PyErr_Format(PyExc_ValueError,
                     "line %d comes after the current code block",
                     new_lineno);
        return -1;
    }

    /* We're now ready to look at the bytecode. */
    PyString_AsStringAndSize(f->f_code->co_code, (char **)&code, &code_len);
    min_addr = MIN(new_lasti, f->f_lasti);
    max_addr = MAX(new_lasti, f->f_lasti);

    /* You can't jump onto a line with an 'except' statement on it -
     * they expect to have an exception on the top of the stack, which
     * won't be true if you jump to them.  They always start with code
     * that either pops the exception using POP_TOP (plain 'except:'
     * lines do this) or duplicates the exception on the stack using
     * DUP_TOP (if there's an exception type specified).  See compile.c,
     * 'com_try_except' for the full details.  There aren't any other
     * cases (AFAIK) where a line's code can start with DUP_TOP or
     * POP_TOP, but if any ever appear, they'll be subject to the same
     * restriction (but with a different error message). */
    if (code[new_lasti] == DUP_TOP || code[new_lasti] == POP_TOP) {
        PyErr_SetString(PyExc_ValueError,
                        "can't jump to 'except' line as there's no exception");
        return -1;
    }

    /* You can't jump into or out of a 'finally' block because the 'try'
     * block leaves something on the stack for the END_FINALLY to clean
     * up.  So we walk the bytecode, maintaining a simulated blockstack.
     * When we reach the old or new address and it's in a 'finally' block
     * we note the address of the corresponding SETUP_FINALLY.  The jump
     * is only legal if neither address is in a 'finally' block or
     * they're both in the same one.  'blockstack' is a stack of the
     * bytecode addresses of the SETUP_X opcodes, and 'in_finally' tracks
     * whether we're in a 'finally' block at each blockstack level. */
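    /* For orientation, a schematic (approximate; details vary) of the
     * bytecode this walk has to cope with:
     *
     *     try/finally:                    try/except:
     *         SETUP_FINALLY L                 SETUP_EXCEPT L
     *         ...body...                      ...body...
     *         POP_BLOCK                       POP_BLOCK
     *         LOAD_CONST None                 JUMP_FORWARD end
     *     L:  ...finally body...          L:  DUP_TOP / POP_TOP, handlers...
     *         END_FINALLY                     END_FINALLY
     *
     * which is why a POP_BLOCK whose SETUP was SETUP_FINALLY means "now
     * inside the 'finally' body", and why an END_FINALLY that has no
     * matching SETUP_FINALLY on the simulated stack (the try/except
     * case) is simply ignored. */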
    f_lasti_setup_addr = -1;
    new_lasti_setup_addr = -1;
    memset(blockstack, '\0', sizeof(blockstack));
    memset(in_finally, '\0', sizeof(in_finally));
    blockstack_top = 0;
    for (addr = 0; addr < code_len; addr++) {
        unsigned char op = code[addr];
        switch (op) {
        case SETUP_LOOP:
        case SETUP_EXCEPT:
        case SETUP_FINALLY:
            blockstack[blockstack_top++] = addr;
            in_finally[blockstack_top-1] = 0;
            break;

        case POP_BLOCK:
            assert(blockstack_top > 0);
            setup_op = code[blockstack[blockstack_top-1]];
            if (setup_op == SETUP_FINALLY) {
                in_finally[blockstack_top-1] = 1;
            }
            else {
                blockstack_top--;
            }
            break;

        case END_FINALLY:
            /* Ignore END_FINALLYs for SETUP_EXCEPTs - they exist
             * in the bytecode but don't correspond to an actual
             * 'finally' block.  (If blockstack_top is 0, we must
             * be seeing such an END_FINALLY.) */
            if (blockstack_top > 0) {
                setup_op = code[blockstack[blockstack_top-1]];
                if (setup_op == SETUP_FINALLY) {
                    blockstack_top--;
                }
            }
            break;
        }

        /* For the addresses we're interested in, see whether they're
         * within a 'finally' block and if so, remember the address
         * of the SETUP_FINALLY. */
        if (addr == new_lasti || addr == f->f_lasti) {
            int i = 0;
            int setup_addr = -1;
            for (i = blockstack_top-1; i >= 0; i--) {
                if (in_finally[i]) {
                    setup_addr = blockstack[i];
                    break;
                }
            }

            if (setup_addr != -1) {
                if (addr == new_lasti) {
                    new_lasti_setup_addr = setup_addr;
                }

                if (addr == f->f_lasti) {
                    f_lasti_setup_addr = setup_addr;
                }
            }
        }

        if (op >= HAVE_ARGUMENT) {
            addr += 2;
        }
    }

    /* Verify that the blockstack tracking code didn't get lost. */
    assert(blockstack_top == 0);

    /* After all that, are we jumping into / out of a 'finally' block? */
    if (new_lasti_setup_addr != f_lasti_setup_addr) {
        PyErr_SetString(PyExc_ValueError,
                        "can't jump into or out of a 'finally' block");
        return -1;
    }

    /* Police block-jumping (you can't jump into the middle of a block)
     * and ensure that the blockstack finishes up in a sensible state (by
     * popping any blocks we're jumping out of).  We look at all the
     * blockstack operations between the current position and the new
     * one, and keep track of how many blocks we drop out of on the way.
     * By also keeping track of the lowest blockstack position we see, we
     * can tell whether the jump goes into any blocks without coming out
     * again - in that case we raise an exception below. */
    delta_iblock = 0;
    for (addr = min_addr; addr < max_addr; addr++) {
        unsigned char op = code[addr];
        switch (op) {
        case SETUP_LOOP:
        case SETUP_EXCEPT:
        case SETUP_FINALLY:
            delta_iblock++;
            break;

        case POP_BLOCK:
            delta_iblock--;
            break;
        }

        min_delta_iblock = MIN(min_delta_iblock, delta_iblock);

        if (op >= HAVE_ARGUMENT) {
            addr += 2;
        }
    }

    /* Derive the absolute iblock values from the deltas. */
    min_iblock = f->f_iblock + min_delta_iblock;
    if (new_lasti > f->f_lasti) {
        /* Forwards jump. */
        new_iblock = f->f_iblock + delta_iblock;
    }
    else {
        /* Backwards jump. */
        new_iblock = f->f_iblock - delta_iblock;
    }

    /* Are we jumping into a block? */
    if (new_iblock > min_iblock) {
        PyErr_SetString(PyExc_ValueError,
                        "can't jump into the middle of a block");
        return -1;
    }

    /* Pop any blocks that we're jumping out of. */
    while (f->f_iblock > new_iblock) {
        PyTryBlock *b = &f->f_blockstack[--f->f_iblock];
        while ((f->f_stacktop - f->f_valuestack) > b->b_level) {
            PyObject *v = (*--f->f_stacktop);
            Py_DECREF(v);
        }
    }

    /* Finally set the new f_lineno and f_lasti and return OK. */
    f->f_lineno = new_lineno;
    f->f_lasti = new_lasti;
    return 0;
}

static PyObject *
frame_gettrace(PyFrameObject *f, void *closure)
{
    PyObject* trace = f->f_trace;

    if (trace == NULL)
        trace = Py_None;

    Py_INCREF(trace);

    return trace;
}

static int
frame_settrace(PyFrameObject *f, PyObject* v, void *closure)
{
    /* We rely on f_lineno being accurate when f_trace is set. */

    PyObject* old_value = f->f_trace;

    Py_XINCREF(v);
    f->f_trace = v;

    if (v != NULL)
        f->f_lineno = PyCode_Addr2Line(f->f_code, f->f_lasti);

    Py_XDECREF(old_value);

    return 0;
}

static PyObject *
frame_getrestricted(PyFrameObject *f, void *closure)
{
    return PyBool_FromLong(PyFrame_IsRestricted(f));
}

static PyGetSetDef frame_getsetlist[] = {
    {"f_locals",        (getter)frame_getlocals, NULL, NULL},
    {"f_lineno",        (getter)frame_getlineno,
                        (setter)frame_setlineno, NULL},
    {"f_trace",         (getter)frame_gettrace, (setter)frame_settrace, NULL},
    {"f_restricted",    (getter)frame_getrestricted, NULL, NULL},
    {0}
};

/* Stack frames are allocated and deallocated at a considerable rate.
   In an attempt to improve the speed of function calls, we:

   1. Hold a single "zombie" frame on each code object.  This retains
      the allocated and initialised frame object from an invocation of
      the code object.  The zombie is reanimated the next time we need a
      frame object for that code object.  Doing this saves the malloc/
      realloc required when using a free_list frame that isn't the
      correct size.  It also saves some field initialisation.

      In zombie mode, no field of PyFrameObject holds a reference, but
      the following fields are still valid:

        * ob_type, ob_size, f_code, f_valuestack;

        * f_locals, f_trace,
          f_exc_type, f_exc_value, f_exc_traceback are NULL;

        * f_localsplus does not require re-allocation and
          the local variables in f_localsplus are NULL.

   2. We also maintain a separate free list of stack frames (just like
      integers are allocated in a special way -- see intobject.c).  When
      a stack frame is on the free list, only the following members have
      a meaning:
        ob_type             == &PyFrame_Type
        f_back              next item on free list, or NULL
        f_stacksize         size of value stack
        ob_size             size of localsplus
      Note that the value and block stacks are preserved -- this can save
      another malloc() call or two (and two free() calls as well!).
      Also note that, unlike for integers, each frame object is a
      malloc'ed object in its own right -- it is only the actual calls to
      malloc() that we are trying to save here, not the administration.
      After all, while a typical program may make millions of calls, a
      call depth of more than 20 or 30 is probably already exceptional
      unless the program contains run-away recursion.  I hope.

   Later, PyFrame_MAXFREELIST was added to bound the # of frames saved on
   free_list.  Else programs creating lots of cyclic trash involving
   frames could provoke free_list into growing without bound.
*/
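
/* Schematically (a summary of the policy above as implemented in
 * frame_dealloc() and PyFrame_New() below, not additional machinery):
 *
 *     deallocation:  zombie slot empty?        -> become co->co_zombieframe
 *                    else free_list not full?  -> push onto free_list
 *                    else                      -> PyObject_GC_Del()
 *
 *     allocation:    co->co_zombieframe set?   -> reanimate the zombie
 *                    else free_list non-empty? -> pop it, resize if needed
 *                    else                      -> PyObject_GC_NewVar()
 */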

static PyFrameObject *free_list = NULL;
static int numfree = 0;         /* number of frames currently in free_list */
/* max value for numfree */
#define PyFrame_MAXFREELIST 200

static void
frame_dealloc(PyFrameObject *f)
{
    PyObject **p, **valuestack;
    PyCodeObject *co;

    PyObject_GC_UnTrack(f);
    Py_TRASHCAN_SAFE_BEGIN(f)
    /* Kill all local variables */
    valuestack = f->f_valuestack;
    for (p = f->f_localsplus; p < valuestack; p++)
        Py_CLEAR(*p);

    /* Free stack */
    if (f->f_stacktop != NULL) {
        for (p = valuestack; p < f->f_stacktop; p++)
            Py_XDECREF(*p);
    }

    Py_XDECREF(f->f_back);
    Py_DECREF(f->f_builtins);
    Py_DECREF(f->f_globals);
    Py_CLEAR(f->f_locals);
    Py_CLEAR(f->f_trace);
    Py_CLEAR(f->f_exc_type);
    Py_CLEAR(f->f_exc_value);
    Py_CLEAR(f->f_exc_traceback);

    co = f->f_code;
    if (co->co_zombieframe == NULL)
        co->co_zombieframe = f;
    else if (numfree < PyFrame_MAXFREELIST) {
        ++numfree;
        f->f_back = free_list;
        free_list = f;
    }
    else
        PyObject_GC_Del(f);

    Py_DECREF(co);
    Py_TRASHCAN_SAFE_END(f)
}

static int
frame_traverse(PyFrameObject *f, visitproc visit, void *arg)
{
    PyObject **fastlocals, **p;
    int i, slots;

    Py_VISIT(f->f_back);
    Py_VISIT(f->f_code);
    Py_VISIT(f->f_builtins);
    Py_VISIT(f->f_globals);
    Py_VISIT(f->f_locals);
    Py_VISIT(f->f_trace);
    Py_VISIT(f->f_exc_type);
    Py_VISIT(f->f_exc_value);
    Py_VISIT(f->f_exc_traceback);

    /* locals */
    slots = f->f_code->co_nlocals +
            PyTuple_GET_SIZE(f->f_code->co_cellvars) +
            PyTuple_GET_SIZE(f->f_code->co_freevars);
    fastlocals = f->f_localsplus;
    for (i = slots; --i >= 0; ++fastlocals)
        Py_VISIT(*fastlocals);

    /* stack */
    if (f->f_stacktop != NULL) {
        for (p = f->f_valuestack; p < f->f_stacktop; p++)
            Py_VISIT(*p);
    }
    return 0;
}

static void
frame_clear(PyFrameObject *f)
{
    PyObject **fastlocals, **p, **oldtop;
    int i, slots;

    /* Before anything else, make sure that this frame is clearly marked
     * as being defunct!  Else, e.g., a generator reachable from this
     * frame may also point to this frame, believe itself to still be
     * active, and try cleaning up this frame again.
     */
    oldtop = f->f_stacktop;
    f->f_stacktop = NULL;

    Py_CLEAR(f->f_exc_type);
    Py_CLEAR(f->f_exc_value);
    Py_CLEAR(f->f_exc_traceback);
    Py_CLEAR(f->f_trace);

    /* locals */
    slots = f->f_code->co_nlocals +
            PyTuple_GET_SIZE(f->f_code->co_cellvars) +
            PyTuple_GET_SIZE(f->f_code->co_freevars);
    fastlocals = f->f_localsplus;
    for (i = slots; --i >= 0; ++fastlocals)
        Py_CLEAR(*fastlocals);

    /* stack */
    if (oldtop != NULL) {
        for (p = f->f_valuestack; p < oldtop; p++)
            Py_CLEAR(*p);
    }
}

static PyObject *
frame_sizeof(PyFrameObject *f)
{
    Py_ssize_t res, extras, ncells, nfrees;

    ncells = PyTuple_GET_SIZE(f->f_code->co_cellvars);
    nfrees = PyTuple_GET_SIZE(f->f_code->co_freevars);
    extras = f->f_code->co_stacksize + f->f_code->co_nlocals +
             ncells + nfrees;
    /* subtract one as it is already included in PyFrameObject */
    res = sizeof(PyFrameObject) + (extras-1) * sizeof(PyObject *);

    return PyInt_FromSsize_t(res);
}
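
/* Worked example of the formula above (illustrative numbers only): for a
 * code object with co_stacksize 20, co_nlocals 3, and no cell or free
 * variables, extras is 23, so __sizeof__() reports
 * sizeof(PyFrameObject) + 22 * sizeof(PyObject *). */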

PyDoc_STRVAR(sizeof__doc__,
"F.__sizeof__() -> size of F in memory, in bytes");

static PyMethodDef frame_methods[] = {
    {"__sizeof__",      (PyCFunction)frame_sizeof,      METH_NOARGS,
     sizeof__doc__},
    {NULL,              NULL}   /* sentinel */
};

PyTypeObject PyFrame_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "frame",
    sizeof(PyFrameObject),
    sizeof(PyObject *),
    (destructor)frame_dealloc,                  /* tp_dealloc */
    0,                                          /* tp_print */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_compare */
    0,                                          /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    0,                                          /* tp_hash */
    0,                                          /* tp_call */
    0,                                          /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    PyObject_GenericSetAttr,                    /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,    /* tp_flags */
    0,                                          /* tp_doc */
    (traverseproc)frame_traverse,               /* tp_traverse */
    (inquiry)frame_clear,                       /* tp_clear */
    0,                                          /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    frame_methods,                              /* tp_methods */
    frame_memberlist,                           /* tp_members */
    frame_getsetlist,                           /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
};

static PyObject *builtin_object;

int _PyFrame_Init()
{
    builtin_object = PyString_InternFromString("__builtins__");
    return (builtin_object != NULL);
}

PyFrameObject *
PyFrame_New(PyThreadState *tstate, PyCodeObject *code, PyObject *globals,
            PyObject *locals)
{
    PyFrameObject *back = tstate->frame;
    PyFrameObject *f;
    PyObject *builtins;
    Py_ssize_t i;

#ifdef Py_DEBUG
    if (code == NULL || globals == NULL || !PyDict_Check(globals) ||
        (locals != NULL && !PyMapping_Check(locals))) {
        PyErr_BadInternalCall();
        return NULL;
    }
#endif
    if (back == NULL || back->f_globals != globals) {
        builtins = PyDict_GetItem(globals, builtin_object);
        if (builtins) {
            if (PyModule_Check(builtins)) {
                builtins = PyModule_GetDict(builtins);
                assert(!builtins || PyDict_Check(builtins));
            }
            else if (!PyDict_Check(builtins))
                builtins = NULL;
        }
        if (builtins == NULL) {
            /* No builtins!  Make up a minimal one
               Give them 'None', at least. */
            builtins = PyDict_New();
            if (builtins == NULL ||
                PyDict_SetItemString(
                    builtins, "None", Py_None) < 0)
                return NULL;
        }
        else
            Py_INCREF(builtins);
    }
    else {
        /* If we share the globals, we share the builtins.
           Save a lookup and a call. */
        builtins = back->f_builtins;
        assert(builtins != NULL && PyDict_Check(builtins));
        Py_INCREF(builtins);
    }
    if (code->co_zombieframe != NULL) {
        f = code->co_zombieframe;
        code->co_zombieframe = NULL;
        _Py_NewReference((PyObject *)f);
        assert(f->f_code == code);
    }
    else {
        Py_ssize_t extras, ncells, nfrees;
        ncells = PyTuple_GET_SIZE(code->co_cellvars);
        nfrees = PyTuple_GET_SIZE(code->co_freevars);
        extras = code->co_stacksize + code->co_nlocals + ncells +
                 nfrees;
        if (free_list == NULL) {
            f = PyObject_GC_NewVar(PyFrameObject, &PyFrame_Type,
                                   extras);
            if (f == NULL) {
                Py_DECREF(builtins);
                return NULL;
            }
        }
        else {
            assert(numfree > 0);
            --numfree;
            f = free_list;
            free_list = free_list->f_back;
            if (Py_SIZE(f) < extras) {
                f = PyObject_GC_Resize(PyFrameObject, f, extras);
                if (f == NULL) {
                    Py_DECREF(builtins);
                    return NULL;
                }
            }
            _Py_NewReference((PyObject *)f);
        }

        f->f_code = code;
        extras = code->co_nlocals + ncells + nfrees;
        f->f_valuestack = f->f_localsplus + extras;
        for (i = 0; i < extras; i++)
            f->f_localsplus[i] = NULL;
        f->f_locals = NULL;
        f->f_trace = NULL;
        f->f_exc_type = f->f_exc_value = f->f_exc_traceback = NULL;
    }
    f->f_stacktop = f->f_valuestack;
    f->f_builtins = builtins;
    Py_XINCREF(back);
    f->f_back = back;
    Py_INCREF(code);
    Py_INCREF(globals);
    f->f_globals = globals;
    /* Most functions have CO_NEWLOCALS and CO_OPTIMIZED set. */
    if ((code->co_flags & (CO_NEWLOCALS | CO_OPTIMIZED)) ==
        (CO_NEWLOCALS | CO_OPTIMIZED))
        ;  /* f_locals = NULL; will be set by PyFrame_FastToLocals() */
    else if (code->co_flags & CO_NEWLOCALS) {
        locals = PyDict_New();
        if (locals == NULL) {
            Py_DECREF(f);
            return NULL;
        }
        f->f_locals = locals;
    }
    else {
        if (locals == NULL)
            locals = globals;
        Py_INCREF(locals);
        f->f_locals = locals;
    }
    f->f_tstate = tstate;

    f->f_lasti = -1;
    f->f_lineno = code->co_firstlineno;
    f->f_iblock = 0;

    _PyObject_GC_TRACK(f);
    return f;
}

/* Block management */

void
PyFrame_BlockSetup(PyFrameObject *f, int type, int handler, int level)
{
    PyTryBlock *b;
    if (f->f_iblock >= CO_MAXBLOCKS)
        Py_FatalError("XXX block stack overflow");
    b = &f->f_blockstack[f->f_iblock++];
    b->b_type = type;
    b->b_level = level;
    b->b_handler = handler;
}

PyTryBlock *
PyFrame_BlockPop(PyFrameObject *f)
{
    PyTryBlock *b;
    if (f->f_iblock <= 0)
        Py_FatalError("XXX block stack underflow");
    b = &f->f_blockstack[--f->f_iblock];
    return b;
}

/* Convert between "fast" version of locals and dictionary version.

   map and values are input arguments.  map is a tuple of strings.
   values is an array of PyObject*.  At index i, map[i] is the name of
   the variable with value values[i].  The function copies the first
   nmap variables from map/values into dict.  If values[i] is NULL,
   the variable is deleted from dict.

   If deref is true, then the values being copied are cell variables
   and the value is extracted from the cell variable before being put
   in dict.

   Exceptions raised while modifying the dict are silently ignored,
   because there is no good way to report them.
*/
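
/* A concrete (hypothetical) illustration of the above: with
 * map = ('x', 'y'), values = {pointer-to-42, NULL}, nmap = 2 and
 * deref = 0, map_to_dict() sets dict['x'] = 42 and deletes dict['y']
 * if it is present. */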

static void
map_to_dict(PyObject *map, Py_ssize_t nmap, PyObject *dict, PyObject **values,
            int deref)
{
    Py_ssize_t j;
    assert(PyTuple_Check(map));
    assert(PyDict_Check(dict));
    assert(PyTuple_Size(map) >= nmap);
    for (j = nmap; --j >= 0; ) {
        PyObject *key = PyTuple_GET_ITEM(map, j);
        PyObject *value = values[j];
        assert(PyString_Check(key));
        if (deref) {
            assert(PyCell_Check(value));
            value = PyCell_GET(value);
        }
        if (value == NULL) {
            if (PyObject_DelItem(dict, key) != 0)
                PyErr_Clear();
        }
        else {
            if (PyObject_SetItem(dict, key, value) != 0)
                PyErr_Clear();
        }
    }
}

/* Copy values from the "locals" dict into the fast locals.

   dict is an input argument containing string keys representing
   variable names and arbitrary PyObject* as values.

   map and values are input arguments.  map is a tuple of strings.
   values is an array of PyObject*.  At index i, map[i] is the name of
   the variable whose fast slot is values[i].  The function copies the
   first nmap variables from dict into values; note that, unlike
   map_to_dict() above, dict is the source here and values is the
   destination.

   If deref is true, then the fast slots being written are cell
   variables, and the value looked up in dict is stored into the cell
   rather than into the slot itself.  If clear is true, then variables
   in map but missing from dict have their slot (or cell) set to NULL;
   if clear is false, variables missing from dict are left alone.

   Exceptions raised while reading the dict are silently ignored,
   because there is no good way to report them.
*/

static void
dict_to_map(PyObject *map, Py_ssize_t nmap, PyObject *dict, PyObject **values,
            int deref, int clear)
{
    Py_ssize_t j;
    assert(PyTuple_Check(map));
    assert(PyDict_Check(dict));
    assert(PyTuple_Size(map) >= nmap);
    for (j = nmap; --j >= 0; ) {
        PyObject *key = PyTuple_GET_ITEM(map, j);
        PyObject *value = PyObject_GetItem(dict, key);
        assert(PyString_Check(key));
        /* We only care about NULLs if clear is true. */
        if (value == NULL) {
            PyErr_Clear();
            if (!clear)
                continue;
        }
        if (deref) {
            assert(PyCell_Check(values[j]));
            if (PyCell_GET(values[j]) != value) {
                if (PyCell_Set(values[j], value) < 0)
                    PyErr_Clear();
            }
        } else if (values[j] != value) {
            Py_XINCREF(value);
            Py_XDECREF(values[j]);
            values[j] = value;
        }
        Py_XDECREF(value);
    }
}

void
PyFrame_FastToLocals(PyFrameObject *f)
{
    /* Merge fast locals into f->f_locals */
    PyObject *locals, *map;
    PyObject **fast;
    PyObject *error_type, *error_value, *error_traceback;
    PyCodeObject *co;
    Py_ssize_t j;
    int ncells, nfreevars;
    if (f == NULL)
        return;
    locals = f->f_locals;
    if (locals == NULL) {
        locals = f->f_locals = PyDict_New();
        if (locals == NULL) {
            PyErr_Clear(); /* Can't report it :-( */
            return;
        }
    }
    co = f->f_code;
    map = co->co_varnames;
    if (!PyTuple_Check(map))
        return;
    PyErr_Fetch(&error_type, &error_value, &error_traceback);
    fast = f->f_localsplus;
    j = PyTuple_GET_SIZE(map);
    if (j > co->co_nlocals)
        j = co->co_nlocals;
    if (co->co_nlocals)
        map_to_dict(map, j, locals, fast, 0);
    ncells = PyTuple_GET_SIZE(co->co_cellvars);
    nfreevars = PyTuple_GET_SIZE(co->co_freevars);
    if (ncells || nfreevars) {
        map_to_dict(co->co_cellvars, ncells,
                    locals, fast + co->co_nlocals, 1);
        /* If the namespace is unoptimized, then one of the
           following cases applies:
           1. It does not contain free variables, because it
              uses import * or is a top-level namespace.
           2. It is a class namespace.
           We don't want to accidentally copy free variables
           into the locals dict used by the class.
        */
        if (co->co_flags & CO_OPTIMIZED) {
            map_to_dict(co->co_freevars, nfreevars,
                        locals, fast + co->co_nlocals + ncells, 1);
        }
    }
    PyErr_Restore(error_type, error_value, error_traceback);
}
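
/* Note on observable behaviour (illustrative, Python-level): because
 * frame_getlocals() routes through PyFrame_FastToLocals(), locals()
 * inside an optimized function returns a snapshot copied out of the
 * fast slots; assigning into that dict does not rebind the fast locals
 * unless something later calls PyFrame_LocalsToFast() on the frame:
 *
 *     def f():
 *         x = 1
 *         locals()['x'] = 2    # does not rebind x in CPython
 *         return x             # still 1
 */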

void
PyFrame_LocalsToFast(PyFrameObject *f, int clear)
{
    /* Merge f->f_locals into fast locals */
    PyObject *locals, *map;
    PyObject **fast;
    PyObject *error_type, *error_value, *error_traceback;
    PyCodeObject *co;
    Py_ssize_t j;
    int ncells, nfreevars;
    if (f == NULL)
        return;
    locals = f->f_locals;
    co = f->f_code;
    map = co->co_varnames;
    if (locals == NULL)
        return;
    if (!PyTuple_Check(map))
        return;
    PyErr_Fetch(&error_type, &error_value, &error_traceback);
    fast = f->f_localsplus;
    j = PyTuple_GET_SIZE(map);
    if (j > co->co_nlocals)
        j = co->co_nlocals;
    if (co->co_nlocals)
        dict_to_map(co->co_varnames, j, locals, fast, 0, clear);
    ncells = PyTuple_GET_SIZE(co->co_cellvars);
    nfreevars = PyTuple_GET_SIZE(co->co_freevars);
    if (ncells || nfreevars) {
        dict_to_map(co->co_cellvars, ncells,
                    locals, fast + co->co_nlocals, 1, clear);
        /* Same test as in PyFrame_FastToLocals() above. */
        if (co->co_flags & CO_OPTIMIZED) {
            dict_to_map(co->co_freevars, nfreevars,
                        locals, fast + co->co_nlocals + ncells, 1,
                        clear);
        }
    }
    PyErr_Restore(error_type, error_value, error_traceback);
}

/* Clear out the free list */
int
PyFrame_ClearFreeList(void)
{
    int freelist_size = numfree;

    while (free_list != NULL) {
        PyFrameObject *f = free_list;
        free_list = free_list->f_back;
        PyObject_GC_Del(f);
        --numfree;
    }
    assert(numfree == 0);
    return freelist_size;
}

void
PyFrame_Fini(void)
{
    (void)PyFrame_ClearFreeList();
    Py_XDECREF(builtin_object);
    builtin_object = NULL;
}