/* Execute compiled code */
/* XXX TO DO:
XXX speed up searching for keywords by using a dictionary
XXX document it!
*/
/* enable more aggressive intra-module optimizations, where available */
#define PY_LOCAL_AGGRESSIVE
#include "Python.h"
#include "pycore_call.h"
#include "pycore_ceval.h"
#include "pycore_code.h"
#include "pycore_initconfig.h"
#include "pycore_object.h"
#include "pycore_pyerrors.h"
#include "pycore_pylifecycle.h"
#include "pycore_pystate.h"
#include "pycore_tupleobject.h"
#include "code.h"
#include "dictobject.h"
#include "frameobject.h"
#include "opcode.h"
#include "pydtrace.h"
#include "setobject.h"
#include "structmember.h"
#include <ctype.h>
#ifdef Py_DEBUG
/* For debugging the interpreter: */
#define LLTRACE 1 /* Low-level trace feature */
#define CHECKEXC 1 /* Double-check exception checking */
#endif
#if !defined(Py_BUILD_CORE)
#  error "ceval.c must be built with Py_BUILD_CORE defined for best performance"
#endif

_Py_IDENTIFIER(__name__);
/* Forward declarations */
Py_LOCAL_INLINE(PyObject *) call_function(
PyThreadState *tstate, PyObject ***pp_stack,
Py_ssize_t oparg, PyObject *kwnames);
static PyObject * do_call_core(
PyThreadState *tstate, PyObject *func,
PyObject *callargs, PyObject *kwdict);
#ifdef LLTRACE
static int lltrace;
static int prtrace(PyThreadState *, PyObject *, const char *);
static int call_trace(Py_tracefunc, PyObject *,
PyThreadState *, PyFrameObject *,
int, PyObject *);
static int call_trace_protected(Py_tracefunc, PyObject *,
                                PyThreadState *, PyFrameObject *,
                                int, PyObject *);
static void call_exc_trace(Py_tracefunc, PyObject *,
PyThreadState *, PyFrameObject *);
static int maybe_call_line_trace(Py_tracefunc, PyObject *,
                                 PyThreadState *, PyFrameObject *,
                                 int *, int *, int *);
static void maybe_dtrace_line(PyFrameObject *, int *, int *, int *);
static void dtrace_function_entry(PyFrameObject *);
static void dtrace_function_return(PyFrameObject *);
static PyObject * import_name(PyThreadState *, PyFrameObject *,
PyObject *, PyObject *, PyObject *);
static PyObject * import_from(PyThreadState *, PyObject *, PyObject *);
static int import_all_from(PyThreadState *, PyObject *, PyObject *);
static void format_exc_check_arg(PyThreadState *, PyObject *, const char *, PyObject *);
static void format_exc_unbound(PyThreadState *tstate, PyCodeObject *co, int oparg);
static PyObject * unicode_concatenate(PyThreadState *, PyObject *, PyObject *,
PyFrameObject *, const _Py_CODEUNIT *);
static PyObject * special_lookup(PyThreadState *, PyObject *, _Py_Identifier *);
static int check_args_iterable(PyThreadState *, PyObject *func, PyObject *vararg);
static void format_kwargs_error(PyThreadState *, PyObject *func, PyObject *kwargs);
static void format_awaitable_error(PyThreadState *, PyTypeObject *, int, int);
#define NAME_ERROR_MSG \
"name '%.200s' is not defined"
#define UNBOUNDLOCAL_ERROR_MSG \
"local variable '%.200s' referenced before assignment"
#define UNBOUNDFREE_ERROR_MSG \
"free variable '%.200s' referenced before assignment" \
" in enclosing scope"
/* Dynamic execution profile */
#ifdef DYNAMIC_EXECUTION_PROFILE
#ifdef DXPAIRS
static long dxpairs[257][256];
#define dxp dxpairs[256]
#else
static long dxp[256];
#endif
#endif
/* per opcode cache */
// --with-pydebug is used to find memory leak. opcache makes it harder.
// So we disable opcache when Py_DEBUG is defined.
// See bpo-37146
#ifdef Py_DEBUG
#define OPCACHE_MIN_RUNS 0  /* disable opcache */
#else
#define OPCACHE_MIN_RUNS 1024  /* create opcache when code is executed this many times */
#endif
#define OPCACHE_STATS 0 /* Enable stats */
#if OPCACHE_STATS
static size_t opcache_code_objects = 0;
static size_t opcache_code_objects_extra_mem = 0;
static size_t opcache_global_opts = 0;
static size_t opcache_global_hits = 0;
static size_t opcache_global_misses = 0;
#endif
#define GIL_REQUEST _Py_atomic_load_relaxed(&ceval->gil_drop_request)
/* This can set eval_breaker to 0 even though gil_drop_request became
1. We believe this is all right because the eval loop will release
the GIL eventually anyway. */
#define COMPUTE_EVAL_BREAKER(ceval) \
_Py_atomic_store_relaxed( \
&(ceval)->eval_breaker, \
GIL_REQUEST | \
_Py_atomic_load_relaxed(&(ceval)->signals_pending) | \
_Py_atomic_load_relaxed(&(ceval)->pending.calls_to_do) | \
(ceval)->pending.async_exc)
#define SET_GIL_DROP_REQUEST(ceval) \
do { \
_Py_atomic_store_relaxed(&(ceval)->gil_drop_request, 1); \
_Py_atomic_store_relaxed(&(ceval)->eval_breaker, 1); \
} while (0)
#define RESET_GIL_DROP_REQUEST(ceval) \
    do { \
        _Py_atomic_store_relaxed(&(ceval)->gil_drop_request, 0); \
        COMPUTE_EVAL_BREAKER(ceval); \
    } while (0)
/* Pending calls are only modified under pending_lock */
#define SIGNAL_PENDING_CALLS(ceval) \
    do { _Py_atomic_store_relaxed(&(ceval)->pending.calls_to_do, 1); \
         _Py_atomic_store_relaxed(&(ceval)->eval_breaker, 1); } while (0)

#define UNSIGNAL_PENDING_CALLS(ceval) \
    do { _Py_atomic_store_relaxed(&(ceval)->pending.calls_to_do, 0); \
         COMPUTE_EVAL_BREAKER(ceval); } while (0)

#define SIGNAL_PENDING_SIGNALS(ceval) \
    do { _Py_atomic_store_relaxed(&(ceval)->signals_pending, 1); \
         _Py_atomic_store_relaxed(&(ceval)->eval_breaker, 1); } while (0)

#define UNSIGNAL_PENDING_SIGNALS(ceval) \
    do { _Py_atomic_store_relaxed(&(ceval)->signals_pending, 0); \
         COMPUTE_EVAL_BREAKER(ceval); } while (0)

#define SIGNAL_ASYNC_EXC(ceval) \
    do { (ceval)->pending.async_exc = 1; \
         _Py_atomic_store_relaxed(&(ceval)->eval_breaker, 1); } while (0)

#define UNSIGNAL_ASYNC_EXC(ceval) \
    do { (ceval)->pending.async_exc = 0; \
         COMPUTE_EVAL_BREAKER(ceval); } while (0)
#ifdef HAVE_ERRNO_H
#include <errno.h>
#endif
#include "pythread.h"
#include "ceval_gil.h"
static void
ensure_tstate_not_null(const char *func, PyThreadState *tstate)
{
    if (tstate == NULL) {
        _Py_FatalErrorFunc(func, "current thread state is NULL");
    }
}
int
_PyEval_ThreadsInitialized(_PyRuntimeState *runtime)
{
    return gil_created(&runtime->ceval.gil);
}

int
PyEval_ThreadsInitialized(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    return _PyEval_ThreadsInitialized(runtime);
}
PyStatus
_PyEval_InitThreads(PyThreadState *tstate)
{
    if (tstate == NULL) {
        return _PyStatus_ERR("tstate is NULL");
    }

    struct _ceval_runtime_state *ceval = &tstate->interp->runtime->ceval;
    struct _gil_runtime_state *gil = &ceval->gil;
    if (gil_created(gil)) {
        return _PyStatus_OK();
    }

    PyThread_init_thread();
    create_gil(gil);
    take_gil(tstate);

    struct _pending_calls *pending = &ceval->pending;
    pending->lock = PyThread_allocate_lock();
    if (pending->lock == NULL) {
        return _PyStatus_NO_MEMORY();
    }
    return _PyStatus_OK();
}
void
PyEval_InitThreads(void)
{
    /* Do nothing: kept for backward compatibility */
}
void
_PyEval_FiniThreads(struct _ceval_runtime_state *ceval)
{
    struct _gil_runtime_state *gil = &ceval->gil;
    if (!gil_created(gil)) {
        return;
    }
    destroy_gil(gil);
    assert(!gil_created(gil));

    struct _pending_calls *pending = &ceval->pending;
    if (pending->lock != NULL) {
        PyThread_free_lock(pending->lock);
        pending->lock = NULL;
    }
}
void
_PyEval_Fini(void)
{
#if OPCACHE_STATS
    fprintf(stderr, "-- Opcode cache number of objects  = %zd\n",
            opcache_code_objects);
    fprintf(stderr, "-- Opcode cache total extra mem    = %zd\n",
            opcache_code_objects_extra_mem);
    fprintf(stderr, "\n");
    fprintf(stderr, "-- Opcode cache LOAD_GLOBAL hits   = %zd (%d%%)\n",
            opcache_global_hits,
            (int) (100.0 * opcache_global_hits /
                   (opcache_global_hits + opcache_global_misses)));
    fprintf(stderr, "-- Opcode cache LOAD_GLOBAL misses = %zd (%d%%)\n",
            opcache_global_misses,
            (int) (100.0 * opcache_global_misses /
                   (opcache_global_hits + opcache_global_misses)));
    fprintf(stderr, "-- Opcode cache LOAD_GLOBAL opts   = %zd\n",
            opcache_global_opts);
#endif
}
void
PyEval_AcquireLock(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
    ensure_tstate_not_null(__func__, tstate);
    take_gil(tstate);
}

void
PyEval_ReleaseLock(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
    /* This function must succeed when the current thread state is NULL.
       We therefore avoid PyThreadState_Get() which dumps a fatal error
       in debug mode. */
    drop_gil(&runtime->ceval, tstate);
}
void
PyEval_AcquireThread(PyThreadState *tstate)
{
    ensure_tstate_not_null(__func__, tstate);
    take_gil(tstate);

    struct _gilstate_runtime_state *gilstate = &tstate->interp->runtime->gilstate;
    if (_PyThreadState_Swap(gilstate, tstate) != NULL) {
        Py_FatalError("non-NULL old thread state");
    }
}

void
PyEval_ReleaseThread(PyThreadState *tstate)
{
    assert(tstate != NULL);

    _PyRuntimeState *runtime = tstate->interp->runtime;
    PyThreadState *new_tstate = _PyThreadState_Swap(&runtime->gilstate, NULL);
    if (new_tstate != tstate) {
        Py_FatalError("wrong thread state");
    }
    drop_gil(&runtime->ceval, tstate);
}
/* This function is called from PyOS_AfterFork_Child to destroy all threads
 * which are not running in the child process, and clear internal locks
 * which might be held by those threads.
 */
void
_PyEval_ReInitThreads(_PyRuntimeState *runtime)
{
    struct _ceval_runtime_state *ceval = &runtime->ceval;
    if (!gil_created(&ceval->gil)) {
        return;
    }
    PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
    ensure_tstate_not_null(__func__, tstate);
    recreate_gil(&ceval->gil);
    take_gil(tstate);

    struct _pending_calls *pending = &ceval->pending;
    pending->lock = PyThread_allocate_lock();
    if (pending->lock == NULL) {
        Py_FatalError("Can't initialize threads for pending calls");
    }
    /* Destroy all threads except the current one */
    _PyThreadState_DeleteExcept(runtime, tstate);
}
/* This function is used to signal that async exceptions are waiting to be
   raised. */

void
_PyEval_SignalAsyncExc(struct _ceval_runtime_state *ceval)
{
    SIGNAL_ASYNC_EXC(ceval);
}
PyThreadState *
PyEval_SaveThread(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    PyThreadState *tstate = _PyThreadState_Swap(&runtime->gilstate, NULL);
    if (tstate == NULL) {
        Py_FatalError("NULL tstate");
    }
    struct _ceval_runtime_state *ceval = &runtime->ceval;
    assert(gil_created(&ceval->gil));
    drop_gil(ceval, tstate);
    return tstate;
}

void
PyEval_RestoreThread(PyThreadState *tstate)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    take_gil(tstate);
    struct _gilstate_runtime_state *gilstate = &runtime->gilstate;
    _PyThreadState_Swap(gilstate, tstate);
}
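/* Illustrative sketch (not part of this file): extension code normally
   releases the GIL around blocking work via the Py_BEGIN_ALLOW_THREADS /
   Py_END_ALLOW_THREADS pair, which expands to PyEval_SaveThread() and
   PyEval_RestoreThread().  blocking_io() is a hypothetical helper. */
#if 0
static PyObject *
my_blocking_call(PyObject *self, PyObject *args)
{
    int result;
    Py_BEGIN_ALLOW_THREADS      /* tstate = PyEval_SaveThread(); */
    result = blocking_io();     /* runs without holding the GIL */
    Py_END_ALLOW_THREADS        /* PyEval_RestoreThread(tstate); */
    return PyLong_FromLong(result);
}
#endif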
/* Mechanism whereby asynchronously executing callbacks (e.g. UNIX
signal handlers or Mac I/O completion routines) can schedule calls
to a function to be called synchronously.
The synchronous function is called with one void* argument.
It should return 0 for success or -1 for failure -- failure should
be accompanied by an exception.
   If registration succeeds, the registration function returns 0; if it
   fails (e.g. due to too many pending calls) it returns -1 (without
   setting an exception condition).

   Note that because registration may occur from within signal handlers,
   or other asynchronous events, calling malloc() is unsafe!

   Any thread can schedule pending calls, but only the main thread
   will execute them.
   There is no facility to schedule calls to a particular thread, but
   that should be easy to change, should that ever be required.  In
   that case, the static variables here should go into the python
   threadstate.
*/
void
_PyEval_SignalReceived(struct _ceval_runtime_state *ceval)
{
    /* bpo-30703: Function called when the C signal handler of Python gets a
       signal. We cannot queue a callback using Py_AddPendingCall() since
       that function is not async-signal-safe. */
    SIGNAL_PENDING_SIGNALS(ceval);
}
/* Push one item onto the queue while holding the lock. */
static int
_push_pending_call(struct _pending_calls *pending,
                   int (*func)(void *), void *arg)
{
    int i = pending->last;
    int j = (i + 1) % NPENDINGCALLS;
    if (j == pending->first) {
        return -1; /* Queue full */
    }
    pending->calls[i].func = func;
    pending->calls[i].arg = arg;
    pending->last = j;
    return 0;
}
/* Pop one item off the queue while holding the lock. */
static void
_pop_pending_call(struct _pending_calls *pending,
                  int (**func)(void *), void **arg)
{
    int i = pending->first;
    if (i == pending->last) {
        return; /* Queue empty */
    }
    *func = pending->calls[i].func;
    *arg = pending->calls[i].arg;
    pending->first = (i + 1) % NPENDINGCALLS;
}
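/* Note on the ring buffer above: "first == last" means empty and
   "(last + 1) % NPENDINGCALLS == first" means full, so one slot is always
   sacrificed and at most NPENDINGCALLS - 1 calls can be queued; e.g. with
   NPENDINGCALLS of 32, a 32nd unconsumed call makes _push_pending_call()
   return -1. */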
/* This implementation is thread-safe.  It allows
   scheduling to be made from any thread, and even from an executing
   callback.
 */

int
_PyEval_AddPendingCall(PyThreadState *tstate,
                       struct _ceval_runtime_state *ceval,
                       int (*func)(void *), void *arg)
{
    struct _pending_calls *pending = &ceval->pending;

    PyThread_acquire_lock(pending->lock, WAIT_LOCK);
    if (pending->finishing) {
        PyThread_release_lock(pending->lock);

        PyObject *exc, *val, *tb;
        _PyErr_Fetch(tstate, &exc, &val, &tb);
        _PyErr_SetString(tstate, PyExc_SystemError,
                         "Py_AddPendingCall: cannot add pending calls "
                         "(Python shutting down)");
        _PyErr_Print(tstate);
        _PyErr_Restore(tstate, exc, val, tb);
        return -1;
    }
    int result = _push_pending_call(pending, func, arg);
    PyThread_release_lock(pending->lock);

    /* signal main loop */
    SIGNAL_PENDING_CALLS(ceval);
    return result;
}
int
Py_AddPendingCall(int (*func)(void *), void *arg)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
    return _PyEval_AddPendingCall(tstate, &runtime->ceval, func, arg);
}
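/* Illustrative sketch (hypothetical names): deferring work into the main
   thread with Py_AddPendingCall().  Only the registration happens at the
   call site; the callback runs later, between bytecode instructions, with
   the GIL held. */
#if 0
static int
my_callback(void *arg)
{
    /* Runs in the main thread; return 0 on success, or -1 with an
       exception set on failure. */
    return 0;
}

static void
my_signal_handler(int signum)
{
    /* Async-signal-safe usage: no allocation, just registration. */
    Py_AddPendingCall(my_callback, NULL);
}
#endif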
static int
handle_signals(_PyRuntimeState *runtime)
{
    /* Only handle signals on main thread */
    if (PyThread_get_thread_ident() != runtime->main_thread) {
        return 0;
    }
    /*
     * Ensure that the thread isn't currently running some other
     * interpreter.
     */
    PyInterpreterState *interp = _PyRuntimeState_GetThreadState(runtime)->interp;
    if (interp != runtime->interpreters.main) {
        return 0;
    }

    struct _ceval_runtime_state *ceval = &runtime->ceval;
    UNSIGNAL_PENDING_SIGNALS(ceval);
    if (_PyErr_CheckSignals() < 0) {
        SIGNAL_PENDING_SIGNALS(ceval); /* We're not done yet */
        return -1;
    }
    return 0;
}
static int
make_pending_calls(_PyRuntimeState *runtime)
{
    static int busy = 0;

    /* only service pending calls on main thread */
    if (PyThread_get_thread_ident() != runtime->main_thread) {
        return 0;
    }
    /* don't perform recursive pending calls */
    if (busy) {
        return 0;
    }
    busy = 1;
    struct _ceval_runtime_state *ceval = &runtime->ceval;
    /* unsignal before starting to call callbacks, so that any callback
       added in-between re-signals */
    UNSIGNAL_PENDING_CALLS(ceval);
    int res = 0;

    /* perform a bounded number of calls, in case of recursion */
    struct _pending_calls *pending = &ceval->pending;
    for (int i=0; i<NPENDINGCALLS; i++) {
        int (*func)(void *) = NULL;
        void *arg = NULL;

        /* pop one item off the queue while holding the lock */
        PyThread_acquire_lock(pending->lock, WAIT_LOCK);
        _pop_pending_call(pending, &func, &arg);
        PyThread_release_lock(pending->lock);

        /* having released the lock, perform the callback */
        if (func == NULL) {
            break;
        }
        res = func(arg);
        if (res) {
            goto error;
        }
    }

    busy = 0;
    return res;

error:
    busy = 0;
    SIGNAL_PENDING_CALLS(ceval);
    return res;
}
void
_Py_FinishPendingCalls(PyThreadState *tstate)
{
    assert(PyGILState_Check());

    _PyRuntimeState *runtime = tstate->interp->runtime;
    struct _pending_calls *pending = &runtime->ceval.pending;

    pending->finishing = 1;
    if (!_Py_atomic_load_relaxed(&(pending->calls_to_do))) {
        return;
    }

    if (make_pending_calls(runtime) < 0) {
        PyObject *exc, *val, *tb;
        _PyErr_Fetch(tstate, &exc, &val, &tb);
        PyErr_BadInternalCall();
        _PyErr_ChainExceptions(exc, val, tb);
        _PyErr_Print(tstate);
    }
}
/* Py_MakePendingCalls() is a simple wrapper for the sake
   of backward-compatibility. */
int
Py_MakePendingCalls(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;

    /* Python signal handler doesn't really queue a callback: it only signals
       that a signal was received, see _PyEval_SignalReceived(). */
    int res = handle_signals(runtime);
    if (res != 0) {
        return res;
    }
    res = make_pending_calls(runtime);
    return res;
}
/* The interpreter's recursion limit */

#ifndef Py_DEFAULT_RECURSION_LIMIT
#define Py_DEFAULT_RECURSION_LIMIT 1000
#endif

int _Py_CheckRecursionLimit = Py_DEFAULT_RECURSION_LIMIT;

void
_PyEval_InitRuntimeState(struct _ceval_runtime_state *ceval)
{
    ceval->recursion_limit = Py_DEFAULT_RECURSION_LIMIT;
    _Py_CheckRecursionLimit = Py_DEFAULT_RECURSION_LIMIT;
    _gil_initialize(&ceval->gil);
}
void
_PyEval_InitState(struct _ceval_state *ceval)
{
    /* PyInterpreterState_New() initializes ceval to zero */
}
int
Py_GetRecursionLimit(void)
{
    struct _ceval_runtime_state *ceval = &_PyRuntime.ceval;
    return ceval->recursion_limit;
}

void
Py_SetRecursionLimit(int new_limit)
{
    struct _ceval_runtime_state *ceval = &_PyRuntime.ceval;
    ceval->recursion_limit = new_limit;
    _Py_CheckRecursionLimit = new_limit;
}
/* The function _Py_EnterRecursiveCall() only calls _Py_CheckRecursiveCall()
if the recursion_depth reaches _Py_CheckRecursionLimit.
If USE_STACKCHECK, the macro decrements _Py_CheckRecursionLimit
to guarantee that _Py_CheckRecursiveCall() is regularly called.
Without USE_STACKCHECK, there is no need for this. */
int
_Py_CheckRecursiveCall(PyThreadState *tstate, const char *where)
{
    _PyRuntimeState *runtime = tstate->interp->runtime;
    int recursion_limit = runtime->ceval.recursion_limit;

#ifdef USE_STACKCHECK
    tstate->stackcheck_counter = 0;
    if (PyOS_CheckStack()) {
        --tstate->recursion_depth;
        _PyErr_SetString(tstate, PyExc_MemoryError, "Stack overflow");
        return -1;
    }
    /* Needed for ABI backwards-compatibility (see bpo-31857) */
    _Py_CheckRecursionLimit = recursion_limit;
#endif
    if (tstate->recursion_critical)
        /* Somebody asked that we don't check for recursion. */
        return 0;
    if (tstate->overflowed) {
        if (tstate->recursion_depth > recursion_limit + 50) {
            /* Overflowing while handling an overflow. Give up. */
            Py_FatalError("Cannot recover from stack overflow.");
        }
        return 0;
    }
    if (tstate->recursion_depth > recursion_limit) {
        --tstate->recursion_depth;
        tstate->overflowed = 1;
        _PyErr_Format(tstate, PyExc_RecursionError,
                      "maximum recursion depth exceeded%s",
                      where);
        return -1;
    }
    return 0;
}
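/* Illustrative sketch (not part of this file): C code that may recurse
   guards itself with Py_EnterRecursiveCall()/Py_LeaveRecursiveCall(), which
   drive the depth accounting checked above.  my_repr_helper is a
   hypothetical name. */
#if 0
static PyObject *
my_repr_helper(PyObject *obj)
{
    if (Py_EnterRecursiveCall(" while computing a repr")) {
        return NULL;            /* RecursionError already set */
    }
    PyObject *res = PyObject_Repr(obj);
    Py_LeaveRecursiveCall();
    return res;
}
#endif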
static int do_raise(PyThreadState *tstate, PyObject *exc, PyObject *cause);
static int unpack_iterable(PyThreadState *, PyObject *, int, int, PyObject **);
#define _Py_TracingPossible(ceval) ((ceval)->tracing_possible)
PyObject *
PyEval_EvalCode(PyObject *co, PyObject *globals, PyObject *locals)
{
    return PyEval_EvalCodeEx(co,
                             globals, locals,
                             (PyObject **)NULL, 0,
                             (PyObject **)NULL, 0,
                             (PyObject **)NULL, 0,
                             NULL, NULL);
}
/* Interpreter main loop */

PyObject *
PyEval_EvalFrame(PyFrameObject *f)
{
    /* Function kept for backward compatibility */
    PyThreadState *tstate = _PyThreadState_GET();
    return _PyEval_EvalFrame(tstate, f, 0);
}

PyObject *
PyEval_EvalFrameEx(PyFrameObject *f, int throwflag)
{
    PyThreadState *tstate = _PyThreadState_GET();
    return _PyEval_EvalFrame(tstate, f, throwflag);
}
PyObject* _Py_HOT_FUNCTION
_PyEval_EvalFrameDefault(PyThreadState *tstate, PyFrameObject *f, int throwflag)
{
int lastopcode = 0;
PyObject **stack_pointer; /* Next free slot in value stack */
const _Py_CODEUNIT *next_instr;
int opcode; /* Current opcode */
int oparg; /* Current opcode argument, if any */
PyObject **fastlocals, **freevars;
PyObject *retval = NULL; /* Return value */
_PyRuntimeState * const runtime = &_PyRuntime;
struct _ceval_runtime_state * const ceval = &runtime->ceval;
struct _ceval_state * const ceval2 = &tstate->interp->ceval;
_Py_atomic_int * const eval_breaker = &ceval->eval_breaker;
PyCodeObject *co;
/* when tracing we set things up so that
not (instr_lb <= current_bytecode_offset < instr_ub)
is true when the line being executed has changed. The
initial values are such as to make this false the first
time it is tested. */
int instr_ub = -1, instr_lb = 0, instr_prev = -1;
const _Py_CODEUNIT *first_instr;
PyObject *names;
PyObject *consts;
_PyOpcache *co_opcache;
_Py_IDENTIFIER(__ltrace__);
/* Computed GOTOs, or
the-optimization-commonly-but-improperly-known-as-"threaded code"
using gcc's labels-as-values extension
(http://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html).
The traditional bytecode evaluation loop uses a "switch" statement, which
decent compilers will optimize as a single indirect branch instruction
combined with a lookup table of jump addresses. However, since the
indirect jump instruction is shared by all opcodes, the CPU will have a
hard time making the right prediction for where to jump next (actually,
it will be always wrong except in the uncommon case of a sequence of
several identical opcodes).
"Threaded code" in contrast, uses an explicit jump table and an explicit
indirect jump instruction at the end of each opcode. Since the jump
instruction is at a different address for each opcode, the CPU will make a
separate prediction for each of these instructions, which is equivalent to
predicting the second opcode of each opcode pair. These predictions have
a much better chance to turn out valid, especially in small bytecode loops.
A mispredicted branch on a modern CPU flushes the whole pipeline and
can cost several CPU cycles (depending on the pipeline depth),
and potentially many more instructions (depending on the pipeline width).
A correctly predicted branch, however, is nearly free.
At the time of this writing, the "threaded code" version is up to 15-20%
faster than the normal "switch" version, depending on the compiler and the
CPU architecture.
We disable the optimization if DYNAMIC_EXECUTION_PROFILE is defined,
because it would render the measurements invalid.
NOTE: care must be taken that the compiler doesn't try to "optimize" the
indirect jumps by sharing them between all opcodes. Such optimizations
can be disabled on gcc by using the -fno-gcse flag (or possibly
   -fno-crossjumping).
*/
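/* Minimal sketch of the technique (assumes a gcc-style compiler with the
   labels-as-values extension; the toy opcodes below are hypothetical, not
   the interpreter's): */
#if 0
static int
threaded_demo(const unsigned char *code)
{
    static const void *targets[] = { &&op_inc, &&op_ret };
    int acc = 0;
    goto *targets[*code++];         /* initial dispatch */
op_inc:
    acc++;
    goto *targets[*code++];         /* each opcode ends with its own jump */
op_ret:
    return acc;
}
#endif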
#ifdef DYNAMIC_EXECUTION_PROFILE
#undef USE_COMPUTED_GOTOS
#define USE_COMPUTED_GOTOS 0
#endif

#ifdef HAVE_COMPUTED_GOTOS
    #ifndef USE_COMPUTED_GOTOS
    #define USE_COMPUTED_GOTOS 1
    #endif
#else
    #if defined(USE_COMPUTED_GOTOS) && USE_COMPUTED_GOTOS
    #error "Computed gotos are not supported on this compiler."
    #endif
    #undef USE_COMPUTED_GOTOS
    #define USE_COMPUTED_GOTOS 0
#endif
#if USE_COMPUTED_GOTOS
/* Import the static jump table */
#include "opcode_targets.h"

#define TARGET(op) \
    op: \
    TARGET_##op

#ifdef LLTRACE
#define FAST_DISPATCH() \
    { \
        if (!lltrace && !_Py_TracingPossible(ceval2) && !PyDTrace_LINE_ENABLED()) { \
            f->f_lasti = INSTR_OFFSET(); \
            NEXTOPARG(); \
            goto *opcode_targets[opcode]; \
        } \
        goto fast_next_opcode; \
    }
#else
#define FAST_DISPATCH() \
    { \
        if (!_Py_TracingPossible(ceval2) && !PyDTrace_LINE_ENABLED()) { \
            f->f_lasti = INSTR_OFFSET(); \
            NEXTOPARG(); \
            goto *opcode_targets[opcode]; \
        } \
        goto fast_next_opcode; \
    }
#endif

#define DISPATCH() \
    { \
        if (!_Py_atomic_load_relaxed(eval_breaker)) { \
            FAST_DISPATCH(); \
        } \
        continue; \
    }

#else
#define TARGET(op) op
#define FAST_DISPATCH() goto fast_next_opcode
#define DISPATCH() continue
#endif
/* Tuple access macros */
#ifndef Py_DEBUG
#define GETITEM(v, i) PyTuple_GET_ITEM((PyTupleObject *)(v), (i))
#else
#define GETITEM(v, i) PyTuple_GetItem((v), (i))
#endif
/* Code access macros */
/* The integer overflow is checked by an assertion below. */
#define INSTR_OFFSET() \
(sizeof(_Py_CODEUNIT) * (int)(next_instr - first_instr))
#define NEXTOPARG()  do { \
        _Py_CODEUNIT word = *next_instr; \
        opcode = _Py_OPCODE(word); \
        oparg = _Py_OPARG(word); \
        next_instr++; \
    } while (0)
#define JUMPTO(x) (next_instr = first_instr + (x) / sizeof(_Py_CODEUNIT))
#define JUMPBY(x) (next_instr += (x) / sizeof(_Py_CODEUNIT))
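/* Worked example of the wordcode decoded by NEXTOPARG(): each _Py_CODEUNIT
   is one 16-bit unit holding an 8-bit opcode and an 8-bit oparg.  E.g.
   LOAD_FAST with oparg 1 (opcode 124) occupies the byte pair (124, 1), so
   _Py_OPCODE(word) yields 124 and _Py_OPARG(word) yields 1. */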
/* OpCode prediction macros
Some opcodes tend to come in pairs thus making it possible to
predict the second code when the first is run. For example,
COMPARE_OP is often followed by POP_JUMP_IF_FALSE or POP_JUMP_IF_TRUE.
Verifying the prediction costs a single high-speed test of a register
variable against a constant. If the pairing was good, then the
processor's own internal branch prediction has a high likelihood of
success, resulting in a nearly zero-overhead transition to the
next opcode. A successful prediction saves a trip through the eval-loop
including its unpredictable switch-case branch. Combined with the
processor's internal branch prediction, a successful PREDICT has the
effect of making the two opcodes run as if they were a single new opcode
with the bodies combined.
If collecting opcode statistics, your choices are to either keep the
predictions turned-on and interpret the results as if some opcodes
had been combined or turn-off predictions so that the opcode frequency
counter updates for both opcodes.
Opcode prediction is disabled with threaded code, since the latter allows
the CPU to record separate branch prediction information for each
opcode.
*/
#define PREDICT_ID(op)          PRED_##op

#if defined(DYNAMIC_EXECUTION_PROFILE) || USE_COMPUTED_GOTOS
#define PREDICT(op)             if (0) goto PREDICT_ID(op)
#else
#define PREDICT(op) \
    do { \
        _Py_CODEUNIT word = *next_instr; \
        opcode = _Py_OPCODE(word); \
        if (opcode == op) { \
            oparg = _Py_OPARG(word); \
            next_instr++; \
            goto PREDICT_ID(op); \
        } \
    } while(0)
#endif
#define PREDICTED(op)           PREDICT_ID(op):
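/* Illustrative pairing, as used by the opcode bodies later in this file:
   the first opcode of a likely pair issues PREDICT(op) at the end of its
   work, and the predicted opcode's case starts with PREDICTED(op):

       case TARGET(COMPARE_OP): {
           ...
           PREDICT(POP_JUMP_IF_FALSE);
           DISPATCH();
       }
       case TARGET(POP_JUMP_IF_FALSE): {
           PREDICTED(POP_JUMP_IF_FALSE);
           ...
       }
*/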
/* Stack manipulation macros */
/* The stack can grow at most MAXINT deep, as co_nlocals and
co_stacksize are ints. */
#define STACK_LEVEL() ((int)(stack_pointer - f->f_valuestack))
#define EMPTY() (STACK_LEVEL() == 0)
#define TOP() (stack_pointer[-1])
#define SECOND() (stack_pointer[-2])
#define THIRD() (stack_pointer[-3])
#define FOURTH() (stack_pointer[-4])
#define PEEK(n) (stack_pointer[-(n)])
#define SET_TOP(v) (stack_pointer[-1] = (v))
#define SET_SECOND(v) (stack_pointer[-2] = (v))
#define SET_THIRD(v) (stack_pointer[-3] = (v))
#define SET_FOURTH(v) (stack_pointer[-4] = (v))
#define SET_VALUE(n, v) (stack_pointer[-(n)] = (v))
#define BASIC_STACKADJ(n) (stack_pointer += n)
#define BASIC_PUSH(v) (*stack_pointer++ = (v))
#define BASIC_POP() (*--stack_pointer)
#ifdef LLTRACE
#define PUSH(v)         { (void)(BASIC_PUSH(v), \
                          lltrace && prtrace(tstate, TOP(), "push")); \
                          assert(STACK_LEVEL() <= co->co_stacksize); }
#define POP()           ((void)(lltrace && prtrace(tstate, TOP(), "pop")), \
                         BASIC_POP())
#define STACK_GROW(n)   do { \
                            assert(n >= 0); \
                            (void)(BASIC_STACKADJ(n), \
                            lltrace && prtrace(tstate, TOP(), "stackadj")); \
                            assert(STACK_LEVEL() <= co->co_stacksize); \
                        } while (0)
#define STACK_SHRINK(n) do { \
                            assert(n >= 0); \
                            (void)(lltrace && prtrace(tstate, TOP(), "stackadj")); \
                            (void)(BASIC_STACKADJ(-n)); \
                        } while (0)
#define EXT_POP(STACK_POINTER) ((void)(lltrace && \
                                prtrace(tstate, (STACK_POINTER)[-1], "ext_pop")), \
                                *--(STACK_POINTER))
#else
#define PUSH(v)                BASIC_PUSH(v)
#define POP()                  BASIC_POP()
#define STACK_GROW(n)          BASIC_STACKADJ(n)
#define STACK_SHRINK(n)        BASIC_STACKADJ(-n)
#define EXT_POP(STACK_POINTER) (*--(STACK_POINTER))
#endif
/* Local variable macros */
#define GETLOCAL(i) (fastlocals[i])
/* The SETLOCAL() macro must not DECREF the local variable in-place and
then store the new value; it must copy the old value to a temporary
value, then store the new value, and then DECREF the temporary value.
This is because it is possible that during the DECREF the frame is
accessed by other code (e.g. a __del__ method or gc.collect()) and the
variable would be pointing to already-freed memory. */
#define SETLOCAL(i, value) do { PyObject *tmp = GETLOCAL(i); \
GETLOCAL(i) = value; \
Py_XDECREF(tmp); } while (0)
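/* Worked example of the hazard SETLOCAL() avoids: the naive sequence
   "Py_DECREF(GETLOCAL(i)); GETLOCAL(i) = value;" can run a __del__ method
   (or gc.collect()) during the DECREF while fastlocals[i] still points at
   the dying object; SETLOCAL() stores the new value first, so re-entrant
   code only ever observes a valid object in the slot. */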
#define UNWIND_BLOCK(b) \
    while (STACK_LEVEL() > (b)->b_level) { \
        PyObject *v = POP(); \
        Py_XDECREF(v); \
    }

#define UNWIND_EXCEPT_HANDLER(b) \
    do { \
        PyObject *type, *value, *traceback; \
        _PyErr_StackItem *exc_info; \
        assert(STACK_LEVEL() >= (b)->b_level + 3); \
        while (STACK_LEVEL() > (b)->b_level + 3) { \
            value = POP(); \
            Py_XDECREF(value); \
        } \
        exc_info = tstate->exc_info; \
        type = exc_info->exc_type; \
        value = exc_info->exc_value; \
        traceback = exc_info->exc_traceback; \
        exc_info->exc_type = POP(); \
        exc_info->exc_value = POP(); \
        exc_info->exc_traceback = POP(); \
        Py_XDECREF(type); \
        Py_XDECREF(value); \
        Py_XDECREF(traceback); \
    } while(0)
/* macros for opcode cache */
#define OPCACHE_CHECK() \
    do { \
        co_opcache = NULL; \
        if (co->co_opcache != NULL) { \
            unsigned char co_opt_offset = \
                co->co_opcache_map[next_instr - first_instr]; \
            if (co_opt_offset > 0) { \
                assert(co_opt_offset <= co->co_opcache_size); \
                co_opcache = &co->co_opcache[co_opt_offset - 1]; \
                assert(co_opcache != NULL); \
            } \
        } \
    } while (0)

#if OPCACHE_STATS
#define OPCACHE_STAT_GLOBAL_HIT() \
    do { if (co->co_opcache != NULL) opcache_global_hits++; } while (0)
#define OPCACHE_STAT_GLOBAL_MISS() \
    do { if (co->co_opcache != NULL) opcache_global_misses++; } while (0)
#define OPCACHE_STAT_GLOBAL_OPT() \
    do { if (co->co_opcache != NULL) opcache_global_opts++; } while (0)
#else /* OPCACHE_STATS */
#define OPCACHE_STAT_GLOBAL_HIT()
#define OPCACHE_STAT_GLOBAL_MISS()
#define OPCACHE_STAT_GLOBAL_OPT()
#endif
/* Start of code */
/* push frame */
    if (_Py_EnterRecursiveCall(tstate, "")) {
        return NULL;
    }

    tstate->frame = f;
if (tstate->use_tracing) {
if (tstate->c_tracefunc != NULL) {
/* tstate->c_tracefunc, if defined, is a
function that will be called on *every* entry
to a code block. Its return value, if not
None, is a function that will be called at
the start of each executed line of code.
(Actually, the function must return itself
in order to continue tracing.) The trace
functions are called with three arguments:
a pointer to the current frame, a string
indicating why the function is called, and
an argument which depends on the situation.
The global trace function is also called
whenever an exception is detected. */
            if (call_trace_protected(tstate->c_tracefunc,
                                     tstate->c_traceobj,
                                     tstate, f, PyTrace_CALL, Py_None)) {
                /* Trace function raised an error */
                goto exit_eval_frame;
            }
        }
        if (tstate->c_profilefunc != NULL) {
            /* Similar for c_profilefunc, except it needn't
               return itself and isn't called for "line" events */
            if (call_trace_protected(tstate->c_profilefunc,
                                     tstate->c_profileobj,
                                     tstate, f, PyTrace_CALL, Py_None)) {
                /* Profile function raised an error */
                goto exit_eval_frame;
            }
        }
    }
if (PyDTrace_FUNCTION_ENTRY_ENABLED())
dtrace_function_entry(f);
co = f->f_code;
names = co->co_names;
consts = co->co_consts;
fastlocals = f->f_localsplus;
freevars = f->f_localsplus + co->co_nlocals;
assert(PyBytes_Check(co->co_code));
assert(PyBytes_GET_SIZE(co->co_code) <= INT_MAX);
assert(PyBytes_GET_SIZE(co->co_code) % sizeof(_Py_CODEUNIT) == 0);
assert(_Py_IS_ALIGNED(PyBytes_AS_STRING(co->co_code), sizeof(_Py_CODEUNIT)));
first_instr = (_Py_CODEUNIT *) PyBytes_AS_STRING(co->co_code);
    /*
       f->f_lasti refers to the index of the last instruction,
       unless it's -1 in which case next_instr should be first_instr.

       YIELD_FROM sets f_lasti to itself, in order to repeatedly yield
       multiple values.

       When the PREDICT() macros are enabled, some opcode pairs follow in
       direct succession without updating f->f_lasti.  A successful
       prediction effectively links the two codes together as if they
       were a single new opcode; accordingly, f->f_lasti will point to
       the first code in the pair (for instance, GET_ITER followed by
       FOR_ITER is effectively a single opcode and f->f_lasti will point
       to the beginning of the combined pair.)
    */
assert(f->f_lasti >= -1);
next_instr = first_instr;
    if (f->f_lasti >= 0) {
        assert(f->f_lasti % sizeof(_Py_CODEUNIT) == 0);
        next_instr += f->f_lasti / sizeof(_Py_CODEUNIT) + 1;
    }
stack_pointer = f->f_stacktop;
assert(stack_pointer != NULL);
f->f_stacktop = NULL; /* remains NULL unless yield suspends frame */
f->f_executing = 1;
    if (co->co_opcache_flag < OPCACHE_MIN_RUNS) {
        co->co_opcache_flag++;
        if (co->co_opcache_flag == OPCACHE_MIN_RUNS) {
            /* Enable co_opcache. */
            if (_PyCode_InitOpcache(co) < 0) {
                goto exit_eval_frame;
            }
#if OPCACHE_STATS
            opcache_code_objects_extra_mem +=
                PyBytes_Size(co->co_code) / sizeof(_Py_CODEUNIT) +
                sizeof(_PyOpcache) * co->co_opcache_size;
            opcache_code_objects++;
#endif
        }
    }
#ifdef LLTRACE
    lltrace = _PyDict_GetItemId(f->f_globals, &PyId___ltrace__) != NULL;
#endif
    if (throwflag) { /* support for generator.throw() */
        goto error;
    }

#ifdef Py_DEBUG
    /* _PyEval_EvalFrameDefault() must not be called with an exception set,
       because it can clear it (directly or indirectly) and so the
       caller loses its exception */
    assert(!_PyErr_Occurred(tstate));
#endif
main_loop:
for (;;) {
assert(stack_pointer >= f->f_valuestack); /* else underflow */
assert(STACK_LEVEL() <= co->co_stacksize); /* else overflow */
/* Do periodic things. Doing this every time through
the loop would add too much overhead, so we do it
only every Nth instruction. We also do it if
``pendingcalls_to_do'' is set, i.e. when an asynchronous
event needs attention (e.g. a signal handler or
async I/O handler); see Py_AddPendingCall() and
Py_MakePendingCalls() above. */
if (_Py_atomic_load_relaxed(eval_breaker)) {
opcode = _Py_OPCODE(*next_instr);
if (opcode == SETUP_FINALLY ||
opcode == SETUP_WITH ||
opcode == BEFORE_ASYNC_WITH ||
opcode == YIELD_FROM) {
/* Few cases where we skip running signal handlers and other
pending calls:
- If we're about to enter the 'with:'. It will prevent
emitting a resource warning in the common idiom
'with open(path) as file:'.
- If we're about to enter the 'async with:'.
- If we're about to enter the 'try:' of a try/finally (not
*very* useful, but might help in some cases and it's
traditional)
                   - If we're resuming a chain of nested 'yield from' or
                     'await' calls, then each frame is parked with YIELD_FROM
                     as its next opcode. If the user hit control-C we want to
                     wait until we've reached the innermost frame before
                     running the signal handler and raising KeyboardInterrupt
                     (see bpo-30039).
                */
                goto fast_next_opcode;
            }
            if (_Py_atomic_load_relaxed(&ceval->signals_pending)) {
                if (handle_signals(runtime) != 0) {
                    goto error;
                }
            }
            if (_Py_atomic_load_relaxed(&ceval->pending.calls_to_do)) {
                if (make_pending_calls(runtime) != 0) {
                    goto error;
                }
            }
            if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
                /* Give another thread a chance */
                if (_PyThreadState_Swap(&runtime->gilstate, NULL) != tstate) {
                    Py_FatalError("tstate mix-up");
                }
                drop_gil(ceval, tstate);

                /* Other threads may run now */

                take_gil(tstate);
                if (_PyThreadState_Swap(&runtime->gilstate, tstate) != NULL) {
                    Py_FatalError("orphan tstate");
                }
            }
            /* Check for asynchronous exceptions. */
            if (tstate->async_exc != NULL) {
                PyObject *exc = tstate->async_exc;
                tstate->async_exc = NULL;
                UNSIGNAL_ASYNC_EXC(ceval);
                _PyErr_SetNone(tstate, exc);
                Py_DECREF(exc);
                goto error;
            }
        }
fast_next_opcode:
f->f_lasti = INSTR_OFFSET();
if (PyDTrace_LINE_ENABLED())
maybe_dtrace_line(f, &instr_lb, &instr_ub, &instr_prev);
/* line-by-line tracing support */
        if (_Py_TracingPossible(ceval2) &&
            tstate->c_tracefunc != NULL && !tstate->tracing) {
            int err;
            /* see maybe_call_line_trace()
               for expository comments */
            f->f_stacktop = stack_pointer;

            err = maybe_call_line_trace(tstate->c_tracefunc,
                                        tstate->c_traceobj,
                                        tstate, f,
                                        &instr_lb, &instr_ub, &instr_prev);
            /* Reload possibly changed frame fields */
            JUMPTO(f->f_lasti);
            if (f->f_stacktop != NULL) {
                stack_pointer = f->f_stacktop;
                f->f_stacktop = NULL;
            }
            if (err)
                /* trace function raised an exception */
                goto error;
        }
/* Extract opcode and argument */
NEXTOPARG();
    dispatch_opcode:
#ifdef DYNAMIC_EXECUTION_PROFILE
#ifdef DXPAIRS
        dxpairs[lastopcode][opcode]++;
        lastopcode = opcode;
#endif
        dxp[opcode]++;
#endif
#ifdef LLTRACE
        /* Instruction tracing */
        if (lltrace) {
            if (HAS_ARG(opcode)) {
                printf("%d: %d, %d\n",
                       f->f_lasti, opcode, oparg);
            }
            else {
                printf("%d: %d\n",
                       f->f_lasti, opcode);
            }
        }
#endif
switch (opcode) {
        /* BEWARE!
           It is essential that any operation that fails must goto error
           and that all operations that succeed call [FAST_]DISPATCH() ! */
case TARGET(NOP): {
FAST_DISPATCH();
case TARGET(LOAD_FAST): {
PyObject *value = GETLOCAL(oparg);
if (value == NULL) {
format_exc_check_arg(tstate, PyExc_UnboundLocalError,
UNBOUNDLOCAL_ERROR_MSG,
PyTuple_GetItem(co->co_varnames, oparg));
Py_INCREF(value);
PUSH(value);
case TARGET(LOAD_CONST): {
PREDICTED(LOAD_CONST);
PyObject *value = GETITEM(consts, oparg);
case TARGET(STORE_FAST): {
PREDICTED(STORE_FAST);
PyObject *value = POP();
SETLOCAL(oparg, value);
case TARGET(POP_TOP): {
Py_DECREF(value);
case TARGET(ROT_TWO): {
PyObject *top = TOP();
PyObject *second = SECOND();
SET_TOP(second);
SET_SECOND(top);
case TARGET(ROT_THREE): {
PyObject *third = THIRD();
SET_SECOND(third);
SET_THIRD(top);
case TARGET(ROT_FOUR): {
PyObject *fourth = FOURTH();
SET_THIRD(fourth);
SET_FOURTH(top);
case TARGET(DUP_TOP): {
Py_INCREF(top);
PUSH(top);
case TARGET(DUP_TOP_TWO): {
Py_INCREF(second);
STACK_GROW(2);
SET_TOP(top);
SET_SECOND(second);
case TARGET(UNARY_POSITIVE): {
PyObject *value = TOP();
PyObject *res = PyNumber_Positive(value);
SET_TOP(res);
if (res == NULL)
DISPATCH();
case TARGET(UNARY_NEGATIVE): {
PyObject *res = PyNumber_Negative(value);
case TARGET(UNARY_NOT): {
int err = PyObject_IsTrue(value);
if (err == 0) {
Py_INCREF(Py_True);
SET_TOP(Py_True);
else if (err > 0) {
Py_INCREF(Py_False);
SET_TOP(Py_False);
STACK_SHRINK(1);
case TARGET(UNARY_INVERT): {
PyObject *res = PyNumber_Invert(value);
case TARGET(BINARY_POWER): {
PyObject *exp = POP();
PyObject *base = TOP();
PyObject *res = PyNumber_Power(base, exp, Py_None);
Py_DECREF(base);
Py_DECREF(exp);
case TARGET(BINARY_MULTIPLY): {
PyObject *right = POP();
PyObject *left = TOP();
PyObject *res = PyNumber_Multiply(left, right);
Py_DECREF(left);
Py_DECREF(right);
case TARGET(BINARY_MATRIX_MULTIPLY): {
PyObject *res = PyNumber_MatrixMultiply(left, right);
case TARGET(BINARY_TRUE_DIVIDE): {
PyObject *divisor = POP();
PyObject *dividend = TOP();
PyObject *quotient = PyNumber_TrueDivide(dividend, divisor);
Py_DECREF(dividend);
Py_DECREF(divisor);
SET_TOP(quotient);
if (quotient == NULL)
case TARGET(BINARY_FLOOR_DIVIDE): {
PyObject *quotient = PyNumber_FloorDivide(dividend, divisor);
case TARGET(BINARY_MODULO): {
PyObject *res;
if (PyUnicode_CheckExact(dividend) && (
!PyUnicode_Check(divisor) || PyUnicode_CheckExact(divisor))) {
// fast path; string formatting, but not if the RHS is a str subclass
// (see issue28598)
res = PyUnicode_Format(dividend, divisor);
} else {
res = PyNumber_Remainder(dividend, divisor);
case TARGET(BINARY_ADD): {
PyObject *sum;
            /* NOTE(haypo): Please don't try to micro-optimize int+int on
               CPython using bytecode, it is simply worthless.
               See http://bugs.python.org/issue21955 and
               http://bugs.python.org/issue10044 for the discussion.  In short,
               no patch has shown any impact on a realistic benchmark, only a
               minor speedup on microbenchmarks. */
if (PyUnicode_CheckExact(left) &&
PyUnicode_CheckExact(right)) {
sum = unicode_concatenate(tstate, left, right, f, next_instr);
/* unicode_concatenate consumed the ref to left */
sum = PyNumber_Add(left, right);
SET_TOP(sum);
if (sum == NULL)
case TARGET(BINARY_SUBTRACT): {
PyObject *diff = PyNumber_Subtract(left, right);
SET_TOP(diff);
if (diff == NULL)
case TARGET(BINARY_SUBSCR): {
PyObject *sub = POP();
PyObject *container = TOP();
PyObject *res = PyObject_GetItem(container, sub);
Py_DECREF(container);
Py_DECREF(sub);
case TARGET(BINARY_LSHIFT): {
PyObject *res = PyNumber_Lshift(left, right);
case TARGET(BINARY_RSHIFT): {
PyObject *res = PyNumber_Rshift(left, right);
case TARGET(BINARY_AND): {
PyObject *res = PyNumber_And(left, right);
case TARGET(BINARY_XOR): {
PyObject *res = PyNumber_Xor(left, right);
case TARGET(BINARY_OR): {
PyObject *res = PyNumber_Or(left, right);
case TARGET(LIST_APPEND): {
PyObject *v = POP();
PyObject *list = PEEK(oparg);
err = PyList_Append(list, v);
Py_DECREF(v);
if (err != 0)
PREDICT(JUMP_ABSOLUTE);
case TARGET(SET_ADD): {
PyObject *set = PEEK(oparg);
err = PySet_Add(set, v);
case TARGET(INPLACE_POWER): {
PyObject *res = PyNumber_InPlacePower(base, exp, Py_None);
case TARGET(INPLACE_MULTIPLY): {
PyObject *res = PyNumber_InPlaceMultiply(left, right);
case TARGET(INPLACE_MATRIX_MULTIPLY): {
PyObject *res = PyNumber_InPlaceMatrixMultiply(left, right);
case TARGET(INPLACE_TRUE_DIVIDE): {
PyObject *quotient = PyNumber_InPlaceTrueDivide(dividend, divisor);
case TARGET(INPLACE_FLOOR_DIVIDE): {
PyObject *quotient = PyNumber_InPlaceFloorDivide(dividend, divisor);
case TARGET(INPLACE_MODULO): {
PyObject *mod = PyNumber_InPlaceRemainder(left, right);
SET_TOP(mod);
if (mod == NULL)
case TARGET(INPLACE_ADD): {
if (PyUnicode_CheckExact(left) && PyUnicode_CheckExact(right)) {
sum = PyNumber_InPlaceAdd(left, right);
case TARGET(INPLACE_SUBTRACT): {
PyObject *diff = PyNumber_InPlaceSubtract(left, right);
case TARGET(INPLACE_LSHIFT): {
PyObject *res = PyNumber_InPlaceLshift(left, right);
case TARGET(INPLACE_RSHIFT): {
PyObject *res = PyNumber_InPlaceRshift(left, right);
case TARGET(INPLACE_AND): {
PyObject *res = PyNumber_InPlaceAnd(left, right);
case TARGET(INPLACE_XOR): {
PyObject *res = PyNumber_InPlaceXor(left, right);
case TARGET(INPLACE_OR): {
PyObject *res = PyNumber_InPlaceOr(left, right);
case TARGET(STORE_SUBSCR): {
PyObject *sub = TOP();
PyObject *container = SECOND();
PyObject *v = THIRD();
STACK_SHRINK(3);
/* container[sub] = v */
err = PyObject_SetItem(container, sub, v);
case TARGET(DELETE_SUBSCR): {
STACK_SHRINK(2);
/* del container[sub] */
err = PyObject_DelItem(container, sub);
case TARGET(PRINT_EXPR): {
_Py_IDENTIFIER(displayhook);
PyObject *hook = _PySys_GetObjectId(&PyId_displayhook);
if (hook == NULL) {
_PyErr_SetString(tstate, PyExc_RuntimeError,
"lost sys.displayhook");
res = PyObject_CallOneArg(hook, value);
Py_DECREF(res);
case TARGET(RAISE_VARARGS): {
PyObject *cause = NULL, *exc = NULL;
switch (oparg) {
case 2:
cause = POP(); /* cause */
/* fall through */
case 1:
exc = POP(); /* exc */
case 0:
if (do_raise(tstate, exc, cause)) {
goto exception_unwind;
default:
"bad RAISE_VARARGS oparg");
case TARGET(RETURN_VALUE): {
retval = POP();
assert(f->f_iblock == 0);
assert(EMPTY());
goto exiting;
case TARGET(GET_AITER): {
unaryfunc getter = NULL;
PyObject *iter = NULL;
PyObject *obj = TOP();
PyTypeObject *type = Py_TYPE(obj);
if (type->tp_as_async != NULL) {
getter = type->tp_as_async->am_aiter;
if (getter != NULL) {
iter = (*getter)(obj);
Py_DECREF(obj);
if (iter == NULL) {
SET_TOP(NULL);
_PyErr_Format(tstate, PyExc_TypeError,
"'async for' requires an object with "
"__aiter__ method, got %.100s",
type->tp_name);
if (Py_TYPE(iter)->tp_as_async == NULL ||
Py_TYPE(iter)->tp_as_async->am_anext == NULL) {
"'async for' received an object from __aiter__ "
"that does not implement __anext__: %.100s",
Py_TYPE(iter)->tp_name);
Py_DECREF(iter);
SET_TOP(iter);
case TARGET(GET_ANEXT): {
PyObject *next_iter = NULL;
PyObject *awaitable = NULL;
PyObject *aiter = TOP();
PyTypeObject *type = Py_TYPE(aiter);
if (PyAsyncGen_CheckExact(aiter)) {
awaitable = type->tp_as_async->am_anext(aiter);
if (awaitable == NULL) {
if (type->tp_as_async != NULL){
getter = type->tp_as_async->am_anext;
next_iter = (*getter)(aiter);
if (next_iter == NULL) {
"'async for' requires an iterator with "
"__anext__ method, got %.100s",
awaitable = _PyCoro_GetAwaitableIter(next_iter);
_PyErr_FormatFromCause(
PyExc_TypeError,
"'async for' received an invalid object "
"from __anext__: %.100s",
Py_TYPE(next_iter)->tp_name);
Py_DECREF(next_iter);
PUSH(awaitable);
PREDICT(LOAD_CONST);
case TARGET(GET_AWAITABLE): {
PREDICTED(GET_AWAITABLE);
PyObject *iterable = TOP();
PyObject *iter = _PyCoro_GetAwaitableIter(iterable);
int opcode_at_minus_3 = 0;
if ((next_instr - first_instr) > 2) {
opcode_at_minus_3 = _Py_OPCODE(next_instr[-3]);
format_awaitable_error(tstate, Py_TYPE(iterable),
opcode_at_minus_3,
_Py_OPCODE(next_instr[-2]));
Py_DECREF(iterable);
if (iter != NULL && PyCoro_CheckExact(iter)) {
PyObject *yf = _PyGen_yf((PyGenObject*)iter);
if (yf != NULL) {
/* `iter` is a coroutine object that is being
awaited, `yf` is a pointer to the current awaitable
being awaited on. */
Py_DECREF(yf);
Py_CLEAR(iter);
"coroutine is being awaited already");
/* The code below jumps to `error` if `iter` is NULL. */
SET_TOP(iter); /* Even if it's NULL */
case TARGET(YIELD_FROM): {
PyObject *receiver = TOP();
if (PyGen_CheckExact(receiver) || PyCoro_CheckExact(receiver)) {
retval = _PyGen_Send((PyGenObject *)receiver, v);
_Py_IDENTIFIER(send);
if (v == Py_None)
retval = Py_TYPE(receiver)->tp_iternext(receiver);
else
retval = _PyObject_CallMethodIdOneArg(receiver, &PyId_send, v);
if (retval == NULL) {
PyObject *val;
if (tstate->c_tracefunc != NULL
&& _PyErr_ExceptionMatches(tstate, PyExc_StopIteration))
call_exc_trace(tstate->c_tracefunc, tstate->c_traceobj, tstate, f);
err = _PyGen_FetchStopIterationValue(&val);
if (err < 0)
Py_DECREF(receiver);
SET_TOP(val);
/* receiver remains on stack, retval is value to be yielded */
/* and repeat... */
assert(f->f_lasti >= (int)sizeof(_Py_CODEUNIT));
f->f_lasti -= sizeof(_Py_CODEUNIT);
case TARGET(YIELD_VALUE): {
if (co->co_flags & CO_ASYNC_GENERATOR) {
PyObject *w = _PyAsyncGenValueWrapperNew(retval);
Py_DECREF(retval);
if (w == NULL) {
retval = NULL;
retval = w;
case TARGET(POP_EXCEPT): {
PyObject *type, *value, *traceback;
_PyErr_StackItem *exc_info;
PyTryBlock *b = PyFrame_BlockPop(f);
if (b->b_type != EXCEPT_HANDLER) {
"popped block is not an except handler");
assert(STACK_LEVEL() >= (b)->b_level + 3 &&
STACK_LEVEL() <= (b)->b_level + 4);
exc_info = tstate->exc_info;
type = exc_info->exc_type;
value = exc_info->exc_value;
traceback = exc_info->exc_traceback;
exc_info->exc_type = POP();
exc_info->exc_value = POP();
exc_info->exc_traceback = POP();
Py_XDECREF(type);
Py_XDECREF(value);
Py_XDECREF(traceback);
case TARGET(POP_BLOCK): {
PREDICTED(POP_BLOCK);
PyFrame_BlockPop(f);
case TARGET(RERAISE): {
PyObject *exc = POP();
PyObject *val = POP();
PyObject *tb = POP();
assert(PyExceptionClass_Check(exc));
case TARGET(END_ASYNC_FOR): {
if (PyErr_GivenExceptionMatches(exc, PyExc_StopAsyncIteration)) {
assert(b->b_type == EXCEPT_HANDLER);
UNWIND_EXCEPT_HANDLER(b);
Py_DECREF(POP());
JUMPBY(oparg);
case TARGET(LOAD_ASSERTION_ERROR): {
PyObject *value = PyExc_AssertionError;
case TARGET(LOAD_BUILD_CLASS): {
_Py_IDENTIFIER(__build_class__);
PyObject *bc;
if (PyDict_CheckExact(f->f_builtins)) {
bc = _PyDict_GetItemIdWithError(f->f_builtins, &PyId___build_class__);
if (bc == NULL) {
if (!_PyErr_Occurred(tstate)) {
_PyErr_SetString(tstate, PyExc_NameError,
"__build_class__ not found");
Py_INCREF(bc);
PyObject *build_class_str = _PyUnicode_FromId(&PyId___build_class__);
if (build_class_str == NULL)
bc = PyObject_GetItem(f->f_builtins, build_class_str);
if (_PyErr_ExceptionMatches(tstate, PyExc_KeyError))
PUSH(bc);
case TARGET(STORE_NAME): {
PyObject *name = GETITEM(names, oparg);
PyObject *ns = f->f_locals;
if (ns == NULL) {
_PyErr_Format(tstate, PyExc_SystemError,
"no locals found when storing %R", name);
if (PyDict_CheckExact(ns))
err = PyDict_SetItem(ns, name, v);
err = PyObject_SetItem(ns, name, v);
case TARGET(DELETE_NAME): {
"no locals when deleting %R", name);
err = PyObject_DelItem(ns, name);
if (err != 0) {
format_exc_check_arg(tstate, PyExc_NameError,
NAME_ERROR_MSG,
name);
case TARGET(UNPACK_SEQUENCE): {
PREDICTED(UNPACK_SEQUENCE);
PyObject *seq = POP(), *item, **items;
if (PyTuple_CheckExact(seq) &&
PyTuple_GET_SIZE(seq) == oparg) {
items = ((PyTupleObject *)seq)->ob_item;
while (oparg--) {
item = items[oparg];
Py_INCREF(item);
PUSH(item);
} else if (PyList_CheckExact(seq) &&
PyList_GET_SIZE(seq) == oparg) {
items = ((PyListObject *)seq)->ob_item;
} else if (unpack_iterable(tstate, seq, oparg, -1,
stack_pointer + oparg)) {
STACK_GROW(oparg);
/* unpack_iterable() raised an exception */
Py_DECREF(seq);
case TARGET(UNPACK_EX): {
int totalargs = 1 + (oparg & 0xFF) + (oparg >> 8);
PyObject *seq = POP();
if (unpack_iterable(tstate, seq, oparg & 0xFF, oparg >> 8,
stack_pointer + totalargs)) {
stack_pointer += totalargs;
case TARGET(STORE_ATTR): {
PyObject *owner = TOP();
PyObject *v = SECOND();
err = PyObject_SetAttr(owner, name, v);
Py_DECREF(owner);
case TARGET(DELETE_ATTR): {
PyObject *owner = POP();
err = PyObject_SetAttr(owner, name, (PyObject *)NULL);
case TARGET(STORE_GLOBAL): {
err = PyDict_SetItem(f->f_globals, name, v);
case TARGET(DELETE_GLOBAL): {
err = PyDict_DelItem(f->f_globals, name);
if (_PyErr_ExceptionMatches(tstate, PyExc_KeyError)) {
NAME_ERROR_MSG, name);
case TARGET(LOAD_NAME): {
PyObject *locals = f->f_locals;
PyObject *v;
if (locals == NULL) {
"no locals when loading %R", name);
if (PyDict_CheckExact(locals)) {
v = PyDict_GetItemWithError(locals, name);
if (v != NULL) {
Py_INCREF(v);
else if (_PyErr_Occurred(tstate)) {
v = PyObject_GetItem(locals, name);
if (v == NULL) {
if (!_PyErr_ExceptionMatches(tstate, PyExc_KeyError))
_PyErr_Clear(tstate);
v = PyDict_GetItemWithError(f->f_globals, name);
v = PyDict_GetItemWithError(f->f_builtins, name);
format_exc_check_arg(
tstate, PyExc_NameError,
v = PyObject_GetItem(f->f_builtins, name);
PUSH(v);
case TARGET(LOAD_GLOBAL): {
PyObject *name;
if (PyDict_CheckExact(f->f_globals)
&& PyDict_CheckExact(f->f_builtins))
OPCACHE_CHECK();
if (co_opcache != NULL && co_opcache->optimized > 0) {
_PyOpcache_LoadGlobal *lg = &co_opcache->u.lg;
if (lg->globals_ver ==
((PyDictObject *)f->f_globals)->ma_version_tag
&& lg->builtins_ver ==
((PyDictObject *)f->f_builtins)->ma_version_tag)
PyObject *ptr = lg->ptr;
OPCACHE_STAT_GLOBAL_HIT();
assert(ptr != NULL);
Py_INCREF(ptr);
PUSH(ptr);
name = GETITEM(names, oparg);
v = _PyDict_LoadGlobal((PyDictObject *)f->f_globals,
(PyDictObject *)f->f_builtins,
if (!_PyErr_OCCURRED()) {
/* _PyDict_LoadGlobal() returns NULL without raising
* an exception if the key doesn't exist */
if (co_opcache != NULL) {
if (co_opcache->optimized == 0) {
/* Wasn't optimized before. */
OPCACHE_STAT_GLOBAL_OPT();
OPCACHE_STAT_GLOBAL_MISS();
co_opcache->optimized = 1;
lg->globals_ver =
((PyDictObject *)f->f_globals)->ma_version_tag;
lg->builtins_ver =
((PyDictObject *)f->f_builtins)->ma_version_tag;
lg->ptr = v; /* borrowed */
/* Slow-path if globals or builtins is not a dict */
/* namespace 1: globals */
v = PyObject_GetItem(f->f_globals, name);
if (!_PyErr_ExceptionMatches(tstate, PyExc_KeyError)) {
/* namespace 2: builtins */
case TARGET(DELETE_FAST): {
PyObject *v = GETLOCAL(oparg);
SETLOCAL(oparg, NULL);
tstate, PyExc_UnboundLocalError,
PyTuple_GetItem(co->co_varnames, oparg)
);
case TARGET(DELETE_DEREF): {
PyObject *cell = freevars[oparg];
PyObject *oldobj = PyCell_GET(cell);
if (oldobj != NULL) {
PyCell_SET(cell, NULL);
Py_DECREF(oldobj);
format_exc_unbound(tstate, co, oparg);
case TARGET(LOAD_CLOSURE): {
Py_INCREF(cell);
PUSH(cell);
case TARGET(LOAD_CLASSDEREF): {
PyObject *name, *value, *locals = f->f_locals;
Py_ssize_t idx;
assert(locals);
assert(oparg >= PyTuple_GET_SIZE(co->co_cellvars));
idx = oparg - PyTuple_GET_SIZE(co->co_cellvars);
assert(idx >= 0 && idx < PyTuple_GET_SIZE(co->co_freevars));
name = PyTuple_GET_ITEM(co->co_freevars, idx);
value = PyDict_GetItemWithError(locals, name);
if (value != NULL) {
value = PyObject_GetItem(locals, name);
if (!value) {
value = PyCell_GET(cell);
case TARGET(LOAD_DEREF): {
PyObject *value = PyCell_GET(cell);
case TARGET(STORE_DEREF): {
PyCell_SET(cell, v);
Py_XDECREF(oldobj);
case TARGET(BUILD_STRING): {
PyObject *str;
PyObject *empty = PyUnicode_New(0, 0);
if (empty == NULL) {
str = _PyUnicode_JoinArray(empty, stack_pointer - oparg, oparg);
Py_DECREF(empty);
if (str == NULL)
while (--oparg >= 0) {
PyObject *item = POP();
Py_DECREF(item);
PUSH(str);
case TARGET(BUILD_TUPLE): {
PyObject *tup = PyTuple_New(oparg);
if (tup == NULL)
PyTuple_SET_ITEM(tup, oparg, item);
PUSH(tup);
case TARGET(BUILD_LIST): {
PyObject *list = PyList_New(oparg);
if (list == NULL)
PyList_SET_ITEM(list, oparg, item);
PUSH(list);
case TARGET(LIST_TO_TUPLE): {
PyObject *list = POP();
PyObject *tuple = PyList_AsTuple(list);
Py_DECREF(list);
if (tuple == NULL) {
PUSH(tuple);
case TARGET(LIST_EXTEND): {
PyObject *iterable = POP();
PyObject *none_val = _PyList_Extend((PyListObject *)list, iterable);
if (none_val == NULL) {
if (_PyErr_ExceptionMatches(tstate, PyExc_TypeError) &&
(Py_TYPE(iterable)->tp_iter == NULL && !PySequence_Check(iterable)))
"Value after * must be an iterable, not %.200s",
Py_TYPE(iterable)->tp_name);
Py_DECREF(none_val);
case TARGET(SET_UPDATE): {
int err = _PySet_Update(set, iterable);
if (err < 0) {
case TARGET(BUILD_SET): {
PyObject *set = PySet_New(NULL);
int err = 0;
int i;
if (set == NULL)
for (i = oparg; i > 0; i--) {
PyObject *item = PEEK(i);
if (err == 0)
err = PySet_Add(set, item);
STACK_SHRINK(oparg);
Py_DECREF(set);
PUSH(set);
case TARGET(BUILD_MAP): {
Py_ssize_t i;
PyObject *map = _PyDict_NewPresized((Py_ssize_t)oparg);
if (map == NULL)
PyObject *key = PEEK(2*i);
PyObject *value = PEEK(2*i - 1);
err = PyDict_SetItem(map, key, value);
Py_DECREF(map);
PUSH(map);
case TARGET(SETUP_ANNOTATIONS): {
_Py_IDENTIFIER(__annotations__);
PyObject *ann_dict;
if (f->f_locals == NULL) {
"no locals found when setting up annotations");
/* check if __annotations__ in locals()... */
if (PyDict_CheckExact(f->f_locals)) {
ann_dict = _PyDict_GetItemIdWithError(f->f_locals,
&PyId___annotations__);
if (ann_dict == NULL) {
if (_PyErr_Occurred(tstate)) {
/* ...if not, create a new one */
ann_dict = PyDict_New();
err = _PyDict_SetItemId(f->f_locals,
&PyId___annotations__, ann_dict);
Py_DECREF(ann_dict);
/* do the same if locals() is not a dict */
PyObject *ann_str = _PyUnicode_FromId(&PyId___annotations__);
if (ann_str == NULL) {
ann_dict = PyObject_GetItem(f->f_locals, ann_str);
err = PyObject_SetItem(f->f_locals, ann_str, ann_dict);
case TARGET(BUILD_CONST_KEY_MAP): {
PyObject *map;
PyObject *keys = TOP();
if (!PyTuple_CheckExact(keys) ||
PyTuple_GET_SIZE(keys) != (Py_ssize_t)oparg) {
"bad BUILD_CONST_KEY_MAP keys argument");
map = _PyDict_NewPresized((Py_ssize_t)oparg);
if (map == NULL) {
PyObject *key = PyTuple_GET_ITEM(keys, oparg - i);
PyObject *value = PEEK(i + 1);
case TARGET(DICT_UPDATE): {
PyObject *update = POP();
PyObject *dict = PEEK(oparg);
if (PyDict_Update(dict, update) < 0) {
if (_PyErr_ExceptionMatches(tstate, PyExc_AttributeError)) {
"'%.200s' object is not a mapping",
Py_TYPE(update)->tp_name);
Py_DECREF(update);
case TARGET(DICT_MERGE): {
if (_PyDict_MergeEx(dict, update, 2) < 0) {
format_kwargs_error(tstate, PEEK(2 + oparg), update);
PREDICT(CALL_FUNCTION_EX);
case TARGET(MAP_ADD): {
PyObject *key = SECOND();
map = PEEK(oparg); /* dict */
assert(PyDict_CheckExact(map));
err = PyDict_SetItem(map, key, value); /* map[key] = value */
Py_DECREF(key);
case TARGET(LOAD_ATTR): {
PyObject *res = PyObject_GetAttr(owner, name);
case TARGET(COMPARE_OP): {
assert(oparg <= Py_GE);
PyObject *res = PyObject_RichCompare(left, right, oparg);
PREDICT(POP_JUMP_IF_FALSE);
PREDICT(POP_JUMP_IF_TRUE);
case TARGET(IS_OP): {
int res = (left == right)^oparg;
PyObject *b = res ? Py_True : Py_False;
Py_INCREF(b);
SET_TOP(b);
case TARGET(CONTAINS_OP): {
PyObject *left = POP();
int res = PySequence_Contains(right, left);
if (res < 0) {
PyObject *b = (res^oparg) ? Py_True : Py_False;
PUSH(b);
#define CANNOT_CATCH_MSG "catching classes that do not inherit from "\
"BaseException is not allowed"
case TARGET(JUMP_IF_NOT_EXC_MATCH): {
if (PyTuple_Check(right)) {
Py_ssize_t i, length;
length = PyTuple_GET_SIZE(right);
for (i = 0; i < length; i++) {
PyObject *exc = PyTuple_GET_ITEM(right, i);
if (!PyExceptionClass_Check(exc)) {
_PyErr_SetString(tstate, PyExc_TypeError,
CANNOT_CATCH_MSG);
if (!PyExceptionClass_Check(right)) {
int res = PyErr_GivenExceptionMatches(left, right);
if (res > 0) {
/* Exception matches -- Do nothing */;
else if (res == 0) {
JUMPTO(oparg);
case TARGET(IMPORT_NAME): {
PyObject *fromlist = POP();
PyObject *level = TOP();
res = import_name(tstate, f, name, fromlist, level);
Py_DECREF(level);
Py_DECREF(fromlist);
case TARGET(IMPORT_STAR): {
PyObject *from = POP(), *locals;
if (PyFrame_FastToLocalsWithError(f) < 0) {
Py_DECREF(from);
locals = f->f_locals;
"no locals found during 'import *'");
err = import_all_from(tstate, locals, from);
PyFrame_LocalsToFast(f, 0);
case TARGET(IMPORT_FROM): {
PyObject *from = TOP();
res = import_from(tstate, from, name);
PUSH(res);
case TARGET(JUMP_FORWARD): {
case TARGET(POP_JUMP_IF_FALSE): {
PREDICTED(POP_JUMP_IF_FALSE);
PyObject *cond = POP();
if (cond == Py_True) {
Py_DECREF(cond);
if (cond == Py_False) {
err = PyObject_IsTrue(cond);
if (err > 0)
;
else if (err == 0)
case TARGET(POP_JUMP_IF_TRUE): {
PREDICTED(POP_JUMP_IF_TRUE);
if (err > 0) {
case TARGET(JUMP_IF_FALSE_OR_POP): {
PyObject *cond = TOP();
case TARGET(JUMP_IF_TRUE_OR_POP): {
else if (err == 0) {
case TARGET(JUMP_ABSOLUTE): {
PREDICTED(JUMP_ABSOLUTE);
#if FAST_LOOPS
            /* Enabling this path speeds-up all while and for-loops by bypassing
               the per-loop checks for signals.  By default, this should be turned-off
               because it prevents detection of a control-break in tight loops like
               "while 1: pass".  Compile with this option turned-on when you need
               the speed-up and do not need break checking inside tight loops (ones
               that contain only instructions ending with FAST_DISPATCH).
            */
            FAST_DISPATCH();
#else
            DISPATCH();
#endif
case TARGET(GET_ITER): {
/* before: [obj]; after [getiter(obj)] */
PyObject *iter = PyObject_GetIter(iterable);
if (iter == NULL)
PREDICT(FOR_ITER);
PREDICT(CALL_FUNCTION);
case TARGET(GET_YIELD_FROM_ITER): {
PyObject *iter;
if (PyCoro_CheckExact(iterable)) {
/* `iterable` is a coroutine */
if (!(co->co_flags & (CO_COROUTINE | CO_ITERABLE_COROUTINE))) {
/* and it is used in a 'yield from' expression of a
regular generator. */
"cannot 'yield from' a coroutine object "
"in a non-coroutine generator");
else if (!PyGen_CheckExact(iterable)) {
/* `iterable` is not a generator. */
iter = PyObject_GetIter(iterable);
case TARGET(FOR_ITER): {
PREDICTED(FOR_ITER);
/* before: [iter]; after: [iter, iter()] *or* [] */
PyObject *iter = TOP();
PyObject *next = (*Py_TYPE(iter)->tp_iternext)(iter);
if (next != NULL) {
PUSH(next);
PREDICT(STORE_FAST);
PREDICT(UNPACK_SEQUENCE);
if (!_PyErr_ExceptionMatches(tstate, PyExc_StopIteration)) {
else if (tstate->c_tracefunc != NULL) {
/* iterator ended normally */
PREDICT(POP_BLOCK);
case TARGET(SETUP_FINALLY): {
PyFrame_BlockSetup(f, SETUP_FINALLY, INSTR_OFFSET() + oparg,
STACK_LEVEL());
case TARGET(BEFORE_ASYNC_WITH): {
_Py_IDENTIFIER(__aenter__);
_Py_IDENTIFIER(__aexit__);
PyObject *mgr = TOP();
PyObject *enter = special_lookup(tstate, mgr, &PyId___aenter__);
if (enter == NULL) {
PyObject *exit = special_lookup(tstate, mgr, &PyId___aexit__);
if (exit == NULL) {
Py_DECREF(enter);
SET_TOP(exit);
Py_DECREF(mgr);
res = _PyObject_CallNoArg(enter);
PREDICT(GET_AWAITABLE);
case TARGET(SETUP_ASYNC_WITH): {
PyObject *res = POP();
/* Setup the finally block before pushing the result
of __aenter__ on the stack. */
case TARGET(SETUP_WITH): {
_Py_IDENTIFIER(__enter__);
_Py_IDENTIFIER(__exit__);
PyObject *enter = special_lookup(tstate, mgr, &PyId___enter__);
PyObject *exit = special_lookup(tstate, mgr, &PyId___exit__);
of __enter__ on the stack. */
case TARGET(WITH_EXCEPT_START): {
/* At the top of the stack are 7 values:
- (TOP, SECOND, THIRD) = exc_info()
- (FOURTH, FIFTH, SIXTH) = previous exception for EXCEPT_HANDLER
- SEVENTH: the context.__exit__ bound method
We call SEVENTH(TOP, SECOND, THIRD).
               Then we push again the TOP exception and the __exit__
               return value.
            */
PyObject *exit_func;
PyObject *exc, *val, *tb, *res;
exc = TOP();
val = SECOND();
tb = THIRD();
assert(exc != Py_None);
assert(!PyLong_Check(exc));
exit_func = PEEK(7);
PyObject *stack[4] = {NULL, exc, val, tb};
res = PyObject_Vectorcall(exit_func, stack + 1,
3 | PY_VECTORCALL_ARGUMENTS_OFFSET, NULL);
case TARGET(LOAD_METHOD): {
/* Designed to work in tandem with CALL_METHOD. */
PyObject *meth = NULL;
int meth_found = _PyObject_GetMethod(obj, name, &meth);
if (meth == NULL) {
/* Most likely attribute wasn't found. */
            if (meth_found) {
                /* We can bypass temporary bound method object.
                   meth is unbound method and obj is self.

                   meth | self | arg1 | ... | argN
                 */
                SET_TOP(meth);
                PUSH(obj);  // self
            }
            else {
                /* meth is not an unbound method (but a regular attr, or
                   something was returned by a descriptor protocol).  Set
                   the second element of the stack to NULL, to signal
                   CALL_METHOD that it's not a method call.

                   NULL | meth | arg1 | ... | argN
                */
                SET_TOP(NULL);
                Py_DECREF(obj);
                PUSH(meth);
            }
case TARGET(CALL_METHOD): {
            /* Designed to work in tandem with LOAD_METHOD. */
PyObject **sp, *res, *meth;
sp = stack_pointer;
meth = PEEK(oparg + 2);
            /* `meth` is NULL when LOAD_METHOD thinks that it's not
               a method call.

               Stack layout:

                   ... | NULL | callable | arg1 | ... | argN
                                                        ^- TOP()
                                           ^- (-oparg)
                              ^- (-oparg-1)
                       ^- (-oparg-2)

               `callable` will be popped by call_function.
               NULL will be popped manually later.
            */
res = call_function(tstate, &sp, oparg, NULL);
stack_pointer = sp;
(void)POP(); /* POP the NULL. */
            /* This is a method call.  Stack layout:

                   ... | method | self | arg1 | ... | argN
                                                      ^- TOP()
                                         ^- (-oparg)
                                ^- (-oparg-1)
                       ^- (-oparg-2)

               `self` and `method` will be popped by call_function.
               We'll be passing `oparg + 1` to call_function, to
               make it accept the `self` as a first argument.
            */
res = call_function(tstate, &sp, oparg + 1, NULL);
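            /* Illustrative example (not from this file): for a call like
               obj.meth(x) the compiler emits

                   LOAD_FAST     obj
                   LOAD_METHOD   meth
                   LOAD_FAST     x
                   CALL_METHOD   1

               so CALL_METHOD sees either "method | self | x" (meth_found
               in LOAD_METHOD above) or "NULL | callable | x" on the stack. */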
case TARGET(CALL_FUNCTION): {
PREDICTED(CALL_FUNCTION);
PyObject **sp, *res;
if (res == NULL) {
case TARGET(CALL_FUNCTION_KW): {
PyObject **sp, *res, *names;
names = POP();
assert(PyTuple_Check(names));
assert(PyTuple_GET_SIZE(names) <= oparg);
/* We assume without checking that names contains only strings */
res = call_function(tstate, &sp, oparg, names);
Py_DECREF(names);
case TARGET(CALL_FUNCTION_EX): {
PREDICTED(CALL_FUNCTION_EX);
PyObject *func, *callargs, *kwargs = NULL, *result;
if (oparg & 0x01) {
kwargs = POP();
if (!PyDict_CheckExact(kwargs)) {
PyObject *d = PyDict_New();
if (d == NULL)
if (_PyDict_MergeEx(d, kwargs, 2) < 0) {
Py_DECREF(d);
format_kwargs_error(tstate, SECOND(), kwargs);
Py_DECREF(kwargs);
kwargs = d;
assert(PyDict_CheckExact(kwargs));
callargs = POP();
func = TOP();
if (!PyTuple_CheckExact(callargs)) {
if (check_args_iterable(tstate, func, callargs) < 0) {
Py_DECREF(callargs);
Py_SETREF(callargs, PySequence_Tuple(callargs));
if (callargs == NULL) {
assert(PyTuple_CheckExact(callargs));
result = do_call_core(tstate, func, callargs, kwargs);
Py_DECREF(func);
Py_XDECREF(kwargs);
SET_TOP(result);
if (result == NULL) {
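/* For example, f(*args, **kwargs) compiles to CALL_FUNCTION_EX 1
(bit 0x01 set, so a mapping sits on top of the stack), while f(*args)
compiles to CALL_FUNCTION_EX 0. */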
case TARGET(MAKE_FUNCTION): {
PyObject *qualname = POP();
PyObject *codeobj = POP();
PyFunctionObject *func = (PyFunctionObject *)
PyFunction_NewWithQualName(codeobj, f->f_globals, qualname);
Py_DECREF(codeobj);
Py_DECREF(qualname);
if (oparg & 0x08) {
assert(PyTuple_CheckExact(TOP()));
func->func_closure = POP();
if (oparg & 0x04) {
assert(PyDict_CheckExact(TOP()));
func->func_annotations = POP();
if (oparg & 0x02) {
func->func_kwdefaults = POP();
func->func_defaults = POP();
PUSH((PyObject *)func);
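/* The oparg bits mirror what the compiler pushed beneath the code
object and qualname: 0x01 a tuple of positional defaults, 0x02 a dict
of keyword-only defaults, 0x04 an annotations dict, 0x08 a tuple of
cells for the closure. For example, def f(a=1, *, b=2): ... is built
with MAKE_FUNCTION 3 (defaults and kwdefaults present). */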
case TARGET(BUILD_SLICE): {
PyObject *start, *stop, *step, *slice;
if (oparg == 3)
step = POP();
step = NULL;
stop = POP();
start = TOP();
slice = PySlice_New(start, stop, step);
Py_DECREF(start);
Py_DECREF(stop);
Py_XDECREF(step);
SET_TOP(slice);
if (slice == NULL)
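/* For example, x[1:10:2] pushes the three boundary values and builds
the slice in one step:
    LOAD_CONST    1
    LOAD_CONST    10
    LOAD_CONST    2
    BUILD_SLICE   3
    BINARY_SUBSCR
whereas x[1:10] uses BUILD_SLICE 2 and leaves step as NULL. */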
case TARGET(FORMAT_VALUE): {
/* Handles f-string value formatting. */
PyObject *result;
PyObject *fmt_spec;
PyObject *value;
PyObject *(*conv_fn)(PyObject *);
int which_conversion = oparg & FVC_MASK;
int have_fmt_spec = (oparg & FVS_MASK) == FVS_HAVE_SPEC;
fmt_spec = have_fmt_spec ? POP() : NULL;
value = POP();
/* See if any conversion is specified. */
switch (which_conversion) {
case FVC_NONE: conv_fn = NULL; break;
case FVC_STR: conv_fn = PyObject_Str; break;
case FVC_REPR: conv_fn = PyObject_Repr; break;
case FVC_ASCII: conv_fn = PyObject_ASCII; break;
"unexpected conversion flag %d",
which_conversion);
/* If there's a conversion function, call it and replace
value with that result. Otherwise, just use value,
without conversion. */
if (conv_fn != NULL) {
result = conv_fn(value);
Py_XDECREF(fmt_spec);
value = result;
/* If value is a unicode object, and there's no fmt_spec,
then we know the result of format(value) is value
itself. In that case, skip calling format(). I plan to
move this optimization into PyObject_Format()
itself. */
if (PyUnicode_CheckExact(value) && fmt_spec == NULL) {
/* Do nothing, just transfer ownership to result. */
result = value;
/* Actually call format(). */
result = PyObject_Format(value, fmt_spec);
PUSH(result);
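/* For example, f"{x!r:>8}" evaluates x, pushes the spec ">8" and runs
FORMAT_VALUE 6 (FVC_REPR | FVS_HAVE_SPEC), which amounts to
format(repr(x), ">8"); a plain f"{x}" uses FORMAT_VALUE 0. */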
case TARGET(EXTENDED_ARG): {
int oldoparg = oparg;
oparg |= oldoparg << 8;
goto dispatch_opcode;
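/* Each code unit carries only an 8-bit oparg, so larger arguments are
split across EXTENDED_ARG prefixes. For example, an absolute jump to
offset 300 is encoded as
    EXTENDED_ARG   1
    JUMP_ABSOLUTE  44
because (1 << 8) | 44 == 300. */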
_unknown_opcode:
fprintf(stderr,
"XXX lineno: %d, opcode: %d\n",
PyFrame_GetLineNumber(f),
opcode);
_PyErr_SetString(tstate, PyExc_SystemError, "unknown opcode");
} /* switch */
/* This should never be reached. Every opcode should end with DISPATCH()
or goto error. */
Py_UNREACHABLE();
/* Double-check exception status. */
#ifdef NDEBUG
"error return without exception set");
assert(_PyErr_Occurred(tstate));
/* Log traceback info. */
PyTraceBack_Here(f);
if (tstate->c_tracefunc != NULL)
call_exc_trace(tstate->c_tracefunc, tstate->c_traceobj,
tstate, f);
exception_unwind:
/* Unwind stacks if an exception occurred */
while (f->f_iblock > 0) {
/* Pop the current block. */
PyTryBlock *b = &f->f_blockstack[--f->f_iblock];
if (b->b_type == EXCEPT_HANDLER) {
continue;
UNWIND_BLOCK(b);
if (b->b_type == SETUP_FINALLY) {
int handler = b->b_handler;
_PyErr_StackItem *exc_info = tstate->exc_info;
/* Beware, this invalidates all b->b_* fields */
PyFrame_BlockSetup(f, EXCEPT_HANDLER, -1, STACK_LEVEL());
PUSH(exc_info->exc_traceback);
PUSH(exc_info->exc_value);
if (exc_info->exc_type != NULL) {
PUSH(exc_info->exc_type);
Py_INCREF(Py_None);
PUSH(Py_None);
/* Make the raw exception data
available to the handler,
so a program can emulate the
Python main loop. */
_PyErr_NormalizeException(tstate, &exc, &val, &tb);
if (tb != NULL)
PyException_SetTraceback(val, tb);
PyException_SetTraceback(val, Py_None);
Py_INCREF(exc);
exc_info->exc_type = exc;
Py_INCREF(val);
exc_info->exc_value = val;
exc_info->exc_traceback = tb;
if (tb == NULL)
tb = Py_None;
Py_INCREF(tb);
PUSH(tb);
PUSH(val);
PUSH(exc);
JUMPTO(handler);
if (_Py_TracingPossible(ceval2)) {
int needs_new_execution_window = (f->f_lasti < instr_lb || f->f_lasti >= instr_ub);
int needs_line_update = (f->f_lasti == instr_lb || f->f_lasti < instr_prev);
/* Trace the line after an exception when we have entered a new
* execution window, or when no line update is needed and we are
* not at the first instruction of the line. */
if (needs_new_execution_window || (!needs_line_update && instr_lb > 0)) {
instr_prev = INT_MAX;
/* Resume normal execution */
goto main_loop;
} /* unwind stack */
/* End the loop as we still have an error */
} /* main loop */
assert(retval == NULL);
/* Pop remaining stack entries. */
while (!EMPTY()) {
PyObject *o = POP();
Py_XDECREF(o);
exiting:
if (tstate->c_tracefunc) {
if (call_trace_protected(tstate->c_tracefunc, tstate->c_traceobj,
tstate, f, PyTrace_RETURN, retval)) {
Py_CLEAR(retval);
if (tstate->c_profilefunc) {
if (call_trace_protected(tstate->c_profilefunc, tstate->c_profileobj,
/* pop frame */
exit_eval_frame:
if (PyDTrace_FUNCTION_RETURN_ENABLED())
dtrace_function_return(f);
_Py_LeaveRecursiveCall(tstate);
f->f_executing = 0;
tstate->frame = f->f_back;
return _Py_CheckFunctionResult(tstate, NULL, retval, __func__);
format_missing(PyThreadState *tstate, const char *kind,
PyCodeObject *co, PyObject *names)
Py_ssize_t len = PyList_GET_SIZE(names);
PyObject *name_str, *comma, *tail, *tmp;
assert(PyList_CheckExact(names));
assert(len >= 1);
/* Deal with the joys of natural language. */
switch (len) {
name_str = PyList_GET_ITEM(names, 0);
Py_INCREF(name_str);
name_str = PyUnicode_FromFormat("%U and %U",
PyList_GET_ITEM(names, len - 2),
PyList_GET_ITEM(names, len - 1));
tail = PyUnicode_FromFormat(", %U, and %U",
if (tail == NULL)
/* Chop off the last two objects in the list. This shouldn't actually
fail, but we can't be too careful. */
err = PyList_SetSlice(names, len - 2, len, NULL);
if (err == -1) {
Py_DECREF(tail);
/* Stitch everything up into a nice comma-separated list. */
comma = PyUnicode_FromString(", ");
if (comma == NULL) {
tmp = PyUnicode_Join(comma, names);
Py_DECREF(comma);
if (tmp == NULL) {
name_str = PyUnicode_Concat(tmp, tail);
Py_DECREF(tmp);
if (name_str == NULL)
"%U() missing %i required %s argument%s: %U",
co->co_name,
len,
kind,
len == 1 ? "" : "s",
name_str);
Py_DECREF(name_str);
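/* For example, defining def f(a, b): ... and calling f() produces:
TypeError: f() missing 2 required positional arguments: 'a' and 'b' */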
missing_arguments(PyThreadState *tstate, PyCodeObject *co,
Py_ssize_t missing, Py_ssize_t defcount,
PyObject **fastlocals)
Py_ssize_t i, j = 0;
Py_ssize_t start, end;
int positional = (defcount != -1);
const char *kind = positional ? "positional" : "keyword-only";
PyObject *missing_names;
/* Compute the names of the arguments that are missing. */
missing_names = PyList_New(missing);
if (missing_names == NULL)
if (positional) {
start = 0;
end = co->co_argcount - defcount;
start = co->co_argcount;
end = start + co->co_kwonlyargcount;
for (i = start; i < end; i++) {
if (GETLOCAL(i) == NULL) {
PyObject *raw = PyTuple_GET_ITEM(co->co_varnames, i);
PyObject *name = PyObject_Repr(raw);
if (name == NULL) {
Py_DECREF(missing_names);
PyList_SET_ITEM(missing_names, j++, name);
assert(j == missing);
format_missing(tstate, kind, co, missing_names);
too_many_positional(PyThreadState *tstate, PyCodeObject *co,
Py_ssize_t given, Py_ssize_t defcount,
int plural;
Py_ssize_t kwonly_given = 0;
PyObject *sig, *kwonly_sig;
Py_ssize_t co_argcount = co->co_argcount;
assert((co->co_flags & CO_VARARGS) == 0);
/* Count missing keyword-only args. */
for (i = co_argcount; i < co_argcount + co->co_kwonlyargcount; i++) {
if (GETLOCAL(i) != NULL) {
kwonly_given++;
if (defcount) {
Py_ssize_t atleast = co_argcount - defcount;
plural = 1;
sig = PyUnicode_FromFormat("from %zd to %zd", atleast, co_argcount);
plural = (co_argcount != 1);
sig = PyUnicode_FromFormat("%zd", co_argcount);
if (sig == NULL)
if (kwonly_given) {
const char *format = " positional argument%s (and %zd keyword-only argument%s)";
kwonly_sig = PyUnicode_FromFormat(format,
given != 1 ? "s" : "",
kwonly_given,
kwonly_given != 1 ? "s" : "");
if (kwonly_sig == NULL) {
Py_DECREF(sig);
/* This will not fail. */
kwonly_sig = PyUnicode_FromString("");
assert(kwonly_sig != NULL);
"%U() takes %U positional argument%s but %zd%U %s given",
sig,
plural ? "s" : "",
given,
kwonly_sig,
given == 1 && !kwonly_given ? "was" : "were");
Py_DECREF(kwonly_sig);
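/* For example, calling def f(a, b): ... as f(1, 2, 3) produces:
TypeError: f() takes 2 positional arguments but 3 were given */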
positional_only_passed_as_keyword(PyThreadState *tstate, PyCodeObject *co,
Py_ssize_t kwcount, PyObject* const* kwnames)
int posonly_conflicts = 0;
PyObject* posonly_names = PyList_New(0);
for (int k = 0; k < co->co_posonlyargcount; k++) {
PyObject* posonly_name = PyTuple_GET_ITEM(co->co_varnames, k);
for (int k2 = 0; k2 < kwcount; k2++) {
/* Compare the pointers first and fall back to PyObject_RichCompareBool */
PyObject* kwname = kwnames[k2];
if (kwname == posonly_name) {
if (PyList_Append(posonly_names, kwname) != 0) {
goto fail;
posonly_conflicts++;
int cmp = PyObject_RichCompareBool(posonly_name, kwname, Py_EQ);
if (cmp > 0) {
} else if (cmp < 0) {
if (posonly_conflicts) {
PyObject* comma = PyUnicode_FromString(", ");
PyObject* error_names = PyUnicode_Join(comma, posonly_names);
if (error_names == NULL) {
"%U() got some positional-only arguments passed"
" as keyword arguments: '%U'",
co->co_name, error_names);
Py_DECREF(error_names);
Py_DECREF(posonly_names);
fail:
Py_XDECREF(posonly_names);
return 1;
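/* For example, calling def f(a, /): ... as f(a=1) produces:
TypeError: f() got some positional-only arguments passed as keyword
arguments: 'a' */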
/* This is going to seem *really weird*, but if you put some other code between
PyEval_EvalFrame() and _PyEval_EvalFrameDefault() you will need to adjust
the test in the if statements in Misc/gdbinit (pystack and pystackv). */
_PyEval_EvalCode(PyThreadState *tstate,
PyObject *_co, PyObject *globals, PyObject *locals,
PyObject *const *args, Py_ssize_t argcount,
PyObject *const *kwnames, PyObject *const *kwargs,
Py_ssize_t kwcount, int kwstep,
PyObject *const *defs, Py_ssize_t defcount,
PyObject *kwdefs, PyObject *closure,
PyObject *name, PyObject *qualname)
PyCodeObject* co = (PyCodeObject*)_co;
PyFrameObject *f;
PyObject *retval = NULL;
PyObject *x, *u;
const Py_ssize_t total_args = co->co_argcount + co->co_kwonlyargcount;
Py_ssize_t i, j, n;
PyObject *kwdict;
if (globals == NULL) {
"PyEval_EvalCodeEx: NULL globals");
/* Create the frame */
f = _PyFrame_New_NoTrack(tstate, co, globals, locals);
if (f == NULL) {
/* Create a dictionary for keyword parameters (**kwargs) */
if (co->co_flags & CO_VARKEYWORDS) {
kwdict = PyDict_New();
if (kwdict == NULL)
i = total_args;
if (co->co_flags & CO_VARARGS) {
i++;
SETLOCAL(i, kwdict);
kwdict = NULL;
/* Copy all positional arguments into local variables */
if (argcount > co->co_argcount) {
n = co->co_argcount;
n = argcount;
for (j = 0; j < n; j++) {
x = args[j];
Py_INCREF(x);
SETLOCAL(j, x);
/* Pack other positional arguments into the *args argument */
u = _PyTuple_FromArray(args + n, argcount - n);
if (u == NULL) {
SETLOCAL(total_args, u);
/* Handle keyword arguments passed as two strided arrays */
kwcount *= kwstep;
for (i = 0; i < kwcount; i += kwstep) {
PyObject **co_varnames;
PyObject *keyword = kwnames[i];
PyObject *value = kwargs[i];
Py_ssize_t j;
if (keyword == NULL || !PyUnicode_Check(keyword)) {
"%U() keywords must be strings",
co->co_name);
/* Speed hack: do raw pointer compares. As names are
normally interned this should almost always hit. */
co_varnames = ((PyTupleObject *)(co->co_varnames))->ob_item;
for (j = co->co_posonlyargcount; j < total_args; j++) {
PyObject *name = co_varnames[j];
if (name == keyword) {
goto kw_found;
/* Slow fallback, just in case */
int cmp = PyObject_RichCompareBool(keyword, name, Py_EQ);
if (cmp > 0) {
else if (cmp < 0) {
assert(j >= total_args);
if (kwdict == NULL) {
if (co->co_posonlyargcount
&& positional_only_passed_as_keyword(tstate, co,
kwcount, kwnames))
"%U() got an unexpected keyword argument '%S'",
co->co_name, keyword);
if (PyDict_SetItem(kwdict, keyword, value) == -1) {
kw_found:
if (GETLOCAL(j) != NULL) {
"%U() got multiple values for argument '%S'",
SETLOCAL(j, value);
/* Check the number of positional arguments */
if ((argcount > co->co_argcount) && !(co->co_flags & CO_VARARGS)) {
too_many_positional(tstate, co, argcount, defcount, fastlocals);
/* Add missing positional arguments (copy default values from defs) */
if (argcount < co->co_argcount) {
Py_ssize_t m = co->co_argcount - defcount;
Py_ssize_t missing = 0;
for (i = argcount; i < m; i++) {
missing++;
if (missing) {
missing_arguments(tstate, co, missing, defcount, fastlocals);
if (n > m)
i = n - m;
i = 0;
for (; i < defcount; i++) {
if (GETLOCAL(m+i) == NULL) {
PyObject *def = defs[i];
Py_INCREF(def);
SETLOCAL(m+i, def);
/* Add missing keyword arguments (copy default values from kwdefs) */
if (co->co_kwonlyargcount > 0) {
for (i = co->co_argcount; i < total_args; i++) {
if (GETLOCAL(i) != NULL)
name = PyTuple_GET_ITEM(co->co_varnames, i);
if (kwdefs != NULL) {
PyObject *def = PyDict_GetItemWithError(kwdefs, name);
if (def) {
SETLOCAL(i, def);
missing_arguments(tstate, co, missing, -1, fastlocals);
/* Allocate and initialize storage for cell vars, and copy free
vars into frame. */
for (i = 0; i < PyTuple_GET_SIZE(co->co_cellvars); ++i) {
PyObject *c;
Py_ssize_t arg;
/* Possibly account for the cell variable being an argument. */
if (co->co_cell2arg != NULL &&
(arg = co->co_cell2arg[i]) != CO_CELL_NOT_AN_ARG) {
c = PyCell_New(GETLOCAL(arg));
/* Clear the local copy. */
SETLOCAL(arg, NULL);
c = PyCell_New(NULL);
if (c == NULL)
SETLOCAL(co->co_nlocals + i, c);
/* Copy closure variables to free variables */
for (i = 0; i < PyTuple_GET_SIZE(co->co_freevars); ++i) {
PyObject *o = PyTuple_GET_ITEM(closure, i);
Py_INCREF(o);
freevars[PyTuple_GET_SIZE(co->co_cellvars) + i] = o;
/* Handle generator/coroutine/asynchronous generator */
if (co->co_flags & (CO_GENERATOR | CO_COROUTINE | CO_ASYNC_GENERATOR)) {
PyObject *gen;
int is_coro = co->co_flags & CO_COROUTINE;
/* Don't need to keep the reference to f_back, it will be set
* when the generator is resumed. */
Py_CLEAR(f->f_back);
/* Create a new generator that owns the ready to run frame
* and return that as the value. */
if (is_coro) {
gen = PyCoro_New(f, name, qualname);
} else if (co->co_flags & CO_ASYNC_GENERATOR) {
gen = PyAsyncGen_New(f, name, qualname);
gen = PyGen_NewWithQualName(f, name, qualname);
if (gen == NULL) {
_PyObject_GC_TRACK(f);
return gen;
retval = _PyEval_EvalFrame(tstate, f, 0);
fail: /* Jump here from prelude on failure */
/* decref'ing the frame can cause __del__ methods to get invoked,
which can call back into Python. While we're done with the
current Python frame (f), the associated C stack is still in use,
so recursion_depth must be boosted for the duration. */
if (Py_REFCNT(f) > 1) {
Py_DECREF(f);
++tstate->recursion_depth;
return retval;
_PyEval_EvalCodeWithName(PyObject *_co, PyObject *globals, PyObject *locals,
return _PyEval_EvalCode(tstate, _co, globals, locals,
args, argcount,
kwnames, kwargs,
kwcount, kwstep,
defs, defcount,
kwdefs, closure,
name, qualname);
PyEval_EvalCodeEx(PyObject *_co, PyObject *globals, PyObject *locals,
PyObject *const *args, int argcount,
PyObject *const *kws, int kwcount,
PyObject *const *defs, int defcount,
PyObject *kwdefs, PyObject *closure)
return _PyEval_EvalCodeWithName(_co, globals, locals,
kws, kws != NULL ? kws + 1 : NULL,
kwcount, 2,
static PyObject *
special_lookup(PyThreadState *tstate, PyObject *o, _Py_Identifier *id)
res = _PyObject_LookupSpecial(o, id);
if (res == NULL && !_PyErr_Occurred(tstate)) {
_PyErr_SetObject(tstate, PyExc_AttributeError, id->object);
/* Logic for the raise statement (too complicated for inlining).
This *consumes* a reference count to each of its arguments. */
do_raise(PyThreadState *tstate, PyObject *exc, PyObject *cause)
PyObject *type = NULL, *value = NULL;
if (exc == NULL) {
/* Reraise */
_PyErr_StackItem *exc_info = _PyErr_GetTopmostException(tstate);
PyObject *tb;
tb = exc_info->exc_traceback;
if (type == Py_None || type == NULL) {
"No active exception to reraise");
Py_XINCREF(type);
Py_XINCREF(value);
Py_XINCREF(tb);
_PyErr_Restore(tstate, type, value, tb);
/* We support the following forms of raise:
raise
raise <instance>
raise <type> */
if (PyExceptionClass_Check(exc)) {
type = exc;
value = _PyObject_CallNoArg(exc);
if (value == NULL)
goto raise_error;
if (!PyExceptionInstance_Check(value)) {
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
else if (PyExceptionInstance_Check(exc)) {
value = exc;
type = PyExceptionInstance_Class(exc);
Py_INCREF(type);
/* Not something you can raise. You get an exception
anyway, just not what you specified :-) */
"exceptions must derive from BaseException");
assert(type != NULL);
assert(value != NULL);
if (cause) {
PyObject *fixed_cause;
if (PyExceptionClass_Check(cause)) {
fixed_cause = _PyObject_CallNoArg(cause);
if (fixed_cause == NULL)
Py_DECREF(cause);
else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
else if (cause == Py_None) {
fixed_cause = NULL;
"exception causes must derive from "
"BaseException");
PyException_SetCause(value, fixed_cause);
_PyErr_SetObject(tstate, type, value);
/* _PyErr_SetObject incref's its arguments */
Py_DECREF(type);
raise_error:
Py_XDECREF(cause);
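/* Putting the supported forms together (illustrative):
    raise                            reraises the active exception;
    raise ValueError                 instantiates the class first;
    raise ValueError("x") from exc   additionally sets __cause__ via
                                     PyException_SetCause(). */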
/* Iterate v argcnt times and store the results on the stack (via decreasing
sp). Return 1 for success, 0 if error.
If argcntafter == -1, do a simple unpack. If it is >= 0, do an unpack
with a variable target.
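For example (illustrative): "a, b, c = v" is a simple unpack with
argcnt == 3 and argcntafter == -1, while "a, *rest, b = v" uses
argcnt == 1 and argcntafter == 1, with `rest` collected into the
variable list handled below. */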
unpack_iterable(PyThreadState *tstate, PyObject *v,
int argcnt, int argcntafter, PyObject **sp)
int i = 0, j = 0;
Py_ssize_t ll = 0;
PyObject *it; /* iter(v) */
PyObject *w;
PyObject *l = NULL; /* variable list */
assert(v != NULL);
it = PyObject_GetIter(v);
if (it == NULL) {
Py_TYPE(v)->tp_iter == NULL && !PySequence_Check(v))
"cannot unpack non-iterable %.200s object",
Py_TYPE(v)->tp_name);
for (; i < argcnt; i++) {
w = PyIter_Next(it);
/* Iterator done, via error or exhaustion. */
if (argcntafter == -1) {
_PyErr_Format(tstate, PyExc_ValueError,
"not enough values to unpack "
"(expected %d, got %d)",
argcnt, i);
"(expected at least %d, got %d)",
argcnt + argcntafter, i);
goto Error;
*--sp = w;
/* We better have exhausted the iterator now. */
if (_PyErr_Occurred(tstate))
Py_DECREF(it);
Py_DECREF(w);
"too many values to unpack (expected %d)",
argcnt);
l = PySequence_List(it);
if (l == NULL)
*--sp = l;
ll = PyList_GET_SIZE(l);
if (ll < argcntafter) {
"not enough values to unpack (expected at least %d, got %zd)",
argcnt + argcntafter, argcnt + ll);
/* Pop the "after-variable" args off the list. */
for (j = argcntafter; j > 0; j--, i++) {
*--sp = PyList_GET_ITEM(l, ll - j);
/* Resize the list. */
Py_SET_SIZE(l, ll - argcntafter);
Error:
for (; i > 0; i--, sp++)
Py_DECREF(*sp);
Py_XDECREF(it);
prtrace(PyThreadState *tstate, PyObject *v, const char *str)
printf("%s ", str);
if (PyObject_Print(v, stdout, 0) != 0) {
/* Don't know what else to do */
printf("\n");
call_exc_trace(Py_tracefunc func, PyObject *self,
PyThreadState *tstate, PyFrameObject *f)
PyObject *type, *value, *traceback, *orig_traceback, *arg;
_PyErr_Fetch(tstate, &type, &value, &orig_traceback);
value = Py_None;
_PyErr_NormalizeException(tstate, &type, &value, &orig_traceback);
traceback = (orig_traceback != NULL) ? orig_traceback : Py_None;
arg = PyTuple_Pack(3, type, value, traceback);
if (arg == NULL) {
_PyErr_Restore(tstate, type, value, orig_traceback);
err = call_trace(func, self, tstate, f, PyTrace_EXCEPTION, arg);
Py_DECREF(arg);
Py_XDECREF(orig_traceback);
call_trace_protected(Py_tracefunc func, PyObject *obj,
PyThreadState *tstate, PyFrameObject *frame,
int what, PyObject *arg)
_PyErr_Fetch(tstate, &type, &value, &traceback);
err = call_trace(func, obj, tstate, frame, what, arg);
_PyErr_Restore(tstate, type, value, traceback);
call_trace(Py_tracefunc func, PyObject *obj,
int result;
if (tstate->tracing)
tstate->tracing++;
tstate->use_tracing = 0;
result = func(obj, frame, what, arg);
tstate->use_tracing = ((tstate->c_tracefunc != NULL)
|| (tstate->c_profilefunc != NULL));
tstate->tracing--;
_PyEval_CallTracing(PyObject *func, PyObject *args)
int save_tracing = tstate->tracing;
int save_use_tracing = tstate->use_tracing;
tstate->tracing = 0;
result = PyObject_Call(func, args, NULL);
tstate->tracing = save_tracing;
tstate->use_tracing = save_use_tracing;
/* See Objects/lnotab_notes.txt for a description of how tracing works. */
maybe_call_line_trace(Py_tracefunc func, PyObject *obj,
int *instr_lb, int *instr_ub, int *instr_prev)
int result = 0;
int line = frame->f_lineno;
/* If the last instruction executed isn't in the current
instruction window, reset the window. */
if (frame->f_lasti < *instr_lb || frame->f_lasti >= *instr_ub) {
PyAddrPair bounds;
line = _PyCode_CheckLineNumber(frame->f_code, frame->f_lasti,
&bounds);
*instr_lb = bounds.ap_lower;
*instr_ub = bounds.ap_upper;
/* If the last instruction falls at the start of a line or if it
represents a jump backwards, update the frame's line number and
then call the trace function if we're tracing source lines. */
if ((frame->f_lasti == *instr_lb || frame->f_lasti < *instr_prev)) {
frame->f_lineno = line;
if (frame->f_trace_lines) {
result = call_trace(func, obj, tstate, frame, PyTrace_LINE, Py_None);
/* Always emit an opcode event if we're tracing all opcodes. */
if (frame->f_trace_opcodes) {
result = call_trace(func, obj, tstate, frame, PyTrace_OPCODE, Py_None);
*instr_prev = frame->f_lasti;
_PyEval_SetProfile(PyThreadState *tstate, Py_tracefunc func, PyObject *arg)
/* The caller must hold the GIL */
/* Call PySys_Audit() in the context of the current thread state,
even if tstate is not the current thread state. */
if (PySys_Audit("sys.setprofile", NULL) < 0) {
PyObject *profileobj = tstate->c_profileobj;
tstate->c_profilefunc = NULL;
tstate->c_profileobj = NULL;
/* Must make sure that tracing is not ignored if 'profileobj' is freed */
tstate->use_tracing = tstate->c_tracefunc != NULL;
Py_XDECREF(profileobj);
Py_XINCREF(arg);
tstate->c_profileobj = arg;
tstate->c_profilefunc = func;
/* Flag that tracing or profiling is turned on */
tstate->use_tracing = (func != NULL) || (tstate->c_tracefunc != NULL);
PyEval_SetProfile(Py_tracefunc func, PyObject *arg)
if (_PyEval_SetProfile(tstate, func, arg) < 0) {
/* Log PySys_Audit() error */
_PyErr_WriteUnraisableMsg("in PyEval_SetProfile", NULL);
_PyEval_SetTrace(PyThreadState *tstate, Py_tracefunc func, PyObject *arg)
if (PySys_Audit("sys.settrace", NULL) < 0) {
struct _ceval_state *ceval = &tstate->interp->ceval;
PyObject *traceobj = tstate->c_traceobj;
ceval->tracing_possible += (func != NULL) - (tstate->c_tracefunc != NULL);
tstate->c_tracefunc = NULL;
tstate->c_traceobj = NULL;
/* Must make sure that profiling is not ignored if 'traceobj' is freed */
tstate->use_tracing = (tstate->c_profilefunc != NULL);
Py_XDECREF(traceobj);
tstate->c_traceobj = arg;
tstate->c_tracefunc = func;
tstate->use_tracing = ((func != NULL)
PyEval_SetTrace(Py_tracefunc func, PyObject *arg)
if (_PyEval_SetTrace(tstate, func, arg) < 0) {
_PyErr_WriteUnraisableMsg("in PyEval_SetTrace", NULL);
_PyEval_SetCoroutineOriginTrackingDepth(PyThreadState *tstate, int new_depth)
assert(new_depth >= 0);
tstate->coroutine_origin_tracking_depth = new_depth;
_PyEval_GetCoroutineOriginTrackingDepth(void)
return tstate->coroutine_origin_tracking_depth;
_PyEval_SetAsyncGenFirstiter(PyObject *firstiter)
if (PySys_Audit("sys.set_asyncgen_hook_firstiter", NULL) < 0) {
Py_XINCREF(firstiter);
Py_XSETREF(tstate->async_gen_firstiter, firstiter);
_PyEval_GetAsyncGenFirstiter(void)
return tstate->async_gen_firstiter;
_PyEval_SetAsyncGenFinalizer(PyObject *finalizer)
if (PySys_Audit("sys.set_asyncgen_hook_finalizer", NULL) < 0) {
Py_XINCREF(finalizer);
Py_XSETREF(tstate->async_gen_finalizer, finalizer);
_PyEval_GetAsyncGenFinalizer(void)
return tstate->async_gen_finalizer;
static PyFrameObject *
_PyEval_GetFrame(PyThreadState *tstate)
return runtime->gilstate.getframe(tstate);
PyFrameObject *
PyEval_GetFrame(void)
return _PyEval_GetFrame(tstate);
PyEval_GetBuiltins(void)
PyFrameObject *current_frame = _PyEval_GetFrame(tstate);
if (current_frame == NULL)
return tstate->interp->builtins;
return current_frame->f_builtins;
/* Convenience function to get a builtin from its name */
_PyEval_GetBuiltinId(_Py_Identifier *name)
PyObject *attr = _PyDict_GetItemIdWithError(PyEval_GetBuiltins(), name);
if (attr) {
Py_INCREF(attr);
else if (!_PyErr_Occurred(tstate)) {
_PyErr_SetObject(tstate, PyExc_AttributeError, _PyUnicode_FromId(name));
return attr;
PyEval_GetLocals(void)
if (current_frame == NULL) {
_PyErr_SetString(tstate, PyExc_SystemError, "frame does not exist");
if (PyFrame_FastToLocalsWithError(current_frame) < 0) {
assert(current_frame->f_locals != NULL);
return current_frame->f_locals;
PyEval_GetGlobals(void)
assert(current_frame->f_globals != NULL);
return current_frame->f_globals;
PyEval_MergeCompilerFlags(PyCompilerFlags *cf)
int result = cf->cf_flags != 0;
if (current_frame != NULL) {
const int codeflags = current_frame->f_code->co_flags;
const int compilerflags = codeflags & PyCF_MASK;
if (compilerflags) {
result = 1;
cf->cf_flags |= compilerflags;
#if 0 /* future keyword */
if (codeflags & CO_GENERATOR_ALLOWED) {
cf->cf_flags |= CO_GENERATOR_ALLOWED;
const char *
PyEval_GetFuncName(PyObject *func)
if (PyMethod_Check(func))
return PyEval_GetFuncName(PyMethod_GET_FUNCTION(func));
else if (PyFunction_Check(func))
return PyUnicode_AsUTF8(((PyFunctionObject*)func)->func_name);
else if (PyCFunction_Check(func))
return ((PyCFunctionObject*)func)->m_ml->ml_name;
return Py_TYPE(func)->tp_name;
PyEval_GetFuncDesc(PyObject *func)
return "()";
return " object";
#define C_TRACE(x, call) \
if (tstate->use_tracing && tstate->c_profilefunc) { \
if (call_trace(tstate->c_profilefunc, tstate->c_profileobj, \
tstate, tstate->frame, \
PyTrace_C_CALL, func)) { \
x = NULL; \
else { \
x = call; \
if (tstate->c_profilefunc != NULL) { \
if (x == NULL) { \
call_trace_protected(tstate->c_profilefunc, \
tstate->c_profileobj, \
PyTrace_C_EXCEPTION, func); \
/* XXX should pass (type, value, tb) */ \
} else { \
if (call_trace(tstate->c_profilefunc, \
PyTrace_C_RETURN, func)) { \
Py_DECREF(x); \
trace_call_function(PyThreadState *tstate,
PyObject *func,
PyObject **args, Py_ssize_t nargs,
PyObject *kwnames)
PyObject *x;
if (PyCFunction_Check(func)) {
C_TRACE(x, PyObject_Vectorcall(func, args, nargs, kwnames));
return x;
else if (Py_IS_TYPE(func, &PyMethodDescr_Type) && nargs > 0) {
/* We need to create a temporary bound method as argument
for profiling.
If nargs == 0, then this cannot work because we have no
"self". In any case, the call itself would raise
TypeError (foo needs an argument), so we just skip
profiling. */
PyObject *self = args[0];
func = Py_TYPE(func)->tp_descr_get(func, self, (PyObject*)Py_TYPE(self));
C_TRACE(x, PyObject_Vectorcall(func,
args+1, nargs-1,
kwnames));
return PyObject_Vectorcall(func, args, nargs | PY_VECTORCALL_ARGUMENTS_OFFSET, kwnames);
/* Issue #29227: Inline call_function() into _PyEval_EvalFrameDefault()
to reduce the stack consumption. */
Py_LOCAL_INLINE(PyObject *) _Py_HOT_FUNCTION
call_function(PyThreadState *tstate, PyObject ***pp_stack, Py_ssize_t oparg, PyObject *kwnames)
PyObject **pfunc = (*pp_stack) - oparg - 1;
PyObject *func = *pfunc;
PyObject *x, *w;
Py_ssize_t nkwargs = (kwnames == NULL) ? 0 : PyTuple_GET_SIZE(kwnames);
Py_ssize_t nargs = oparg - nkwargs;
PyObject **stack = (*pp_stack) - nargs - nkwargs;
x = trace_call_function(tstate, func, stack, nargs, kwnames);
x = PyObject_Vectorcall(func, stack, nargs | PY_VECTORCALL_ARGUMENTS_OFFSET, kwnames);
assert((x != NULL) ^ (_PyErr_Occurred(tstate) != NULL));
/* Clear the stack of the function object. */
while ((*pp_stack) > pfunc) {
w = EXT_POP(*pp_stack);
do_call_core(PyThreadState *tstate, PyObject *func, PyObject *callargs, PyObject *kwdict)
C_TRACE(result, PyObject_Call(func, callargs, kwdict));
else if (Py_IS_TYPE(func, &PyMethodDescr_Type)) {
Py_ssize_t nargs = PyTuple_GET_SIZE(callargs);
if (nargs > 0 && tstate->use_tracing) {
PyObject *self = PyTuple_GET_ITEM(callargs, 0);
C_TRACE(result, _PyObject_FastCallDictTstate(
tstate, func,
&_PyTuple_ITEMS(callargs)[1],
nargs - 1,
kwdict));
return PyObject_Call(func, callargs, kwdict);
/* Extract a slice index from a PyLong or an object with the
nb_index slot defined, and store in *pi.
Silently reduce values larger than PY_SSIZE_T_MAX to PY_SSIZE_T_MAX,
and silently boost values less than PY_SSIZE_T_MIN to PY_SSIZE_T_MIN.
Return 0 on error, 1 on success. */
_PyEval_SliceIndex(PyObject *v, Py_ssize_t *pi)
if (v != Py_None) {
Py_ssize_t x;
if (PyIndex_Check(v)) {
x = PyNumber_AsSsize_t(v, NULL);
if (x == -1 && _PyErr_Occurred(tstate))
"slice indices must be integers or "
"None or have an __index__ method");
*pi = x;
_PyEval_SliceIndexNotNone(PyObject *v, Py_ssize_t *pi)
"have an __index__ method");
import_name(PyThreadState *tstate, PyFrameObject *f,
PyObject *name, PyObject *fromlist, PyObject *level)
_Py_IDENTIFIER(__import__);
PyObject *import_func, *res;
PyObject* stack[5];
import_func = _PyDict_GetItemIdWithError(f->f_builtins, &PyId___import__);
if (import_func == NULL) {
_PyErr_SetString(tstate, PyExc_ImportError, "__import__ not found");
/* Fast path for not overloaded __import__. */
if (import_func == tstate->interp->import_func) {
int ilevel = _PyLong_AsInt(level);
if (ilevel == -1 && _PyErr_Occurred(tstate)) {
res = PyImport_ImportModuleLevelObject(
name,
f->f_globals,
f->f_locals == NULL ? Py_None : f->f_locals,
fromlist,
ilevel);
Py_INCREF(import_func);
stack[0] = name;
stack[1] = f->f_globals;
stack[2] = f->f_locals == NULL ? Py_None : f->f_locals;
stack[3] = fromlist;
stack[4] = level;
res = _PyObject_FastCall(import_func, stack, 5);
Py_DECREF(import_func);
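/* For example (illustrative): "import os" reaches this helper with
level == 0 and fromlist == None, while "from ..pkg import mod" uses
name == "pkg", level == 2 and fromlist == ('mod',). Only an
unoverridden builtins.__import__ takes the fast path above. */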
import_from(PyThreadState *tstate, PyObject *v, PyObject *name)
PyObject *fullmodname, *pkgname, *pkgpath, *pkgname_or_unknown, *errmsg;
if (_PyObject_LookupAttr(v, name, &x) != 0) {
/* Issue #17636: in case this failed because of a circular relative
import, try to fall back to reading the module directly from
sys.modules. */
pkgname = _PyObject_GetAttrId(v, &PyId___name__);
if (pkgname == NULL) {
if (!PyUnicode_Check(pkgname)) {
Py_CLEAR(pkgname);
fullmodname = PyUnicode_FromFormat("%U.%U", pkgname, name);
if (fullmodname == NULL) {
Py_DECREF(pkgname);
x = PyImport_GetModule(fullmodname);
Py_DECREF(fullmodname);
if (x == NULL && !_PyErr_Occurred(tstate)) {
pkgpath = PyModule_GetFilenameObject(v);
pkgname_or_unknown = PyUnicode_FromString("<unknown module name>");
if (pkgname_or_unknown == NULL) {
Py_XDECREF(pkgpath);
pkgname_or_unknown = pkgname;
if (pkgpath == NULL || !PyUnicode_Check(pkgpath)) {
errmsg = PyUnicode_FromFormat(
"cannot import name %R from %R (unknown location)",
name, pkgname_or_unknown
/* NULL checks for errmsg and pkgname done by PyErr_SetImportError. */
PyErr_SetImportError(errmsg, pkgname, NULL);
_Py_IDENTIFIER(__spec__);
PyObject *spec = _PyObject_GetAttrId(v, &PyId___spec__);
const char *fmt =
_PyModuleSpec_IsInitializing(spec) ?
"cannot import name %R from partially initialized module %R "
"(most likely due to a circular import) (%S)" :
"cannot import name %R from %R (%S)";
Py_XDECREF(spec);
errmsg = PyUnicode_FromFormat(fmt, name, pkgname_or_unknown, pkgpath);
PyErr_SetImportError(errmsg, pkgname, pkgpath);
Py_XDECREF(errmsg);
Py_XDECREF(pkgname_or_unknown);
import_all_from(PyThreadState *tstate, PyObject *locals, PyObject *v)
_Py_IDENTIFIER(__all__);
_Py_IDENTIFIER(__dict__);
PyObject *all, *dict, *name, *value;
int skip_leading_underscores = 0;
int pos, err;
if (_PyObject_LookupAttrId(v, &PyId___all__, &all) < 0) {
return -1; /* Unexpected error */
if (all == NULL) {
if (_PyObject_LookupAttrId(v, &PyId___dict__, &dict) < 0) {
if (dict == NULL) {
_PyErr_SetString(tstate, PyExc_ImportError,
"from-import-* object has no __dict__ and no __all__");
all = PyMapping_Keys(dict);
Py_DECREF(dict);
if (all == NULL)
skip_leading_underscores = 1;
for (pos = 0, err = 0; ; pos++) {
name = PySequence_GetItem(all, pos);
if (!_PyErr_ExceptionMatches(tstate, PyExc_IndexError)) {
err = -1;
if (!PyUnicode_Check(name)) {
PyObject *modname = _PyObject_GetAttrId(v, &PyId___name__);
if (modname == NULL) {
Py_DECREF(name);
if (!PyUnicode_Check(modname)) {
"module __name__ must be a string, not %.100s",
Py_TYPE(modname)->tp_name);
"%s in %U.%s must be str, not %.100s",
skip_leading_underscores ? "Key" : "Item",
modname,
skip_leading_underscores ? "__dict__" : "__all__",
Py_TYPE(name)->tp_name);
Py_DECREF(modname);
if (skip_leading_underscores) {
if (PyUnicode_READY(name) == -1) {
if (PyUnicode_READ_CHAR(name, 0) == '_') {
value = PyObject_GetAttr(v, name);
else if (PyDict_CheckExact(locals))
err = PyDict_SetItem(locals, name, value);
err = PyObject_SetItem(locals, name, value);
Py_DECREF(all);
return err;
check_args_iterable(PyThreadState *tstate, PyObject *func, PyObject *args)
if (Py_TYPE(args)->tp_iter == NULL && !PySequence_Check(args)) {
/* check_args_iterable() may be called with a live exception:
* clear it to prevent calling _PyObject_FunctionStr() with an
* exception set. */
PyObject *funcstr = _PyObject_FunctionStr(func);
if (funcstr != NULL) {
"%U argument after * must be an iterable, not %.200s",
funcstr, Py_TYPE(args)->tp_name);
Py_DECREF(funcstr);
format_kwargs_error(PyThreadState *tstate, PyObject *func, PyObject *kwargs)
/* _PyDict_MergeEx raises an AttributeError (percolated from an
* attempt to get the 'keys' attribute) instead of a TypeError
* if its second argument is not a mapping. */
_PyErr_Format(
tstate, PyExc_TypeError,
"%U argument after ** must be a mapping, not %.200s",
funcstr, Py_TYPE(kwargs)->tp_name);
else if (_PyErr_ExceptionMatches(tstate, PyExc_KeyError)) {
if (val && PyTuple_Check(val) && PyTuple_GET_SIZE(val) == 1) {
PyObject *key = PyTuple_GET_ITEM(val, 0);
"%U got multiple values for keyword argument '%S'",
funcstr, key);
Py_XDECREF(exc);
Py_XDECREF(val);
Py_XDECREF(tb);
format_exc_check_arg(PyThreadState *tstate, PyObject *exc,
const char *format_str, PyObject *obj)
const char *obj_str;
if (!obj)
obj_str = PyUnicode_AsUTF8(obj);
if (!obj_str)
_PyErr_Format(tstate, exc, format_str, obj_str);
format_exc_unbound(PyThreadState *tstate, PyCodeObject *co, int oparg)
/* Don't stomp existing exception */
if (oparg < PyTuple_GET_SIZE(co->co_cellvars)) {
name = PyTuple_GET_ITEM(co->co_cellvars,
oparg);
format_exc_check_arg(tstate,
PyExc_UnboundLocalError,
name = PyTuple_GET_ITEM(co->co_freevars, oparg -
PyTuple_GET_SIZE(co->co_cellvars));
UNBOUNDFREE_ERROR_MSG, name);
format_awaitable_error(PyThreadState *tstate, PyTypeObject *type, int prevprevopcode, int prevopcode)
if (type->tp_as_async == NULL || type->tp_as_async->am_await == NULL) {
if (prevopcode == BEFORE_ASYNC_WITH) {
"'async with' received an object from __aenter__ "
"that does not implement __await__: %.100s",
else if (prevopcode == WITH_EXCEPT_START || (prevopcode == CALL_FUNCTION && prevprevopcode == DUP_TOP)) {
"'async with' received an object from __aexit__ "
unicode_concatenate(PyThreadState *tstate, PyObject *v, PyObject *w,
PyFrameObject *f, const _Py_CODEUNIT *next_instr)
if (Py_REFCNT(v) == 2) {
/* In the common case, there are 2 references to the value
* stored in 'variable' when the += is performed: one on the
* value stack (in 'v') and one still stored in the
* 'variable'. We try to delete the variable now to reduce
* the refcnt to 1. */
int opcode, oparg;
case STORE_FAST:
PyObject **fastlocals = f->f_localsplus;
if (GETLOCAL(oparg) == v)
case STORE_DEREF:
PyObject **freevars = (f->f_localsplus +
f->f_code->co_nlocals);
PyObject *c = freevars[oparg];
if (PyCell_GET(c) == v) {
PyCell_SET(c, NULL);
case STORE_NAME:
PyObject *names = f->f_code->co_names;
if (locals && PyDict_CheckExact(locals)) {
PyObject *w = PyDict_GetItemWithError(locals, name);
if ((w == v && PyDict_DelItem(locals, name) != 0) ||
(w == NULL && _PyErr_Occurred(tstate)))
res = v;
PyUnicode_Append(&res, w);
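/* For example, a loop such as

    s = ""
    for chunk in chunks:
        s += chunk

benefits here: once the variable's own reference is dropped,
PyUnicode_Append can often resize the string in place instead of
copying it on every iteration. */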
getarray(long a[256])
PyObject *l = PyList_New(256);
if (l == NULL) return NULL;
for (i = 0; i < 256; i++) {
PyObject *x = PyLong_FromLong(a[i]);
if (x == NULL) {
Py_DECREF(l);
PyList_SET_ITEM(l, i, x);
for (i = 0; i < 256; i++)
a[i] = 0;
return l;
_Py_GetDXProfile(PyObject *self, PyObject *args)
#ifndef DXPAIRS
return getarray(dxp);
PyObject *l = PyList_New(257);
for (i = 0; i < 257; i++) {
PyObject *x = getarray(dxpairs[i]);
Py_ssize_t
_PyEval_RequestCodeExtraIndex(freefunc free)
PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
Py_ssize_t new_index;
if (interp->co_extra_user_count == MAX_CO_EXTRA_USERS - 1) {
new_index = interp->co_extra_user_count++;
interp->co_extra_freefuncs[new_index] = free;
return new_index;
dtrace_function_entry(PyFrameObject *f)
const char *filename;
const char *funcname;
int lineno;
filename = PyUnicode_AsUTF8(f->f_code->co_filename);
funcname = PyUnicode_AsUTF8(f->f_code->co_name);
lineno = PyCode_Addr2Line(f->f_code, f->f_lasti);
PyDTrace_FUNCTION_ENTRY(filename, funcname, lineno);
dtrace_function_return(PyFrameObject *f)
PyDTrace_FUNCTION_RETURN(filename, funcname, lineno);
/* DTrace equivalent of maybe_call_line_trace. */
maybe_dtrace_line(PyFrameObject *frame,
const char *co_filename, *co_name;
/* If the last instruction falls at the start of a line or if
it represents a jump backwards, update the frame's line
number and call the trace function. */
if (frame->f_lasti == *instr_lb || frame->f_lasti < *instr_prev) {
co_filename = PyUnicode_AsUTF8(frame->f_code->co_filename);
if (!co_filename)
co_filename = "?";
co_name = PyUnicode_AsUTF8(frame->f_code->co_name);
if (!co_name)
co_name = "?";
PyDTrace_LINE(co_filename, co_name, line);
/* Implement Py_EnterRecursiveCall() and Py_LeaveRecursiveCall() as functions
for the limited API. */
#undef Py_EnterRecursiveCall
int Py_EnterRecursiveCall(const char *where)
return _Py_EnterRecursiveCall_inline(where);
#undef Py_LeaveRecursiveCall
void Py_LeaveRecursiveCall(void)
_Py_LeaveRecursiveCall_inline();