1990-10-14 12:07:46 +00:00
|
|
|
/* Tuple object implementation */
|
|
|
|
|
1997-05-02 03:12:38 +00:00
|
|
|
#include "Python.h"
|
2020-06-24 15:21:54 +02:00
|
|
|
#include "pycore_abstract.h" // _PyIndex_Check()
|
2023-08-24 20:25:22 +02:00
|
|
|
#include "pycore_ceval.h" // _PyEval_GetBuiltin()
|
2025-03-20 12:35:23 +01:00
|
|
|
#include "pycore_freelist.h" // _Py_FREELIST_PUSH()
|
2020-06-24 15:21:54 +02:00
|
|
|
#include "pycore_gc.h" // _PyObject_GC_IS_TRACKED()
|
2025-03-19 18:46:24 +01:00
|
|
|
#include "pycore_list.h" // _Py_memory_repeat()
|
2023-07-03 11:39:11 +02:00
|
|
|
#include "pycore_modsupport.h" // _PyArg_NoKwnames()
|
2025-03-20 12:35:23 +01:00
|
|
|
#include "pycore_object.h" // _PyObject_GC_TRACK()
|
|
|
|
#include "pycore_stackref.h" // PyStackRef_AsPyObjectSteal()
|
2025-03-19 18:46:24 +01:00
|
|
|
#include "pycore_tuple.h" // _PyTupleIterObject
|
1990-10-14 12:07:46 +00:00
|
|
|
|
2025-03-20 12:35:23 +01:00
|
|
|
|
2017-03-19 08:47:58 +02:00
|
|
|
/*[clinic input]
|
|
|
|
class tuple "PyTupleObject *" "&PyTuple_Type"
|
|
|
|
[clinic start generated code]*/
|
|
|
|
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=f051ba3cfdf9a189]*/
|
|
|
|
|
|
|
|
#include "clinic/tupleobject.c.h"
|
|
|
|
|
2020-06-23 16:40:40 +02:00
|
|
|
|
2022-02-28 15:15:48 -07:00
|
|
|
static inline int maybe_freelist_push(PyTupleObject *);
|
2020-06-23 16:40:40 +02:00
|
|
|
|
2009-03-23 18:52:06 +00:00
|
|
|
|
2021-07-01 02:30:46 +02:00
|
|
|
/* Allocate an uninitialized tuple object. Before making it public, following
   steps must be done:

   - Initialize its items.
   - Call _PyObject_GC_TRACK() on it.

   Because the empty tuple is always reused and it's already tracked by GC,
   this function must not be called with size == 0 (unless from PyTuple_New()
   which wraps this function).
*/
static PyTupleObject *
tuple_alloc(Py_ssize_t size)
{
    if (size < 0) {
        PyErr_BadInternalCall();
        return NULL;
    }
    assert(size != 0); // The empty tuple is statically allocated.
    /* Try the per-size freelist first: tuples[index] caches tuples of
       length index + 1. */
    Py_ssize_t index = size - 1;
    if (index < PyTuple_MAXSAVESIZE) {
        PyTupleObject *op = _Py_FREELIST_POP(PyTupleObject, tuples[index]);
        if (op != NULL) {
            /* A recycled tuple may carry a stale cached hash; clear it. */
            _PyTuple_RESET_HASH_CACHE(op);
            return op;
        }
    }
    /* Check for overflow */
    if ((size_t)size > ((size_t)PY_SSIZE_T_MAX - (sizeof(PyTupleObject) -
                sizeof(PyObject *))) / sizeof(PyObject *)) {
        return (PyTupleObject *)PyErr_NoMemory();
    }
    PyTupleObject *result = PyObject_GC_NewVar(PyTupleObject, &PyTuple_Type, size);
    if (result != NULL) {
        /* Fresh allocation: mark the hash cache as "not yet computed". */
        _PyTuple_RESET_HASH_CACHE(result);
    }
    return result;
}
|
|
|
|
|
2022-02-28 15:15:48 -07:00
|
|
|
// The empty tuple singleton is not tracked by the GC.
|
|
|
|
// It does not contain any Python object.
|
|
|
|
// Note that tuple subclasses have their own empty instances.
|
2020-06-24 15:21:54 +02:00
|
|
|
|
2022-02-28 15:15:48 -07:00
|
|
|
static inline PyObject *
|
2020-06-24 15:21:54 +02:00
|
|
|
tuple_get_empty(void)
|
|
|
|
{
|
2023-08-04 16:24:50 -07:00
|
|
|
return (PyObject *)&_Py_SINGLETON(tuple_empty);
|
2020-06-24 15:21:54 +02:00
|
|
|
}
|
|
|
|
|
2019-08-14 19:10:33 +05:00
|
|
|
PyObject *
|
|
|
|
PyTuple_New(Py_ssize_t size)
|
|
|
|
{
|
|
|
|
PyTupleObject *op;
|
2020-06-24 15:21:54 +02:00
|
|
|
if (size == 0) {
|
|
|
|
return tuple_get_empty();
|
2019-08-14 19:10:33 +05:00
|
|
|
}
|
2020-06-24 15:21:54 +02:00
|
|
|
op = tuple_alloc(size);
|
2019-09-04 07:58:05 -06:00
|
|
|
if (op == NULL) {
|
|
|
|
return NULL;
|
|
|
|
}
|
2019-08-14 19:10:33 +05:00
|
|
|
for (Py_ssize_t i = 0; i < size; i++) {
|
2010-05-09 15:52:27 +00:00
|
|
|
op->ob_item[i] = NULL;
|
2019-08-14 19:10:33 +05:00
|
|
|
}
|
2021-07-01 02:30:46 +02:00
|
|
|
_PyObject_GC_TRACK(op);
|
2010-05-09 15:52:27 +00:00
|
|
|
return (PyObject *) op;
|
1990-10-14 12:07:46 +00:00
|
|
|
}
|
|
|
|
|
2006-02-15 17:27:45 +00:00
|
|
|
Py_ssize_t
|
2013-08-13 20:18:52 +02:00
|
|
|
PyTuple_Size(PyObject *op)
|
1990-10-14 12:07:46 +00:00
|
|
|
{
|
2010-05-09 15:52:27 +00:00
|
|
|
if (!PyTuple_Check(op)) {
|
|
|
|
PyErr_BadInternalCall();
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return Py_SIZE(op);
|
1990-10-14 12:07:46 +00:00
|
|
|
}
|
|
|
|
|
1997-05-02 03:12:38 +00:00
|
|
|
PyObject *
|
2013-08-13 20:18:52 +02:00
|
|
|
PyTuple_GetItem(PyObject *op, Py_ssize_t i)
|
1990-10-14 12:07:46 +00:00
|
|
|
{
|
2010-05-09 15:52:27 +00:00
|
|
|
if (!PyTuple_Check(op)) {
|
|
|
|
PyErr_BadInternalCall();
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
if (i < 0 || i >= Py_SIZE(op)) {
|
|
|
|
PyErr_SetString(PyExc_IndexError, "tuple index out of range");
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
return ((PyTupleObject *)op) -> ob_item[i];
|
1990-10-14 12:07:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2013-08-13 20:18:52 +02:00
|
|
|
PyTuple_SetItem(PyObject *op, Py_ssize_t i, PyObject *newitem)
|
1990-10-14 12:07:46 +00:00
|
|
|
{
|
2013-08-13 20:18:52 +02:00
|
|
|
PyObject **p;
|
2020-02-07 00:38:59 +01:00
|
|
|
if (!PyTuple_Check(op) || Py_REFCNT(op) != 1) {
|
2010-05-09 15:52:27 +00:00
|
|
|
Py_XDECREF(newitem);
|
|
|
|
PyErr_BadInternalCall();
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
if (i < 0 || i >= Py_SIZE(op)) {
|
|
|
|
Py_XDECREF(newitem);
|
|
|
|
PyErr_SetString(PyExc_IndexError,
|
|
|
|
"tuple assignment index out of range");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
p = ((PyTupleObject *)op) -> ob_item + i;
|
2016-04-06 09:50:03 +03:00
|
|
|
Py_XSETREF(*p, newitem);
|
2010-05-09 15:52:27 +00:00
|
|
|
return 0;
|
1990-10-14 12:07:46 +00:00
|
|
|
}
|
|
|
|
|
2009-03-23 18:52:06 +00:00
|
|
|
void
|
|
|
|
_PyTuple_MaybeUntrack(PyObject *op)
|
|
|
|
{
|
2010-05-09 15:52:27 +00:00
|
|
|
PyTupleObject *t;
|
|
|
|
Py_ssize_t i, n;
|
|
|
|
|
|
|
|
if (!PyTuple_CheckExact(op) || !_PyObject_GC_IS_TRACKED(op))
|
|
|
|
return;
|
|
|
|
t = (PyTupleObject *) op;
|
|
|
|
n = Py_SIZE(t);
|
|
|
|
for (i = 0; i < n; i++) {
|
|
|
|
PyObject *elt = PyTuple_GET_ITEM(t, i);
|
|
|
|
/* Tuple with NULL elements aren't
|
|
|
|
fully constructed, don't untrack
|
|
|
|
them yet. */
|
|
|
|
if (!elt ||
|
|
|
|
_PyObject_GC_MAY_BE_TRACKED(elt))
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
_PyObject_GC_UNTRACK(op);
|
2009-03-23 18:52:06 +00:00
|
|
|
}
|
|
|
|
|
2003-10-12 18:24:34 +00:00
|
|
|
PyObject *
|
2006-02-15 17:27:45 +00:00
|
|
|
PyTuple_Pack(Py_ssize_t n, ...)
|
2003-10-12 18:24:34 +00:00
|
|
|
{
|
2010-05-09 15:52:27 +00:00
|
|
|
Py_ssize_t i;
|
|
|
|
PyObject *o;
|
|
|
|
PyObject **items;
|
|
|
|
va_list vargs;
|
|
|
|
|
2019-08-14 19:10:33 +05:00
|
|
|
if (n == 0) {
|
2020-06-24 15:21:54 +02:00
|
|
|
return tuple_get_empty();
|
2019-08-14 19:10:33 +05:00
|
|
|
}
|
|
|
|
|
2010-05-09 15:52:27 +00:00
|
|
|
va_start(vargs, n);
|
2020-06-24 15:21:54 +02:00
|
|
|
PyTupleObject *result = tuple_alloc(n);
|
2012-09-10 02:54:51 +02:00
|
|
|
if (result == NULL) {
|
|
|
|
va_end(vargs);
|
2010-05-09 15:52:27 +00:00
|
|
|
return NULL;
|
2012-09-10 02:54:51 +02:00
|
|
|
}
|
2019-08-14 19:10:33 +05:00
|
|
|
items = result->ob_item;
|
2010-05-09 15:52:27 +00:00
|
|
|
for (i = 0; i < n; i++) {
|
|
|
|
o = va_arg(vargs, PyObject *);
|
2022-11-10 23:40:31 +01:00
|
|
|
items[i] = Py_NewRef(o);
|
2010-05-09 15:52:27 +00:00
|
|
|
}
|
|
|
|
va_end(vargs);
|
2021-07-01 02:30:46 +02:00
|
|
|
_PyObject_GC_TRACK(result);
|
2019-08-14 19:10:33 +05:00
|
|
|
return (PyObject *)result;
|
2003-10-12 18:24:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
1990-10-14 12:07:46 +00:00
|
|
|
/* Methods */
|
|
|
|
|
|
|
|
/* tp_dealloc for tuple: release all items, then either push the object
   onto the size-class freelist or free it. Must never run for the
   statically allocated empty tuple singleton. */
static void
tuple_dealloc(PyObject *self)
{
    PyTupleObject *op = _PyTuple_CAST(self);
    if (Py_SIZE(op) == 0) {
        /* The empty tuple is statically allocated. */
        if (op == &_Py_SINGLETON(tuple_empty)) {
#ifdef Py_DEBUG
            _Py_FatalRefcountError("deallocating the empty tuple singleton");
#else
            /* In release builds, silently ignore a refcount bug on the
               immortal singleton rather than corrupting memory. */
            return;
#endif
        }
#ifdef Py_DEBUG
        /* tuple subclasses have their own empty instances. */
        assert(!PyTuple_CheckExact(op));
#endif
    }

    PyObject_GC_UnTrack(op);

    /* Drop the reference held on each item (NULL slots are tolerated
       for partially constructed tuples). */
    Py_ssize_t i = Py_SIZE(op);
    while (--i >= 0) {
        Py_XDECREF(op->ob_item[i]);
    }
    // This will abort on the empty singleton (if there is one).
    if (!maybe_freelist_push(op)) {
        Py_TYPE(op)->tp_free((PyObject *)op);
    }
}
|
|
|
|
|
1997-05-02 03:12:38 +00:00
|
|
|
/* tp_repr for tuple: build "(item1, item2, ...)" with a one-element
   trailing comma, guarding against self-referential cycles via
   Py_ReprEnter()/Py_ReprLeave(). */
static PyObject *
tuple_repr(PyObject *self)
{
    PyTupleObject *v = _PyTuple_CAST(self);
    Py_ssize_t n = PyTuple_GET_SIZE(v);
    if (n == 0) {
        return PyUnicode_FromString("()");
    }

    /* While not mutable, it is still possible to end up with a cycle in a
       tuple through an object that stores itself within a tuple (and thus
       infinitely asks for the repr of itself). This should only be
       possible within a type. */
    int res = Py_ReprEnter((PyObject *)v);
    if (res != 0) {
        /* res > 0: cycle detected, emit the "(...)" placeholder. */
        return res > 0 ? PyUnicode_FromString("(...)") : NULL;
    }

    /* Estimate the output length to presize the writer. */
    Py_ssize_t prealloc;
    if (n > 1) {
        // "(" + "1" + ", 2" * (len - 1) + ")"
        prealloc = 1 + 1 + (2 + 1) * (n - 1) + 1;
    }
    else {
        // "(1,)"
        prealloc = 4;
    }
    PyUnicodeWriter *writer = PyUnicodeWriter_Create(prealloc);
    if (writer == NULL) {
        goto error;
    }

    if (PyUnicodeWriter_WriteChar(writer, '(') < 0) {
        goto error;
    }

    /* Do repr() on each element. */
    for (Py_ssize_t i = 0; i < n; ++i) {
        if (i > 0) {
            if (PyUnicodeWriter_WriteChar(writer, ',') < 0) {
                goto error;
            }
            if (PyUnicodeWriter_WriteChar(writer, ' ') < 0) {
                goto error;
            }
        }

        if (PyUnicodeWriter_WriteRepr(writer, v->ob_item[i]) < 0) {
            goto error;
        }
    }

    /* Singleton tuples need the trailing comma: "(x,)". */
    if (n == 1) {
        if (PyUnicodeWriter_WriteChar(writer, ',') < 0) {
            goto error;
        }
    }
    if (PyUnicodeWriter_WriteChar(writer, ')') < 0) {
        goto error;
    }

    Py_ReprLeave((PyObject *)v);
    return PyUnicodeWriter_Finish(writer);

error:
    PyUnicodeWriter_Discard(writer);
    Py_ReprLeave((PyObject *)v);
    return NULL;
}
|
|
|
|
|
2004-06-04 06:35:20 +00:00
|
|
|
|
2018-10-28 02:06:38 +02:00
|
|
|
/* Hash for tuples. This is a slightly simplified version of the xxHash
   non-cryptographic hash:
   - we do not use any parallelism, there is only 1 accumulator.
   - we drop the final mixing since this is just a permutation of the
     output space: it does not help against collisions.
   - at the end, we mangle the length with a single constant.
   For the xxHash specification, see
   https://github.com/Cyan4973/xxHash/blob/master/doc/xxhash_spec.md

   The constants for the hash function are defined in pycore_tuple.h.
*/

static Py_hash_t
tuple_hash(PyObject *op)
{
    PyTupleObject *v = _PyTuple_CAST(op);

    /* -1 is the "not computed yet" sentinel; any other cached value is
       returned directly. The relaxed atomic load/store make the cache
       safe under free-threading. */
    Py_uhash_t acc = FT_ATOMIC_LOAD_SSIZE_RELAXED(v->ob_hash);
    if (acc != (Py_uhash_t)-1) {
        return acc;
    }

    Py_ssize_t len = Py_SIZE(v);
    PyObject **item = v->ob_item;
    acc = _PyTuple_HASH_XXPRIME_5;
    for (Py_ssize_t i = 0; i < len; i++) {
        Py_uhash_t lane = PyObject_Hash(item[i]);
        /* An item hash of -1 signals an error from PyObject_Hash(). */
        if (lane == (Py_uhash_t)-1) {
            return -1;
        }
        acc += lane * _PyTuple_HASH_XXPRIME_2;
        acc = _PyTuple_HASH_XXROTATE(acc);
        acc *= _PyTuple_HASH_XXPRIME_1;
    }

    /* Add input length, mangled to keep the historical value of hash(()). */
    acc += len ^ (_PyTuple_HASH_XXPRIME_5 ^ 3527539UL);

    /* -1 is reserved as the error/uncached sentinel; remap it. */
    if (acc == (Py_uhash_t)-1) {
        acc = 1546275796;
    }

    FT_ATOMIC_STORE_SSIZE_RELAXED(v->ob_hash, acc);

    return acc;
}
|
|
|
|
|
2006-02-15 17:27:45 +00:00
|
|
|
static Py_ssize_t
|
2024-10-02 13:37:04 +02:00
|
|
|
tuple_length(PyObject *self)
|
1990-10-14 12:07:46 +00:00
|
|
|
{
|
2024-10-02 13:37:04 +02:00
|
|
|
PyTupleObject *a = _PyTuple_CAST(self);
|
2010-05-09 15:52:27 +00:00
|
|
|
return Py_SIZE(a);
|
1990-10-14 12:07:46 +00:00
|
|
|
}
|
|
|
|
|
2024-03-08 00:21:21 +08:00
|
|
|
static int
|
2024-10-02 13:37:04 +02:00
|
|
|
tuple_contains(PyObject *self, PyObject *el)
|
2000-04-27 21:41:03 +00:00
|
|
|
{
|
2024-10-02 13:37:04 +02:00
|
|
|
PyTupleObject *a = _PyTuple_CAST(self);
|
|
|
|
int cmp = 0;
|
|
|
|
for (Py_ssize_t i = 0; cmp == 0 && i < Py_SIZE(a); ++i) {
|
2019-08-04 14:12:48 +03:00
|
|
|
cmp = PyObject_RichCompareBool(PyTuple_GET_ITEM(a, i), el, Py_EQ);
|
2024-10-02 13:37:04 +02:00
|
|
|
}
|
2010-05-09 15:52:27 +00:00
|
|
|
return cmp;
|
2000-04-27 21:41:03 +00:00
|
|
|
}
|
|
|
|
|
1997-05-02 03:12:38 +00:00
|
|
|
static PyObject *
|
2024-10-02 13:37:04 +02:00
|
|
|
tuple_item(PyObject *op, Py_ssize_t i)
|
1990-10-14 12:07:46 +00:00
|
|
|
{
|
2024-10-02 13:37:04 +02:00
|
|
|
PyTupleObject *a = _PyTuple_CAST(op);
|
2010-05-09 15:52:27 +00:00
|
|
|
if (i < 0 || i >= Py_SIZE(a)) {
|
|
|
|
PyErr_SetString(PyExc_IndexError, "tuple index out of range");
|
|
|
|
return NULL;
|
|
|
|
}
|
2022-11-10 23:40:31 +01:00
|
|
|
return Py_NewRef(a->ob_item[i]);
|
1990-10-14 12:07:46 +00:00
|
|
|
}
|
|
|
|
|
2019-02-25 21:59:12 +05:00
|
|
|
PyObject *
|
|
|
|
_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n)
|
|
|
|
{
|
2019-08-14 19:10:33 +05:00
|
|
|
if (n == 0) {
|
2020-06-24 15:21:54 +02:00
|
|
|
return tuple_get_empty();
|
2019-08-14 19:10:33 +05:00
|
|
|
}
|
|
|
|
|
2020-06-24 15:21:54 +02:00
|
|
|
PyTupleObject *tuple = tuple_alloc(n);
|
2019-02-25 21:59:12 +05:00
|
|
|
if (tuple == NULL) {
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
PyObject **dst = tuple->ob_item;
|
|
|
|
for (Py_ssize_t i = 0; i < n; i++) {
|
|
|
|
PyObject *item = src[i];
|
2022-11-10 23:40:31 +01:00
|
|
|
dst[i] = Py_NewRef(item);
|
2019-02-25 21:59:12 +05:00
|
|
|
}
|
2021-07-01 02:30:46 +02:00
|
|
|
_PyObject_GC_TRACK(tuple);
|
2019-02-25 21:59:12 +05:00
|
|
|
return (PyObject *)tuple;
|
|
|
|
}
|
|
|
|
|
2024-07-02 12:30:14 -04:00
|
|
|
/* Build a tuple from n stack references. On success the references are
   stolen (converted into the tuple's strong references); on failure
   NULL is returned and the caller still owns every src[i]. */
PyObject *
_PyTuple_FromStackRefStealOnSuccess(const _PyStackRef *src, Py_ssize_t n)
{
    if (n == 0) {
        return tuple_get_empty();
    }
    PyTupleObject *tuple = tuple_alloc(n);
    if (tuple == NULL) {
        /* Allocation failed: nothing stolen, caller keeps ownership. */
        return NULL;
    }
    PyObject **dst = tuple->ob_item;
    for (Py_ssize_t i = 0; i < n; i++) {
        dst[i] = PyStackRef_AsPyObjectSteal(src[i]);
    }
    _PyObject_GC_TRACK(tuple);
    return (PyObject *)tuple;
}
|
|
|
|
|
2021-10-09 16:51:30 +01:00
|
|
|
PyObject *
|
|
|
|
_PyTuple_FromArraySteal(PyObject *const *src, Py_ssize_t n)
|
|
|
|
{
|
|
|
|
if (n == 0) {
|
|
|
|
return tuple_get_empty();
|
|
|
|
}
|
|
|
|
PyTupleObject *tuple = tuple_alloc(n);
|
|
|
|
if (tuple == NULL) {
|
2021-10-18 09:57:24 +01:00
|
|
|
for (Py_ssize_t i = 0; i < n; i++) {
|
|
|
|
Py_DECREF(src[i]);
|
|
|
|
}
|
2021-10-09 16:51:30 +01:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
PyObject **dst = tuple->ob_item;
|
|
|
|
for (Py_ssize_t i = 0; i < n; i++) {
|
|
|
|
PyObject *item = src[i];
|
|
|
|
dst[i] = item;
|
|
|
|
}
|
|
|
|
_PyObject_GC_TRACK(tuple);
|
|
|
|
return (PyObject *)tuple;
|
|
|
|
}
|
|
|
|
|
1997-05-02 03:12:38 +00:00
|
|
|
static PyObject *
|
2024-10-02 13:37:04 +02:00
|
|
|
tuple_slice(PyTupleObject *a, Py_ssize_t ilow,
|
2013-08-13 20:18:52 +02:00
|
|
|
Py_ssize_t ihigh)
|
1990-10-14 12:07:46 +00:00
|
|
|
{
|
2010-05-09 15:52:27 +00:00
|
|
|
if (ilow < 0)
|
|
|
|
ilow = 0;
|
|
|
|
if (ihigh > Py_SIZE(a))
|
|
|
|
ihigh = Py_SIZE(a);
|
|
|
|
if (ihigh < ilow)
|
|
|
|
ihigh = ilow;
|
|
|
|
if (ilow == 0 && ihigh == Py_SIZE(a) && PyTuple_CheckExact(a)) {
|
2022-11-10 23:40:31 +01:00
|
|
|
return Py_NewRef(a);
|
2010-05-09 15:52:27 +00:00
|
|
|
}
|
2019-02-25 21:59:12 +05:00
|
|
|
return _PyTuple_FromArray(a->ob_item + ilow, ihigh - ilow);
|
1990-10-14 12:07:46 +00:00
|
|
|
}
|
|
|
|
|
1997-05-02 03:12:38 +00:00
|
|
|
PyObject *
|
2006-02-15 17:27:45 +00:00
|
|
|
PyTuple_GetSlice(PyObject *op, Py_ssize_t i, Py_ssize_t j)
|
1992-01-14 18:45:33 +00:00
|
|
|
{
|
2010-05-09 15:52:27 +00:00
|
|
|
if (op == NULL || !PyTuple_Check(op)) {
|
|
|
|
PyErr_BadInternalCall();
|
|
|
|
return NULL;
|
|
|
|
}
|
2024-10-02 13:37:04 +02:00
|
|
|
return tuple_slice((PyTupleObject *)op, i, j);
|
1992-01-14 18:45:33 +00:00
|
|
|
}
|
|
|
|
|
1997-05-02 03:12:38 +00:00
|
|
|
/* sq_concat: return a new tuple holding aa's items followed by bb's.
   When one operand is empty and the other is an exact tuple, the other
   operand is returned directly (tuples are immutable). */
static PyObject *
tuple_concat(PyObject *aa, PyObject *bb)
{
    PyTupleObject *a = _PyTuple_CAST(aa);
    if (Py_SIZE(a) == 0 && PyTuple_CheckExact(bb)) {
        return Py_NewRef(bb);
    }
    if (!PyTuple_Check(bb)) {
        PyErr_Format(PyExc_TypeError,
                     "can only concatenate tuple (not \"%.200s\") to tuple",
                     Py_TYPE(bb)->tp_name);
        return NULL;
    }
    PyTupleObject *b = (PyTupleObject *)bb;

    if (Py_SIZE(b) == 0 && PyTuple_CheckExact(a)) {
        return Py_NewRef(a);
    }
    assert((size_t)Py_SIZE(a) + (size_t)Py_SIZE(b) < PY_SSIZE_T_MAX);
    Py_ssize_t size = Py_SIZE(a) + Py_SIZE(b);
    if (size == 0) {
        return tuple_get_empty();
    }

    PyTupleObject *np = tuple_alloc(size);
    if (np == NULL) {
        return NULL;
    }

    /* Copy a's items, then b's, taking a new reference to each. */
    PyObject **dest = np->ob_item;
    for (Py_ssize_t i = 0; i < Py_SIZE(a); i++) {
        dest[i] = Py_NewRef(a->ob_item[i]);
    }
    dest += Py_SIZE(a);
    for (Py_ssize_t i = 0; i < Py_SIZE(b); i++) {
        dest[i] = Py_NewRef(b->ob_item[i]);
    }

    _PyObject_GC_TRACK(np);
    return (PyObject *)np;
}
|
|
|
|
|
1997-05-02 03:12:38 +00:00
|
|
|
/* sq_repeat: return self repeated n times. Exact tuples repeated once
   (or empty) are returned as-is; n <= 0 yields the empty tuple. */
static PyObject *
tuple_repeat(PyObject *self, Py_ssize_t n)
{
    PyTupleObject *a = _PyTuple_CAST(self);
    const Py_ssize_t input_size = Py_SIZE(a);
    if (input_size == 0 || n == 1) {
        if (PyTuple_CheckExact(a)) {
            /* Since tuples are immutable, we can return a shared
               copy in this case */
            return Py_NewRef(a);
        }
    }
    if (input_size == 0 || n <= 0) {
        return tuple_get_empty();
    }
    assert(n>0);

    /* Guard against Py_ssize_t overflow in input_size * n. */
    if (input_size > PY_SSIZE_T_MAX / n)
        return PyErr_NoMemory();
    Py_ssize_t output_size = input_size * n;

    PyTupleObject *np = tuple_alloc(output_size);
    if (np == NULL)
        return NULL;

    PyObject **dest = np->ob_item;
    if (input_size == 1) {
        /* Fast path: bump the single item's refcount by n once, then
           fill every slot with it. */
        PyObject *elem = a->ob_item[0];
        _Py_RefcntAdd(elem, n);
        PyObject **dest_end = dest + output_size;
        while (dest < dest_end) {
            *dest++ = elem;
        }
    }
    else {
        /* Copy the source once (adding n references per item), then let
           _Py_memory_repeat() duplicate that first chunk n-1 times. */
        PyObject **src = a->ob_item;
        PyObject **src_end = src + input_size;
        while (src < src_end) {
            _Py_RefcntAdd(*src, n);
            *dest++ = *src++;
        }

        _Py_memory_repeat((char *)np->ob_item, sizeof(PyObject *)*output_size,
                          sizeof(PyObject *)*input_size);
    }
    _PyObject_GC_TRACK(np);
    return (PyObject *) np;
}
|
|
|
|
|
2017-03-19 08:47:58 +02:00
|
|
|
/*[clinic input]
|
|
|
|
tuple.index
|
|
|
|
|
|
|
|
value: object
|
2017-03-30 18:29:23 +03:00
|
|
|
start: slice_index(accept={int}) = 0
|
|
|
|
stop: slice_index(accept={int}, c_default="PY_SSIZE_T_MAX") = sys.maxsize
|
2017-03-19 08:47:58 +02:00
|
|
|
/
|
|
|
|
|
|
|
|
Return first index of value.
|
|
|
|
|
|
|
|
Raises ValueError if the value is not present.
|
|
|
|
[clinic start generated code]*/
|
|
|
|
|
2008-02-07 00:41:02 +00:00
|
|
|
static PyObject *
|
2017-03-19 08:47:58 +02:00
|
|
|
tuple_index_impl(PyTupleObject *self, PyObject *value, Py_ssize_t start,
|
|
|
|
Py_ssize_t stop)
|
2017-03-30 18:29:23 +03:00
|
|
|
/*[clinic end generated code: output=07b6f9f3cb5c33eb input=fb39e9874a21fe3f]*/
|
2008-02-07 00:41:02 +00:00
|
|
|
{
|
2017-03-19 08:47:58 +02:00
|
|
|
Py_ssize_t i;
|
2010-05-09 15:52:27 +00:00
|
|
|
|
|
|
|
if (start < 0) {
|
|
|
|
start += Py_SIZE(self);
|
|
|
|
if (start < 0)
|
|
|
|
start = 0;
|
|
|
|
}
|
|
|
|
if (stop < 0) {
|
|
|
|
stop += Py_SIZE(self);
|
|
|
|
}
|
2017-03-19 08:47:58 +02:00
|
|
|
else if (stop > Py_SIZE(self)) {
|
|
|
|
stop = Py_SIZE(self);
|
|
|
|
}
|
|
|
|
for (i = start; i < stop; i++) {
|
|
|
|
int cmp = PyObject_RichCompareBool(self->ob_item[i], value, Py_EQ);
|
2010-05-09 15:52:27 +00:00
|
|
|
if (cmp > 0)
|
|
|
|
return PyLong_FromSsize_t(i);
|
|
|
|
else if (cmp < 0)
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
PyErr_SetString(PyExc_ValueError, "tuple.index(x): x not in tuple");
|
|
|
|
return NULL;
|
2008-02-07 00:41:02 +00:00
|
|
|
}
|
|
|
|
|
2017-03-19 08:47:58 +02:00
|
|
|
/*[clinic input]
|
|
|
|
tuple.count
|
|
|
|
|
|
|
|
value: object
|
|
|
|
/
|
|
|
|
|
|
|
|
Return number of occurrences of value.
|
|
|
|
[clinic start generated code]*/
|
|
|
|
|
2008-02-07 00:41:02 +00:00
|
|
|
static PyObject *
|
2025-03-11 16:33:36 +01:00
|
|
|
tuple_count_impl(PyTupleObject *self, PyObject *value)
|
|
|
|
/*[clinic end generated code: output=cf02888d4bc15d7a input=531721aff65bd772]*/
|
2008-02-07 00:41:02 +00:00
|
|
|
{
|
2010-05-09 15:52:27 +00:00
|
|
|
Py_ssize_t count = 0;
|
|
|
|
Py_ssize_t i;
|
|
|
|
|
|
|
|
for (i = 0; i < Py_SIZE(self); i++) {
|
2017-03-19 08:47:58 +02:00
|
|
|
int cmp = PyObject_RichCompareBool(self->ob_item[i], value, Py_EQ);
|
2010-05-09 15:52:27 +00:00
|
|
|
if (cmp > 0)
|
|
|
|
count++;
|
|
|
|
else if (cmp < 0)
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
return PyLong_FromSsize_t(count);
|
2008-02-07 00:41:02 +00:00
|
|
|
}
|
|
|
|
|
2000-06-23 14:18:11 +00:00
|
|
|
static int
|
2024-10-02 13:37:04 +02:00
|
|
|
tuple_traverse(PyObject *self, visitproc visit, void *arg)
|
2000-06-23 14:18:11 +00:00
|
|
|
{
|
2024-10-02 13:37:04 +02:00
|
|
|
PyTupleObject *o = _PyTuple_CAST(self);
|
|
|
|
for (Py_ssize_t i = Py_SIZE(o); --i >= 0; ) {
|
2010-05-09 15:52:27 +00:00
|
|
|
Py_VISIT(o->ob_item[i]);
|
2024-10-02 13:37:04 +02:00
|
|
|
}
|
2010-05-09 15:52:27 +00:00
|
|
|
return 0;
|
2000-06-23 14:18:11 +00:00
|
|
|
}
|
|
|
|
|
2001-01-18 00:00:53 +00:00
|
|
|
/* tp_richcompare: lexicographic comparison of two tuples. */
static PyObject *
tuple_richcompare(PyObject *v, PyObject *w, int op)
{
    if (!PyTuple_Check(v) || !PyTuple_Check(w)) {
        Py_RETURN_NOTIMPLEMENTED;
    }

    PyTupleObject *vt = (PyTupleObject *)v;
    PyTupleObject *wt = (PyTupleObject *)w;
    Py_ssize_t vlen = Py_SIZE(vt);
    Py_ssize_t wlen = Py_SIZE(wt);

    /* Note: the corresponding code for lists has an "early out" test
     * here when op is EQ or NE and the lengths differ. That pays there,
     * but Tim was unable to find any real code where EQ/NE tuple
     * compares don't have the same length, so testing for it here would
     * have cost without benefit.
     */

    /* Find the first index whose items differ. Because tuples are
     * immutable, vlen and wlen stay valid across comparison calls. */
    Py_ssize_t i;
    for (i = 0; i < vlen && i < wlen; i++) {
        int eq = PyObject_RichCompareBool(vt->ob_item[i],
                                          wt->ob_item[i], Py_EQ);
        if (eq < 0) {
            return NULL;
        }
        if (eq == 0) {
            break;
        }
    }

    if (i >= vlen || i >= wlen) {
        /* One tuple is a prefix of the other -- compare sizes. */
        Py_RETURN_RICHCOMPARE(vlen, wlen, op);
    }

    /* A differing item exists -- EQ/NE are decided immediately. */
    if (op == Py_EQ) {
        Py_RETURN_FALSE;
    }
    if (op == Py_NE) {
        Py_RETURN_TRUE;
    }

    /* For ordering, the first differing pair decides the result. */
    return PyObject_RichCompare(vt->ob_item[i], wt->ob_item[i], op);
}
|
|
|
|
|
2002-07-17 16:30:39 +00:00
|
|
|
static PyObject *
|
2017-03-19 08:47:58 +02:00
|
|
|
tuple_subtype_new(PyTypeObject *type, PyObject *iterable);
|
|
|
|
|
|
|
|
/*[clinic input]
|
|
|
|
@classmethod
|
|
|
|
tuple.__new__ as tuple_new
|
|
|
|
iterable: object(c_default="NULL") = ()
|
|
|
|
/
|
|
|
|
|
|
|
|
Built-in immutable sequence.
|
|
|
|
|
|
|
|
If no argument is given, the constructor returns an empty tuple.
|
|
|
|
If iterable is specified the tuple is initialized from iterable's items.
|
|
|
|
|
|
|
|
If the argument is a tuple, the return value is the same object.
|
|
|
|
[clinic start generated code]*/
|
2001-08-30 03:11:59 +00:00
|
|
|
|
2001-08-02 04:15:00 +00:00
|
|
|
static PyObject *
|
2017-03-19 08:47:58 +02:00
|
|
|
tuple_new_impl(PyTypeObject *type, PyObject *iterable)
|
|
|
|
/*[clinic end generated code: output=4546d9f0d469bce7 input=86963bcde633b5a2]*/
|
2001-08-02 04:15:00 +00:00
|
|
|
{
|
2010-05-09 15:52:27 +00:00
|
|
|
if (type != &PyTuple_Type)
|
2017-03-19 08:47:58 +02:00
|
|
|
return tuple_subtype_new(type, iterable);
|
2010-05-09 15:52:27 +00:00
|
|
|
|
2020-06-24 15:21:54 +02:00
|
|
|
if (iterable == NULL) {
|
|
|
|
return tuple_get_empty();
|
|
|
|
}
|
|
|
|
else {
|
2017-03-19 08:47:58 +02:00
|
|
|
return PySequence_Tuple(iterable);
|
2020-06-24 15:21:54 +02:00
|
|
|
}
|
2001-08-02 04:15:00 +00:00
|
|
|
}
|
|
|
|
|
2020-03-13 22:57:00 +09:00
|
|
|
static PyObject *
|
|
|
|
tuple_vectorcall(PyObject *type, PyObject * const*args,
|
|
|
|
size_t nargsf, PyObject *kwnames)
|
|
|
|
{
|
2020-03-16 23:06:20 +09:00
|
|
|
if (!_PyArg_NoKwnames("tuple", kwnames)) {
|
2020-03-13 22:57:00 +09:00
|
|
|
return NULL;
|
|
|
|
}
|
2020-03-16 23:04:14 +09:00
|
|
|
|
2020-03-13 22:57:00 +09:00
|
|
|
Py_ssize_t nargs = PyVectorcall_NARGS(nargsf);
|
2020-03-16 23:04:14 +09:00
|
|
|
if (!_PyArg_CheckPositional("tuple", nargs, 0, 1)) {
|
2020-03-13 22:57:00 +09:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nargs) {
|
2022-01-21 23:33:43 +01:00
|
|
|
return tuple_new_impl(_PyType_CAST(type), args[0]);
|
2020-03-13 22:57:00 +09:00
|
|
|
}
|
2020-06-24 15:21:54 +02:00
|
|
|
else {
|
|
|
|
return tuple_get_empty();
|
|
|
|
}
|
2020-03-13 22:57:00 +09:00
|
|
|
}
|
|
|
|
|
2001-08-30 03:11:59 +00:00
|
|
|
/* Construct an instance of a tuple subclass: build a plain tuple from
   the iterable first, then copy its items into an object allocated via
   the subclass's tp_alloc. */
static PyObject *
tuple_subtype_new(PyTypeObject *type, PyObject *iterable)
{
    PyObject *tmp, *newobj, *item;
    Py_ssize_t i, n;

    assert(PyType_IsSubtype(type, &PyTuple_Type));
    // tuple subclasses must implement the GC protocol
    assert(_PyType_IS_GC(type));

    /* First materialize the items as an exact tuple. */
    tmp = tuple_new_impl(&PyTuple_Type, iterable);
    if (tmp == NULL)
        return NULL;
    assert(PyTuple_Check(tmp));
    /* This may allocate an empty tuple that is not the global one. */
    newobj = type->tp_alloc(type, n = PyTuple_GET_SIZE(tmp));
    if (newobj == NULL) {
        Py_DECREF(tmp);
        return NULL;
    }
    /* Copy items across, giving the new object its own references. */
    for (i = 0; i < n; i++) {
        item = PyTuple_GET_ITEM(tmp, i);
        PyTuple_SET_ITEM(newobj, i, Py_NewRef(item));
    }
    Py_DECREF(tmp);

    _PyTuple_RESET_HASH_CACHE(newobj);

    // Don't track if a subclass tp_alloc is PyType_GenericAlloc()
    if (!_PyObject_GC_IS_TRACKED(newobj)) {
        _PyObject_GC_TRACK(newobj);
    }
    return newobj;
}
|
|
|
|
|
1997-05-02 03:12:38 +00:00
|
|
|
/* Sequence protocol slots.  Assignment slots are NULL because tuples are
   immutable. */
static PySequenceMethods tuple_as_sequence = {
    tuple_length,                               /* sq_length */
    tuple_concat,                               /* sq_concat */
    tuple_repeat,                               /* sq_repeat */
    tuple_item,                                 /* sq_item */
    0,                                          /* sq_slice */
    0,                                          /* sq_ass_item */
    0,                                          /* sq_ass_slice */
    tuple_contains,                             /* sq_contains */
};
|
|
|
|
|
2002-06-11 10:55:12 +00:00
|
|
|
/* mp_subscript implementation: t[item] for both integer indices and slices.
 *
 * Integer path: negative indices are adjusted relative to the length, then
 * delegated to tuple_item() which does the bounds check.
 * Slice path: builds a new tuple, except that a full [::1] slice of an
 * exact tuple returns the original object (tuples are immutable, so
 * sharing is safe).  Returns a new reference or NULL on error. */
static PyObject*
tuple_subscript(PyObject *op, PyObject* item)
{
    PyTupleObject *self = _PyTuple_CAST(op);
    if (_PyIndex_Check(item)) {
        Py_ssize_t i = PyNumber_AsSsize_t(item, PyExc_IndexError);
        if (i == -1 && PyErr_Occurred())
            return NULL;
        if (i < 0)
            i += PyTuple_GET_SIZE(self);
        /* tuple_item() performs the final bounds check. */
        return tuple_item(op, i);
    }
    else if (PySlice_Check(item)) {
        Py_ssize_t start, stop, step, slicelength, i;
        size_t cur;
        PyObject* it;
        PyObject **src, **dest;

        if (PySlice_Unpack(item, &start, &stop, &step) < 0) {
            return NULL;
        }
        slicelength = PySlice_AdjustIndices(PyTuple_GET_SIZE(self), &start,
                                            &stop, step);

        if (slicelength <= 0) {
            return tuple_get_empty();
        }
        else if (start == 0 && step == 1 &&
                 slicelength == PyTuple_GET_SIZE(self) &&
                 PyTuple_CheckExact(self)) {
            /* Full slice of an exact tuple: share the object. */
            return Py_NewRef(self);
        }
        else {
            PyTupleObject* result = tuple_alloc(slicelength);
            if (!result) return NULL;

            src = self->ob_item;
            dest = result->ob_item;
            /* Copy every slicelength'th item, taking a new reference. */
            for (cur = start, i = 0; i < slicelength;
                 cur += step, i++) {
                it = Py_NewRef(src[cur]);
                dest[i] = it;
            }

            /* tuple_alloc() leaves the object untracked until its items
               are initialized. */
            _PyObject_GC_TRACK(result);
            return (PyObject *)result;
        }
    }
    else {
        PyErr_Format(PyExc_TypeError,
                     "tuple indices must be integers or slices, not %.200s",
                     Py_TYPE(item)->tp_name);
        return NULL;
    }
}
|
|
|
|
|
2017-03-19 08:47:58 +02:00
|
|
|
/*[clinic input]
|
|
|
|
tuple.__getnewargs__
|
|
|
|
[clinic start generated code]*/
|
|
|
|
|
2003-01-29 17:58:45 +00:00
|
|
|
/* __getnewargs__() pickle support: return a 1-tuple wrapping a copy of
   self's items (a full slice).  The "N" format code steals the slice's
   reference; if tuple_slice() fails, Py_BuildValue propagates NULL. */
static PyObject *
tuple___getnewargs___impl(PyTupleObject *self)
/*[clinic end generated code: output=25e06e3ee56027e2 input=1aeb4b286a21639a]*/
{
    return Py_BuildValue("(N)", tuple_slice(self, 0, Py_SIZE(self)));
}
|
|
|
|
|
|
|
|
/* Method table for tuple instances; the METHODDEF entries are generated
   by Argument Clinic in clinic/tupleobject.c.h. */
static PyMethodDef tuple_methods[] = {
    TUPLE___GETNEWARGS___METHODDEF
    TUPLE_INDEX_METHODDEF
    TUPLE_COUNT_METHODDEF
    {"__class_getitem__", Py_GenericAlias, METH_O|METH_CLASS, PyDoc_STR("See PEP 585")},
    {NULL, NULL} /* sentinel */
};
|
|
|
|
|
2002-06-11 10:55:12 +00:00
|
|
|
/* Mapping protocol slots: used so that t[slice] works through
   tuple_subscript().  mp_ass_subscript is NULL (immutable). */
static PyMappingMethods tuple_as_mapping = {
    tuple_length,       /* mp_length */
    tuple_subscript,    /* mp_subscript */
    0                   /* mp_ass_subscript */
};
|
|
|
|
|
2002-08-09 01:30:17 +00:00
|
|
|
static PyObject *tuple_iter(PyObject *seq);
|
|
|
|
|
1997-05-02 03:12:38 +00:00
|
|
|
/* The tuple type object.  Mutating slots are NULL because tuples are
   immutable; items are stored inline after the header (tp_itemsize is
   sizeof(PyObject *)). */
PyTypeObject PyTuple_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "tuple",                                    /* tp_name */
    sizeof(PyTupleObject) - sizeof(PyObject *), /* tp_basicsize */
    sizeof(PyObject *),                         /* tp_itemsize */
    tuple_dealloc,                              /* tp_dealloc */
    0,                                          /* tp_vectorcall_offset */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_as_async */
    tuple_repr,                                 /* tp_repr */
    0,                                          /* tp_as_number */
    &tuple_as_sequence,                         /* tp_as_sequence */
    &tuple_as_mapping,                          /* tp_as_mapping */
    tuple_hash,                                 /* tp_hash */
    0,                                          /* tp_call */
    0,                                          /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    0,                                          /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
        Py_TPFLAGS_BASETYPE | Py_TPFLAGS_TUPLE_SUBCLASS |
        _Py_TPFLAGS_MATCH_SELF | Py_TPFLAGS_SEQUENCE, /* tp_flags */
    tuple_new__doc__,                           /* tp_doc */
    tuple_traverse,                             /* tp_traverse */
    0,                                          /* tp_clear */
    tuple_richcompare,                          /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    tuple_iter,                                 /* tp_iter */
    0,                                          /* tp_iternext */
    tuple_methods,                              /* tp_methods */
    0,                                          /* tp_members */
    0,                                          /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
    0,                                          /* tp_descr_get */
    0,                                          /* tp_descr_set */
    0,                                          /* tp_dictoffset */
    0,                                          /* tp_init */
    0,                                          /* tp_alloc */
    tuple_new,                                  /* tp_new */
    PyObject_GC_Del,                            /* tp_free */
    .tp_vectorcall = tuple_vectorcall,
    .tp_version_tag = _Py_TYPE_VERSION_TUPLE,
};
|
1993-10-26 17:58:25 +00:00
|
|
|
|
|
|
|
/* The following function breaks the notion that tuples are immutable:
|
|
|
|
it changes the size of a tuple. We get away with this only if there
|
|
|
|
is only one module referencing the object. You can also think of it
|
2000-10-05 19:36:49 +00:00
|
|
|
as creating a new tuple object and destroying the old one, only more
|
|
|
|
efficiently. In any case, don't use this if the tuple may already be
|
2001-05-28 22:30:08 +00:00
|
|
|
known to some other part of the code. */
|
1993-10-26 17:58:25 +00:00
|
|
|
|
|
|
|
/* Resize *pv in place to hold `newsize` items.
 *
 * Only legal when *pv is an exact tuple with no other owners (refcount 1,
 * or the empty singleton).  On success returns 0 with *pv pointing at the
 * (possibly relocated) tuple; on failure returns -1 and sets *pv to NULL.
 * Items dropped by shrinking are DECREF'ed; slots added by growing are
 * zeroed and must be filled by the caller. */
int
_PyTuple_Resize(PyObject **pv, Py_ssize_t newsize)
{
    PyTupleObject *v;
    PyTupleObject *sv;
    Py_ssize_t i;
    Py_ssize_t oldsize;

    v = (PyTupleObject *) *pv;
    /* Refuse anything shared, non-tuple, or NULL: resizing would break
       the immutability contract for other referents. */
    if (v == NULL || !Py_IS_TYPE(v, &PyTuple_Type) ||
        (Py_SIZE(v) != 0 && Py_REFCNT(v) != 1)) {
        *pv = 0;
        Py_XDECREF(v);
        PyErr_BadInternalCall();
        return -1;
    }

    oldsize = Py_SIZE(v);
    if (oldsize == newsize) {
        return 0;
    }
    if (newsize == 0) {
        Py_DECREF(v);
        *pv = tuple_get_empty();
        return 0;
    }
    if (oldsize == 0) {
#ifdef Py_DEBUG
        assert(v == &_Py_SINGLETON(tuple_empty));
#endif
        /* The empty tuple is statically allocated so we never
           resize it in-place. */
        Py_DECREF(v);
        *pv = PyTuple_New(newsize);
        return *pv == NULL ? -1 : 0;
    }

    /* Untrack and forget the old object identity before the raw resize:
       the allocation may move and must not be visible to the GC or the
       refcount machinery while in flux. */
    if (_PyObject_GC_IS_TRACKED(v)) {
        _PyObject_GC_UNTRACK(v);
    }
#ifdef Py_TRACE_REFS
    _Py_ForgetReference((PyObject *) v);
#endif
    /* DECREF items deleted by shrinkage */
    for (i = newsize; i < oldsize; i++) {
        Py_CLEAR(v->ob_item[i]);
    }
    _PyReftracerTrack((PyObject *)v, PyRefTracer_DESTROY);
    sv = PyObject_GC_Resize(PyTupleObject, v, newsize);
    if (sv == NULL) {
        *pv = NULL;
#ifdef Py_REF_DEBUG
        _Py_DecRefTotal(_PyThreadState_GET());
#endif
        PyObject_GC_Del(v);
        return -1;
    }
    /* Re-register the (possibly moved) object without bumping the global
       refcount total: it was never logically destroyed. */
    _Py_NewReferenceNoTotal((PyObject *) sv);
    /* Zero out items added by growing */
    if (newsize > oldsize)
        memset(&sv->ob_item[oldsize], 0,
               sizeof(*sv->ob_item) * (newsize - oldsize));
    *pv = (PyObject *) sv;
    _PyObject_GC_TRACK(sv);
    return 0;
}
|
1997-08-05 02:16:08 +00:00
|
|
|
|
2002-08-09 01:30:17 +00:00
|
|
|
/*********************** Tuple Iterator **************************/
|
|
|
|
|
2025-01-03 15:35:05 +01:00
|
|
|
#define _PyTupleIterObject_CAST(op) ((_PyTupleIterObject *)(op))
|
2002-08-09 01:30:17 +00:00
|
|
|
|
|
|
|
/* Deallocate a tuple iterator: untrack from the GC first, release the
   reference to the underlying tuple (may be NULL if exhausted), then push
   the iterator onto the per-interpreter freelist for reuse. */
static void
tupleiter_dealloc(PyObject *self)
{
    _PyTupleIterObject *it = _PyTupleIterObject_CAST(self);
    _PyObject_GC_UNTRACK(it);
    Py_XDECREF(it->it_seq);
    /* Freelist reuse is only valid for the exact iterator type. */
    assert(Py_IS_TYPE(self, &PyTupleIter_Type));
    _Py_FREELIST_FREE(tuple_iters, it, PyObject_GC_Del);
}
|
|
|
|
|
|
|
|
static int
|
2025-01-03 15:35:05 +01:00
|
|
|
tupleiter_traverse(PyObject *self, visitproc visit, void *arg)
|
2002-08-09 01:30:17 +00:00
|
|
|
{
|
2025-01-03 15:35:05 +01:00
|
|
|
_PyTupleIterObject *it = _PyTupleIterObject_CAST(self);
|
2010-05-09 15:52:27 +00:00
|
|
|
Py_VISIT(it->it_seq);
|
|
|
|
return 0;
|
2002-08-09 01:30:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* tp_iternext: return a new reference to the next item, or NULL when
 * exhausted (no StopIteration is set; the protocol treats bare NULL as
 * end-of-iteration).
 *
 * Free-threaded builds keep it_seq alive for the iterator's lifetime and
 * use relaxed atomics on it_index, so concurrent use cannot crash (it may
 * skip or duplicate items — see gh-115999).  Default builds clear it_seq
 * on exhaustion so the tuple can be freed early. */
static PyObject *
tupleiter_next(PyObject *self)
{
    _PyTupleIterObject *it = _PyTupleIterObject_CAST(self);
    PyTupleObject *seq;
    PyObject *item;

    assert(it != NULL);
    seq = it->it_seq;
#ifndef Py_GIL_DISABLED
    /* it_seq == NULL means the iterator was already exhausted. */
    if (seq == NULL)
        return NULL;
#endif
    assert(PyTuple_Check(seq));

    Py_ssize_t index = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index);
    if (index < PyTuple_GET_SIZE(seq)) {
        FT_ATOMIC_STORE_SSIZE_RELAXED(it->it_index, index + 1);
        item = PyTuple_GET_ITEM(seq, index);
        return Py_NewRef(item);
    }

#ifndef Py_GIL_DISABLED
    /* Exhausted: drop the tuple reference eagerly (GIL builds only). */
    it->it_seq = NULL;
    Py_DECREF(seq);
#endif
    return NULL;
}
|
|
|
|
|
2005-09-24 21:23:05 +00:00
|
|
|
/* __length_hint__: number of items remaining, as a Python int.
   In free-threaded builds it_seq is never cleared, so exhaustion is
   detected by comparing the (atomically read) index to the length. */
static PyObject *
tupleiter_len(PyObject *self, PyObject *Py_UNUSED(ignored))
{
    _PyTupleIterObject *it = _PyTupleIterObject_CAST(self);
    Py_ssize_t len = 0;
#ifdef Py_GIL_DISABLED
    Py_ssize_t idx = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index);
    Py_ssize_t seq_len = PyTuple_GET_SIZE(it->it_seq);
    if (idx < seq_len)
        len = seq_len - idx;
#else
    /* it_seq == NULL after exhaustion -> hint of 0. */
    if (it->it_seq)
        len = PyTuple_GET_SIZE(it->it_seq) - it->it_index;
#endif
    return PyLong_FromSsize_t(len);
}
|
|
|
|
|
2006-02-11 21:32:43 +00:00
|
|
|
PyDoc_STRVAR(length_hint_doc, "Private method returning an estimate of len(list(it)).");
|
2005-09-24 21:23:05 +00:00
|
|
|
|
2012-04-03 10:49:41 +00:00
|
|
|
/* __reduce__ pickle support: an active iterator reduces to
   (iter, (seq,), index); an exhausted one to (iter, ((),)) so unpickling
   yields an empty iterator. */
static PyObject *
tupleiter_reduce(PyObject *self, PyObject *Py_UNUSED(ignored))
{
    PyObject *iter = _PyEval_GetBuiltin(&_Py_ID(iter));

    /* _PyEval_GetBuiltin can invoke arbitrary code,
     * call must be before access of iterator pointers.
     * see issue #101765 */
    _PyTupleIterObject *it = _PyTupleIterObject_CAST(self);

#ifdef Py_GIL_DISABLED
    /* Free-threaded builds never clear it_seq; exhaustion is index-based. */
    Py_ssize_t idx = FT_ATOMIC_LOAD_SSIZE_RELAXED(it->it_index);
    if (idx < PyTuple_GET_SIZE(it->it_seq))
        return Py_BuildValue("N(O)n", iter, it->it_seq, idx);
#else
    if (it->it_seq)
        return Py_BuildValue("N(O)n", iter, it->it_seq, it->it_index);
#endif
    return Py_BuildValue("N(())", iter);
}
|
|
|
|
|
|
|
|
/* __setstate__ pickle support: restore the iteration index, clamped to
   [0, len(seq)] so a malformed pickle cannot put the iterator out of
   bounds.  A no-op if the iterator is already exhausted (it_seq NULL). */
static PyObject *
tupleiter_setstate(PyObject *self, PyObject *state)
{
    _PyTupleIterObject *it = _PyTupleIterObject_CAST(self);
    Py_ssize_t index = PyLong_AsSsize_t(state);
    if (index == -1 && PyErr_Occurred())
        return NULL;
    if (it->it_seq != NULL) {
        if (index < 0)
            index = 0;
        else if (index > PyTuple_GET_SIZE(it->it_seq))
            index = PyTuple_GET_SIZE(it->it_seq); /* exhausted iterator */
        FT_ATOMIC_STORE_SSIZE_RELAXED(it->it_index, index);
    }
    Py_RETURN_NONE;
}
|
|
|
|
|
|
|
|
PyDoc_STRVAR(reduce_doc, "Return state information for pickling.");

PyDoc_STRVAR(setstate_doc, "Set state information for unpickling.");

/* Methods exposed on tuple_iterator instances: length hint plus the
   __reduce__/__setstate__ pair that makes the iterator picklable. */
static PyMethodDef tupleiter_methods[] = {
    {"__length_hint__", tupleiter_len, METH_NOARGS, length_hint_doc},
    {"__reduce__", tupleiter_reduce, METH_NOARGS, reduce_doc},
    {"__setstate__", tupleiter_setstate, METH_O, setstate_doc},
    {NULL, NULL, 0, NULL}  /* sentinel */
};
|
|
|
|
|
2002-08-09 01:30:17 +00:00
|
|
|
/* Type object for the iterator returned by iter(sometuple); instances are
   _PyTupleIterObject (see tuple_iter() below).  GC-enabled because the
   iterator keeps a strong reference to its tuple. */
PyTypeObject PyTupleIter_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "tuple_iterator",                           /* tp_name */
    sizeof(_PyTupleIterObject),                 /* tp_basicsize */
    0,                                          /* tp_itemsize */
    /* methods */
    tupleiter_dealloc,                          /* tp_dealloc */
    0,                                          /* tp_vectorcall_offset */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_as_async */
    0,                                          /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    0,                                          /* tp_hash */
    0,                                          /* tp_call */
    0,                                          /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    0,                                          /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,    /* tp_flags */
    0,                                          /* tp_doc */
    tupleiter_traverse,                         /* tp_traverse */
    0,                                          /* tp_clear */
    0,                                          /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    PyObject_SelfIter,                          /* tp_iter */
    tupleiter_next,                             /* tp_iternext */
    tupleiter_methods,                          /* tp_methods */
    0,                                          /* tp_members */
};
|
2006-04-21 10:40:58 +00:00
|
|
|
|
|
|
|
/* Create a new iterator over 'seq' (the iter(tuple) entry point).
 *
 * Recycles an iterator object from the per-interpreter freelist when one
 * is available, otherwise allocates a fresh GC object.  The returned
 * iterator holds a strong reference to 'seq'.  Returns NULL with an
 * exception set on error; 'seq' must be a tuple.
 */
static PyObject *
tuple_iter(PyObject *seq)
{
    if (!PyTuple_Check(seq)) {
        PyErr_BadInternalCall();
        return NULL;
    }

    /* Prefer a recycled iterator over a fresh allocation. */
    _PyTupleIterObject *iter = _Py_FREELIST_POP(_PyTupleIterObject, tuple_iters);
    if (iter == NULL) {
        iter = PyObject_GC_New(_PyTupleIterObject, &PyTupleIter_Type);
        if (iter == NULL) {
            return NULL;
        }
    }

    iter->it_seq = (PyTupleObject *)Py_NewRef(seq);
    iter->it_index = 0;
    _PyObject_GC_TRACK(iter);
    return (PyObject *)iter;
}
|
2022-02-28 15:15:48 -07:00
|
|
|
|
|
|
|
|
|
|
|
/*************
|
|
|
|
* freelists *
|
|
|
|
*************/
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
maybe_freelist_push(PyTupleObject *op)
|
|
|
|
{
|
2024-07-22 12:08:27 -04:00
|
|
|
if (!Py_IS_TYPE(op, &PyTuple_Type)) {
|
2022-02-28 15:15:48 -07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
Py_ssize_t index = Py_SIZE(op) - 1;
|
2024-07-22 12:08:27 -04:00
|
|
|
if (index < PyTuple_MAXSAVESIZE) {
|
|
|
|
return _Py_FREELIST_PUSH(tuples[index], op, Py_tuple_MAXFREELIST);
|
2022-02-28 15:15:48 -07:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Print summary info about the state of the optimized allocator */
|
|
|
|
void
|
|
|
|
_PyTuple_DebugMallocStats(FILE *out)
|
|
|
|
{
|
2024-07-22 12:08:27 -04:00
|
|
|
for (int i = 0; i < PyTuple_MAXSAVESIZE; i++) {
|
2022-02-28 15:15:48 -07:00
|
|
|
int len = i + 1;
|
|
|
|
char buf[128];
|
|
|
|
PyOS_snprintf(buf, sizeof(buf),
|
|
|
|
"free %d-sized PyTupleObject", len);
|
2024-07-22 12:08:27 -04:00
|
|
|
_PyDebugAllocatorStats(out, buf, _Py_FREELIST_SIZE(tuples[i]),
|
2022-02-28 15:15:48 -07:00
|
|
|
_PyObject_VAR_SIZE(&PyTuple_Type, len));
|
|
|
|
}
|
|
|
|
}
|