/* glcore.h */
#ifndef __gl_core_h_
#define __gl_core_h_
/*
* SGI FREE SOFTWARE LICENSE B (Version 2.0, Sept. 18, 2008)
* Copyright (C) 1991-2000 Silicon Graphics, Inc. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice including the dates of first publication and
* either this permission notice or a reference to
* http://oss.sgi.com/projects/FreeB/
* shall be included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* SILICON GRAPHICS, INC. BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
* OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Except as contained in this notice, the name of Silicon Graphics, Inc.
* shall not be used in advertising or otherwise to promote the sale, use or
* other dealings in this Software without prior written authorization from
* Silicon Graphics, Inc.
*/
#if !defined(_WIN32_WCE)
#include <sys/types.h>
#endif
#define GL_CORE_SGI 1
#define GL_CORE_MESA 2
#define GL_CORE_APPLE 4
#define GL_CORE_WINDOWS 8
typedef struct __GLcontextRec __GLcontext;
/*
** This file defines the interface between the GL core and the surrounding
** "operating system" that supports it (currently the GLX or WGL extensions).
**
** Members (data and function pointers) are documented as imported or
** exported according to how they are used by the core rendering functions.
** Imported members are initialized by the "operating system" and used by
** the core functions. Exported members are initialized by the core functions
** and used by the "operating system".
*/
/**
* Mode and limit information for a context. This information is
* kept around in the context so that values can be used during
* command execution, and for returning information about the
* context to the application.
*
* Instances of this structure are shared by the driver and the loader. To
* maintain binary compatibility, new fields \b must be added only to the
* end of the structure.
*
* \sa _gl_context_modes_create
*/
typedef struct __GLcontextModesRec {
struct __GLcontextModesRec * next;
GLboolean rgbMode;
GLboolean floatMode;
GLboolean colorIndexMode;
GLuint doubleBufferMode;
GLuint stereoMode;
GLboolean haveAccumBuffer;
GLboolean haveDepthBuffer;
GLboolean haveStencilBuffer;
GLint redBits, greenBits, blueBits, alphaBits; /* bits per comp */
GLuint redMask, greenMask, blueMask, alphaMask;
GLint rgbBits; /* total bits for rgb */
GLint indexBits; /* total bits for colorindex */
GLint accumRedBits, accumGreenBits, accumBlueBits, accumAlphaBits;
GLint depthBits;
GLint stencilBits;
GLint numAuxBuffers;
GLint level;
GLint pixmapMode;
/* GLX */
GLint visualID;
GLint visualType; /**< One of the GLX X visual types. (i.e.,
* \c GLX_TRUE_COLOR, etc.)
*/
/* EXT_visual_rating / GLX 1.2 */
GLint visualRating;
/* EXT_visual_info / GLX 1.2 */
GLint transparentPixel;
/* colors are floats scaled to ints */
GLint transparentRed, transparentGreen, transparentBlue, transparentAlpha;
GLint transparentIndex;
/* ARB_multisample / SGIS_multisample */
GLint sampleBuffers;
GLint samples;
/* SGIX_fbconfig / GLX 1.3 */
GLint drawableType;
GLint renderType;
GLint xRenderable;
GLint fbconfigID;
/* SGIX_pbuffer / GLX 1.3 */
GLint maxPbufferWidth;
GLint maxPbufferHeight;
GLint maxPbufferPixels;
GLint optimalPbufferWidth; /* Only for SGIX_pbuffer. */
GLint optimalPbufferHeight; /* Only for SGIX_pbuffer. */
/* SGIX_visual_select_group */
GLint visualSelectGroup;
/* OML_swap_method */
GLint swapMethod;
GLint screen;
/* EXT_texture_from_pixmap */
GLint bindToTextureRgb;
GLint bindToTextureRgba;
GLint bindToMipmapTexture;
GLint bindToTextureTargets;
GLint yInverted;
} __GLcontextModes;
/* Several fields of __GLcontextModes can take these as values. Since the
* GLX header files may not be available everywhere these values are needed,
* they are redefined here.
*/
#define GLX_NONE 0x8000
#define GLX_SLOW_CONFIG 0x8001
#define GLX_TRUE_COLOR 0x8002
#define GLX_DIRECT_COLOR 0x8003
#define GLX_PSEUDO_COLOR 0x8004
#define GLX_STATIC_COLOR 0x8005
#define GLX_GRAY_SCALE 0x8006
#define GLX_STATIC_GRAY 0x8007
#define GLX_TRANSPARENT_RGB 0x8008
#define GLX_TRANSPARENT_INDEX 0x8009
#define GLX_NON_CONFORMANT_CONFIG 0x800D
#define GLX_SWAP_EXCHANGE_OML 0x8061
#define GLX_SWAP_COPY_OML 0x8062
#define GLX_SWAP_UNDEFINED_OML 0x8063
#define GLX_DONT_CARE 0xFFFFFFFF
#define GLX_RGBA_BIT 0x00000001
#define GLX_COLOR_INDEX_BIT 0x00000002
#define GLX_WINDOW_BIT 0x00000001
#define GLX_PIXMAP_BIT 0x00000002
#define GLX_PBUFFER_BIT 0x00000004
#define GLX_BIND_TO_TEXTURE_RGB_EXT 0x20D0
#define GLX_BIND_TO_TEXTURE_RGBA_EXT 0x20D1
#define GLX_BIND_TO_MIPMAP_TEXTURE_EXT 0x20D2
#define GLX_BIND_TO_TEXTURE_TARGETS_EXT 0x20D3
#define GLX_Y_INVERTED_EXT 0x20D4
#define GLX_TEXTURE_1D_BIT_EXT 0x00000001
#define GLX_TEXTURE_2D_BIT_EXT 0x00000002
#define GLX_TEXTURE_RECTANGLE_BIT_EXT 0x00000004
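/* A minimal usage sketch (not part of the original header; the function
 * name is hypothetical): walking the linked list of modes to pick a
 * double-buffered RGBA config that has a depth buffer. */
#if 0
static const __GLcontextModes *
pick_mode_example(const __GLcontextModes *modes)
{
    const __GLcontextModes *m;
    for (m = modes; m != NULL; m = m->next) {
        if (m->rgbMode && m->doubleBufferMode && m->haveDepthBuffer) {
            return m;   /* first acceptable mode in the list */
        }
    }
    return NULL;        /* no matching mode found */
}
#endif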
#endif /* __gl_core_h_ */
/* pycore_accu.h */
#ifndef Py_LIMITED_API
#ifndef Py_INTERNAL_ACCU_H
#define Py_INTERNAL_ACCU_H
#ifdef __cplusplus
extern "C" {
#endif
/*** This is a private API for use by the interpreter and the stdlib.
*** Its definition may be changed or removed at any moment.
***/
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/*
* A two-level accumulator of unicode objects that avoids both the overhead
* of keeping a huge number of small separate objects, and the quadratic
* behaviour of using a naive repeated concatenation scheme.
*/
#undef small /* defined by some Windows headers */
typedef struct {
PyObject *large; /* A list of previously accumulated large strings */
PyObject *small; /* Pending small strings */
} _PyAccu;
PyAPI_FUNC(int) _PyAccu_Init(_PyAccu *acc);
PyAPI_FUNC(int) _PyAccu_Accumulate(_PyAccu *acc, PyObject *unicode);
PyAPI_FUNC(PyObject *) _PyAccu_FinishAsList(_PyAccu *acc);
PyAPI_FUNC(PyObject *) _PyAccu_Finish(_PyAccu *acc);
PyAPI_FUNC(void) _PyAccu_Destroy(_PyAccu *acc);
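/* A minimal usage sketch (illustrative; the function name is hypothetical):
 * accumulate several small strings, then join them into a single unicode
 * object in one pass. */
#if 0
static PyObject *
join_parts_example(PyObject **parts, Py_ssize_t n)
{
    _PyAccu acc;
    Py_ssize_t i;
    if (_PyAccu_Init(&acc) < 0) {
        return NULL;
    }
    for (i = 0; i < n; i++) {
        if (_PyAccu_Accumulate(&acc, parts[i]) < 0) {
            _PyAccu_Destroy(&acc);
            return NULL;
        }
    }
    return _PyAccu_Finish(&acc);   /* joined string, or NULL on error */
}
#endif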
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_ACCU_H */
#endif /* !Py_LIMITED_API */
/* pycore_object.h */
#ifndef Py_INTERNAL_OBJECT_H
#define Py_INTERNAL_OBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_pystate.h" /* _PyRuntime */
PyAPI_FUNC(int) _PyType_CheckConsistency(PyTypeObject *type);
PyAPI_FUNC(int) _PyDict_CheckConsistency(PyObject *mp, int check_content);
/* Tell the GC to track this object.
*
* NB: While the object is tracked by the collector, it must be safe to call the
* tp_traverse method.
*
* Internal note: _PyRuntime.gc.generation0->_gc_prev doesn't have any bit flags
* because it's not an object header, so we don't use _PyGCHead_PREV() and
* _PyGCHead_SET_PREV() for it to avoid unnecessary bitwise operations.
*
* The PyObject_GC_Track() function is the public version of this macro.
*/
static inline void _PyObject_GC_TRACK_impl(const char *filename, int lineno,
PyObject *op)
{
_PyObject_ASSERT_FROM(op, !_PyObject_GC_IS_TRACKED(op),
"object already tracked by the garbage collector",
filename, lineno, "_PyObject_GC_TRACK");
PyGC_Head *gc = _Py_AS_GC(op);
_PyObject_ASSERT_FROM(op,
(gc->_gc_prev & _PyGC_PREV_MASK_COLLECTING) == 0,
"object is in generation which is garbage collected",
filename, lineno, "_PyObject_GC_TRACK");
PyGC_Head *last = (PyGC_Head*)(_PyRuntime.gc.generation0->_gc_prev);
_PyGCHead_SET_NEXT(last, gc);
_PyGCHead_SET_PREV(gc, last);
_PyGCHead_SET_NEXT(gc, _PyRuntime.gc.generation0);
_PyRuntime.gc.generation0->_gc_prev = (uintptr_t)gc;
}
#define _PyObject_GC_TRACK(op) \
_PyObject_GC_TRACK_impl(__FILE__, __LINE__, _PyObject_CAST(op))
/* Tell the GC to stop tracking this object.
*
* Internal note: This may be called during garbage collection, so the
* _PyGC_PREV_MASK_COLLECTING bit must be cleared. But the
* _PyGC_PREV_MASK_FINALIZED bit is kept.
*
* The object must be tracked by the GC.
*
* The PyObject_GC_UnTrack() function is the public version of this macro.
*/
static inline void _PyObject_GC_UNTRACK_impl(const char *filename, int lineno,
PyObject *op)
{
_PyObject_ASSERT_FROM(op, _PyObject_GC_IS_TRACKED(op),
"object not tracked by the garbage collector",
filename, lineno, "_PyObject_GC_UNTRACK");
PyGC_Head *gc = _Py_AS_GC(op);
PyGC_Head *prev = _PyGCHead_PREV(gc);
PyGC_Head *next = _PyGCHead_NEXT(gc);
_PyGCHead_SET_NEXT(prev, next);
_PyGCHead_SET_PREV(next, prev);
gc->_gc_next = 0;
gc->_gc_prev &= _PyGC_PREV_MASK_FINALIZED;
}
#define _PyObject_GC_UNTRACK(op) \
_PyObject_GC_UNTRACK_impl(__FILE__, __LINE__, _PyObject_CAST(op))
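/* A minimal sketch (illustrative; MyObject and its 'item' field are
 * hypothetical) of the canonical tp_dealloc pattern: untrack the object
 * first, so the collector cannot traverse a half-torn-down object. */
#if 0
static void
myobject_dealloc_example(MyObject *op)
{
    _PyObject_GC_UNTRACK(op);             /* must come before clearing fields */
    Py_XDECREF(op->item);                 /* hypothetical traversed field */
    Py_TYPE(op)->tp_free((PyObject *)op);
}
#endif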
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_OBJECT_H */
/* pycore_getopt.h */
#ifndef Py_INTERNAL_PYGETOPT_H
#define Py_INTERNAL_PYGETOPT_H
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
extern int _PyOS_opterr;
extern Py_ssize_t _PyOS_optind;
extern const wchar_t *_PyOS_optarg;
extern void _PyOS_ResetGetOpt(void);
typedef struct {
const wchar_t *name;
int has_arg;
int val;
} _PyOS_LongOption;
extern int _PyOS_GetOpt(Py_ssize_t argc, wchar_t * const *argv, int *longindex);
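/* A minimal usage sketch (illustrative; the option character is made up):
 * the typical scanning loop over a wide-character argv. _PyOS_GetOpt()
 * returns -1 once all options have been consumed. */
#if 0
static void
scan_args_example(Py_ssize_t argc, wchar_t * const *argv)
{
    int c, longindex = -1;
    _PyOS_ResetGetOpt();
    while ((c = _PyOS_GetOpt(argc, argv, &longindex)) != -1) {
        if (c == 'v') {
            /* handle a hypothetical -v flag; for an option taking an
               argument, _PyOS_optarg would point at it */
        }
    }
}
#endif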
#endif /* !Py_INTERNAL_PYGETOPT_H */
/* pycore_condvar.h */
#ifndef Py_INTERNAL_CONDVAR_H
#define Py_INTERNAL_CONDVAR_H
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#ifndef _POSIX_THREADS
/* This means pthreads are not implemented in the libc headers, hence the
macro is not present in unistd.h. But they can still be implemented as an
external library (e.g. GNU Pth in pthread emulation mode) */
# ifdef HAVE_PTHREAD_H
# include <pthread.h> /* _POSIX_THREADS */
# endif
#endif
#ifdef _POSIX_THREADS
/*
* POSIX support
*/
#define Py_HAVE_CONDVAR
#include <pthread.h>
#define PyMUTEX_T pthread_mutex_t
#define PyCOND_T pthread_cond_t
#elif defined(NT_THREADS)
/*
* Windows (XP, 2003 server and later, as well as (hopefully) CE) support
*
* Emulated condition variables that work with XP and later, plus
* native support on Vista and onwards.
*/
#define Py_HAVE_CONDVAR
/* include windows if it hasn't been done before */
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
/* options */
/* non-emulated condition variables are provided for those that want
* to target Windows Vista. Modify this macro to enable them.
*/
#ifndef _PY_EMULATED_WIN_CV
#define _PY_EMULATED_WIN_CV 1 /* use emulated condition variables */
#endif
/* fall back to emulation if not targeting Vista */
#if !defined NTDDI_VISTA || NTDDI_VERSION < NTDDI_VISTA
#undef _PY_EMULATED_WIN_CV
#define _PY_EMULATED_WIN_CV 1
#endif
#if _PY_EMULATED_WIN_CV
typedef CRITICAL_SECTION PyMUTEX_T;
/* The ConditionVariable object. From XP onwards it is easily emulated
with a Semaphore.
Semaphores are available on Windows XP (2003 server) and later.
We use a Semaphore rather than an auto-reset event, because although
an auto-reset event might appear to solve the lost-wakeup bug (the race
condition between releasing the outer lock and waiting) because it
maintains state even though a wait hasn't happened, there is still
a lost-wakeup problem if more than one thread is interrupted in the
critical place. A semaphore solves that, because its state is
counted, not Boolean.
Because it is ok to signal a condition variable with no one
waiting, we need to keep track of the number of
waiting threads. Otherwise, the semaphore's state could rise
without bound. This also helps reduce the number of "spurious wakeups"
that would otherwise happen.
*/
typedef struct _PyCOND_T
{
HANDLE sem;
int waiting; /* to allow PyCOND_SIGNAL to be a no-op */
} PyCOND_T;
#else /* !_PY_EMULATED_WIN_CV */
/* Use native Win7 primitives if build target is Win7 or higher */
/* SRWLOCK is faster and better than CriticalSection */
typedef SRWLOCK PyMUTEX_T;
typedef CONDITION_VARIABLE PyCOND_T;
#endif /* _PY_EMULATED_WIN_CV */
#endif /* _POSIX_THREADS, NT_THREADS */
#endif /* Py_INTERNAL_CONDVAR_H */
/* pycore_tupleobject.h */
#ifndef Py_INTERNAL_TUPLEOBJECT_H
#define Py_INTERNAL_TUPLEOBJECT_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "tupleobject.h"
#define _PyTuple_ITEMS(op) (_PyTuple_CAST(op)->ob_item)
PyAPI_FUNC(PyObject *) _PyTuple_FromArray(PyObject *const *, Py_ssize_t);
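/* A minimal usage sketch (the helper name is hypothetical): packing a C
 * array of objects into a new tuple. _PyTuple_FromArray() takes its own
 * references to the items, so the caller keeps its references. */
#if 0
static PyObject *
pack_pair_example(PyObject *a, PyObject *b)
{
    PyObject *items[2];
    items[0] = a;
    items[1] = b;
    return _PyTuple_FromArray(items, 2);   /* new tuple, or NULL on error */
}
#endif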
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_TUPLEOBJECT_H */
/* pycore_hamt.h */
#ifndef Py_INTERNAL_HAMT_H
#define Py_INTERNAL_HAMT_H
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/*
The HAMT tree is shaped by the hashes of its keys. Every group of 5 bits of a
hash denotes the exact position of the key in one level of the tree. Since
we're using 32-bit hashes, we can have at most 7 such levels. If there are
two distinct keys with equal hashes, they will have to occupy the same
cell in the 7th level of the tree -- so we put them in a "collision" node,
which brings the total possible tree depth to 8. Read more about the actual
layout of the HAMT tree in `hamt.c`.
This constant is used to define a data structure for storing iteration state.
*/
#define _Py_HAMT_MAX_TREE_DEPTH 8
#define PyHamt_Check(o) (Py_TYPE(o) == &_PyHamt_Type)
/* Abstract tree node. */
typedef struct {
PyObject_HEAD
} PyHamtNode;
/* An HAMT immutable mapping collection. */
typedef struct {
PyObject_HEAD
PyHamtNode *h_root;
PyObject *h_weakreflist;
Py_ssize_t h_count;
} PyHamtObject;
/* A struct to hold the state of depth-first traverse of the tree.
HAMT is an immutable collection. Iterators will hold a strong reference
to it, and every node in the HAMT has strong references to its children.
So for iterators, we can implement zero allocations and zero reference
inc/dec depth-first iteration.
- i_nodes: an array of eight (_Py_HAMT_MAX_TREE_DEPTH) pointers to tree nodes
- i_level: the current level (an index into i_nodes)
- i_pos: an array of positions within the nodes in i_nodes.
*/
typedef struct {
PyHamtNode *i_nodes[_Py_HAMT_MAX_TREE_DEPTH];
Py_ssize_t i_pos[_Py_HAMT_MAX_TREE_DEPTH];
int8_t i_level;
} PyHamtIteratorState;
/* Base iterator object.
Contains the iteration state, a pointer to the HAMT tree,
and a pointer to the 'yield function'. The latter is a simple
function that returns a key/value tuple for the 'Items' iterator,
just a key for the 'Keys' iterator, and a value for the 'Values'
iterator.
*/
typedef struct {
PyObject_HEAD
PyHamtObject *hi_obj;
PyHamtIteratorState hi_iter;
binaryfunc hi_yield;
} PyHamtIterator;
PyAPI_DATA(PyTypeObject) _PyHamt_Type;
PyAPI_DATA(PyTypeObject) _PyHamt_ArrayNode_Type;
PyAPI_DATA(PyTypeObject) _PyHamt_BitmapNode_Type;
PyAPI_DATA(PyTypeObject) _PyHamt_CollisionNode_Type;
PyAPI_DATA(PyTypeObject) _PyHamtKeys_Type;
PyAPI_DATA(PyTypeObject) _PyHamtValues_Type;
PyAPI_DATA(PyTypeObject) _PyHamtItems_Type;
/* Create a new HAMT immutable mapping. */
PyHamtObject * _PyHamt_New(void);
/* Return a new collection based on "o", but with an additional
key/val pair. */
PyHamtObject * _PyHamt_Assoc(PyHamtObject *o, PyObject *key, PyObject *val);
/* Return a new collection based on "o", but without "key". */
PyHamtObject * _PyHamt_Without(PyHamtObject *o, PyObject *key);
/* Find "key" in the "o" collection.
Return:
- -1: An error occurred.
- 0: "key" wasn't found in "o".
- 1: "key" is in "o"; "*val" is set to its value (a borrowed ref).
*/
int _PyHamt_Find(PyHamtObject *o, PyObject *key, PyObject **val);
/* Check if "v" is equal to "w".
Return:
- 0: v != w
- 1: v == w
- -1: An error occurred.
*/
int _PyHamt_Eq(PyHamtObject *v, PyHamtObject *w);
/* Return the size of "o"; equivalent of "len(o)". */
Py_ssize_t _PyHamt_Len(PyHamtObject *o);
/* Return a Keys iterator over "o". */
PyObject * _PyHamt_NewIterKeys(PyHamtObject *o);
/* Return a Values iterator over "o". */
PyObject * _PyHamt_NewIterValues(PyHamtObject *o);
/* Return an Items iterator over "o". */
PyObject * _PyHamt_NewIterItems(PyHamtObject *o);
int _PyHamt_Init(void);
void _PyHamt_Fini(void);
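/* A minimal usage sketch (illustrative; the function name is hypothetical):
 * updates are functional, so each _PyHamt_Assoc() returns a new mapping and
 * leaves the source object unchanged. */
#if 0
static int
hamt_roundtrip_example(PyObject *key, PyObject *val)
{
    PyHamtObject *h, *h2;
    PyObject *found;
    int res;
    h = _PyHamt_New();
    if (h == NULL) {
        return -1;
    }
    h2 = _PyHamt_Assoc(h, key, val);       /* new mapping; h is unchanged */
    Py_DECREF(h);
    if (h2 == NULL) {
        return -1;
    }
    res = _PyHamt_Find(h2, key, &found);   /* 1: found, "found" is borrowed */
    Py_DECREF(h2);
    return res;
}
#endif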
#endif /* !Py_INTERNAL_HAMT_H */
/* pycore_warnings.h */
#ifndef Py_INTERNAL_WARNINGS_H
#define Py_INTERNAL_WARNINGS_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "object.h"
struct _warnings_runtime_state {
/* Both 'filters' and 'onceregistry' can be set in warnings.py;
get_warnings_attr() will reset these variables accordingly. */
PyObject *filters; /* List */
PyObject *once_registry; /* Dict */
PyObject *default_action; /* String */
long filters_version;
};
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_WARNINGS_H */
/* pycore_pylifecycle.h */
#ifndef Py_INTERNAL_LIFECYCLE_H
#define Py_INTERNAL_LIFECYCLE_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_initconfig.h" /* _PyArgv */
#include "pycore_pystate.h" /* _PyRuntimeState */
/* True if the main interpreter thread exited due to an unhandled
* KeyboardInterrupt exception, suggesting the user pressed ^C. */
PyAPI_DATA(int) _Py_UnhandledKeyboardInterrupt;
extern int _Py_SetFileSystemEncoding(
const char *encoding,
const char *errors);
extern void _Py_ClearFileSystemEncoding(void);
extern PyStatus _PyUnicode_InitEncodings(PyThreadState *tstate);
#ifdef MS_WINDOWS
extern int _PyUnicode_EnableLegacyWindowsFSEncoding(void);
#endif
PyAPI_FUNC(void) _Py_ClearStandardStreamEncoding(void);
PyAPI_FUNC(int) _Py_IsLocaleCoercionTarget(const char *ctype_loc);
/* Various one-time initializers */
extern PyStatus _PyUnicode_Init(void);
extern int _PyStructSequence_Init(void);
extern int _PyLong_Init(void);
extern PyStatus _PyFaulthandler_Init(int enable);
extern int _PyTraceMalloc_Init(int enable);
extern PyObject * _PyBuiltin_Init(void);
extern PyStatus _PySys_Create(
_PyRuntimeState *runtime,
PyInterpreterState *interp,
PyObject **sysmod_p);
extern PyStatus _PySys_SetPreliminaryStderr(PyObject *sysdict);
extern PyStatus _PySys_ReadPreinitWarnOptions(PyWideStringList *options);
extern PyStatus _PySys_ReadPreinitXOptions(PyConfig *config);
extern int _PySys_InitMain(
_PyRuntimeState *runtime,
PyInterpreterState *interp);
extern PyStatus _PyImport_Init(PyInterpreterState *interp);
extern PyStatus _PyExc_Init(void);
extern PyStatus _PyErr_Init(void);
extern PyStatus _PyBuiltins_AddExceptions(PyObject * bltinmod);
extern PyStatus _PyImportHooks_Init(void);
extern int _PyFloat_Init(void);
extern PyStatus _Py_HashRandomization_Init(const PyConfig *);
extern PyStatus _PyTypes_Init(void);
extern PyStatus _PyImportZip_Init(PyInterpreterState *interp);
/* Various internal finalizers */
extern void PyMethod_Fini(void);
extern void PyFrame_Fini(void);
extern void PyCFunction_Fini(void);
extern void PyDict_Fini(void);
extern void PyTuple_Fini(void);
extern void PyList_Fini(void);
extern void PySet_Fini(void);
extern void PyBytes_Fini(void);
extern void PyFloat_Fini(void);
extern int _PySignal_Init(int install_signal_handlers);
extern void PyOS_FiniInterrupts(void);
extern void PySlice_Fini(void);
extern void PyAsyncGen_Fini(void);
extern void _PyExc_Fini(void);
extern void _PyImport_Fini(void);
extern void _PyImport_Fini2(void);
extern void _PyGC_Fini(_PyRuntimeState *runtime);
extern void _PyType_Fini(void);
extern void _Py_HashRandomization_Fini(void);
extern void _PyUnicode_Fini(void);
extern void PyLong_Fini(void);
extern void _PyFaulthandler_Fini(void);
extern void _PyHash_Fini(void);
extern void _PyTraceMalloc_Fini(void);
extern void _PyWarnings_Fini(PyInterpreterState *interp);
extern void _PyGILState_Init(
_PyRuntimeState *runtime,
PyInterpreterState *interp,
PyThreadState *tstate);
extern void _PyGILState_Fini(_PyRuntimeState *runtime);
PyAPI_FUNC(void) _PyGC_DumpShutdownStats(_PyRuntimeState *runtime);
PyAPI_FUNC(PyStatus) _Py_PreInitializeFromPyArgv(
const PyPreConfig *src_config,
const _PyArgv *args);
PyAPI_FUNC(PyStatus) _Py_PreInitializeFromConfig(
const PyConfig *config,
const _PyArgv *args);
PyAPI_FUNC(int) _Py_HandleSystemExit(int *exitcode_p);
PyAPI_FUNC(PyObject*) _PyErr_WriteUnraisableDefaultHook(PyObject *unraisable);
PyAPI_FUNC(void) _PyErr_Print(PyThreadState *tstate);
PyAPI_FUNC(void) _PyErr_Display(PyObject *file, PyObject *exception,
PyObject *value, PyObject *tb);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_LIFECYCLE_H */
/* pycore_pystate.h */
#ifndef Py_INTERNAL_PYSTATE_H
#define Py_INTERNAL_PYSTATE_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "cpython/initconfig.h"
#include "fileobject.h"
#include "pystate.h"
#include "pythread.h"
#include "sysmodule.h"
#include "pycore_gil.h" /* _gil_runtime_state */
#include "pycore_pathconfig.h"
#include "pycore_pymem.h"
#include "pycore_warnings.h"
/* ceval state */
struct _pending_calls {
int finishing;
PyThread_type_lock lock;
/* Request for running pending calls. */
_Py_atomic_int calls_to_do;
/* Request for looking at the `async_exc` field of the current
thread state.
Guarded by the GIL. */
int async_exc;
#define NPENDINGCALLS 32
struct {
int (*func)(void *);
void *arg;
} calls[NPENDINGCALLS];
int first;
int last;
};
struct _ceval_runtime_state {
int recursion_limit;
/* Records whether tracing is on for any thread. Counts the number
of threads for which tstate->c_tracefunc is non-NULL, so if the
value is 0, we know we don't have to check this thread's
c_tracefunc. This speeds up the if statement in
PyEval_EvalFrameEx() after fast_next_opcode. */
int tracing_possible;
/* This single variable consolidates all requests to break out of
the fast path in the eval loop. */
_Py_atomic_int eval_breaker;
/* Request for dropping the GIL */
_Py_atomic_int gil_drop_request;
struct _pending_calls pending;
/* Request for checking signals. */
_Py_atomic_int signals_pending;
struct _gil_runtime_state gil;
};
/* interpreter state */
typedef PyObject* (*_PyFrameEvalFunction)(struct _frame *, int);
// The PyInterpreterState typedef is in Include/pystate.h.
struct _is {
struct _is *next;
struct _ts *tstate_head;
int64_t id;
int64_t id_refcount;
int requires_idref;
PyThread_type_lock id_mutex;
int finalizing;
PyObject *modules;
PyObject *modules_by_index;
PyObject *sysdict;
PyObject *builtins;
PyObject *importlib;
/* Used in Python/sysmodule.c. */
int check_interval;
/* Used in Modules/_threadmodule.c. */
long num_threads;
/* Support for runtime thread stack size tuning.
A value of 0 means using the platform's default stack size
or the size specified by the THREAD_STACK_SIZE macro. */
/* Used in Python/thread.c. */
size_t pythread_stacksize;
PyObject *codec_search_path;
PyObject *codec_search_cache;
PyObject *codec_error_registry;
int codecs_initialized;
/* fs_codec.encoding is initialized to NULL.
Later, it is set to a non-NULL string by _PyUnicode_InitEncodings(). */
struct {
char *encoding; /* Filesystem encoding (encoded to UTF-8) */
char *errors; /* Filesystem errors (encoded to UTF-8) */
_Py_error_handler error_handler;
} fs_codec;
PyConfig config;
#ifdef HAVE_DLOPEN
int dlopenflags;
#endif
PyObject *dict; /* Stores per-interpreter state */
PyObject *builtins_copy;
PyObject *import_func;
/* Initialized to PyEval_EvalFrameDefault(). */
_PyFrameEvalFunction eval_frame;
Py_ssize_t co_extra_user_count;
freefunc co_extra_freefuncs[MAX_CO_EXTRA_USERS];
#ifdef HAVE_FORK
PyObject *before_forkers;
PyObject *after_forkers_parent;
PyObject *after_forkers_child;
#endif
/* AtExit module */
void (*pyexitfunc)(PyObject *);
PyObject *pyexitmodule;
uint64_t tstate_next_unique_id;
struct _warnings_runtime_state warnings;
PyObject *audit_hooks;
int int_max_str_digits;
};
PyAPI_FUNC(struct _is*) _PyInterpreterState_LookUpID(PY_INT64_T);
PyAPI_FUNC(int) _PyInterpreterState_IDInitref(struct _is *);
PyAPI_FUNC(int) _PyInterpreterState_IDIncref(struct _is *);
PyAPI_FUNC(void) _PyInterpreterState_IDDecref(struct _is *);
/* cross-interpreter data registry */
/* For now we use a global registry of shareable classes. An
alternative would be to add a tp_* slot for a class's
crossinterpdatafunc. It would be simpler and more efficient. */
struct _xidregitem;
struct _xidregitem {
PyTypeObject *cls;
crossinterpdatafunc getdata;
struct _xidregitem *next;
};
/* runtime audit hook state */
typedef struct _Py_AuditHookEntry {
struct _Py_AuditHookEntry *next;
Py_AuditHookFunction hookCFunction;
void *userData;
} _Py_AuditHookEntry;
/* GIL state */
struct _gilstate_runtime_state {
int check_enabled;
/* Assuming the current thread holds the GIL, this is the
PyThreadState for the current thread. */
_Py_atomic_address tstate_current;
PyThreadFrameGetter getframe;
/* The single PyInterpreterState used by this process'
GILState implementation
*/
/* TODO: Given interp_main, it may be possible to kill this ref */
PyInterpreterState *autoInterpreterState;
Py_tss_t autoTSSkey;
};
/* hook for PyEval_GetFrame(), requested for Psyco */
#define _PyThreadState_GetFrame _PyRuntime.gilstate.getframe
/* Issue #26558: Flag to disable PyGILState_Check().
If set to non-zero, PyGILState_Check() always returns 1. */
#define _PyGILState_check_enabled _PyRuntime.gilstate.check_enabled
/* Full Python runtime state */
typedef struct pyruntimestate {
/* Is running Py_PreInitialize()? */
int preinitializing;
/* Is Python preinitialized? Set to 1 by Py_PreInitialize() */
int preinitialized;
/* Is Python core initialized? Set to 1 by _Py_InitializeCore() */
int core_initialized;
/* Is Python fully initialized? Set to 1 by Py_Initialize() */
int initialized;
/* Set by Py_FinalizeEx(). Only reset to NULL if Py_Initialize()
is called again. */
PyThreadState *finalizing;
struct pyinterpreters {
PyThread_type_lock mutex;
PyInterpreterState *head;
PyInterpreterState *main;
/* _next_interp_id is an auto-numbered sequence of small
integers. It gets initialized in _PyInterpreterState_Init(),
which is called in Py_Initialize(), and used in
PyInterpreterState_New(). A negative interpreter ID
indicates an error occurred. The main interpreter will
always have an ID of 0. Overflow results in a RuntimeError.
If that becomes a problem later then we can adjust, e.g. by
using a Python int. */
int64_t next_id;
} interpreters;
// XXX Remove this field once we have a tp_* slot.
struct _xidregistry {
PyThread_type_lock mutex;
struct _xidregitem *head;
} xidregistry;
unsigned long main_thread;
#define NEXITFUNCS 32
void (*exitfuncs[NEXITFUNCS])(void);
int nexitfuncs;
struct _gc_runtime_state gc;
struct _ceval_runtime_state ceval;
struct _gilstate_runtime_state gilstate;
PyPreConfig preconfig;
Py_OpenCodeHookFunction open_code_hook;
void *open_code_userdata;
_Py_AuditHookEntry *audit_hook_head;
// XXX Consolidate globals found via the check-c-globals script.
} _PyRuntimeState;
#define _PyRuntimeState_INIT \
{.preinitialized = 0, .core_initialized = 0, .initialized = 0}
/* Note: _PyRuntimeState_INIT sets other fields to 0/NULL */
PyAPI_DATA(_PyRuntimeState) _PyRuntime;
PyAPI_FUNC(PyStatus) _PyRuntimeState_Init(_PyRuntimeState *runtime);
PyAPI_FUNC(void) _PyRuntimeState_Fini(_PyRuntimeState *runtime);
PyAPI_FUNC(void) _PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime);
/* Initialize _PyRuntimeState.
Return _PyStatus_OK() on success, or an error status on failure. */
PyAPI_FUNC(PyStatus) _PyRuntime_Initialize(void);
PyAPI_FUNC(void) _PyRuntime_Finalize(void);
#define _Py_CURRENTLY_FINALIZING(runtime, tstate) \
(runtime->finalizing == tstate)
/* Variable and macro for in-line access to current thread
and interpreter state */
#define _PyRuntimeState_GetThreadState(runtime) \
((PyThreadState*)_Py_atomic_load_relaxed(&(runtime)->gilstate.tstate_current))
/* Get the current Python thread state.
Efficient macro that reads the 'gilstate.tstate_current' atomic variable
directly. The macro is unsafe: it does not check for errors and it can
return NULL.
The caller must hold the GIL.
See also PyThreadState_Get() and PyThreadState_GET(). */
#define _PyThreadState_GET() _PyRuntimeState_GetThreadState(&_PyRuntime)
/* Redefine PyThreadState_GET() as an alias to _PyThreadState_GET() */
#undef PyThreadState_GET
#define PyThreadState_GET() _PyThreadState_GET()
/* Get the current interpreter state.
The macro is unsafe: it does not check for errors and it can return NULL.
The caller must hold the GIL.
See also _PyInterpreterState_Get()
and _PyGILState_GetInterpreterStateUnsafe(). */
#define _PyInterpreterState_GET_UNSAFE() (_PyThreadState_GET()->interp)
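/* A minimal usage sketch: core functions typically fetch the thread state
 * once (the GIL must be held) and pass it down explicitly rather than
 * re-reading the atomic variable. */
#if 0
static void
use_tstate_example(void)
{
    PyThreadState *tstate = _PyThreadState_GET();   /* GIL must be held */
    PyInterpreterState *interp = tstate->interp;
    (void)interp;
}
#endif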
/* Other */
PyAPI_FUNC(void) _PyThreadState_Init(
_PyRuntimeState *runtime,
PyThreadState *tstate);
PyAPI_FUNC(void) _PyThreadState_DeleteExcept(
_PyRuntimeState *runtime,
PyThreadState *tstate);
PyAPI_FUNC(PyThreadState *) _PyThreadState_Swap(
struct _gilstate_runtime_state *gilstate,
PyThreadState *newts);
PyAPI_FUNC(PyStatus) _PyInterpreterState_Enable(_PyRuntimeState *runtime);
PyAPI_FUNC(void) _PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime);
PyAPI_FUNC(void) _PyGILState_Reinit(_PyRuntimeState *runtime);
PyAPI_FUNC(int) _PyOS_InterruptOccurred(PyThreadState *tstate);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PYSTATE_H */
/* pycore_gil.h */
#ifndef Py_INTERNAL_GIL_H
#define Py_INTERNAL_GIL_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_condvar.h"
#include "pycore_atomic.h"
#ifndef Py_HAVE_CONDVAR
# error You need either a POSIX-compatible or a Windows system!
#endif
/* Enable if you want to force the switching of threads at least
every `interval`. */
#undef FORCE_SWITCHING
#define FORCE_SWITCHING
struct _gil_runtime_state {
/* microseconds (the Python API uses seconds, though) */
unsigned long interval;
/* Last PyThreadState holding / having held the GIL. This helps us
know whether anyone else was scheduled after we dropped the GIL. */
_Py_atomic_address last_holder;
/* Whether the GIL is already taken (-1 if uninitialized). This is
atomic because it can be read without any lock taken in ceval.c. */
_Py_atomic_int locked;
/* Number of GIL switches since the beginning. */
unsigned long switch_number;
/* This condition variable allows one or several threads to wait
until the GIL is released. In addition, the mutex also protects
the above variables. */
PyCOND_T cond;
PyMUTEX_T mutex;
#ifdef FORCE_SWITCHING
/* This condition variable helps the GIL-releasing thread wait for
a GIL-awaiting thread to be scheduled and take the GIL. */
PyCOND_T switch_cond;
PyMUTEX_T switch_mutex;
#endif
};
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_GIL_H */
/* pycore_initconfig.h */
#ifndef Py_INTERNAL_CORECONFIG_H
#define Py_INTERNAL_CORECONFIG_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_pystate.h" /* _PyRuntimeState */
/* --- PyStatus ----------------------------------------------- */
/* Almost all errors causing Python initialization to fail */
#ifdef _MSC_VER
/* Visual Studio 2015 doesn't implement C99 __func__ in C */
# define _PyStatus_GET_FUNC() __FUNCTION__
#else
# define _PyStatus_GET_FUNC() __func__
#endif
#define _PyStatus_OK() \
(PyStatus){._type = _PyStatus_TYPE_OK,}
/* other fields are set to 0 */
#define _PyStatus_ERR(ERR_MSG) \
(PyStatus){ \
._type = _PyStatus_TYPE_ERROR, \
.func = _PyStatus_GET_FUNC(), \
.err_msg = (ERR_MSG)}
/* other fields are set to 0 */
#define _PyStatus_NO_MEMORY() _PyStatus_ERR("memory allocation failed")
#define _PyStatus_EXIT(EXITCODE) \
(PyStatus){ \
._type = _PyStatus_TYPE_EXIT, \
.exitcode = (EXITCODE)}
#define _PyStatus_IS_ERROR(err) \
(err._type == _PyStatus_TYPE_ERROR)
#define _PyStatus_IS_EXIT(err) \
(err._type == _PyStatus_TYPE_EXIT)
#define _PyStatus_EXCEPTION(err) \
(err._type != _PyStatus_TYPE_OK)
#define _PyStatus_UPDATE_FUNC(err) \
do { err.func = _PyStatus_GET_FUNC(); } while (0)
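/* A minimal sketch (illustrative; the names are hypothetical) of the
 * calling convention these macros support: initialization helpers return a
 * PyStatus and callers test it with _PyStatus_EXCEPTION(). */
#if 0
static PyStatus
init_subsystem_example(int ok)
{
    if (!ok) {
        return _PyStatus_ERR("subsystem initialization failed");
    }
    return _PyStatus_OK();
}

/* Caller side:
       PyStatus status = init_subsystem_example(1);
       if (_PyStatus_EXCEPTION(status)) {
           return status;   (propagate the error or exit request)
       }
*/
#endif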
/* --- PyWideStringList ------------------------------------------------ */
#define _PyWideStringList_INIT (PyWideStringList){.length = 0, .items = NULL}
#ifndef NDEBUG
PyAPI_FUNC(int) _PyWideStringList_CheckConsistency(const PyWideStringList *list);
#endif
PyAPI_FUNC(void) _PyWideStringList_Clear(PyWideStringList *list);
PyAPI_FUNC(int) _PyWideStringList_Copy(PyWideStringList *list,
const PyWideStringList *list2);
PyAPI_FUNC(PyStatus) _PyWideStringList_Extend(PyWideStringList *list,
const PyWideStringList *list2);
PyAPI_FUNC(PyObject*) _PyWideStringList_AsList(const PyWideStringList *list);
/* --- _PyArgv ---------------------------------------------------- */
typedef struct {
Py_ssize_t argc;
int use_bytes_argv;
char * const *bytes_argv;
wchar_t * const *wchar_argv;
} _PyArgv;
PyAPI_FUNC(PyStatus) _PyArgv_AsWstrList(const _PyArgv *args,
PyWideStringList *list);
/* --- Helper functions ------------------------------------------- */
PyAPI_FUNC(int) _Py_str_to_int(
const char *str,
int *result);
PyAPI_FUNC(const wchar_t*) _Py_get_xoption(
const PyWideStringList *xoptions,
const wchar_t *name);
PyAPI_FUNC(const char*) _Py_GetEnv(
int use_environment,
const char *name);
PyAPI_FUNC(void) _Py_get_env_flag(
int use_environment,
int *flag,
const char *name);
/* Py_GetArgcArgv() helper */
PyAPI_FUNC(void) _Py_ClearArgcArgv(void);
/* --- _PyPreCmdline ------------------------------------------------- */
typedef struct {
PyWideStringList argv;
PyWideStringList xoptions; /* "-X value" option */
int isolated; /* -I option */
int use_environment; /* -E option */
int dev_mode; /* -X dev and PYTHONDEVMODE */
} _PyPreCmdline;
#define _PyPreCmdline_INIT \
(_PyPreCmdline){ \
.use_environment = -1, \
.isolated = -1, \
.dev_mode = -1}
/* Note: _PyPreCmdline_INIT sets other fields to 0/NULL */
extern void _PyPreCmdline_Clear(_PyPreCmdline *cmdline);
extern PyStatus _PyPreCmdline_SetArgv(_PyPreCmdline *cmdline,
const _PyArgv *args);
extern PyStatus _PyPreCmdline_SetConfig(
const _PyPreCmdline *cmdline,
PyConfig *config);
extern PyStatus _PyPreCmdline_Read(_PyPreCmdline *cmdline,
const PyPreConfig *preconfig);
/* --- PyPreConfig ----------------------------------------------- */
PyAPI_FUNC(void) _PyPreConfig_InitCompatConfig(PyPreConfig *preconfig);
extern void _PyPreConfig_InitFromConfig(
PyPreConfig *preconfig,
const PyConfig *config);
extern PyStatus _PyPreConfig_InitFromPreConfig(
PyPreConfig *preconfig,
const PyPreConfig *config2);
extern PyObject* _PyPreConfig_AsDict(const PyPreConfig *preconfig);
extern void _PyPreConfig_GetConfig(PyPreConfig *preconfig,
const PyConfig *config);
extern PyStatus _PyPreConfig_Read(PyPreConfig *preconfig,
const _PyArgv *args);
extern PyStatus _PyPreConfig_Write(const PyPreConfig *preconfig);
/* --- PyConfig ---------------------------------------------- */
typedef enum {
/* Py_Initialize() API: backward compatibility with Python 3.6 and 3.7 */
_PyConfig_INIT_COMPAT = 1,
_PyConfig_INIT_PYTHON = 2,
_PyConfig_INIT_ISOLATED = 3
} _PyConfigInitEnum;
PyAPI_FUNC(void) _PyConfig_InitCompatConfig(PyConfig *config);
extern PyStatus _PyConfig_Copy(
PyConfig *config,
const PyConfig *config2);
extern PyStatus _PyConfig_InitPathConfig(PyConfig *config);
extern void _PyConfig_Write(const PyConfig *config,
_PyRuntimeState *runtime);
extern PyStatus _PyConfig_SetPyArgv(
PyConfig *config,
const _PyArgv *args);
extern int _Py_global_config_int_max_str_digits;
/* --- Function used for testing ---------------------------------- */
PyAPI_FUNC(PyObject*) _Py_GetConfigsAsDict(void);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_CORECONFIG_H */
/* pycore_code.h */
#ifndef Py_INTERNAL_CODE_H
#define Py_INTERNAL_CODE_H
#ifdef __cplusplus
extern "C" {
#endif
typedef struct {
PyObject *ptr; /* Cached pointer (borrowed reference) */
uint64_t globals_ver; /* ma_version of global dict */
uint64_t builtins_ver; /* ma_version of builtin dict */
} _PyOpcache_LoadGlobal;
struct _PyOpcache {
union {
_PyOpcache_LoadGlobal lg;
} u;
char optimized;
};
/* Private API */
int _PyCode_InitOpcache(PyCodeObject *co);
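/* A minimal sketch (illustrative; the function and version parameters are
 * hypothetical) of how an eval loop can consult this cache for LOAD_GLOBAL:
 * the cached pointer is only trusted while both dict version tags still
 * match the versions recorded when the cache was filled. */
#if 0
static PyObject *
load_global_cached_example(struct _PyOpcache *co_opcache,
                           uint64_t globals_dict_version,
                           uint64_t builtins_dict_version)
{
    if (co_opcache->optimized &&
        co_opcache->u.lg.globals_ver == globals_dict_version &&
        co_opcache->u.lg.builtins_ver == builtins_dict_version)
    {
        return co_opcache->u.lg.ptr;   /* borrowed reference */
    }
    return NULL;   /* cache miss: fall back to a real dict lookup */
}
#endif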
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_CODE_H */
/* pycore_context.h */
#ifndef Py_INTERNAL_CONTEXT_H
#define Py_INTERNAL_CONTEXT_H
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_hamt.h"
struct _pycontextobject {
PyObject_HEAD
PyContext *ctx_prev;
PyHamtObject *ctx_vars;
PyObject *ctx_weakreflist;
int ctx_entered;
};
struct _pycontextvarobject {
PyObject_HEAD
PyObject *var_name;
PyObject *var_default;
PyObject *var_cached;
uint64_t var_cached_tsid;
uint64_t var_cached_tsver;
Py_hash_t var_hash;
};
struct _pycontexttokenobject {
PyObject_HEAD
PyContext *tok_ctx;
PyContextVar *tok_var;
PyObject *tok_oldval;
int tok_used;
};
int _PyContext_Init(void);
void _PyContext_Fini(void);
#endif /* !Py_INTERNAL_CONTEXT_H */
/* pycore_pymem.h */
#ifndef Py_INTERNAL_PYMEM_H
#define Py_INTERNAL_PYMEM_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "objimpl.h"
#include "pymem.h"
/* GC runtime state */
/* If we change this, we need to change the default value in the
signature of gc.collect. */
#define NUM_GENERATIONS 3
/*
NOTE: about the counting of long-lived objects.
To limit the cost of garbage collection, there are two strategies:
- make each collection faster, e.g. by scanning fewer objects
- do fewer collections
This heuristic is about the latter strategy.
In addition to the various configurable thresholds, we only trigger a
full collection if the ratio
long_lived_pending / long_lived_total
is above a given value (hardwired to 25%).
The reason is that, while "non-full" collections (i.e., collections of
the young and middle generations) will always examine roughly the same
number of objects -- determined by the aforementioned thresholds --,
the cost of a full collection is proportional to the total number of
long-lived objects, which is virtually unbounded.
Indeed, it has been remarked that doing a full collection every
<constant number> of object creations entails a dramatic performance
degradation in workloads which consist of creating and storing lots of
long-lived objects (e.g. building a large list of GC-tracked objects would
show quadratic performance, instead of linear as expected: see issue #4074).
Using the above ratio, instead, yields amortized linear performance in
the total number of objects (the effect of which can be summarized
thusly: "each full garbage collection is more and more costly as the
number of objects grows, but we do fewer and fewer of them").
This heuristic was suggested by Martin von Löwis on python-dev in
June 2008. His original analysis and proposal can be found at:
http://mail.python.org/pipermail/python-dev/2008-June/080579.html
*/
/*
NOTE: about untracking of mutable objects.
Certain types of container cannot participate in a reference cycle, and
so do not need to be tracked by the garbage collector. Untracking these
objects reduces the cost of garbage collections. However, determining
which objects may be untracked is not free, and the costs must be
weighed against the benefits for garbage collection.
There are two possible strategies for when to untrack a container:
i) When the container is created.
ii) When the container is examined by the garbage collector.
Tuples containing only immutable objects (integers, strings, etc., and
recursively, tuples of immutable objects) do not need to be tracked.
The interpreter creates a large number of tuples, many of which will
not survive until garbage collection. It is therefore not worthwhile
to untrack eligible tuples at creation time.
Instead, all tuples except the empty tuple are tracked when created.
During garbage collection it is determined whether any surviving tuples
can be untracked. A tuple can be untracked if all of its contents are
already not tracked. Tuples are examined for untracking in all garbage
collection cycles. It may take more than one cycle to untrack a tuple.
Dictionaries containing only immutable objects also do not need to be
tracked. Dictionaries are untracked when created. If a tracked item is
inserted into a dictionary (either as a key or value), the dictionary
becomes tracked. During a full garbage collection (all generations),
the collector will untrack any dictionaries whose contents are not
tracked.
The gc module provides the Python function is_tracked(obj), which returns
the CURRENT tracking status of the object. Subsequent garbage
collections may change the tracking status of the object.
Untracking of certain containers was introduced in issue #4688, and
the algorithm was refined in response to issue #14775.
*/
struct gc_generation {
PyGC_Head head;
int threshold; /* collection threshold */
int count; /* count of allocations or collections of younger
generations */
};
/* Running stats per generation */
struct gc_generation_stats {
/* total number of collections */
Py_ssize_t collections;
/* total number of collected objects */
Py_ssize_t collected;
/* total number of uncollectable objects (put into gc.garbage) */
Py_ssize_t uncollectable;
};
struct _gc_runtime_state {
/* List of objects that still need to be cleaned up, singly linked
* via their gc headers' gc_prev pointers. */
PyObject *trash_delete_later;
/* Current call-stack depth of tp_dealloc calls. */
int trash_delete_nesting;
int enabled;
int debug;
/* linked lists of container objects */
struct gc_generation generations[NUM_GENERATIONS];
PyGC_Head *generation0;
/* a permanent generation which won't be collected */
struct gc_generation permanent_generation;
struct gc_generation_stats generation_stats[NUM_GENERATIONS];
/* true if we are currently running the collector */
int collecting;
/* list of uncollectable objects */
PyObject *garbage;
/* a list of callbacks to be invoked when collection is performed */
PyObject *callbacks;
/* This is the number of objects that survived the last full
collection. It approximates the number of long lived objects
tracked by the GC.
(by "full collection", we mean a collection of the oldest
generation). */
Py_ssize_t long_lived_total;
/* This is the number of objects that survived all "non-full"
collections, and are waiting to undergo a full collection for
the first time. */
Py_ssize_t long_lived_pending;
};
PyAPI_FUNC(void) _PyGC_Initialize(struct _gc_runtime_state *);
/* Set the memory allocator of the specified domain to the default.
Save the old allocator into *old_alloc if it's non-NULL.
Return 0 on success, or -1 if the domain is unknown. */
PyAPI_FUNC(int) _PyMem_SetDefaultAllocator(
PyMemAllocatorDomain domain,
PyMemAllocatorEx *old_alloc);
/* Special bytes broadcast into debug memory blocks at appropriate times.
Strings of these are unlikely to be valid addresses, floats, ints or
7-bit ASCII.
- PYMEM_CLEANBYTE: clean (newly allocated) memory
- PYMEM_DEADBYTE: dead (newly freed) memory
- PYMEM_FORBIDDENBYTE: untouchable bytes at each end of a block
Byte patterns 0xCB, 0xDB and 0xFB have been replaced with 0xCD, 0xDD and
0xFD to use the same values as the Windows CRT debug malloc() and free().
If modified, _PyMem_IsPtrFreed() should be updated as well. */
#define PYMEM_CLEANBYTE 0xCD
#define PYMEM_DEADBYTE 0xDD
#define PYMEM_FORBIDDENBYTE 0xFD
/* Heuristic that checks whether a pointer value is newly allocated
(uninitialized), newly freed, or NULL (equal to zero).
The pointer is not dereferenced; only the pointer value is checked.
The heuristic relies on the debug hooks of the Python memory allocators,
which fill newly allocated memory with CLEANBYTE (0xCD) and newly freed
memory with DEADBYTE (0xDD). It also detects "untouchable bytes" marked
with FORBIDDENBYTE (0xFD). */
static inline int _PyMem_IsPtrFreed(void *ptr)
{
uintptr_t value = (uintptr_t)ptr;
#if SIZEOF_VOID_P == 8
return (value == 0
|| value == (uintptr_t)0xCDCDCDCDCDCDCDCD
|| value == (uintptr_t)0xDDDDDDDDDDDDDDDD
|| value == (uintptr_t)0xFDFDFDFDFDFDFDFD);
#elif SIZEOF_VOID_P == 4
return (value == 0
|| value == (uintptr_t)0xCDCDCDCD
|| value == (uintptr_t)0xDDDDDDDD
|| value == (uintptr_t)0xFDFDFDFD);
#else
# error "unknown pointer size"
#endif
}
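/* A minimal usage sketch: with the debug allocator hooks enabled, core code
 * can assert that a pointer it received does not look like freed or
 * uninitialized memory. */
#if 0
static void
validate_pointer_example(PyObject *op)
{
    assert(!_PyMem_IsPtrFreed(op));   /* relies on the debug byte patterns */
}
#endif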
PyAPI_FUNC(int) _PyMem_GetAllocatorName(
const char *name,
PyMemAllocatorName *allocator);
/* Configure the Python memory allocators.
Pass PYMEM_ALLOCATOR_DEFAULT to use default allocators.
PYMEM_ALLOCATOR_NOT_SET does nothing. */
PyAPI_FUNC(int) _PyMem_SetupAllocators(PyMemAllocatorName allocator);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PYMEM_H */
/* pycore_pyhash.h */
#ifndef Py_INTERNAL_HASH_H
#define Py_INTERNAL_HASH_H
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
uint64_t _Py_KeyedHash(uint64_t, const char *, Py_ssize_t);
#endif
/* pycore_traceback.h */
#ifndef Py_INTERNAL_TRACEBACK_H
#define Py_INTERNAL_TRACEBACK_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pystate.h" /* PyInterpreterState */
/* Write the Python traceback into the file 'fd'. For example:
Traceback (most recent call first):
File "xxx", line xxx in <xxx>
File "xxx", line xxx in <xxx>
...
File "xxx", line xxx in <xxx>
This function is written for debugging purposes only, to dump the traceback
in the worst case: after a segmentation fault, at a fatal error, etc. That's
why it is very limited. Strings are truncated to 100 characters and encoded
to ASCII with backslashreplace. It doesn't write the source code, only the
function name, filename and line number of each frame. It writes only the
first 100 frames; if the traceback is truncated, it writes the line " ...".
This function is signal safe. */
PyAPI_FUNC(void) _Py_DumpTraceback(
int fd,
PyThreadState *tstate);
/* Write the traceback of all threads into the file 'fd'. current_tstate can
be NULL.
Return NULL on success, or an error message on error.
This function is written for debugging purposes only. It calls
_Py_DumpTraceback() for each thread, and so has the same limitations. It
only writes the tracebacks of the first 100 threads: "..." is written if
there are more threads.
If current_tstate is NULL, the function tries to get the Python thread state
of the current thread. It is not an error if the function is unable to get
the current Python thread state.
If interp is NULL, the function tries to get the interpreter state from
the current Python thread state, or from
_PyGILState_GetInterpreterStateUnsafe() as a last resort.
It is better to pass NULL for interp and current_tstate; the function tries
different options to retrieve this information.
This function is signal safe. */
PyAPI_FUNC(const char*) _Py_DumpTracebackThreads(
int fd,
PyInterpreterState *interp,
PyThreadState *current_tstate);
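/* A minimal sketch (illustrative; the function name is hypothetical) of
 * what a crash handler can do with these two calls: dump the current
 * thread, then all the others, directly to stderr (fd 2). */
#if 0
static void
dump_all_example(PyThreadState *tstate)
{
    const char *errmsg;
    _Py_DumpTraceback(2, tstate);
    errmsg = _Py_DumpTracebackThreads(2, NULL, tstate);
    if (errmsg != NULL) {
        /* report errmsg with write(); printf() is not signal safe */
    }
}
#endif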
/* Write a Unicode object into the file descriptor fd. Encode the string to
ASCII using the backslashreplace error handler.
Do nothing if text is not a Unicode object. The function accepts Unicode
strings which are not ready (PyUnicode_WCHAR_KIND).
This function is signal safe. */
PyAPI_FUNC(void) _Py_DumpASCII(int fd, PyObject *text);
/* Format an integer as decimal into the file descriptor fd.
This function is signal safe. */
PyAPI_FUNC(void) _Py_DumpDecimal(
int fd,
unsigned long value);
/* Format an integer as hexadecimal into the file descriptor fd with at least
width digits.
The maximum width is sizeof(unsigned long)*2 digits.
This function is signal safe. */
PyAPI_FUNC(void) _Py_DumpHexadecimal(
int fd,
unsigned long value,
Py_ssize_t width);
PyAPI_FUNC(PyObject*) _PyTraceBack_FromFrame(
PyObject *tb_next,
struct _frame *frame);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_TRACEBACK_H */
/* pycore_pyerrors.h */
#ifndef Py_INTERNAL_PYERRORS_H
#define Py_INTERNAL_PYERRORS_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
static inline PyObject* _PyErr_Occurred(PyThreadState *tstate)
{
return tstate == NULL ? NULL : tstate->curexc_type;
}
PyAPI_FUNC(void) _PyErr_Fetch(
PyThreadState *tstate,
PyObject **type,
PyObject **value,
PyObject **traceback);
PyAPI_FUNC(int) _PyErr_ExceptionMatches(
PyThreadState *tstate,
PyObject *exc);
PyAPI_FUNC(void) _PyErr_Restore(
PyThreadState *tstate,
PyObject *type,
PyObject *value,
PyObject *traceback);
PyAPI_FUNC(void) _PyErr_SetObject(
PyThreadState *tstate,
PyObject *type,
PyObject *value);
PyAPI_FUNC(void) _PyErr_Clear(PyThreadState *tstate);
PyAPI_FUNC(void) _PyErr_SetNone(PyThreadState *tstate, PyObject *exception);
PyAPI_FUNC(void) _PyErr_SetString(
PyThreadState *tstate,
PyObject *exception,
const char *string);
PyAPI_FUNC(PyObject *) _PyErr_Format(
PyThreadState *tstate,
PyObject *exception,
const char *format,
...);
PyAPI_FUNC(void) _PyErr_NormalizeException(
PyThreadState *tstate,
PyObject **exc,
PyObject **val,
PyObject **tb);
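/* A minimal usage sketch (illustrative; the function name is hypothetical):
 * saving and restoring the current exception around a call that must not
 * clobber it. _PyErr_Fetch() clears the error indicator and hands its
 * references to the caller; _PyErr_Restore() steals them back. */
#if 0
static void
preserve_error_example(PyThreadState *tstate, void (*fn)(void))
{
    PyObject *type, *value, *traceback;
    _PyErr_Fetch(tstate, &type, &value, &traceback);
    fn();   /* may set and clear its own exceptions */
    _PyErr_Restore(tstate, type, value, traceback);
}
#endif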
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PYERRORS_H */
/* pycore_atomic.h */
#ifndef Py_ATOMIC_H
#define Py_ATOMIC_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "dynamic_annotations.h"
#include "pyconfig.h"
#if defined(HAVE_STD_ATOMIC)
#include <stdatomic.h>
#endif
#if defined(_MSC_VER)
#include <intrin.h>
#if defined(_M_IX86) || defined(_M_X64)
# include <immintrin.h>
#endif
#endif
/* This is modeled after the atomics interface from C1x, according to
* the draft at
* http://www.open-std.org/JTC1/SC22/wg14/www/docs/n1425.pdf.
* Operations and types are named the same except with a _Py_ prefix
* and have the same semantics.
*
* Beware, the implementations here are deep magic.
*/
#if defined(HAVE_STD_ATOMIC)
typedef enum _Py_memory_order {
_Py_memory_order_relaxed = memory_order_relaxed,
_Py_memory_order_acquire = memory_order_acquire,
_Py_memory_order_release = memory_order_release,
_Py_memory_order_acq_rel = memory_order_acq_rel,
_Py_memory_order_seq_cst = memory_order_seq_cst
} _Py_memory_order;
typedef struct _Py_atomic_address {
atomic_uintptr_t _value;
} _Py_atomic_address;
typedef struct _Py_atomic_int {
atomic_int _value;
} _Py_atomic_int;
#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) \
atomic_signal_fence(ORDER)
#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) \
atomic_thread_fence(ORDER)
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
atomic_store_explicit(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER)
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
atomic_load_explicit(&((ATOMIC_VAL)->_value), ORDER)
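/* A minimal usage sketch (illustrative; the names are hypothetical) of a
 * relaxed store/load pair on a _Py_atomic_int, mirroring how the eval loop
 * polls flags such as eval_breaker. */
#if 0
static _Py_atomic_int flag_example;

static void
set_flag_example(void)
{
    _Py_atomic_store_explicit(&flag_example, 1, _Py_memory_order_relaxed);
}

static int
read_flag_example(void)
{
    return _Py_atomic_load_explicit(&flag_example, _Py_memory_order_relaxed);
}
#endif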
/* Use builtin atomic operations in GCC >= 4.7 */
#elif defined(HAVE_BUILTIN_ATOMIC)
typedef enum _Py_memory_order {
_Py_memory_order_relaxed = __ATOMIC_RELAXED,
_Py_memory_order_acquire = __ATOMIC_ACQUIRE,
_Py_memory_order_release = __ATOMIC_RELEASE,
_Py_memory_order_acq_rel = __ATOMIC_ACQ_REL,
_Py_memory_order_seq_cst = __ATOMIC_SEQ_CST
} _Py_memory_order;
typedef struct _Py_atomic_address {
uintptr_t _value;
} _Py_atomic_address;
typedef struct _Py_atomic_int {
int _value;
} _Py_atomic_int;
#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) \
__atomic_signal_fence(ORDER)
#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) \
__atomic_thread_fence(ORDER)
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
(assert((ORDER) == __ATOMIC_RELAXED \
|| (ORDER) == __ATOMIC_SEQ_CST \
|| (ORDER) == __ATOMIC_RELEASE), \
__atomic_store_n(&((ATOMIC_VAL)->_value), NEW_VAL, ORDER))
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
(assert((ORDER) == __ATOMIC_RELAXED \
|| (ORDER) == __ATOMIC_SEQ_CST \
|| (ORDER) == __ATOMIC_ACQUIRE \
|| (ORDER) == __ATOMIC_CONSUME), \
__atomic_load_n(&((ATOMIC_VAL)->_value), ORDER))
/* Only support GCC (for expression statements) and x86 (for simple
* atomic semantics) and MSVC x86/x64/ARM */
#elif defined(__GNUC__) && (defined(__i386__) || defined(__amd64))
typedef enum _Py_memory_order {
_Py_memory_order_relaxed,
_Py_memory_order_acquire,
_Py_memory_order_release,
_Py_memory_order_acq_rel,
_Py_memory_order_seq_cst
} _Py_memory_order;
typedef struct _Py_atomic_address {
uintptr_t _value;
} _Py_atomic_address;
typedef struct _Py_atomic_int {
int _value;
} _Py_atomic_int;
static __inline__ void
_Py_atomic_signal_fence(_Py_memory_order order)
{
if (order != _Py_memory_order_relaxed)
__asm__ volatile("":::"memory");
}
static __inline__ void
_Py_atomic_thread_fence(_Py_memory_order order)
{
if (order != _Py_memory_order_relaxed)
__asm__ volatile("mfence":::"memory");
}
/* Tell the race checker about this operation's effects. */
static __inline__ void
_Py_ANNOTATE_MEMORY_ORDER(const volatile void *address, _Py_memory_order order)
{
(void)address; /* shut up -Wunused-parameter */
switch(order) {
case _Py_memory_order_release:
case _Py_memory_order_acq_rel:
case _Py_memory_order_seq_cst:
_Py_ANNOTATE_HAPPENS_BEFORE(address);
break;
case _Py_memory_order_relaxed:
case _Py_memory_order_acquire:
break;
}
switch(order) {
case _Py_memory_order_acquire:
case _Py_memory_order_acq_rel:
case _Py_memory_order_seq_cst:
_Py_ANNOTATE_HAPPENS_AFTER(address);
break;
case _Py_memory_order_relaxed:
case _Py_memory_order_release:
break;
}
}
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
__extension__ ({ \
__typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \
__typeof__(atomic_val->_value) new_val = NEW_VAL;\
volatile __typeof__(new_val) *volatile_data = &atomic_val->_value; \
_Py_memory_order order = ORDER; \
_Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \
\
/* Perform the operation. */ \
_Py_ANNOTATE_IGNORE_WRITES_BEGIN(); \
switch(order) { \
case _Py_memory_order_release: \
_Py_atomic_signal_fence(_Py_memory_order_release); \
/* fallthrough */ \
case _Py_memory_order_relaxed: \
*volatile_data = new_val; \
break; \
\
case _Py_memory_order_acquire: \
case _Py_memory_order_acq_rel: \
case _Py_memory_order_seq_cst: \
__asm__ volatile("xchg %0, %1" \
: "+r"(new_val) \
: "m"(atomic_val->_value) \
: "memory"); \
break; \
} \
_Py_ANNOTATE_IGNORE_WRITES_END(); \
})
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
__extension__ ({ \
__typeof__(ATOMIC_VAL) atomic_val = ATOMIC_VAL; \
__typeof__(atomic_val->_value) result; \
volatile __typeof__(result) *volatile_data = &atomic_val->_value; \
_Py_memory_order order = ORDER; \
_Py_ANNOTATE_MEMORY_ORDER(atomic_val, order); \
\
/* Perform the operation. */ \
_Py_ANNOTATE_IGNORE_READS_BEGIN(); \
switch(order) { \
case _Py_memory_order_release: \
case _Py_memory_order_acq_rel: \
case _Py_memory_order_seq_cst: \
/* Loads on x86 are not releases by default, so need a */ \
/* thread fence. */ \
_Py_atomic_thread_fence(_Py_memory_order_release); \
break; \
default: \
/* No fence */ \
break; \
} \
result = *volatile_data; \
switch(order) { \
case _Py_memory_order_acquire: \
case _Py_memory_order_acq_rel: \
case _Py_memory_order_seq_cst: \
/* Loads on x86 are automatically acquire operations so */ \
/* can get by with just a compiler fence. */ \
_Py_atomic_signal_fence(_Py_memory_order_acquire); \
break; \
default: \
/* No fence */ \
break; \
} \
_Py_ANNOTATE_IGNORE_READS_END(); \
result; \
})
#elif defined(_MSC_VER)
/* _Interlocked* functions provide a full memory barrier and are therefore
enough for acq_rel and seq_cst. If the HLE variants aren't available
in hardware they will fall back to a full memory barrier as well.
This might affect performance, but likely only in some very specific and
hard-to-measure scenarios.
*/
#if defined(_M_IX86) || defined(_M_X64)
typedef enum _Py_memory_order {
_Py_memory_order_relaxed,
_Py_memory_order_acquire,
_Py_memory_order_release,
_Py_memory_order_acq_rel,
_Py_memory_order_seq_cst
} _Py_memory_order;
typedef struct _Py_atomic_address {
volatile uintptr_t _value;
} _Py_atomic_address;
typedef struct _Py_atomic_int {
volatile int _value;
} _Py_atomic_int;
#if defined(_M_X64)
#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \
switch (ORDER) { \
case _Py_memory_order_acquire: \
_InterlockedExchange64_HLEAcquire((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)(NEW_VAL)); \
break; \
case _Py_memory_order_release: \
_InterlockedExchange64_HLERelease((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)(NEW_VAL)); \
break; \
default: \
_InterlockedExchange64((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)(NEW_VAL)); \
break; \
}
#else
#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) ((void)0);
#endif
#define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \
switch (ORDER) { \
case _Py_memory_order_acquire: \
_InterlockedExchange_HLEAcquire((volatile long*)&((ATOMIC_VAL)->_value), (int)(NEW_VAL)); \
break; \
case _Py_memory_order_release: \
_InterlockedExchange_HLERelease((volatile long*)&((ATOMIC_VAL)->_value), (int)(NEW_VAL)); \
break; \
default: \
_InterlockedExchange((volatile long*)&((ATOMIC_VAL)->_value), (int)(NEW_VAL)); \
break; \
}
#if defined(_M_X64)
/* This has to be an intptr_t for now.
gil_created() uses -1 as a sentinel value; if this returned
a uintptr_t, it would do an unsigned compare and crash.
*/
inline intptr_t _Py_atomic_load_64bit_impl(volatile uintptr_t* value, int order) {
__int64 old;
switch (order) {
case _Py_memory_order_acquire:
{
do {
old = *value;
} while(_InterlockedCompareExchange64_HLEAcquire((volatile __int64*)value, old, old) != old);
break;
}
case _Py_memory_order_release:
{
do {
old = *value;
} while(_InterlockedCompareExchange64_HLERelease((volatile __int64*)value, old, old) != old);
break;
}
case _Py_memory_order_relaxed:
old = *value;
break;
default:
{
do {
old = *value;
} while(_InterlockedCompareExchange64((volatile __int64*)value, old, old) != old);
break;
}
}
return old;
}
#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) \
_Py_atomic_load_64bit_impl((volatile uintptr_t*)&((ATOMIC_VAL)->_value), (ORDER))
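#if 0  /* Illustrative sketch (not compiled): the signed intptr_t return
          lets callers test for the -1 sentinel; "_example_lock" is a
          hypothetical variable. */
static _Py_atomic_address _example_lock = { (uintptr_t)-1 };

static int
_example_created(void)
{
    /* With a uintptr_t return, this ">= 0" would always be true. */
    return _Py_atomic_load_64bit(&_example_lock,
                                 _Py_memory_order_acquire) >= 0;
}
#endif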
#else
#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) ((ATOMIC_VAL)->_value)
#endif
inline int _Py_atomic_load_32bit_impl(volatile int* value, int order) {
long old;
switch (order) {
case _Py_memory_order_acquire:
{
do {
old = *value;
} while(_InterlockedCompareExchange_HLEAcquire((volatile long*)value, old, old) != old);
break;
}
case _Py_memory_order_release:
{
do {
old = *value;
} while(_InterlockedCompareExchange_HLERelease((volatile long*)value, old, old) != old);
break;
}
case _Py_memory_order_relaxed:
old = *value;
break;
default:
{
do {
old = *value;
} while(_InterlockedCompareExchange((volatile long*)value, old, old) != old);
break;
}
}
return old;
}
#define _Py_atomic_load_32bit(ATOMIC_VAL, ORDER) \
_Py_atomic_load_32bit_impl((volatile int*)&((ATOMIC_VAL)->_value), (ORDER))
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
if (sizeof((ATOMIC_VAL)->_value) == 8) { \
_Py_atomic_store_64bit((ATOMIC_VAL), (NEW_VAL), (ORDER)) } else { \
_Py_atomic_store_32bit((ATOMIC_VAL), (NEW_VAL), (ORDER)) }
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
( \
sizeof((ATOMIC_VAL)->_value) == 8 ? \
_Py_atomic_load_64bit((ATOMIC_VAL), (ORDER)) : \
_Py_atomic_load_32bit((ATOMIC_VAL), (ORDER)) \
)
#elif defined(_M_ARM) || defined(_M_ARM64)
typedef enum _Py_memory_order {
_Py_memory_order_relaxed,
_Py_memory_order_acquire,
_Py_memory_order_release,
_Py_memory_order_acq_rel,
_Py_memory_order_seq_cst
} _Py_memory_order;
typedef struct _Py_atomic_address {
volatile uintptr_t _value;
} _Py_atomic_address;
typedef struct _Py_atomic_int {
volatile int _value;
} _Py_atomic_int;
#if defined(_M_ARM64)
#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) \
switch (ORDER) { \
case _Py_memory_order_acquire: \
_InterlockedExchange64_acq((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
break; \
case _Py_memory_order_release: \
_InterlockedExchange64_rel((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
break; \
default: \
_InterlockedExchange64((__int64 volatile*)&((ATOMIC_VAL)->_value), (__int64)NEW_VAL); \
break; \
}
#else
#define _Py_atomic_store_64bit(ATOMIC_VAL, NEW_VAL, ORDER) ((void)0);
#endif
#define _Py_atomic_store_32bit(ATOMIC_VAL, NEW_VAL, ORDER) \
switch (ORDER) { \
case _Py_memory_order_acquire: \
_InterlockedExchange_acq((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
break; \
case _Py_memory_order_release: \
_InterlockedExchange_rel((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
break; \
default: \
_InterlockedExchange((volatile long*)&((ATOMIC_VAL)->_value), (int)NEW_VAL); \
break; \
}
#if defined(_M_ARM64)
/* This has to be an intptr_t for now.
gil_created() uses -1 as a sentinel value; if this returned
a uintptr_t, it would do an unsigned compare and crash.
*/
inline intptr_t _Py_atomic_load_64bit_impl(volatile uintptr_t* value, int order) {
uintptr_t old;
switch (order) {
case _Py_memory_order_acquire:
{
do {
old = *value;
} while(_InterlockedCompareExchange64_acq(value, old, old) != old);
break;
}
case _Py_memory_order_release:
{
do {
old = *value;
} while(_InterlockedCompareExchange64_rel(value, old, old) != old);
break;
}
case _Py_memory_order_relaxed:
old = *value;
break;
default:
{
do {
old = *value;
} while(_InterlockedCompareExchange64(value, old, old) != old);
break;
}
}
return old;
}
#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) \
_Py_atomic_load_64bit_impl((volatile uintptr_t*)&((ATOMIC_VAL)->_value), (ORDER))
#else
#define _Py_atomic_load_64bit(ATOMIC_VAL, ORDER) ((ATOMIC_VAL)->_value)
#endif
inline int _Py_atomic_load_32bit_impl(volatile int* value, int order) {
int old;
switch (order) {
case _Py_memory_order_acquire:
{
do {
old = *value;
} while(_InterlockedCompareExchange_acq(value, old, old) != old);
break;
}
case _Py_memory_order_release:
{
do {
old = *value;
} while(_InterlockedCompareExchange_rel(value, old, old) != old);
break;
}
case _Py_memory_order_relaxed:
old = *value;
break;
default:
{
do {
old = *value;
} while(_InterlockedCompareExchange(value, old, old) != old);
break;
}
}
return old;
}
#define _Py_atomic_load_32bit(ATOMIC_VAL, ORDER) \
_Py_atomic_load_32bit_impl((volatile int*)&((ATOMIC_VAL)->_value), (ORDER))
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
if (sizeof((ATOMIC_VAL)->_value) == 8) { \
_Py_atomic_store_64bit((ATOMIC_VAL), (NEW_VAL), (ORDER)) } else { \
_Py_atomic_store_32bit((ATOMIC_VAL), (NEW_VAL), (ORDER)) }
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
( \
sizeof((ATOMIC_VAL)->_value) == 8 ? \
_Py_atomic_load_64bit((ATOMIC_VAL), (ORDER)) : \
_Py_atomic_load_32bit((ATOMIC_VAL), (ORDER)) \
)
#endif
#else /* !gcc x86 && !_MSC_VER */
typedef enum _Py_memory_order {
_Py_memory_order_relaxed,
_Py_memory_order_acquire,
_Py_memory_order_release,
_Py_memory_order_acq_rel,
_Py_memory_order_seq_cst
} _Py_memory_order;
typedef struct _Py_atomic_address {
uintptr_t _value;
} _Py_atomic_address;
typedef struct _Py_atomic_int {
int _value;
} _Py_atomic_int;
/* Fall back for other compilers and processors by assuming that simple
volatile accesses are atomic. This assumption does not hold in general,
so this section should be ported to each new platform. */
#define _Py_atomic_signal_fence(/*memory_order*/ ORDER) ((void)0)
#define _Py_atomic_thread_fence(/*memory_order*/ ORDER) ((void)0)
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
((ATOMIC_VAL)->_value = NEW_VAL)
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
((ATOMIC_VAL)->_value)
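/* A minimal sketch of one possible port, using C11 <stdatomic.h>
   (assumption: the compiler provides C11 atomics; illustrative only,
   not part of this header). The typedefs above would hold _Atomic
   values, and a conservative first step maps every _Py_memory_order
   onto memory_order_seq_cst: */
#if 0
#include <stdatomic.h>
typedef struct _Py_atomic_address {
    _Atomic uintptr_t _value;
} _Py_atomic_address;
typedef struct _Py_atomic_int {
    _Atomic int _value;
} _Py_atomic_int;
#define _Py_atomic_store_explicit(ATOMIC_VAL, NEW_VAL, ORDER) \
    atomic_store_explicit(&(ATOMIC_VAL)->_value, (NEW_VAL), \
                          memory_order_seq_cst)
#define _Py_atomic_load_explicit(ATOMIC_VAL, ORDER) \
    atomic_load_explicit(&(ATOMIC_VAL)->_value, memory_order_seq_cst)
#endif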
#endif
/* Standardized shortcuts. */
#define _Py_atomic_store(ATOMIC_VAL, NEW_VAL) \
_Py_atomic_store_explicit((ATOMIC_VAL), (NEW_VAL), _Py_memory_order_seq_cst)
#define _Py_atomic_load(ATOMIC_VAL) \
_Py_atomic_load_explicit((ATOMIC_VAL), _Py_memory_order_seq_cst)
/* Python-local extensions */
#define _Py_atomic_store_relaxed(ATOMIC_VAL, NEW_VAL) \
_Py_atomic_store_explicit((ATOMIC_VAL), (NEW_VAL), _Py_memory_order_relaxed)
#define _Py_atomic_load_relaxed(ATOMIC_VAL) \
_Py_atomic_load_explicit((ATOMIC_VAL), _Py_memory_order_relaxed)
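/* Usage sketch for the shortcuts (illustrative only; "_example_counter"
   is a hypothetical variable): */
#if 0
static _Py_atomic_int _example_counter = {0};

static void
_example_shortcuts(void)
{
    _Py_atomic_store(&_example_counter, 42);             /* seq_cst */
    int n = _Py_atomic_load(&_example_counter);          /* seq_cst */
    _Py_atomic_store_relaxed(&_example_counter, n + 1);  /* relaxed */
    (void)_Py_atomic_load_relaxed(&_example_counter);    /* relaxed */
}
#endif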
#ifdef __cplusplus
}
#endif
#endif /* Py_ATOMIC_H */
/* pycore_fileutils.h */
#ifndef Py_INTERNAL_FILEUTILS_H
#define Py_INTERNAL_FILEUTILS_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "Py_BUILD_CORE must be defined to include this header"
#endif
#include <locale.h> /* struct lconv */
PyAPI_DATA(int) _Py_HasFileSystemDefaultEncodeErrors;
PyAPI_FUNC(int) _Py_DecodeUTF8Ex(
const char *arg,
Py_ssize_t arglen,
wchar_t **wstr,
size_t *wlen,
const char **reason,
_Py_error_handler errors);
PyAPI_FUNC(int) _Py_EncodeUTF8Ex(
const wchar_t *text,
char **str,
size_t *error_pos,
const char **reason,
int raw_malloc,
_Py_error_handler errors);
PyAPI_FUNC(wchar_t*) _Py_DecodeUTF8_surrogateescape(
const char *arg,
Py_ssize_t arglen,
size_t *wlen);
PyAPI_FUNC(int) _Py_GetForceASCII(void);
/* Reset "force ASCII" mode (if it was initialized).
This function should be called when Python changes the LC_CTYPE locale,
so that "force ASCII" mode can be detected again for the new locale
encoding. */
PyAPI_FUNC(void) _Py_ResetForceASCII(void);
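/* Illustrative call sequence (a sketch of the pattern described above,
   not a CPython code excerpt): */
#if 0
static void
_example_change_locale(void)
{
    setlocale(LC_CTYPE, "");   /* the LC_CTYPE locale has changed... */
    _Py_ResetForceASCII();     /* ...so "force ASCII" mode is detected
                                  again for the new locale encoding */
}
#endif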
PyAPI_FUNC(int) _Py_GetLocaleconvNumeric(
struct lconv *lc,
PyObject **decimal_point,
PyObject **thousands_sep);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_FILEUTILS_H */
/* pycore_ceval.h */
#ifndef Py_INTERNAL_CEVAL_H
#define Py_INTERNAL_CEVAL_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
#include "pycore_atomic.h"
#include "pycore_pystate.h"
#include "pythread.h"
PyAPI_FUNC(void) _Py_FinishPendingCalls(_PyRuntimeState *runtime);
PyAPI_FUNC(void) _PyEval_Initialize(struct _ceval_runtime_state *);
PyAPI_FUNC(void) _PyEval_FiniThreads(
struct _ceval_runtime_state *ceval);
PyAPI_FUNC(void) _PyEval_SignalReceived(
struct _ceval_runtime_state *ceval);
PyAPI_FUNC(int) _PyEval_AddPendingCall(
PyThreadState *tstate,
struct _ceval_runtime_state *ceval,
int (*func)(void *),
void *arg);
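/* Usage sketch (illustrative only; "_example_pending" and
   "_example_schedule" are hypothetical names): */
#if 0
static int
_example_pending(void *arg)
{
    (void)arg;
    return 0;            /* 0 indicates success */
}

static void
_example_schedule(PyThreadState *tstate,
                  struct _ceval_runtime_state *ceval)
{
    /* Ask the eval loop to run _example_pending at a later checkpoint. */
    (void)_PyEval_AddPendingCall(tstate, ceval, _example_pending, NULL);
}
#endif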
PyAPI_FUNC(void) _PyEval_SignalAsyncExc(
struct _ceval_runtime_state *ceval);
PyAPI_FUNC(void) _PyEval_ReInitThreads(
_PyRuntimeState *runtime);
/* Private function */
void _PyEval_Fini(void);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_CEVAL_H */
/* pycore_long.h */
#ifndef Py_INTERNAL_LONG_H
#define Py_INTERNAL_LONG_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
/*
* Default int base conversion size limitation: Denial of Service prevention.
*
* Chosen such that this isn't wildly slow on modern hardware, and so that
* everyone's existing deployed numpy test suite passes before the fix for
* https://github.com/numpy/numpy/issues/22098 is widely available.
*
* $ python -m timeit -s 's = "1"*4300' 'int(s)'
* 2000 loops, best of 5: 125 usec per loop
* $ python -m timeit -s 's = "1"*4300; v = int(s)' 'str(v)'
* 1000 loops, best of 5: 311 usec per loop
* (zen2 cloud VM)
*
* 4300 decimal digits fit in a ~14284-bit number.
*/
#define _PY_LONG_DEFAULT_MAX_STR_DIGITS 4300
/*
* Threshold for the max digits check. For performance reasons int() and
* int.__str__() don't check values that are smaller than this
* threshold. It acts as a guaranteed minimum size limit for bignums that
* applications can expect from CPython.
*
* % python -m timeit -s 's = "1"*640; v = int(s)' 'str(int(s))'
* 20000 loops, best of 5: 12 usec per loop
*
* "640 digits should be enough for anyone." - gps
* fits a ~2126 bit decimal number.
*/
#define _PY_LONG_MAX_STR_DIGITS_THRESHOLD 640
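/* Sketch of how a guard built on these two constants is typically
   applied (illustrative only; not the actual longobject.c logic): */
#if 0
static int
_example_check_digits(Py_ssize_t digits, int max_str_digits)
{
    /* Conversions at or below the threshold are never rejected, so
       small values skip the limit check entirely. */
    if (digits > _PY_LONG_MAX_STR_DIGITS_THRESHOLD
        && max_str_digits != 0
        && digits > max_str_digits) {
        return -1;   /* caller reports the limit error */
    }
    return 0;
}
#endif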
#if ((_PY_LONG_DEFAULT_MAX_STR_DIGITS != 0) && \
(_PY_LONG_DEFAULT_MAX_STR_DIGITS < _PY_LONG_MAX_STR_DIGITS_THRESHOLD))
# error "_PY_LONG_DEFAULT_MAX_STR_DIGITS smaller than threshold."
#endif
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_LONG_H */
/* pycore_pathconfig.h */
#ifndef Py_INTERNAL_PATHCONFIG_H
#define Py_INTERNAL_PATHCONFIG_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef Py_BUILD_CORE
# error "this header requires Py_BUILD_CORE define"
#endif
typedef struct _PyPathConfig {
/* Full path to the Python program */
wchar_t *program_full_path;
wchar_t *prefix;
wchar_t *exec_prefix;
/* Set by Py_SetPath(), or computed by _PyConfig_InitPathConfig() */
wchar_t *module_search_path;
/* Python program name */
wchar_t *program_name;
/* Set by Py_SetPythonHome() or PYTHONHOME environment variable */
wchar_t *home;
#ifdef MS_WINDOWS
/* isolated and site_import are used to set Py_IsolatedFlag and
Py_NoSiteFlag flags on Windows in read_pth_file(). These fields
are ignored when their value is -1 (unset). */
int isolated;
int site_import;
/* Set when a venv is detected */
wchar_t *base_executable;
#endif
} _PyPathConfig;
#ifdef MS_WINDOWS
# define _PyPathConfig_INIT \
{.module_search_path = NULL, \
.isolated = -1, \
.site_import = -1}
#else
# define _PyPathConfig_INIT \
{.module_search_path = NULL}
#endif
/* Note: _PyPathConfig_INIT sets other fields to 0/NULL */
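/* Usage sketch (illustrative): a static instance initialized with the
   designated-initializer macro above. */
#if 0
static _PyPathConfig _example_path_config = _PyPathConfig_INIT;
#endif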
PyAPI_DATA(_PyPathConfig) _Py_path_config;
#ifdef MS_WINDOWS
PyAPI_DATA(wchar_t*) _Py_dll_path;
#endif
extern void _PyPathConfig_ClearGlobal(void);
extern PyStatus _PyPathConfig_SetGlobal(
const struct _PyPathConfig *pathconfig);
extern PyStatus _PyPathConfig_Calculate(
_PyPathConfig *pathconfig,
const PyConfig *config);
extern int _PyPathConfig_ComputeSysPath0(
const PyWideStringList *argv,
PyObject **path0);
extern int _Py_FindEnvConfigValue(
FILE *env_file,
const wchar_t *key,
wchar_t *value,
size_t value_size);
#ifdef MS_WINDOWS
extern wchar_t* _Py_GetDLLPath(void);
#endif
extern PyStatus _PyConfig_WritePathConfig(const PyConfig *config);
extern void _Py_DumpPathConfig(PyThreadState *tstate);
#ifdef __cplusplus
}
#endif
#endif /* !Py_INTERNAL_PATHCONFIG_H */