diff --git a/Include/internal/pycore_obmalloc.h b/Include/internal/pycore_obmalloc.h
index 0b23bb48dd5c1b..d4dbe541e6da51 100644
--- a/Include/internal/pycore_obmalloc.h
+++ b/Include/internal/pycore_obmalloc.h
@@ -691,7 +691,11 @@ struct _obmalloc_state {
 
 
 /* Allocate memory directly from the O/S virtual memory system,
- * where supported. Otherwise fallback on malloc */
+ * where supported. Otherwise fall back on malloc.
+ *
+ * Large-page and huge-page backends may round the mapped size up
+ * internally, so pass the original requested size back to
+ * _PyObject_VirtualFree(). */
 void *_PyObject_VirtualAlloc(size_t size);
 void _PyObject_VirtualFree(void *, size_t size);
 
diff --git a/Misc/NEWS.d/next/Core_and_Builtins/2026-04-01-12-52-31.gh-issue-144319.iZk4hs.rst b/Misc/NEWS.d/next/Core_and_Builtins/2026-04-01-12-52-31.gh-issue-144319.iZk4hs.rst
new file mode 100644
index 00000000000000..f3f07ab35dbb01
--- /dev/null
+++ b/Misc/NEWS.d/next/Core_and_Builtins/2026-04-01-12-52-31.gh-issue-144319.iZk4hs.rst
@@ -0,0 +1,3 @@
+Fix a bug that could cause applications with specific allocation patterns to
+leak memory via Huge Pages if compiled with Huge Page support. Patch by
+Pablo Galindo
diff --git a/Objects/obmalloc.c b/Objects/obmalloc.c
index 983bdddbf026a8..e2d5b012955c3e 100644
--- a/Objects/obmalloc.c
+++ b/Objects/obmalloc.c
@@ -14,6 +14,7 @@
 #include <stdlib.h>               // malloc()
 #include <stdbool.h>
 #include <stdio.h>                // fopen(), fgets(), sscanf()
+#include <errno.h>                // errno
 #ifdef WITH_MIMALLOC
 // Forward declarations of functions used in our mimalloc modifications
 static void _PyMem_mi_page_clear_qsbr(mi_page_t *page);
@@ -572,6 +573,49 @@ _pymalloc_system_hugepage_size(void)
 }
 #endif
 
+#if (defined(MS_WINDOWS) && defined(PYMALLOC_USE_HUGEPAGES)) || \
+    (defined(PYMALLOC_USE_HUGEPAGES) && defined(ARENAS_USE_MMAP) && defined(MAP_HUGETLB))
+static size_t
+_pymalloc_round_up_to_multiple(size_t size, size_t multiple)
+{
+    if (multiple == 0 || size == 0) {
+        return size;
+    }
+
+    size_t remainder = size % multiple;
+    if (remainder == 0) {
+        return size;
+    }
+
+    size_t padding = multiple - remainder;
+    if (size > SIZE_MAX - padding) {
+        return 0;
+    }
+    return size + padding;
+}
+#endif
+
+static size_t
+_pymalloc_virtual_alloc_size(size_t size)
+{
+#if defined(MS_WINDOWS) && defined(PYMALLOC_USE_HUGEPAGES)
+    if (_PyRuntime.allocators.use_hugepages) {
+        SIZE_T large_page_size = GetLargePageMinimum();
+        if (large_page_size > 0) {
+            return _pymalloc_round_up_to_multiple(size, (size_t)large_page_size);
+        }
+    }
+#elif defined(PYMALLOC_USE_HUGEPAGES) && defined(ARENAS_USE_MMAP) && defined(MAP_HUGETLB)
+    if (_PyRuntime.allocators.use_hugepages) {
+        size_t hp_size = _pymalloc_system_hugepage_size();
+        if (hp_size > 0) {
+            return _pymalloc_round_up_to_multiple(size, hp_size);
+        }
+    }
+#endif
+    return size;
+}
+
 void *
 _PyMem_ArenaAlloc(void *Py_UNUSED(ctx), size_t size)
 {
@@ -648,7 +692,11 @@ _PyMem_ArenaFree(void *Py_UNUSED(ctx), void *ptr,
     if (ptr == NULL) {
        return;
     }
-    munmap(ptr, size);
+    if (munmap(ptr, size) < 0) {
+        _Py_FatalErrorFormat(__func__,
+                             "munmap(%p, %zu) failed with errno %d",
+                             ptr, size, errno);
+    }
 #else
     free(ptr);
 #endif
@@ -1128,11 +1176,17 @@ PyObject_SetArenaAllocator(PyObjectArenaAllocator *allocator)
 void *
 _PyObject_VirtualAlloc(size_t size)
 {
-    return _PyObject_Arena.alloc(_PyObject_Arena.ctx, size);
+    size_t alloc_size = _pymalloc_virtual_alloc_size(size);
+    if (alloc_size == 0 && size != 0) {
+        return NULL;
+    }
+    return _PyObject_Arena.alloc(_PyObject_Arena.ctx, alloc_size);
 }
 
 void
 _PyObject_VirtualFree(void *obj, size_t size)
 {
-    _PyObject_Arena.free(_PyObject_Arena.ctx, obj, size);
+    size_t alloc_size = _pymalloc_virtual_alloc_size(size);
+    assert(alloc_size != 0 || size == 0);
+    _PyObject_Arena.free(_PyObject_Arena.ctx, obj, alloc_size);
 }