From c90d4af8750bef77ce9abe91df80d5d98ba297d7 Mon Sep 17 00:00:00 2001
From: Tavian Barnes
Date: Tue, 29 Oct 2024 14:37:19 -0400
Subject: alloc: Don't require size % align == 0

Allowing unaligned sizes will allow us to allocate aligned slabs with
additional metadata in the tail without ballooning the allocation size
for large alignments.

Link: https://www.open-std.org/jtc1/sc22/wg14/www/docs/n2244.htm#dr_460
Link: https://www.open-std.org/jtc1/sc22/wg14/www/docs/n2072.htm
---
 src/alloc.c | 15 +++++----------
 src/alloc.h | 34 ++++++++++++++--------------------
 2 files changed, 19 insertions(+), 30 deletions(-)

diff --git a/src/alloc.c b/src/alloc.c
index 779e1d7..4e68e13 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -20,24 +20,22 @@
 # define ALLOC_MAX (SIZE_MAX / 2)
 #endif
 
-/** Portable aligned_alloc()/posix_memalign(). */
+/** posix_memalign() wrapper. */
 static void *xmemalign(size_t align, size_t size) {
 	bfs_assert(has_single_bit(align));
 	bfs_assert(align >= sizeof(void *));
-	bfs_assert(is_aligned(align, size));
 
-#if BFS_HAS_ALIGNED_ALLOC
-	return aligned_alloc(align, size);
-#else
+	// Since https://www.open-std.org/jtc1/sc22/wg14/www/docs/n2072.htm,
+	// aligned_alloc() doesn't require the size to be a multiple of align.
+	// But the sanitizers don't know about that yet, so always use
+	// posix_memalign().
 	void *ptr = NULL;
 	errno = posix_memalign(&ptr, align, size);
 	return ptr;
-#endif
 }
 
 void *alloc(size_t align, size_t size) {
 	bfs_assert(has_single_bit(align));
-	bfs_assert(is_aligned(align, size));
 
 	if (size > ALLOC_MAX) {
 		errno = EOVERFLOW;
@@ -53,7 +51,6 @@ void *alloc(size_t align, size_t size) {
 
 void *zalloc(size_t align, size_t size) {
 	bfs_assert(has_single_bit(align));
-	bfs_assert(is_aligned(align, size));
 
 	if (size > ALLOC_MAX) {
 		errno = EOVERFLOW;
@@ -73,8 +70,6 @@ void *zalloc(size_t align, size_t size) {
 
 void *xrealloc(void *ptr, size_t align, size_t old_size, size_t new_size) {
 	bfs_assert(has_single_bit(align));
-	bfs_assert(is_aligned(align, old_size));
-	bfs_assert(is_aligned(align, new_size));
 
 	if (new_size == 0) {
 		free(ptr);
diff --git a/src/alloc.h b/src/alloc.h
index 4f119e0..7865d5d 100644
--- a/src/alloc.h
+++ b/src/alloc.h
@@ -30,25 +30,24 @@ static inline size_t align_ceil(size_t align, size_t size) {
 }
 
 /**
- * Saturating array size.
- *
- * @align
- *         Array element alignment.
- * @size
- *         Array element size.
- * @count
- *         Array element count.
- * @return
- *         size * count, saturating to the maximum aligned value on overflow.
+ * Saturating size addition.
  */
-static inline size_t array_size(size_t align, size_t size, size_t count) {
+static inline size_t size_add(size_t lhs, size_t rhs) {
+	size_t ret = lhs + rhs;
+	return ret >= lhs ? ret : (size_t)-1;
+}
+
+/**
+ * Saturating size multiplication.
+ */
+static inline size_t size_mul(size_t size, size_t count) {
 	size_t ret = size * count;
-	return ret / size == count ? ret : ~(align - 1);
+	return ret / size == count ? ret : (size_t)-1;
 }
 
 /** Saturating array sizeof. */
 #define sizeof_array(type, count) \
-	array_size(alignof(type), sizeof(type), count)
+	size_mul(sizeof(type), count)
 
 /** Size of a struct/union field. */
 #define sizeof_member(type, member) \
@@ -72,13 +71,8 @@ static inline size_t array_size(size_t align, size_t size, size_t count) {
  *         to the maximum aligned value on overflow.
  */
 static inline size_t flex_size(size_t align, size_t min, size_t offset, size_t size, size_t count) {
-	size_t ret = size * count;
-	size_t overflow = ret / size != count;
-
-	size_t extra = offset + align - 1;
-	ret += extra;
-	overflow |= ret < extra;
-	ret |= -overflow;
+	size_t ret = size_mul(size, count);
+	ret = size_add(ret, offset + align - 1);
 	ret = align_floor(align, ret);
 
 	// Make sure flex_sizeof(type, member, 0) >= sizeof(type), even if the
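
As an illustrative sketch of what the commit message describes, and not code
from this patch: with the size % align requirement gone, a slab can be
allocated with a large alignment plus a little metadata in the tail, without
rounding the request up to the next multiple of the alignment. SLAB_ALIGN,
struct slab_tail, slab_new(), and the include path below are hypothetical
names invented for the example; only alloc() (and the removed is_aligned()
assertion) come from the patch above.

#include <stddef.h>
#include <stdlib.h>

#include "alloc.h" // assumed include path for bfs's alloc() declaration

// Hypothetical tail metadata for a slab; not a real bfs type.
struct slab_tail {
	size_t used;
	struct slab_tail *next;
};

// Hypothetical slab alignment (64 KiB).
#define SLAB_ALIGN ((size_t)1 << 16)

// Allocate a SLAB_ALIGN-aligned slab with its metadata stored in the tail.
// The total size is SLAB_ALIGN + sizeof(struct slab_tail), which is not a
// multiple of SLAB_ALIGN.  Before this patch, alloc() asserted
// is_aligned(align, size), so the request would have had to balloon to
// 2 * SLAB_ALIGN.
static void *slab_new(void) {
	return alloc(SLAB_ALIGN, SLAB_ALIGN + sizeof(struct slab_tail));
}

// Recover the tail metadata from the aligned slab pointer.
static struct slab_tail *slab_tail(void *slab) {
	return (struct slab_tail *)((char *)slab + SLAB_ALIGN);
}

int main(void) {
	void *slab = slab_new();
	if (slab) {
		*slab_tail(slab) = (struct slab_tail){0};
	}
	free(slab);
	return 0;
}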
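
In the same illustrative spirit, a minimal sketch of how the new saturating
helpers compose; struct name_list is a made-up flexible-array struct, not a
bfs type, and the include path is again an assumption. size_add() and
size_mul() clamp to SIZE_MAX on overflow, and flex_size() is now built on top
of them, so an oversized count produces a saturated size that alloc() then
rejects with EOVERFLOW once it exceeds ALLOC_MAX, rather than wrapping around
to a too-small buffer.

#include <assert.h>
#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>

#include "alloc.h" // assumed include path for size_add(), size_mul(), flex_size(), alloc()

// Hypothetical struct with a flexible array member; not a bfs type.
struct name_list {
	size_t count;
	const char *names[];
};

int main(void) {
	// Overflow clamps to SIZE_MAX instead of wrapping around.
	assert(size_add(SIZE_MAX, 1) == SIZE_MAX);
	assert(size_mul(SIZE_MAX / 2, 3) == SIZE_MAX);

	// flex_size() funnels through size_mul()/size_add(): a sane count gives
	// the usual aligned size, while an overflowing count saturates, and the
	// saturated value is then rejected by alloc() with EOVERFLOW.
	size_t size = flex_size(alignof(struct name_list),
	                        sizeof(struct name_list),
	                        offsetof(struct name_list, names),
	                        sizeof(const char *),
	                        1000);
	struct name_list *list = alloc(alignof(struct name_list), size);
	(void)list;
	return 0;
}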