GNU libmicrohttpd 0.9.77
memorypool.c
/*
  This file is part of libmicrohttpd
  Copyright (C) 2007--2021 Daniel Pittman, Christian Grothoff, and
  Karlson2k (Evgeny Grin)

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

#include "memorypool.h"
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif /* HAVE_STDLIB_H */
#include <string.h>
#include <stdint.h>
#include "mhd_assert.h"
#if HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef _WIN32
#include <windows.h>
#endif
#ifdef HAVE_SYSCONF
#include <unistd.h>
#if defined(_SC_PAGE_SIZE)
#define MHD_SC_PAGESIZE _SC_PAGE_SIZE
#elif defined(_SC_PAGESIZE)
#define MHD_SC_PAGESIZE _SC_PAGESIZE
#endif /* _SC_PAGESIZE */
#endif /* HAVE_SYSCONF */
#include "mhd_limits.h" /* for SIZE_MAX, PAGESIZE / PAGE_SIZE */

#if defined(MHD_USE_PAGESIZE_MACRO) || defined(MHD_USE_PAGE_SIZE_MACRO)
#ifndef HAVE_SYSCONF /* Avoid duplicate include */
#include <unistd.h>
#endif /* HAVE_SYSCONF */
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif /* HAVE_SYS_PARAM_H */
#endif /* MHD_USE_PAGESIZE_MACRO || MHD_USE_PAGE_SIZE_MACRO */

#define _MHD_FALLBACK_PAGE_SIZE (4096)

#if defined(MHD_USE_PAGESIZE_MACRO)
#define MHD_DEF_PAGE_SIZE_ PAGESIZE
#elif defined(MHD_USE_PAGE_SIZE_MACRO)
#define MHD_DEF_PAGE_SIZE_ PAGE_SIZE
#else /* ! PAGESIZE */
#define MHD_DEF_PAGE_SIZE_ _MHD_FALLBACK_PAGE_SIZE
#endif /* ! PAGESIZE */


#ifdef MHD_ASAN_POISON_ACTIVE
#include <sanitizer/asan_interface.h>
#endif /* MHD_ASAN_POISON_ACTIVE */

/* define MAP_ANONYMOUS for Mac OS X */
#if defined(MAP_ANON) && ! defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif
#if defined(_WIN32)
#define MAP_FAILED NULL
#elif ! defined(MAP_FAILED)
#define MAP_FAILED ((void*) -1)
#endif

#define ALIGN_SIZE (2 * sizeof(void*))

#define ROUND_TO_ALIGN(n) (((n) + (ALIGN_SIZE - 1)) \
                           / (ALIGN_SIZE) *(ALIGN_SIZE))

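/* Illustrative sketch added for exposition; not part of the upstream file.
 * ROUND_TO_ALIGN() rounds a requested size up to the next multiple of
 * ALIGN_SIZE (two pointers, i.e. 16 bytes on common 64-bit targets). The
 * helper below merely demonstrates that arithmetic. */
static void
example_round_to_align_ (void)
{
  mhd_assert (ROUND_TO_ALIGN (1) == ALIGN_SIZE);                  /* rounds up */
  mhd_assert (ROUND_TO_ALIGN (ALIGN_SIZE) == ALIGN_SIZE);         /* exact fit */
  mhd_assert (ROUND_TO_ALIGN (ALIGN_SIZE + 1) == 2 * ALIGN_SIZE); /* next step */
}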

#ifndef MHD_ASAN_POISON_ACTIVE
#define _MHD_NOSANITIZE_PTRS
#define _MHD_RED_ZONE_SIZE (0)
#define ROUND_TO_ALIGN_PLUS_RED_ZONE(n) ROUND_TO_ALIGN(n)
#define _MHD_POISON_MEMORY(pointer, size) (void)0
#define _MHD_UNPOISON_MEMORY(pointer, size) (void)0
#else /* MHD_ASAN_POISON_ACTIVE */
#if defined(FUNC_ATTR_PTRCOMPARE_WOKRS)
#define _MHD_NOSANITIZE_PTRS \
  __attribute__((no_sanitize("pointer-compare","pointer-subtract")))
#elif defined(FUNC_ATTR_NOSANITIZE_WORKS)
#define _MHD_NOSANITIZE_PTRS __attribute__((no_sanitize("address")))
#endif
#define _MHD_RED_ZONE_SIZE (ALIGN_SIZE)
#define ROUND_TO_ALIGN_PLUS_RED_ZONE(n) (ROUND_TO_ALIGN(n) + _MHD_RED_ZONE_SIZE)
#define _MHD_POISON_MEMORY(pointer, size) \
  ASAN_POISON_MEMORY_REGION ((pointer), (size))
#define _MHD_UNPOISON_MEMORY(pointer, size) \
  ASAN_UNPOISON_MEMORY_REGION ((pointer), (size))
#endif /* MHD_ASAN_POISON_ACTIVE */

static size_t MHD_sys_page_size_ =
#if defined(MHD_USE_PAGESIZE_MACRO_STATIC)
  PAGESIZE;
#elif defined(MHD_USE_PAGE_SIZE_MACRO_STATIC)
  PAGE_SIZE;
#else /* ! MHD_USE_PAGE_SIZE_MACRO_STATIC */
  _MHD_FALLBACK_PAGE_SIZE; /* Default fallback value */
#endif /* ! MHD_USE_PAGE_SIZE_MACRO_STATIC */

void
MHD_init_mem_pools_ (void)
{
#ifdef MHD_SC_PAGESIZE
  long result;
  result = sysconf (MHD_SC_PAGESIZE);
  if (-1 != result)
    MHD_sys_page_size_ = (size_t) result;
  else
    MHD_sys_page_size_ = MHD_DEF_PAGE_SIZE_;
#elif defined(_WIN32)
  SYSTEM_INFO si;
  GetSystemInfo (&si);
  MHD_sys_page_size_ = (size_t) si.dwPageSize;
#else
  MHD_sys_page_size_ = MHD_DEF_PAGE_SIZE_;
#endif /* _WIN32 */
}

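/* Illustrative sketch added for exposition; not part of the upstream file.
 * MHD_init_mem_pools_() only caches the system page size in
 * MHD_sys_page_size_, so it is intended to run once during global library
 * initialisation, before any pool is created. A minimal ordering sketch: */
static void
example_init_order_ (void)
{
  struct MemoryPool *pool;

  MHD_init_mem_pools_ ();              /* cache the page size first */
  pool = MHD_pool_create (64 * 1024);  /* pools created later use that value */
  if (NULL != pool)
    MHD_pool_destroy (pool);
}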

struct MemoryPool
{
  /* Pointer to the pool's memory block. */
  uint8_t *memory;

  /* Size of the memory block in bytes. */
  size_t size;

  /* Offset of the first free byte ("front" allocations advance this). */
  size_t pos;

  /* Offset just past the last free byte ("from_end" allocations pull
   * this backwards). */
  size_t end;

  /* True if the block was obtained via mmap()/VirtualAlloc(), false if
   * it came from malloc(). */
  bool is_mmap;
};

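/* Explanatory note added for exposition; not part of the upstream file.
 * The pool is one contiguous buffer: "front" allocations advance 'pos',
 * "from_end" allocations pull 'end' backwards, and the free region is
 * whatever remains between the two:
 *
 *   memory                                                 memory + size
 *   |<-- allocated (front) -->|<------ free ------>|<-- allocated (end) -->|
 *   0                        pos                  end                  size
 */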

struct MemoryPool *
MHD_pool_create (size_t max)
{
  struct MemoryPool *pool;
  size_t alloc_size;

  mhd_assert (max > 0);
  alloc_size = 0;
  pool = malloc (sizeof (struct MemoryPool));
  if (NULL == pool)
    return NULL;
#if defined(MAP_ANONYMOUS) || defined(_WIN32)
  if ( (max <= 32 * 1024) ||
       (max < MHD_sys_page_size_ * 4 / 3) )
  {
    pool->memory = MAP_FAILED;
  }
  else
  {
    /* Round up allocation to page granularity. */
    alloc_size = max + MHD_sys_page_size_ - 1;
    alloc_size -= alloc_size % MHD_sys_page_size_;
#if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
    pool->memory = mmap (NULL,
                         alloc_size,
                         PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS,
                         -1,
                         0);
#elif defined(_WIN32)
    pool->memory = VirtualAlloc (NULL,
                                 alloc_size,
                                 MEM_COMMIT | MEM_RESERVE,
                                 PAGE_READWRITE);
#endif /* _WIN32 */
  }
#else /* ! _WIN32 && ! MAP_ANONYMOUS */
  pool->memory = MAP_FAILED;
#endif /* ! _WIN32 && ! MAP_ANONYMOUS */
  if (MAP_FAILED == pool->memory)
  {
    alloc_size = ROUND_TO_ALIGN (max);
    pool->memory = malloc (alloc_size);
    if (NULL == pool->memory)
    {
      free (pool);
      return NULL;
    }
    pool->is_mmap = false;
  }
#if defined(MAP_ANONYMOUS) || defined(_WIN32)
  else
  {
    pool->is_mmap = true;
  }
#endif /* _WIN32 || MAP_ANONYMOUS */
  mhd_assert (0 == (((uintptr_t) pool->memory) % ALIGN_SIZE));
  pool->pos = 0;
  pool->end = alloc_size;
  pool->size = alloc_size;
  mhd_assert (0 < alloc_size);
  _MHD_POISON_MEMORY (pool->memory, pool->size);
  return pool;
}


void
MHD_pool_destroy (struct MemoryPool *pool)
{
  if (NULL == pool)
    return;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  _MHD_UNPOISON_MEMORY (pool->memory, pool->size);
  if (! pool->is_mmap)
    free (pool->memory);
  else
#if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
    munmap (pool->memory,
            pool->size);
#elif defined(_WIN32)
    VirtualFree (pool->memory,
                 0,
                 MEM_RELEASE);
#else
    abort ();
#endif
  free (pool);
}

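/* Illustrative sketch added for exposition; not part of the upstream file.
 * Basic lifecycle of a pool (libmicrohttpd mostly uses one per connection).
 * Sizes are arbitrary; the point is the create / allocate / destroy
 * sequence and that MHD_pool_destroy() releases every allocation at once. */
static void
example_pool_lifecycle_ (void)
{
  struct MemoryPool *pool;
  void *buf;

  pool = MHD_pool_create (64 * 1024);           /* pool of roughly 64 KiB */
  if (NULL == pool)
    return;                                     /* out of memory */
  buf = MHD_pool_allocate (pool, 1024, false);  /* 1 KiB from the front */
  if (NULL != buf)
    memset (buf, 0, 1024);                      /* use it like normal memory */
  (void) MHD_pool_get_free (pool);              /* remaining usable bytes */
  MHD_pool_destroy (pool);                      /* frees pool and all blocks */
}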

size_t
MHD_pool_get_free (struct MemoryPool *pool)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
#ifdef MHD_ASAN_POISON_ACTIVE
  if ((pool->end - pool->pos) <= _MHD_RED_ZONE_SIZE)
    return 0;
#endif /* MHD_ASAN_POISON_ACTIVE */
  return (pool->end - pool->pos) - _MHD_RED_ZONE_SIZE;
}


void *
MHD_pool_allocate (struct MemoryPool *pool,
                   size_t size,
                   bool from_end)
{
  void *ret;
  size_t asize;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  asize = ROUND_TO_ALIGN_PLUS_RED_ZONE (size);
  if ( (0 == asize) && (0 != size) )
    return NULL; /* size too close to SIZE_MAX */
  if (asize > pool->end - pool->pos)
    return NULL;
  if (from_end)
  {
    ret = &pool->memory[pool->end - asize];
    pool->end -= asize;
  }
  else
  {
    ret = &pool->memory[pool->pos];
    pool->pos += asize;
  }
  _MHD_UNPOISON_MEMORY (ret, size);
  return ret;
}

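/* Illustrative sketch added for exposition; not part of the upstream file.
 * The 'from_end' flag selects which side of the pool a block is carved
 * from. Blocks taken from the front may later be passed to
 * MHD_pool_reallocate(); blocks taken from the end must not be (see the
 * assertions there). */
static void
example_pool_allocate_sides_ (struct MemoryPool *pool)
{
  void *front;
  void *back;

  front = MHD_pool_allocate (pool, 256, false); /* advances 'pos' */
  back = MHD_pool_allocate (pool, 256, true);   /* pulls 'end' backwards */
  if ( (NULL == front) || (NULL == back) )
    return;                                     /* pool exhausted */
  /* Both blocks stay valid until the pool is reset or destroyed. */
}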

void *
MHD_pool_try_alloc (struct MemoryPool *pool,
                    size_t size,
                    size_t *required_bytes)
{
  void *ret;
  size_t asize;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  asize = ROUND_TO_ALIGN_PLUS_RED_ZONE (size);
  if ( (0 == asize) && (0 != size) )
  { /* size is too close to SIZE_MAX, very unlikely */
    *required_bytes = SIZE_MAX;
    return NULL;
  }
  if (asize > pool->end - pool->pos)
  {
    mhd_assert ((pool->end - pool->pos) == \
                ROUND_TO_ALIGN (pool->end - pool->pos));
    if (asize <= pool->end)
      *required_bytes = asize - (pool->end - pool->pos);
    else
      *required_bytes = SIZE_MAX;
    return NULL;
  }
  ret = &pool->memory[pool->end - asize];
  pool->end -= asize;
  _MHD_UNPOISON_MEMORY (ret, size);
  return ret;
}

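/* Illustrative sketch added for exposition; not part of the upstream file.
 * MHD_pool_try_alloc() never takes more than the free space; on failure it
 * reports via 'required_bytes' how many additional free bytes the request
 * would need (or SIZE_MAX if it can never be satisfied). */
static void
example_pool_try_alloc_ (struct MemoryPool *pool,
                         size_t want)
{
  size_t missing;
  void *blk;

  blk = MHD_pool_try_alloc (pool, want, &missing);
  if (NULL != blk)
    return;                 /* enough room, block obtained from the end */
  if (SIZE_MAX == missing)
    return;                 /* request can never fit in this pool */
  /* Otherwise the request is short by 'missing' bytes of free space. */
}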

_MHD_NOSANITIZE_PTRS void *
MHD_pool_reallocate (struct MemoryPool *pool,
                     void *old,
                     size_t old_size,
                     size_t new_size)
{
  size_t asize;
  uint8_t *new_blc;

  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (old != NULL || old_size == 0);
  mhd_assert (pool->size >= old_size);
  mhd_assert (old == NULL || pool->memory <= (uint8_t *) old);
  /* (old == NULL || pool->memory + pool->size >= (uint8_t*) old + old_size) */
  mhd_assert (old == NULL || \
              (pool->size - _MHD_RED_ZONE_SIZE) >= \
              (((size_t) (((uint8_t *) old) - pool->memory)) + old_size));
  /* Blocks "from the end" must not be reallocated */
  /* (old == NULL || old_size == 0 || pool->memory + pool->pos > (uint8_t*) old) */
  mhd_assert (old == NULL || old_size == 0 || \
              pool->pos > (size_t) ((uint8_t *) old - pool->memory));
  mhd_assert (old == NULL || old_size == 0 || \
              (size_t) (((uint8_t *) old) - pool->memory) + old_size <= \
              pool->end - _MHD_RED_ZONE_SIZE);

  if (NULL != old)
  { /* Have previously allocated data */
    const size_t old_offset = (uint8_t *) old - pool->memory;
    const bool shrinking = (old_size > new_size);
    /* Try resizing in-place */
    if (shrinking)
    { /* Shrinking in-place, zero-out freed part */
      memset ((uint8_t *) old + new_size, 0, old_size - new_size);
      _MHD_POISON_MEMORY ((uint8_t *) old + new_size, old_size - new_size);
    }
    if (pool->pos ==
        ROUND_TO_ALIGN_PLUS_RED_ZONE (old_offset + old_size))
    { /* "old" block is the last allocated block */
      const size_t new_apos =
        ROUND_TO_ALIGN_PLUS_RED_ZONE (old_offset + new_size);
      if (! shrinking)
      { /* Grow in-place, check for enough space. */
        if ( (new_apos > pool->end) ||
             (new_apos < pool->pos) ) /* Value wrap */
          return NULL; /* No space */
      }
      /* Resized in-place */
      pool->pos = new_apos;
      _MHD_UNPOISON_MEMORY (old, new_size);
      return old;
    }
    if (shrinking)
      return old; /* Resized in-place, freed part remains allocated */
  }
  /* Need to allocate new block */
  asize = ROUND_TO_ALIGN_PLUS_RED_ZONE (new_size);
  if ( ( (0 == asize) &&
         (0 != new_size) ) || /* Value wrap, too large new_size. */
       (asize > pool->end - pool->pos) ) /* Not enough space */
    return NULL;

  new_blc = pool->memory + pool->pos;
  pool->pos += asize;

  _MHD_UNPOISON_MEMORY (new_blc, new_size);
  if (0 != old_size)
  {
    /* Move data to new block, old block remains allocated */
    memcpy (new_blc, old, old_size);
    /* Zero-out old block */
    memset (old, 0, old_size);
    _MHD_POISON_MEMORY (old, old_size);
  }
  return new_blc;
}

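/* Illustrative sketch added for exposition; not part of the upstream file.
 * MHD_pool_reallocate() grows a block in place only while it is the most
 * recently allocated "front" block; otherwise growing copies the data into
 * a fresh block and the old one stays occupied until reset/destroy.
 * Shrinking always succeeds in place. */
static void
example_pool_reallocate_ (struct MemoryPool *pool)
{
  void *a;
  void *b;
  void *grown;

  a = MHD_pool_allocate (pool, 64, false);
  if (NULL == a)
    return;
  grown = MHD_pool_reallocate (pool, a, 64, 128);  /* last block: in place */
  if (NULL == grown)
    return;                                        /* no room left to grow */
  a = grown;
  b = MHD_pool_allocate (pool, 32, false);         /* 'a' is no longer last */
  grown = MHD_pool_reallocate (pool, a, 128, 256); /* copies into a new block */
  (void) b;
  (void) grown;
}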

_MHD_NOSANITIZE_PTRS void *
MHD_pool_reset (struct MemoryPool *pool,
                void *keep,
                size_t copy_bytes,
                size_t new_size)
{
  mhd_assert (pool->end >= pool->pos);
  mhd_assert (pool->size >= pool->end - pool->pos);
  mhd_assert (copy_bytes <= new_size);
  mhd_assert (copy_bytes <= pool->size);
  mhd_assert (keep != NULL || copy_bytes == 0);
  mhd_assert (keep == NULL || pool->memory <= (uint8_t *) keep);
  /* (keep == NULL || pool->memory + pool->size >= (uint8_t*) keep + copy_bytes) */
  mhd_assert (keep == NULL || \
              pool->size >= \
              ((size_t) ((uint8_t *) keep - pool->memory)) + copy_bytes);
  _MHD_UNPOISON_MEMORY (pool->memory, new_size);
  if ( (NULL != keep) &&
       (keep != pool->memory) )
  {
    if (0 != copy_bytes)
      memmove (pool->memory,
               keep,
               copy_bytes);
  }
  /* technically not needed, but safer to zero out */
  if (pool->size > copy_bytes)
  {
    size_t to_zero;
    to_zero = pool->size - copy_bytes;
    _MHD_UNPOISON_MEMORY (pool->memory + copy_bytes, to_zero);
#ifdef _WIN32
    if (pool->is_mmap)
    {
      size_t to_recommit;
      uint8_t *recommit_addr;
      /* Round down to page size */
      to_recommit = to_zero - to_zero % MHD_sys_page_size_;
      recommit_addr = pool->memory + pool->size - to_recommit;

      /* De-committing and re-committing again clear memory and make
       * pages free / available for other needs until accessed. */
      if (VirtualFree (recommit_addr,
                       to_recommit,
                       MEM_DECOMMIT))
      {
        to_zero -= to_recommit;

        if (recommit_addr != VirtualAlloc (recommit_addr,
                                           to_recommit,
                                           MEM_COMMIT,
                                           PAGE_READWRITE))
          abort (); /* Serious error, must never happen */
      }
    }
#endif /* _WIN32 */
    memset (&pool->memory[copy_bytes],
            0,
            to_zero);
  }
  pool->pos = ROUND_TO_ALIGN_PLUS_RED_ZONE (new_size);
  pool->end = pool->size;
  _MHD_POISON_MEMORY (((uint8_t *) pool->memory) + new_size, \
                      pool->size - new_size);
  return pool->memory;
}

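/* Illustrative sketch added for exposition; not part of the upstream file.
 * A plausible caller pattern: keep the pool, preserve the still-needed bytes
 * of one block (e.g. a read buffer) and hand everything else back. After the
 * reset, only the returned pointer is valid; all other blocks previously
 * taken from this pool are gone. */
static void
example_pool_reset_ (struct MemoryPool *pool)
{
  void *readbuf;
  void *base;

  readbuf = MHD_pool_allocate (pool, 1024, false);
  if (NULL == readbuf)
    return;
  /* ... further allocations from the pool happen here ... */
  base = MHD_pool_reset (pool,
                         readbuf, /* block to keep */
                         100,     /* bytes of it still meaningful */
                         1024);   /* space reserved for it after the reset */
  /* 'base' is the start of the pool and now holds those 100 bytes. */
  (void) base;
}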

/* end of memorypool.c */