Handle IRQs inside the kernel
All checks were successful
Build documentation / build-and-deploy (push) Successful in 2m42s
82 kernel/mm/_malloc_port.c Normal file
@@ -0,0 +1,82 @@
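/*
 * _malloc_port.c: glue that adapts Doug Lea's malloc (dlmalloc) to the
 * kernel environment. The LACKS_* defines below tell dlmalloc that no
 * hosted libc headers exist, MORECORE/sbrk is disabled, and the only way
 * to obtain memory is the mmap()/munmap() shims at the bottom of this
 * file, which hand out physical pages from the PMM mapped through the
 * Limine HHDM (higher-half direct map).
 */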
#include <libk/align.h>
#include <libk/string.h>
#include <limine/requests.h>
#include <mm/pmm.h>
#include <page_size.h>
#include <stdint.h>
#include <sys/debug.h>

#define LACKS_UNISTD_H 1
#define LACKS_SYS_MMAN_H 1
#define LACKS_SYS_PARAM_H 1
#define LACKS_FCNTL_H 1
#define LACKS_SYS_TYPES_H 1
#define LACKS_SCHED_H 1
#define LACKS_ERRNO_H 1
#define LACKS_STDLIB_H 1
#define LACKS_TIME_H 1
#define LACKS_STRINGS_H 1

#define ABORT \
  { \
    (void)0; \
  }
#define MALLOC_ALIGNMENT 16
#define HAVE_MORECORE 0
#define NO_MALLOC_STATS 1
#define USE_LOCKS 1
#define MALLOC_FAILURE_ACTION \
  do { \
    DEBUG ("malloc failure\n"); \
  } while (0)
#define EINVAL -1
#define ENOMEM -1
#define DEFAULT_MMAP_THRESHOLD 0

#define open _open_dummy
#define O_RDWR 0
#define PROT_READ 0
#define PROT_WRITE 0
#define MAP_PRIVATE 0
#define fprintf(...)

int _open_dummy (const char* path, uint16_t modes) {
  (void)path, (void)modes;
  return 0;
}

void* mmap (void* addr, size_t size, int prot, int flags, int fd, size_t off) {
  (void)addr, (void)prot, (void)fd, (void)off, (void)flags;

  if (size == 0)
    return (void*)-1;

  size = div_align_up (size, PAGE_SIZE);

  physaddr_t p_addr = pmm_alloc (size);

  if (p_addr == PMM_ALLOC_ERR)
    return (void*)-1;

  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
  uintptr_t a = (uintptr_t)(p_addr + hhdm->offset);

  memset ((void*)a, 0, size * PAGE_SIZE);

  return (void*)a;
}

int munmap (void* addr, size_t length) {
  if (length == 0)
    return 0;

  length = div_align_up (length, PAGE_SIZE);

  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

  physaddr_t p_addr = (uintptr_t)addr - hhdm->offset;

  pmm_free (p_addr, length);

  return 0;
}
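For reference, a minimal usage sketch of the port above; kheap_smoke_test is a hypothetical helper, not part of this commit, and it assumes the allocator built from malloc.c exposes the standard malloc/free names. Because the port sets DEFAULT_MMAP_THRESHOLD to 0, every allocation is ultimately backed by the mmap() shim and therefore by pmm_alloc plus the HHDM offset.

#include <libk/string.h>
#include <stddef.h>
#include <sys/debug.h>

void* malloc (size_t size);
void free (void* ptr);

/* Hypothetical smoke test: allocate, touch and release a buffer. */
void kheap_smoke_test (void) {
  void* buf = malloc (8 * 4096);

  if (buf == NULL) {
    DEBUG ("kheap: allocation failed\n");
    return;
  }

  memset (buf, 0xaa, 8 * 4096); /* the pages are writable through the HHDM */
  free (buf);
}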
417 kernel/mm/liballoc.c (deleted)
@@ -1,417 +0,0 @@
/* liballoc breaks when optimized too aggressively, for eg. clang's -Oz */
#pragma clang optimize off

#include <libk/align.h>
#include <limine/requests.h>
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <mm/types.h>
#include <sync/spin_lock.h>
#include <sys/debug.h>
#include <sys/mm.h>

spin_lock_t _liballoc_lock = SPIN_LOCK_INIT;

int liballoc_lock (uint64_t* flags) {
  spin_lock (&_liballoc_lock, flags);
  return 0;
}

int liballoc_unlock (uint64_t flags) {
  spin_unlock (&_liballoc_lock, flags);
  return 0;
}

void* liballoc_alloc (int pages) {
  physaddr_t p_addr = pmm_alloc (pages);

  if (p_addr == PMM_ALLOC_ERR)
    return NULL;

  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
  uintptr_t addr = (uintptr_t)(p_addr + hhdm->offset);

  return (void*)addr;
}

int liballoc_free (void* ptr, int pages) {
  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;

  physaddr_t p_addr = (uintptr_t)ptr - hhdm->offset;

  pmm_free (p_addr, pages);

  return 0;
}

/** Durand's Ridiculously Amazing Super Duper Memory functions. */

// #define DEBUG

#define LIBALLOC_MAGIC 0xc001c0de
#define MAXCOMPLETE 5
#define MAXEXP 32
#define MINEXP 8

#define MODE_BEST 0
#define MODE_INSTANT 1

#define MODE MODE_BEST

struct boundary_tag* l_freePages[MAXEXP]; //< Allowing for 2^MAXEXP blocks
int l_completePages[MAXEXP];              //< Allowing for 2^MAXEXP blocks

static int l_initialized = 0; //< Flag to indicate initialization.
static int l_pageSize = 4096; //< Individual page size
static int l_pageCount = 16;  //< Minimum number of pages to allocate.

// *********** HELPER FUNCTIONS *******************************

/** Returns the exponent required to manage 'size' amount of memory.
 *
 * Returns n where 2^n <= size < 2^(n+1)
 */
static inline int getexp (unsigned int size) {
  if (size < (1 << MINEXP)) {
    return -1; // Smaller than the quantum.
  }

  int shift = MINEXP;

  while (shift < MAXEXP) {
    if ((1 << shift) > size)
      break;
    shift += 1;
  }

  return shift - 1;
}
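/* Worked example of the function above: with MINEXP = 8, getexp(200)
 * returns -1 (below the 256-byte quantum), getexp(256) and getexp(511)
 * both return 8, and getexp(512) returns 9 -- i.e. the bucket n with
 * 2^n <= size < 2^(n+1). */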

static void* liballoc_memset (void* s, int c, size_t n) {
  size_t i;
  for (i = 0; i < n; i++)
    ((char*)s)[i] = c;

  return s;
}

static void* liballoc_memcpy (void* s1, const void* s2, size_t n) {
  char* cdest;
  char* csrc;
  unsigned int* ldest = (unsigned int*)s1;
  unsigned int* lsrc = (unsigned int*)s2;

  while (n >= sizeof (unsigned int)) {
    *ldest++ = *lsrc++;
    n -= sizeof (unsigned int);
  }

  cdest = (char*)ldest;
  csrc = (char*)lsrc;

  while (n > 0) {
    *cdest++ = *csrc++;
    n -= 1;
  }

  return s1;
}

static inline void insert_tag (struct boundary_tag* tag, int index) {
  int realIndex;

  if (index < 0) {
    realIndex = getexp (tag->real_size - sizeof (struct boundary_tag));
    if (realIndex < MINEXP)
      realIndex = MINEXP;
  } else
    realIndex = index;

  tag->index = realIndex;

  if (l_freePages[realIndex] != NULL) {
    l_freePages[realIndex]->prev = tag;
    tag->next = l_freePages[realIndex];
  }

  l_freePages[realIndex] = tag;
}

static inline void remove_tag (struct boundary_tag* tag) {
  if (l_freePages[tag->index] == tag)
    l_freePages[tag->index] = tag->next;

  if (tag->prev != NULL)
    tag->prev->next = tag->next;
  if (tag->next != NULL)
    tag->next->prev = tag->prev;

  tag->next = NULL;
  tag->prev = NULL;
  tag->index = -1;
}

static inline struct boundary_tag* melt_left (struct boundary_tag* tag) {
  struct boundary_tag* left = tag->split_left;

  left->real_size += tag->real_size;
  left->split_right = tag->split_right;

  if (tag->split_right != NULL)
    tag->split_right->split_left = left;

  return left;
}

static inline struct boundary_tag* absorb_right (struct boundary_tag* tag) {
  struct boundary_tag* right = tag->split_right;

  remove_tag (right); // Remove right from free pages.

  tag->real_size += right->real_size;

  tag->split_right = right->split_right;
  if (right->split_right != NULL)
    right->split_right->split_left = tag;

  return tag;
}

static inline struct boundary_tag* split_tag (struct boundary_tag* tag) {
  unsigned int remainder = tag->real_size - sizeof (struct boundary_tag) - tag->size;

  struct boundary_tag* new_tag =
      (struct boundary_tag*)((uintptr_t)tag + sizeof (struct boundary_tag) + tag->size);

  new_tag->magic = LIBALLOC_MAGIC;
  new_tag->real_size = remainder;

  new_tag->next = NULL;
  new_tag->prev = NULL;

  new_tag->split_left = tag;
  new_tag->split_right = tag->split_right;

  if (new_tag->split_right != NULL)
    new_tag->split_right->split_left = new_tag;
  tag->split_right = new_tag;

  tag->real_size -= new_tag->real_size;

  insert_tag (new_tag, -1);

  return new_tag;
}

// ***************************************************************

static struct boundary_tag* allocate_new_tag (unsigned int size) {
  unsigned int pages;
  unsigned int usage;
  struct boundary_tag* tag;

  // This is how much space is required.
  usage = size + sizeof (struct boundary_tag);

  // Perfect amount of space
  pages = usage / l_pageSize;
  if ((usage % l_pageSize) != 0)
    pages += 1;

  // Make sure it's >= the minimum size.
  if (pages < (unsigned int)l_pageCount)
    pages = l_pageCount;

  tag = (struct boundary_tag*)liballoc_alloc (pages);

  if (tag == NULL)
    return NULL; // uh oh, we ran out of memory.

  tag->magic = LIBALLOC_MAGIC;
  tag->size = size;
  tag->real_size = pages * l_pageSize;
  tag->index = -1;

  tag->next = NULL;
  tag->prev = NULL;
  tag->split_left = NULL;
  tag->split_right = NULL;

  return tag;
}

void* malloc (size_t size) {
  size = align_up (size, 16);
  uint64_t fla;

  int index;
  void* ptr;
  struct boundary_tag* tag = NULL;

  liballoc_lock (&fla);

  if (l_initialized == 0) {
    for (index = 0; index < MAXEXP; index++) {
      l_freePages[index] = NULL;
      l_completePages[index] = 0;
    }
    l_initialized = 1;
  }

  index = getexp (size) + MODE;
  if (index < MINEXP)
    index = MINEXP;

  // Find one big enough.
  tag = l_freePages[index]; // Start at the front of the list.
  while (tag != NULL) {
    // If there's enough space in this tag.
    if ((tag->real_size - sizeof (struct boundary_tag)) >= (size + sizeof (struct boundary_tag))) {
      break;
    }

    tag = tag->next;
  }

  // No page found. Make one.
  if (tag == NULL) {
    if ((tag = allocate_new_tag (size)) == NULL) {
      liballoc_unlock (fla);
      return NULL;
    }

    index = getexp (tag->real_size - sizeof (struct boundary_tag));
  } else {
    remove_tag (tag);

    if ((tag->split_left == NULL) && (tag->split_right == NULL))
      l_completePages[index] -= 1;
  }

  // We have a free page. Remove it from the free pages list.

  tag->size = size;

  // Removed... see if we can re-use the excess space.

  unsigned int remainder =
      tag->real_size - size - sizeof (struct boundary_tag) * 2; // Support a new tag + remainder

  if (((int)(remainder) > 0) /*&& ( (tag->real_size - remainder) >= (1<<MINEXP))*/) {
    int childIndex = getexp (remainder);

    if (childIndex >= 0) {
      struct boundary_tag* new_tag = split_tag (tag);

      (void)new_tag;
    }
  }

  ptr = (void*)((uintptr_t)tag + sizeof (struct boundary_tag));

  liballoc_unlock (fla);
  return ptr;
}

void free (void* ptr) {
  int index;
  struct boundary_tag* tag;
  uint64_t flf;

  if (ptr == NULL)
    return;

  liballoc_lock (&flf);

  tag = (struct boundary_tag*)((uintptr_t)ptr - sizeof (struct boundary_tag));

  if (tag->magic != LIBALLOC_MAGIC) {
    liballoc_unlock (flf); // release the lock
    return;
  }

  // MELT LEFT...
  while ((tag->split_left != NULL) && (tag->split_left->index >= 0)) {
    tag = melt_left (tag);
    remove_tag (tag);
  }

  // MELT RIGHT...
  while ((tag->split_right != NULL) && (tag->split_right->index >= 0)) {
    tag = absorb_right (tag);
  }

  // Where is it going back to?
  index = getexp (tag->real_size - sizeof (struct boundary_tag));
  if (index < MINEXP)
    index = MINEXP;

  // A whole, empty block?
  if ((tag->split_left == NULL) && (tag->split_right == NULL)) {
    if (l_completePages[index] == MAXCOMPLETE) {
      // Too many standing by to keep. Free this one.
      unsigned int pages = tag->real_size / l_pageSize;

      if ((tag->real_size % l_pageSize) != 0)
        pages += 1;
      if (pages < (unsigned int)l_pageCount)
        pages = l_pageCount;

      liballoc_free (tag, pages);

      liballoc_unlock (flf);
      return;
    }

    l_completePages[index] += 1; // Increase the count of complete pages.
  }

  // ..........

  insert_tag (tag, index);

  liballoc_unlock (flf);
}

void* calloc (size_t nobj, size_t size) {
  int real_size;
  void* p;

  real_size = nobj * size;

  p = malloc (real_size);

  liballoc_memset (p, 0, real_size);

  return p;
}

void* realloc (void* p, size_t size) {
  void* ptr;
  struct boundary_tag* tag;
  int real_size;
  uint64_t flr;

  if (size == 0) {
    free (p);
    return NULL;
  }
  if (p == NULL)
    return malloc (size);

  if (&liballoc_lock != NULL)
    liballoc_lock (&flr); // lockit
  tag = (struct boundary_tag*)((uintptr_t)p - sizeof (struct boundary_tag));
  real_size = tag->size;
  if (&liballoc_unlock != NULL)
    liballoc_unlock (flr);

  if ((size_t)real_size > size)
    real_size = size;

  ptr = malloc (size);
  liballoc_memcpy (ptr, p, real_size);
  free (p);

  return ptr;
}
89 kernel/mm/liballoc.h (deleted)
@@ -1,89 +0,0 @@
#ifndef _LIBALLOC_H
#define _LIBALLOC_H

#include <aux/compiler.h>
#include <libk/std.h>

// If we are told to not define our own size_t, then we
// skip the define.
#ifndef _ALLOC_SKIP_DEFINE

#ifndef _HAVE_SIZE_T
#define _HAVE_SIZE_T
typedef unsigned int size_t;
#endif

#ifndef NULL
#define NULL 0
#endif

#endif

#ifdef __cplusplus
extern "C" {
#endif

/** This is a boundary tag which is prepended to the
 * page or section of a page which we have allocated. It is
 * used to identify valid memory blocks that the
 * application is trying to free.
 */
struct boundary_tag {
  unsigned int magic;     //< It's a kind of ...
  unsigned int size;      //< Requested size.
  unsigned int real_size; //< Actual size.
  int index;              //< Location in the page table.

  struct boundary_tag* split_left;  //< Linked-list info for broken pages.
  struct boundary_tag* split_right; //< The same.

  struct boundary_tag* next; //< Linked list info.
  struct boundary_tag* prev; //< Linked list info.
} ALIGNED (16);

/** This function is supposed to lock the memory data structures. It
 * could be as simple as disabling interrupts or acquiring a spinlock.
 * It's up to you to decide.
 *
 * \return 0 if the lock was acquired successfully. Anything else is
 * failure.
 */
extern int liballoc_lock (uint64_t* flags);

/** This function unlocks what was previously locked by the liballoc_lock
 * function. If it disabled interrupts, it enables interrupts. If it
 * had acquired a spinlock, it releases the spinlock. etc.
 *
 * \return 0 if the lock was successfully released.
 */
extern int liballoc_unlock (uint64_t flags);

/** This is the hook into the local system which allocates pages. It
 * accepts an integer parameter which is the number of pages
 * required. The page size was set up in the liballoc_init function.
 *
 * \return NULL if the pages were not allocated.
 * \return A pointer to the allocated memory.
 */
extern void* liballoc_alloc (int);

/** This frees previously allocated memory. The void* parameter passed
 * to the function is the exact same value returned from a previous
 * liballoc_alloc call.
 *
 * The integer value is the number of pages to free.
 *
 * \return 0 if the memory was successfully freed.
 */
extern int liballoc_free (void*, int);

void* malloc (size_t);         //< The standard function.
void* realloc (void*, size_t); //< The standard function.
void* calloc (size_t, size_t); //< The standard function.
void free (void*);             //< The standard function.

#ifdef __cplusplus
}
#endif

#endif
6295 kernel/mm/malloc.c Normal file
File diff suppressed because it is too large
637 kernel/mm/malloc.h Normal file
@@ -0,0 +1,637 @@
|
||||
/*
|
||||
Copyright 2023 Doug Lea
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
Default header file for malloc-2.8.x
|
||||
Re-licensed 25 Sep 2023 with MIT-0 replacing obsolete CC0
|
||||
See https://opensource.org/license/mit-0/
|
||||
|
||||
This header is for ANSI C/C++ only. You can set any of
|
||||
the following #defines before including:
|
||||
|
||||
* If USE_DL_PREFIX is defined, it is assumed that malloc.c
|
||||
was also compiled with this option, so all routines
|
||||
have names starting with "dl".
|
||||
|
||||
* If HAVE_USR_INCLUDE_MALLOC_H is defined, it is assumed that this
|
||||
file will be #included AFTER <malloc.h>. This is needed only if
|
||||
your system defines a struct mallinfo that is incompatible with the
|
||||
standard one declared here. Otherwise, you can include this file
|
||||
INSTEAD of your system system <malloc.h>. At least on ANSI, all
|
||||
declarations should be compatible with system versions
|
||||
|
||||
* If MSPACES is defined, declarations for mspace versions are included.
|
||||
*/
|
||||
|
||||
#ifndef MALLOC_280_H
|
||||
#define MALLOC_280_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include <stddef.h> /* for size_t */
|
||||
|
||||
#ifndef ONLY_MSPACES
|
||||
#define ONLY_MSPACES 0 /* define to a value */
|
||||
#elif ONLY_MSPACES != 0
|
||||
#define ONLY_MSPACES 1
|
||||
#endif /* ONLY_MSPACES */
|
||||
#ifndef NO_MALLINFO
|
||||
#define NO_MALLINFO 0
|
||||
#endif /* NO_MALLINFO */
|
||||
|
||||
#ifndef MSPACES
|
||||
#if ONLY_MSPACES
|
||||
#define MSPACES 1
|
||||
#else /* ONLY_MSPACES */
|
||||
#define MSPACES 0
|
||||
#endif /* ONLY_MSPACES */
|
||||
#endif /* MSPACES */
|
||||
|
||||
#if !ONLY_MSPACES
|
||||
|
||||
#ifndef USE_DL_PREFIX
|
||||
#define dlcalloc calloc
|
||||
#define dlfree free
|
||||
#define dlmalloc malloc
|
||||
#define dlmemalign memalign
|
||||
#define dlposix_memalign posix_memalign
|
||||
#define dlrealloc realloc
|
||||
#define dlvalloc valloc
|
||||
#define dlpvalloc pvalloc
|
||||
#define dlmallinfo mallinfo
|
||||
#define dlmallopt mallopt
|
||||
#define dlmalloc_trim malloc_trim
|
||||
#define dlmalloc_stats malloc_stats
|
||||
#define dlmalloc_usable_size malloc_usable_size
|
||||
#define dlmalloc_footprint malloc_footprint
|
||||
#define dlmalloc_max_footprint malloc_max_footprint
|
||||
#define dlmalloc_footprint_limit malloc_footprint_limit
|
||||
#define dlmalloc_set_footprint_limit malloc_set_footprint_limit
|
||||
#define dlmalloc_inspect_all malloc_inspect_all
|
||||
#define dlindependent_calloc independent_calloc
|
||||
#define dlindependent_comalloc independent_comalloc
|
||||
#define dlbulk_free bulk_free
|
||||
#endif /* USE_DL_PREFIX */
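/*
  Porting note (an assumption about this kernel build, not part of the
  upstream header): as long as USE_DL_PREFIX is left undefined, the macros
  above strip the "dl" prefix, so the allocator compiled from malloc.c
  exports the ordinary names and existing kernel code keeps calling them
  directly:

    void* p = malloc (128);   // resolves to dlmalloc
    p = realloc (p, 256);     // resolves to dlrealloc
    free (p);                 // resolves to dlfree
*/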
|
||||
|
||||
#if !NO_MALLINFO
|
||||
#ifndef HAVE_USR_INCLUDE_MALLOC_H
|
||||
#ifndef _MALLOC_H
|
||||
#ifndef MALLINFO_FIELD_TYPE
|
||||
#define MALLINFO_FIELD_TYPE size_t
|
||||
#endif /* MALLINFO_FIELD_TYPE */
|
||||
#ifndef STRUCT_MALLINFO_DECLARED
|
||||
#define STRUCT_MALLINFO_DECLARED 1
|
||||
struct mallinfo {
|
||||
MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */
|
||||
MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */
|
||||
MALLINFO_FIELD_TYPE smblks; /* always 0 */
|
||||
MALLINFO_FIELD_TYPE hblks; /* always 0 */
|
||||
MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */
|
||||
MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */
|
||||
MALLINFO_FIELD_TYPE fsmblks; /* always 0 */
|
||||
MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
|
||||
MALLINFO_FIELD_TYPE fordblks; /* total free space */
|
||||
MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
|
||||
};
|
||||
#endif /* STRUCT_MALLINFO_DECLARED */
|
||||
#endif /* _MALLOC_H */
|
||||
#endif /* HAVE_USR_INCLUDE_MALLOC_H */
|
||||
#endif /* !NO_MALLINFO */
|
||||
|
||||
/*
|
||||
malloc(size_t n)
|
||||
Returns a pointer to a newly allocated chunk of at least n bytes, or
|
||||
null if no space is available, in which case errno is set to ENOMEM
|
||||
on ANSI C systems.
|
||||
|
||||
If n is zero, malloc returns a minimum-sized chunk. (The minimum
|
||||
size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
|
||||
systems.) Note that size_t is an unsigned type, so calls with
|
||||
arguments that would be negative if signed are interpreted as
|
||||
requests for huge amounts of space, which will often fail. The
|
||||
maximum supported value of n differs across systems, but is in all
|
||||
cases less than the maximum representable value of a size_t.
|
||||
*/
|
||||
void* dlmalloc(size_t);
|
||||
|
||||
/*
|
||||
free(void* p)
|
||||
Releases the chunk of memory pointed to by p, that had been previously
|
||||
allocated using malloc or a related routine such as realloc.
|
||||
It has no effect if p is null. If p was not malloced or already
|
||||
freed, free(p) will by default cuase the current program to abort.
|
||||
*/
|
||||
void dlfree(void*);
|
||||
|
||||
/*
|
||||
calloc(size_t n_elements, size_t element_size);
|
||||
Returns a pointer to n_elements * element_size bytes, with all locations
|
||||
set to zero.
|
||||
*/
|
||||
void* dlcalloc(size_t, size_t);
|
||||
|
||||
/*
|
||||
realloc(void* p, size_t n)
|
||||
Returns a pointer to a chunk of size n that contains the same data
|
||||
as does chunk p up to the minimum of (n, p's size) bytes, or null
|
||||
if no space is available.
|
||||
|
||||
The returned pointer may or may not be the same as p. The algorithm
|
||||
prefers extending p in most cases when possible, otherwise it
|
||||
employs the equivalent of a malloc-copy-free sequence.
|
||||
|
||||
If p is null, realloc is equivalent to malloc.
|
||||
|
||||
If space is not available, realloc returns null, errno is set (if on
|
||||
ANSI) and p is NOT freed.
|
||||
|
||||
if n is for fewer bytes than already held by p, the newly unused
|
||||
space is lopped off and freed if possible. realloc with a size
|
||||
argument of zero (re)allocates a minimum-sized chunk.
|
||||
|
||||
The old unix realloc convention of allowing the last-free'd chunk
|
||||
to be used as an argument to realloc is not supported.
|
||||
*/
|
||||
void* dlrealloc(void*, size_t);
|
||||
|
||||
/*
|
||||
realloc_in_place(void* p, size_t n)
|
||||
Resizes the space allocated for p to size n, only if this can be
|
||||
done without moving p (i.e., only if there is adjacent space
|
||||
available if n is greater than p's current allocated size, or n is
|
||||
less than or equal to p's size). This may be used instead of plain
|
||||
realloc if an alternative allocation strategy is needed upon failure
|
||||
to expand space; for example, reallocation of a buffer that must be
|
||||
memory-aligned or cleared. You can use realloc_in_place to trigger
|
||||
these alternatives only when needed.
|
||||
|
||||
Returns p if successful; otherwise null.
|
||||
*/
|
||||
void* dlrealloc_in_place(void*, size_t);
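/*
  Illustrative sketch (not part of the upstream documentation): grow a
  buffer in place when adjacent space allows it, and fall back to an
  ordinary realloc otherwise.

    void* grow (void* p, size_t new_size) {
      if (dlrealloc_in_place (p, new_size) != 0)
        return p;                       // extended without moving
      return dlrealloc (p, new_size);   // may move, or return 0 on failure
    }
*/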
|
||||
|
||||
/*
|
||||
memalign(size_t alignment, size_t n);
|
||||
Returns a pointer to a newly allocated chunk of n bytes, aligned
|
||||
in accord with the alignment argument.
|
||||
|
||||
The alignment argument should be a power of two. If the argument is
|
||||
not a power of two, the nearest greater power is used.
|
||||
8-byte alignment is guaranteed by normal malloc calls, so don't
|
||||
bother calling memalign with an argument of 8 or less.
|
||||
|
||||
Overreliance on memalign is a sure way to fragment space.
|
||||
*/
|
||||
void* dlmemalign(size_t, size_t);
|
||||
|
||||
/*
|
||||
int posix_memalign(void** pp, size_t alignment, size_t n);
|
||||
Allocates a chunk of n bytes, aligned in accord with the alignment
|
||||
argument. Differs from memalign only in that it (1) assigns the
|
||||
allocated memory to *pp rather than returning it, (2) fails and
|
||||
returns EINVAL if the alignment is not a power of two (3) fails and
|
||||
returns ENOMEM if memory cannot be allocated.
|
||||
*/
|
||||
int dlposix_memalign(void**, size_t, size_t);
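/*
  Illustrative sketch: request a 4096-byte-aligned block and check the
  error-code style return (0 on success, EINVAL or ENOMEM on failure).

    void* buf;
    if (dlposix_memalign (&buf, 4096, 16384) != 0)
      buf = 0;   // alignment not a power of two, or out of memory
*/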
|
||||
|
||||
/*
|
||||
valloc(size_t n);
|
||||
Equivalent to memalign(pagesize, n), where pagesize is the page
|
||||
size of the system. If the pagesize is unknown, 4096 is used.
|
||||
*/
|
||||
void* dlvalloc(size_t);
|
||||
|
||||
/*
|
||||
mallopt(int parameter_number, int parameter_value)
|
||||
Sets tunable parameters The format is to provide a
|
||||
(parameter-number, parameter-value) pair. mallopt then sets the
|
||||
corresponding parameter to the argument value if it can (i.e., so
|
||||
long as the value is meaningful), and returns 1 if successful else
|
||||
0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
|
||||
normally defined in malloc.h. None of these are use in this malloc,
|
||||
so setting them has no effect. But this malloc also supports other
|
||||
options in mallopt:
|
||||
|
||||
Symbol param # default allowed param values
|
||||
M_TRIM_THRESHOLD -1 2*1024*1024 any (-1U disables trimming)
|
||||
M_GRANULARITY -2 page size any power of 2 >= page size
|
||||
M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
|
||||
*/
|
||||
int dlmallopt(int, int);
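/*
  Illustrative sketch: raise the allocation granularity to 64 KiB and
  disable trimming, checking the 1/0 success return described above.

    if (dlmallopt (M_GRANULARITY, 64 * 1024) == 0)
      ;                                  // rejected: must be a power of two >= page size
    dlmallopt (M_TRIM_THRESHOLD, -1);    // -1 disables trimming
*/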
|
||||
|
||||
#define M_TRIM_THRESHOLD (-1)
|
||||
#define M_GRANULARITY (-2)
|
||||
#define M_MMAP_THRESHOLD (-3)
|
||||
|
||||
|
||||
/*
|
||||
malloc_footprint();
|
||||
Returns the number of bytes obtained from the system. The total
|
||||
number of bytes allocated by malloc, realloc etc., is less than this
|
||||
value. Unlike mallinfo, this function returns only a precomputed
|
||||
result, so can be called frequently to monitor memory consumption.
|
||||
Even if locks are otherwise defined, this function does not use them,
|
||||
so results might not be up to date.
|
||||
*/
|
||||
size_t dlmalloc_footprint(void);
|
||||
|
||||
/*
|
||||
malloc_max_footprint();
|
||||
Returns the maximum number of bytes obtained from the system. This
|
||||
value will be greater than current footprint if deallocated space
|
||||
has been reclaimed by the system. The peak number of bytes allocated
|
||||
by malloc, realloc etc., is less than this value. Unlike mallinfo,
|
||||
this function returns only a precomputed result, so can be called
|
||||
frequently to monitor memory consumption. Even if locks are
|
||||
otherwise defined, this function does not use them, so results might
|
||||
not be up to date.
|
||||
*/
|
||||
size_t dlmalloc_max_footprint(void);
|
||||
|
||||
/*
|
||||
malloc_footprint_limit();
|
||||
Returns the number of bytes that the heap is allowed to obtain from
|
||||
the system, returning the last value returned by
|
||||
malloc_set_footprint_limit, or the maximum size_t value if
|
||||
never set. The returned value reflects a permission. There is no
|
||||
guarantee that this number of bytes can actually be obtained from
|
||||
the system.
|
||||
*/
|
||||
size_t dlmalloc_footprint_limit(void);
|
||||
|
||||
/*
|
||||
malloc_set_footprint_limit();
|
||||
Sets the maximum number of bytes to obtain from the system, causing
|
||||
failure returns from malloc and related functions upon attempts to
|
||||
exceed this value. The argument value may be subject to page
|
||||
rounding to an enforceable limit; this actual value is returned.
|
||||
Using an argument of the maximum possible size_t effectively
|
||||
disables checks. If the argument is less than or equal to the
|
||||
current malloc_footprint, then all future allocations that require
|
||||
additional system memory will fail. However, invocation cannot
|
||||
retroactively deallocate existing used memory.
|
||||
*/
|
||||
size_t dlmalloc_set_footprint_limit(size_t bytes);
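/*
  Illustrative sketch: cap how much memory the allocator may obtain from
  the system (useful in a kernel, where the heap competes with the rest of
  physical memory), then read back the enforced, page-rounded value.

    size_t enforced = dlmalloc_set_footprint_limit (16 * 1024 * 1024);
    if (dlmalloc_footprint () > enforced)
      ;   // already above the cap: future growth fails, existing memory stays
*/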
|
||||
|
||||
/*
|
||||
malloc_inspect_all(void(*handler)(void *start,
|
||||
void *end,
|
||||
size_t used_bytes,
|
||||
void* callback_arg),
|
||||
void* arg);
|
||||
Traverses the heap and calls the given handler for each managed
|
||||
region, skipping all bytes that are (or may be) used for bookkeeping
|
||||
purposes. Traversal does not include include chunks that have been
|
||||
directly memory mapped. Each reported region begins at the start
|
||||
address, and continues up to but not including the end address. The
|
||||
first used_bytes of the region contain allocated data. If
|
||||
used_bytes is zero, the region is unallocated. The handler is
|
||||
invoked with the given callback argument. If locks are defined, they
|
||||
are held during the entire traversal. It is a bad idea to invoke
|
||||
other malloc functions from within the handler.
|
||||
|
||||
For example, to count the number of in-use chunks with size greater
|
||||
than 1000, you could write:
|
||||
static int count = 0;
|
||||
void count_chunks(void* start, void* end, size_t used, void* arg) {
|
||||
if (used >= 1000) ++count;
|
||||
}
|
||||
then:
|
||||
malloc_inspect_all(count_chunks, NULL);
|
||||
|
||||
malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined.
|
||||
*/
|
||||
void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*),
|
||||
void* arg);
|
||||
|
||||
#if !NO_MALLINFO
|
||||
/*
|
||||
mallinfo()
|
||||
Returns (by copy) a struct containing various summary statistics:
|
||||
|
||||
arena: current total non-mmapped bytes allocated from system
|
||||
ordblks: the number of free chunks
|
||||
smblks: always zero.
|
||||
hblks: current number of mmapped regions
|
||||
hblkhd: total bytes held in mmapped regions
|
||||
usmblks: the maximum total allocated space. This will be greater
|
||||
than current total if trimming has occurred.
|
||||
fsmblks: always zero
|
||||
uordblks: current total allocated space (normal or mmapped)
|
||||
fordblks: total free space
|
||||
keepcost: the maximum number of bytes that could ideally be released
|
||||
back to system via malloc_trim. ("ideally" means that
|
||||
it ignores page restrictions etc.)
|
||||
|
||||
Because these fields are ints, but internal bookkeeping may
|
||||
be kept as longs, the reported values may wrap around zero and
|
||||
thus be inaccurate.
|
||||
*/
|
||||
|
||||
struct mallinfo dlmallinfo(void);
|
||||
#endif /* NO_MALLINFO */
|
||||
|
||||
/*
|
||||
independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
|
||||
|
||||
independent_calloc is similar to calloc, but instead of returning a
|
||||
single cleared space, it returns an array of pointers to n_elements
|
||||
independent elements that can hold contents of size elem_size, each
|
||||
of which starts out cleared, and can be independently freed,
|
||||
realloc'ed etc. The elements are guaranteed to be adjacently
|
||||
allocated (this is not guaranteed to occur with multiple callocs or
|
||||
mallocs), which may also improve cache locality in some
|
||||
applications.
|
||||
|
||||
The "chunks" argument is optional (i.e., may be null, which is
|
||||
probably the most typical usage). If it is null, the returned array
|
||||
is itself dynamically allocated and should also be freed when it is
|
||||
no longer needed. Otherwise, the chunks array must be of at least
|
||||
n_elements in length. It is filled in with the pointers to the
|
||||
chunks.
|
||||
|
||||
In either case, independent_calloc returns this pointer array, or
|
||||
null if the allocation failed. If n_elements is zero and "chunks"
|
||||
is null, it returns a chunk representing an array with zero elements
|
||||
(which should be freed if not wanted).
|
||||
|
||||
Each element must be freed when it is no longer needed. This can be
|
||||
done all at once using bulk_free.
|
||||
|
||||
independent_calloc simplifies and speeds up implementations of many
|
||||
kinds of pools. It may also be useful when constructing large data
|
||||
structures that initially have a fixed number of fixed-sized nodes,
|
||||
but the number is not known at compile time, and some of the nodes
|
||||
may later need to be freed. For example:
|
||||
|
||||
struct Node { int item; struct Node* next; };
|
||||
|
||||
struct Node* build_list() {
|
||||
struct Node** pool;
|
||||
int n = read_number_of_nodes_needed();
|
||||
if (n <= 0) return 0;
|
||||
pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0);
|
||||
if (pool == 0) die();
|
||||
// organize into a linked list...
|
||||
struct Node* first = pool[0];
|
||||
for (i = 0; i < n-1; ++i)
|
||||
pool[i]->next = pool[i+1];
|
||||
free(pool); // Can now free the array (or not, if it is needed later)
|
||||
return first;
|
||||
}
|
||||
*/
|
||||
void** dlindependent_calloc(size_t, size_t, void**);
|
||||
|
||||
/*
|
||||
independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
|
||||
|
||||
independent_comalloc allocates, all at once, a set of n_elements
|
||||
chunks with sizes indicated in the "sizes" array. It returns
|
||||
an array of pointers to these elements, each of which can be
|
||||
independently freed, realloc'ed etc. The elements are guaranteed to
|
||||
be adjacently allocated (this is not guaranteed to occur with
|
||||
multiple callocs or mallocs), which may also improve cache locality
|
||||
in some applications.
|
||||
|
||||
The "chunks" argument is optional (i.e., may be null). If it is null
|
||||
the returned array is itself dynamically allocated and should also
|
||||
be freed when it is no longer needed. Otherwise, the chunks array
|
||||
must be of at least n_elements in length. It is filled in with the
|
||||
pointers to the chunks.
|
||||
|
||||
In either case, independent_comalloc returns this pointer array, or
|
||||
null if the allocation failed. If n_elements is zero and chunks is
|
||||
null, it returns a chunk representing an array with zero elements
|
||||
(which should be freed if not wanted).
|
||||
|
||||
Each element must be freed when it is no longer needed. This can be
|
||||
done all at once using bulk_free.
|
||||
|
||||
independent_comallac differs from independent_calloc in that each
|
||||
element may have a different size, and also that it does not
|
||||
automatically clear elements.
|
||||
|
||||
independent_comalloc can be used to speed up allocation in cases
|
||||
where several structs or objects must always be allocated at the
|
||||
same time. For example:
|
||||
|
||||
struct Head { ... }
|
||||
struct Foot { ... }
|
||||
|
||||
void send_message(char* msg) {
|
||||
int msglen = strlen(msg);
|
||||
size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
|
||||
void* chunks[3];
|
||||
if (independent_comalloc(3, sizes, chunks) == 0)
|
||||
die();
|
||||
struct Head* head = (struct Head*)(chunks[0]);
|
||||
char* body = (char*)(chunks[1]);
|
||||
struct Foot* foot = (struct Foot*)(chunks[2]);
|
||||
// ...
|
||||
}
|
||||
|
||||
In general though, independent_comalloc is worth using only for
|
||||
larger values of n_elements. For small values, you probably won't
|
||||
detect enough difference from series of malloc calls to bother.
|
||||
|
||||
Overuse of independent_comalloc can increase overall memory usage,
|
||||
since it cannot reuse existing noncontiguous small chunks that
|
||||
might be available for some of the elements.
|
||||
*/
|
||||
void** dlindependent_comalloc(size_t, size_t*, void**);
|
||||
|
||||
/*
|
||||
bulk_free(void* array[], size_t n_elements)
|
||||
Frees and clears (sets to null) each non-null pointer in the given
|
||||
array. This is likely to be faster than freeing them one-by-one.
|
||||
If footers are used, pointers that have been allocated in different
|
||||
mspaces are not freed or cleared, and the count of all such pointers
|
||||
is returned. For large arrays of pointers with poor locality, it
|
||||
may be worthwhile to sort this array before calling bulk_free.
|
||||
*/
|
||||
size_t dlbulk_free(void**, size_t n_elements);
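/*
  Illustrative sketch: release a batch of allocations in one call; freed
  slots are set to null and the return value counts pointers that could
  not be freed.

    void* ptrs[3];
    ptrs[0] = dlmalloc (32);
    ptrs[1] = dlmalloc (64);
    ptrs[2] = 0;                        // null entries are skipped
    size_t not_freed = dlbulk_free (ptrs, 3);
*/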
|
||||
|
||||
/*
|
||||
pvalloc(size_t n);
|
||||
Equivalent to valloc(minimum-page-that-holds(n)), that is,
|
||||
round up n to nearest pagesize.
|
||||
*/
|
||||
void* dlpvalloc(size_t);
|
||||
|
||||
/*
|
||||
malloc_trim(size_t pad);
|
||||
|
||||
If possible, gives memory back to the system (via negative arguments
|
||||
to sbrk) if there is unused memory at the `high' end of the malloc
|
||||
pool or in unused MMAP segments. You can call this after freeing
|
||||
large blocks of memory to potentially reduce the system-level memory
|
||||
requirements of a program. However, it cannot guarantee to reduce
|
||||
memory. Under some allocation patterns, some large free blocks of
|
||||
memory will be locked between two used chunks, so they cannot be
|
||||
given back to the system.
|
||||
|
||||
The `pad' argument to malloc_trim represents the amount of free
|
||||
trailing space to leave untrimmed. If this argument is zero, only
|
||||
the minimum amount of memory to maintain internal data structures
|
||||
will be left. Non-zero arguments can be supplied to maintain enough
|
||||
trailing space to service future expected allocations without having
|
||||
to re-obtain memory from the system.
|
||||
|
||||
Malloc_trim returns 1 if it actually released any memory, else 0.
|
||||
*/
|
||||
int dlmalloc_trim(size_t);
|
||||
|
||||
/*
|
||||
malloc_stats();
|
||||
Prints on stderr the amount of space obtained from the system (both
|
||||
via sbrk and mmap), the maximum amount (which may be more than
|
||||
current if malloc_trim and/or munmap got called), and the current
|
||||
number of bytes allocated via malloc (or realloc, etc) but not yet
|
||||
freed. Note that this is the number of bytes allocated, not the
|
||||
number requested. It will be larger than the number requested
|
||||
because of alignment and bookkeeping overhead. Because it includes
|
||||
alignment wastage as being in use, this figure may be greater than
|
||||
zero even when no user-level chunks are allocated.
|
||||
|
||||
The reported current and maximum system memory can be inaccurate if
|
||||
a program makes other calls to system memory allocation functions
|
||||
(normally sbrk) outside of malloc.
|
||||
|
||||
malloc_stats prints only the most commonly interesting statistics.
|
||||
More information can be obtained by calling mallinfo.
|
||||
|
||||
malloc_stats is not compiled if NO_MALLOC_STATS is defined.
|
||||
*/
|
||||
void dlmalloc_stats(void);
|
||||
|
||||
#endif /* !ONLY_MSPACES */
|
||||
|
||||
/*
|
||||
malloc_usable_size(void* p);
|
||||
|
||||
Returns the number of bytes you can actually use in
|
||||
an allocated chunk, which may be more than you requested (although
|
||||
often not) due to alignment and minimum size constraints.
|
||||
You can use this many bytes without worrying about
|
||||
overwriting other allocated objects. This is not a particularly great
|
||||
programming practice. malloc_usable_size can be more useful in
|
||||
debugging and assertions, for example:
|
||||
|
||||
p = malloc(n);
|
||||
assert(malloc_usable_size(p) >= 256);
|
||||
*/
|
||||
size_t dlmalloc_usable_size(const void*);
|
||||
|
||||
#if MSPACES
|
||||
|
||||
/*
|
||||
mspace is an opaque type representing an independent
|
||||
region of space that supports mspace_malloc, etc.
|
||||
*/
|
||||
typedef void* mspace;
|
||||
|
||||
/*
|
||||
create_mspace creates and returns a new independent space with the
|
||||
given initial capacity, or, if 0, the default granularity size. It
|
||||
returns null if there is no system memory available to create the
|
||||
space. If argument locked is non-zero, the space uses a separate
|
||||
lock to control access. The capacity of the space will grow
|
||||
dynamically as needed to service mspace_malloc requests. You can
|
||||
control the sizes of incremental increases of this space by
|
||||
compiling with a different DEFAULT_GRANULARITY or dynamically
|
||||
setting with mallopt(M_GRANULARITY, value).
|
||||
*/
|
||||
mspace create_mspace(size_t capacity, int locked);
|
||||
|
||||
/*
|
||||
destroy_mspace destroys the given space, and attempts to return all
|
||||
of its memory back to the system, returning the total number of
|
||||
bytes freed. After destruction, the results of access to all memory
|
||||
used by the space become undefined.
|
||||
*/
|
||||
size_t destroy_mspace(mspace msp);
|
||||
|
||||
/*
|
||||
create_mspace_with_base uses the memory supplied as the initial base
|
||||
of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
|
||||
space is used for bookkeeping, so the capacity must be at least this
|
||||
large. (Otherwise 0 is returned.) When this initial space is
|
||||
exhausted, additional memory will be obtained from the system.
|
||||
Destroying this space will deallocate all additionally allocated
|
||||
space (if possible) but not the initial base.
|
||||
*/
|
||||
mspace create_mspace_with_base(void* base, size_t capacity, int locked);
|
||||
|
||||
/*
|
||||
mspace_track_large_chunks controls whether requests for large chunks
|
||||
are allocated in their own untracked mmapped regions, separate from
|
||||
others in this mspace. By default large chunks are not tracked,
|
||||
which reduces fragmentation. However, such chunks are not
|
||||
necessarily released to the system upon destroy_mspace. Enabling
|
||||
tracking by setting to true may increase fragmentation, but avoids
|
||||
leakage when relying on destroy_mspace to release all memory
|
||||
allocated using this space. The function returns the previous
|
||||
setting.
|
||||
*/
|
||||
int mspace_track_large_chunks(mspace msp, int enable);
|
||||
|
||||
#if !NO_MALLINFO
|
||||
/*
|
||||
mspace_mallinfo behaves as mallinfo, but reports properties of
|
||||
the given space.
|
||||
*/
|
||||
struct mallinfo mspace_mallinfo(mspace msp);
|
||||
#endif /* NO_MALLINFO */
|
||||
|
||||
/*
|
||||
An alias for mallopt.
|
||||
*/
|
||||
int mspace_mallopt(int, int);
|
||||
|
||||
/*
|
||||
The following operate identically to their malloc counterparts
|
||||
but operate only for the given mspace argument
|
||||
*/
|
||||
void* mspace_malloc(mspace msp, size_t bytes);
|
||||
void mspace_free(mspace msp, void* mem);
|
||||
void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
|
||||
void* mspace_realloc(mspace msp, void* mem, size_t newsize);
|
||||
void* mspace_realloc_in_place(mspace msp, void* mem, size_t newsize);
|
||||
void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
|
||||
void** mspace_independent_calloc(mspace msp, size_t n_elements,
|
||||
size_t elem_size, void* chunks[]);
|
||||
void** mspace_independent_comalloc(mspace msp, size_t n_elements,
|
||||
size_t sizes[], void* chunks[]);
|
||||
size_t mspace_bulk_free(mspace msp, void**, size_t n_elements);
|
||||
size_t mspace_usable_size(const void* mem);
|
||||
void mspace_malloc_stats(mspace msp);
|
||||
int mspace_trim(mspace msp, size_t pad);
|
||||
size_t mspace_footprint(mspace msp);
|
||||
size_t mspace_max_footprint(mspace msp);
|
||||
size_t mspace_footprint_limit(mspace msp);
|
||||
size_t mspace_set_footprint_limit(mspace msp, size_t bytes);
|
||||
void mspace_inspect_all(mspace msp,
|
||||
void(*handler)(void *, void *, size_t, void*),
|
||||
void* arg);
|
||||
#endif /* MSPACES */
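/*
  Illustrative sketch (requires building malloc.c with MSPACES enabled,
  which the defaults above do not do): give a subsystem its own allocation
  arena backed by a caller-supplied region.

    static char arena[1 << 20];

    void mspace_demo (void) {
      mspace ms = create_mspace_with_base (arena, sizeof (arena), 0);
      if (ms == 0)
        return;                         // region too small for bookkeeping
      void* obj = mspace_malloc (ms, 256);
      mspace_free (ms, obj);
      destroy_mspace (ms);
    }
*/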
|
||||
|
||||
#ifdef __cplusplus
|
||||
}; /* end of extern "C" */
|
||||
#endif
|
||||
|
||||
#endif /* MALLOC_280_H */
|
||||
@@ -1,5 +1,5 @@
 c += mm/pmm.c \
-     mm/liballoc.c
+     mm/malloc.c

 o += mm/pmm.o \
-     mm/liballoc.o
+     mm/malloc.o