PMM and liballoc port

commit f60d8d6861
parent 13fee12f59
2025-12-17 22:42:48 +01:00
32 changed files with 1202 additions and 12 deletions

kernel/Makefile

@@ -5,11 +5,8 @@ ldflags :=
 cflags :=
 buildtype ?= release
-include $(platform)/flags.mk
-include generic/flags.mk
-include $(platform)/src.mk
-include libk/src.mk
+include flags.mk
+include src.mk
 all: build/kernel.elf

kernel/amd64/bootmain.c

@@ -1,11 +1,18 @@
 #include <limine/limine.h>
 #include <amd64/init.h>
 #include <sys/debug.h>
+#include <mm/pmm.h>
+#include <mm/liballoc.h>
 
 void bootmain(void) {
 	amd64_init();
 	DEBUG("Hello from amd64!\n");
+	pmm_init();
+	int *a = malloc(sizeof(int));
+	*a = 6969;
+	DEBUG("a=%p, *a=%d\n", a, *a);
 	for (;;);
 }

kernel/amd64/mm.h Normal file (+6)

@@ -0,0 +1,6 @@
#ifndef _KERNEL_AMD64_MM_H
#define _KERNEL_AMD64_MM_H
#define PAGE_SIZE 4096
#endif // _KERNEL_AMD64_MM_H

kernel/amd64/spin_lock.c Normal file (+5)

@@ -0,0 +1,5 @@
#include <sys/spin_lock.h>

void spin_lock_relax(void) {
	/* Hint to the CPU that we're in a busy-wait loop; saves power and helps SMT. */
	__asm__ volatile("pause");
}

kernel/amd64/src.mk

@@ -2,10 +2,12 @@ c += amd64/bootmain.c \
 	amd64/init.c \
 	amd64/tss.c \
 	amd64/io.c \
-	amd64/debug.c
+	amd64/debug.c \
+	amd64/spin_lock.c
 o += amd64/bootmain.o \
 	amd64/init.o \
 	amd64/tss.o \
 	amd64/io.o \
-	amd64/debug.o
+	amd64/debug.o \
+	amd64/spin_lock.o

kernel/c_headers/include/stdatomic.h Normal file (+95)

@@ -0,0 +1,95 @@
#ifndef _STD_ATOMIC_H
#define _STD_ATOMIC_H
#include <stdint.h>
#include <stddef.h>
// modified version of: https://github.com/llvm/llvm-project/blob/main/clang/lib/Headers/stdatomic.h
typedef enum memory_order {
memory_order_relaxed = __ATOMIC_RELAXED,
memory_order_consume = __ATOMIC_CONSUME,
memory_order_acquire = __ATOMIC_ACQUIRE,
memory_order_release = __ATOMIC_RELEASE,
memory_order_acq_rel = __ATOMIC_ACQ_REL,
memory_order_seq_cst = __ATOMIC_SEQ_CST
} memory_order;
typedef _Atomic(_Bool) atomic_bool;
typedef _Atomic(char) atomic_char;
typedef _Atomic(signed char) atomic_schar;
typedef _Atomic(unsigned char) atomic_uchar;
typedef _Atomic(short) atomic_short;
typedef _Atomic(unsigned short) atomic_ushort;
typedef _Atomic(int) atomic_int;
typedef _Atomic(unsigned int) atomic_uint;
typedef _Atomic(long) atomic_long;
typedef _Atomic(unsigned long) atomic_ulong;
typedef _Atomic(long long) atomic_llong;
typedef _Atomic(unsigned long long) atomic_ullong;
typedef _Atomic(uint_least16_t) atomic_char16_t;
typedef _Atomic(uint_least32_t) atomic_char32_t;
typedef _Atomic(wchar_t) atomic_wchar_t;
typedef _Atomic(int_least8_t) atomic_int_least8_t;
typedef _Atomic(uint_least8_t) atomic_uint_least8_t;
typedef _Atomic(int_least16_t) atomic_int_least16_t;
typedef _Atomic(uint_least16_t) atomic_uint_least16_t;
typedef _Atomic(int_least32_t) atomic_int_least32_t;
typedef _Atomic(uint_least32_t) atomic_uint_least32_t;
typedef _Atomic(int_least64_t) atomic_int_least64_t;
typedef _Atomic(uint_least64_t) atomic_uint_least64_t;
typedef _Atomic(int_fast8_t) atomic_int_fast8_t;
typedef _Atomic(uint_fast8_t) atomic_uint_fast8_t;
typedef _Atomic(int_fast16_t) atomic_int_fast16_t;
typedef _Atomic(uint_fast16_t) atomic_uint_fast16_t;
typedef _Atomic(int_fast32_t) atomic_int_fast32_t;
typedef _Atomic(uint_fast32_t) atomic_uint_fast32_t;
typedef _Atomic(int_fast64_t) atomic_int_fast64_t;
typedef _Atomic(uint_fast64_t) atomic_uint_fast64_t;
typedef _Atomic(intptr_t) atomic_intptr_t;
typedef _Atomic(uintptr_t) atomic_uintptr_t;
typedef _Atomic(size_t) atomic_size_t;
typedef _Atomic(ptrdiff_t) atomic_ptrdiff_t;
typedef _Atomic(intmax_t) atomic_intmax_t;
typedef _Atomic(uintmax_t) atomic_uintmax_t;
typedef struct atomic_flag { atomic_bool _Value; } atomic_flag;
#define ATOMIC_FLAG_INIT ((atomic_flag){ 0 })
#define atomic_store(object, desired) __c11_atomic_store(object, desired, __ATOMIC_SEQ_CST)
#define atomic_store_explicit __c11_atomic_store
#define atomic_load(object) __c11_atomic_load(object, __ATOMIC_SEQ_CST)
#define atomic_load_explicit __c11_atomic_load
#define atomic_exchange(object, desired) __c11_atomic_exchange(object, desired, __ATOMIC_SEQ_CST)
#define atomic_exchange_explicit __c11_atomic_exchange
#define atomic_compare_exchange_strong(object, expected, desired) __c11_atomic_compare_exchange_strong(object, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
#define atomic_compare_exchange_strong_explicit __c11_atomic_compare_exchange_strong
#define atomic_compare_exchange_weak(object, expected, desired) __c11_atomic_compare_exchange_weak(object, expected, desired, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
#define atomic_compare_exchange_weak_explicit __c11_atomic_compare_exchange_weak
#define atomic_fetch_add(object, operand) __c11_atomic_fetch_add(object, operand, __ATOMIC_SEQ_CST)
#define atomic_fetch_add_explicit __c11_atomic_fetch_add
#define atomic_fetch_sub(object, operand) __c11_atomic_fetch_sub(object, operand, __ATOMIC_SEQ_CST)
#define atomic_fetch_sub_explicit __c11_atomic_fetch_sub
#define atomic_fetch_or(object, operand) __c11_atomic_fetch_or(object, operand, __ATOMIC_SEQ_CST)
#define atomic_fetch_or_explicit __c11_atomic_fetch_or
#define atomic_fetch_xor(object, operand) __c11_atomic_fetch_xor(object, operand, __ATOMIC_SEQ_CST)
#define atomic_fetch_xor_explicit __c11_atomic_fetch_xor
#define atomic_fetch_and(object, operand) __c11_atomic_fetch_and(object, operand, __ATOMIC_SEQ_CST)
#define atomic_fetch_and_explicit __c11_atomic_fetch_and
#define atomic_flag_test_and_set(object) __c11_atomic_exchange(&(object)->_Value, 1, __ATOMIC_SEQ_CST)
#define atomic_flag_test_and_set_explicit(object, order) __c11_atomic_exchange(&(object)->_Value, 1, order)
#define atomic_flag_clear(object) __c11_atomic_store(&(object)->_Value, 0, __ATOMIC_SEQ_CST)
#define atomic_flag_clear_explicit(object, order) __c11_atomic_store(&(object)->_Value, 0, order)
#endif // _STD_ATOMIC_H
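
Editor's note: every non-_explicit wrapper above pins __ATOMIC_SEQ_CST. A quick usage sketch (illustration only, not part of the commit; assumes clang, since the __c11_atomic_* builtins are clang-specific):

#include <stdatomic.h>

/* Hypothetical refcount, for illustration. */
static atomic_int refcount;

void obj_get(void) {
	/* expands to __c11_atomic_fetch_add(&refcount, 1, __ATOMIC_SEQ_CST) */
	atomic_fetch_add(&refcount, 1);
}

int obj_put(void) {
	/* fetch_sub returns the previous value: 1 means the last reference dropped */
	return atomic_fetch_sub(&refcount, 1) == 1;
}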

kernel/flags.mk Normal file (+2)

@@ -0,0 +1,2 @@
include $(platform)/flags.mk
include generic/flags.mk

kernel/generic/flags.mk

@@ -10,7 +10,8 @@ cflags += -nostdinc \
 cflags += -isystem . -isystem c_headers/include
-cflags += -DPRINTF_INCLUDE_CONFIG_H=1
+cflags += -DPRINTF_INCLUDE_CONFIG_H=1 \
+	-D_ALLOC_SKIP_DEFINE
 
 ifeq ($(buildtype),debug)
 cflags += -O0 -g

kernel/libk/align.h Normal file (+8)

@@ -0,0 +1,8 @@
#ifndef _KERNEL_LIBK_ALIGN_H
#define _KERNEL_LIBK_ALIGN_H

/* Integer division, rounding up. */
#define div_align_up(x, div) (((x) + (div) - 1) / (div))
/* Align x down/up to a, where a must be a power of two. */
#define align_down(x, a) ((x) & ~((a) - 1))
#define align_up(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

#endif // _KERNEL_LIBK_ALIGN_H
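
Editor's note: a few concrete values as a sanity check (not part of the diff):

/* div_align_up(4097, 4096) == 2       two pages are needed for 4097 bytes
 * align_down(0x1234, 0x1000) == 0x1000
 * align_up(0x1234, 0x1000)   == 0x2000
 * align_up(0x1000, 0x1000)   == 0x1000 already-aligned values are unchanged
 */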

kernel/libk/bm.c Normal file (+91)

@@ -0,0 +1,91 @@
#include <libk/std.h>
#include <libk/bm.h>
#include <libk/string.h>

void bm_init(struct bm *bm, uint8_t *base, size_t nbits) {
	bm->base = base;
	bm->nbits = nbits;
	memset(bm->base, 0, (nbits + 7) / 8);
}

/*
 * Set a bit in a bitmap.
 */
void bm_set(struct bm *bm, size_t k) {
	if (k >= bm->nbits)
		return;
	uint8_t *b = (uint8_t *)((uintptr_t)bm->base + (k / 8));
	*b = ((*b) | (1 << (k % 8)));
}

/*
 * Clear a bit in a bitmap.
 */
void bm_clear(struct bm *bm, size_t k) {
	if (k >= bm->nbits)
		return;
	uint8_t *b = (uint8_t *)((uintptr_t)bm->base + (k / 8));
	*b = ((*b) & ~(1 << (k % 8)));
}

/*
 * Test (true/false) a bit in a bitmap.
 */
bool bm_test(struct bm *bm, size_t k) {
	if (k >= bm->nbits)
		return false;
	uint8_t *b = (uint8_t *)((uintptr_t)bm->base + (k / 8));
	return (*b) & (1 << (k % 8));
}
/*
 * Set a range of m bits starting at bit k. Fails if the range is out of
 * bounds or if any bit in it is already set. All callers pass m as a
 * count, so the range covered is [k, k + m).
 */
bool bm_set_region(struct bm *bm, size_t k, size_t m) {
	if ((m == 0) || (k >= bm->nbits) || (k + m > bm->nbits))
		return false;
	for (size_t i = k; i < k + m; i++) {
		if (bm_test(bm, i))
			return false;
	}
	for (size_t i = k; i < k + m; i++)
		bm_set(bm, i);
	return true;
}

/*
 * Clear a range of m bits starting at bit k. The whole range must be in bounds.
 */
void bm_clear_region(struct bm *bm, size_t k, size_t m) {
	if ((m == 0) || (k >= bm->nbits) || (k + m > bm->nbits))
		return;
	for (size_t i = k; i < k + m; i++)
		bm_clear(bm, i);
}

/*
 * Test a range of m bits starting at bit k. Return true if at least one bit
 * is set, else return false (all bits clear). For convenience, if the range
 * is out of bounds, act as if the bits are set / bitmap is full - this is
 * useful for implementing the physical memory manager algorithm.
 */
bool bm_test_region(struct bm *bm, size_t k, size_t m) {
	if ((m == 0) || (k >= bm->nbits) || (k + m > bm->nbits))
		return true;
	for (size_t i = k; i < k + m; i++) {
		if (bm_test(bm, i))
			return true;
	}
	return false;
}
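
Editor's note: a short usage sketch of the count-based region API (mine, not from the commit):

#include <libk/bm.h>

static uint8_t storage[4];	/* backing storage for 32 bits */

void bm_example(void) {
	struct bm bm;
	bm_init(&bm, storage, 32);

	bm_set_region(&bm, 4, 8);	/* mark bits 4..11 used -> true */
	bm_test(&bm, 5);		/* -> true */
	bm_test_region(&bm, 0, 4);	/* -> false: bits 0..3 are all clear */
	bm_clear_region(&bm, 4, 8);	/* release bits 4..11 again */
}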

kernel/libk/bm.h Normal file (+19)

@@ -0,0 +1,19 @@
#ifndef _KERNEL_LIBK_BM_H
#define _KERNEL_LIBK_BM_H
#include <libk/std.h>

struct bm {
	uint8_t *base;	/* backing storage, one bit per tracked object */
	size_t nbits;	/* number of valid bits */
};

void bm_init(struct bm *bm, uint8_t *base, size_t nbits);
void bm_set(struct bm *bm, size_t k);
bool bm_set_region(struct bm *bm, size_t k, size_t m);
void bm_clear(struct bm *bm, size_t k);
void bm_clear_region(struct bm *bm, size_t k, size_t m);
bool bm_test(struct bm *bm, size_t k);
bool bm_test_region(struct bm *bm, size_t k, size_t m);

#endif // _KERNEL_LIBK_BM_H

kernel/libk/src.mk

@@ -1,7 +1,9 @@
 c += libk/string.c \
 	libk/printf.c \
-	libk/putchar_.c
+	libk/putchar_.c \
+	libk/bm.c
 o += libk/string.o \
 	libk/printf.o \
-	libk/putchar_.o
+	libk/putchar_.o \
+	libk/bm.o

kernel/libk/std.h

@@ -8,5 +8,6 @@
 #include <stddef.h>
 #include <stdint.h>
 #include <stdnoreturn.h>
+#include <stdatomic.h>
 #endif // _KERNEL_LIBK_STD_H

kernel/limine/.gitignore vendored Normal file (+1)

@@ -0,0 +1 @@
*.o

kernel/limine/requests.c Normal file (+20)

@@ -0,0 +1,20 @@
#include <limine/limine.h>

#define DECL_REQ(small, big) \
	__attribute__((used, section(".limine_requests"))) \
	struct limine_ ## small ## _request limine_ ## small ## _request = { \
		.id = LIMINE_ ## big ## _REQUEST_ID, \
		.revision = 4 \
	}

__attribute__((used, section(".limine_requests")))
volatile uint64_t limine_base_revision[] = LIMINE_BASE_REVISION(4);

__attribute__((used, section(".limine_requests_start")))
volatile uint64_t limine_requests_start_marker[] = LIMINE_REQUESTS_START_MARKER;

__attribute__((used, section(".limine_requests_end")))
volatile uint64_t limine_requests_end_marker[] = LIMINE_REQUESTS_END_MARKER;

DECL_REQ(hhdm, HHDM);
DECL_REQ(memmap, MEMMAP);
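
Editor's note: for reference, DECL_REQ(hhdm, HHDM) together with the trailing semicolon at the call site expands to:

__attribute__((used, section(".limine_requests")))
struct limine_hhdm_request limine_hhdm_request = {
	.id = LIMINE_HHDM_REQUEST_ID,
	.revision = 4
};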

kernel/limine/requests.h Normal file (+12)

@@ -0,0 +1,12 @@
#ifndef _KERNEL_LIMINE_REQUESTS_H
#define _KERNEL_LIMINE_REQUESTS_H
#include <limine/limine.h>

#define EXTERN_REQ(small) \
	extern struct limine_ ## small ## _request limine_ ## small ## _request

EXTERN_REQ(hhdm);
EXTERN_REQ(memmap);

#endif // _KERNEL_LIMINE_REQUESTS_H

kernel/limine/src.mk Normal file (+3)

@@ -0,0 +1,3 @@
c += limine/requests.c
o += limine/requests.o

kernel/mm/.gitignore vendored Normal file (+1)

@@ -0,0 +1 @@
*.o

kernel/mm/liballoc.c Normal file (+575)

@@ -0,0 +1,575 @@
/* liballoc breaks when optimized too aggressively, e.g. by clang's -Oz */
#pragma clang optimize off
#include <mm/liballoc.h>
#include <mm/pmm.h>
#include <mm/types.h>
#include <sync/spin_lock.h>
#include <limine/requests.h>

/* Porting hooks: liballoc calls these to lock, unlock, and get/return pages. */
spin_lock_t _liballoc_lock = SPIN_LOCK_INIT;

int liballoc_lock(void) {
	spin_lock(&_liballoc_lock);
	return 0;
}

int liballoc_unlock(void) {
	spin_unlock(&_liballoc_lock);
	return 0;
}

void *liballoc_alloc(int pages) {
	physaddr_t p_addr = pmm_alloc(pages);
	if (p_addr == PMM_ALLOC_ERR)
		return NULL;
	/* Physical pages are accessed through the Limine higher-half direct map. */
	struct limine_hhdm_response *hhdm = limine_hhdm_request.response;
	uintptr_t addr = (uintptr_t)(p_addr + hhdm->offset);
	return (void *)addr;
}

int liballoc_free(void *ptr, int pages) {
	struct limine_hhdm_response *hhdm = limine_hhdm_request.response;
	physaddr_t p_addr = (uintptr_t)ptr - hhdm->offset;
	pmm_free(p_addr, pages);
	return 0;
}
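
Editor's note: the HHDM arithmetic in the two hooks above is the whole porting story. Pulled out as hypothetical helpers (not in this commit), it reads:

/* Hypothetical helpers, assuming the HHDM response is always present. */
static inline void *phys_to_virt(physaddr_t pa) {
	return (void *)(pa + limine_hhdm_request.response->offset);
}

static inline physaddr_t virt_to_phys(void *va) {
	return (physaddr_t)((uintptr_t)va - limine_hhdm_request.response->offset);
}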
/** Durand's Ridiculously Amazing Super Duper Memory functions. */

//#define DEBUG

#define LIBALLOC_MAGIC	0xc001c0de
#define MAXCOMPLETE	5
#define MAXEXP	32
#define MINEXP	8

#define MODE_BEST	0
#define MODE_INSTANT	1
#define MODE	MODE_BEST

#ifdef DEBUG
#include <stdio.h>
#endif

struct boundary_tag* l_freePages[MAXEXP];	//< Allowing for 2^MAXEXP blocks
int l_completePages[MAXEXP];			//< Allowing for 2^MAXEXP blocks

#ifdef DEBUG
unsigned int l_allocated = 0;	//< The real amount of memory allocated.
unsigned int l_inuse = 0;	//< The amount of memory in use (malloc'ed).
#endif

static int l_initialized = 0;	//< Flag to indicate initialization.
static int l_pageSize = 4096;	//< Individual page size.
static int l_pageCount = 16;	//< Minimum number of pages to allocate.
// *********** HELPER FUNCTIONS *******************************

/** Returns the exponent required to manage 'size' amount of memory.
 *
 *  Returns n where 2^n <= size < 2^(n+1)
 */
static inline int getexp( unsigned int size )
{
	if ( size < (1<<MINEXP) )
	{
		#ifdef DEBUG
		printf("getexp returns -1 for %i less than MINEXP\n", size );
		#endif
		return -1;	// Smaller than the quantum.
	}

	int shift = MINEXP;

	while ( shift < MAXEXP )
	{
		if ( (1<<shift) > size ) break;
		shift += 1;
	}

	#ifdef DEBUG
	printf("getexp returns %i (%i bytes) for %i size\n", shift - 1, (1<<(shift -1)), size );
	#endif

	return shift - 1;
}

static void* liballoc_memset(void* s, int c, size_t n)
{
	size_t i;
	for ( i = 0; i < n ; i++)
		((char*)s)[i] = c;

	return s;
}

static void* liballoc_memcpy(void* s1, const void* s2, size_t n)
{
	char *cdest;
	char *csrc;
	unsigned int *ldest = (unsigned int*)s1;
	unsigned int *lsrc  = (unsigned int*)s2;

	while ( n >= sizeof(unsigned int) )
	{
		*ldest++ = *lsrc++;
		n -= sizeof(unsigned int);
	}

	cdest = (char*)ldest;
	csrc  = (char*)lsrc;

	while ( n > 0 )
	{
		*cdest++ = *csrc++;
		n -= 1;
	}

	return s1;
}
#ifdef DEBUG
static void dump_array()
{
	int i = 0;
	struct boundary_tag *tag = NULL;

	printf("------ Free pages array ---------\n");
	printf("System memory allocated: %i\n", l_allocated );
	printf("Memory in used (malloc'ed): %i\n", l_inuse );

	for ( i = 0; i < MAXEXP; i++ )
	{
		printf("%.2i(%i): ",i, l_completePages[i] );

		tag = l_freePages[ i ];
		while ( tag != NULL )
		{
			if ( tag->split_left  != NULL ) printf("*");
			printf("%i", tag->real_size );
			if ( tag->split_right != NULL ) printf("*");

			printf(" ");
			tag = tag->next;
		}
		printf("\n");
	}

	printf("'*' denotes a split to the left/right of a tag\n");
	fflush( stdout );
}
#endif
static inline void insert_tag( struct boundary_tag *tag, int index )
{
	int realIndex;

	if ( index < 0 )
	{
		realIndex = getexp( tag->real_size - sizeof(struct boundary_tag) );
		if ( realIndex < MINEXP ) realIndex = MINEXP;
	}
	else
		realIndex = index;

	tag->index = realIndex;

	if ( l_freePages[ realIndex ] != NULL )
	{
		l_freePages[ realIndex ]->prev = tag;
		tag->next = l_freePages[ realIndex ];
	}

	l_freePages[ realIndex ] = tag;
}

static inline void remove_tag( struct boundary_tag *tag )
{
	if ( l_freePages[ tag->index ] == tag ) l_freePages[ tag->index ] = tag->next;

	if ( tag->prev != NULL ) tag->prev->next = tag->next;
	if ( tag->next != NULL ) tag->next->prev = tag->prev;

	tag->next = NULL;
	tag->prev = NULL;
	tag->index = -1;
}

static inline struct boundary_tag* melt_left( struct boundary_tag *tag )
{
	struct boundary_tag *left = tag->split_left;

	left->real_size   += tag->real_size;
	left->split_right  = tag->split_right;

	if ( tag->split_right != NULL ) tag->split_right->split_left = left;

	return left;
}

static inline struct boundary_tag* absorb_right( struct boundary_tag *tag )
{
	struct boundary_tag *right = tag->split_right;

	remove_tag( right );	// Remove right from free pages.

	tag->real_size   += right->real_size;
	tag->split_right  = right->split_right;

	if ( right->split_right != NULL )
		right->split_right->split_left = tag;

	return tag;
}

static inline struct boundary_tag* split_tag( struct boundary_tag* tag )
{
	unsigned int remainder = tag->real_size - sizeof(struct boundary_tag) - tag->size;

	struct boundary_tag *new_tag =
		(struct boundary_tag*)((uintptr_t)tag + sizeof(struct boundary_tag) + tag->size);

	new_tag->magic = LIBALLOC_MAGIC;
	new_tag->real_size = remainder;

	new_tag->next = NULL;
	new_tag->prev = NULL;

	new_tag->split_left  = tag;
	new_tag->split_right = tag->split_right;

	if (new_tag->split_right != NULL) new_tag->split_right->split_left = new_tag;
	tag->split_right = new_tag;
	tag->real_size  -= new_tag->real_size;

	insert_tag( new_tag, -1 );

	return new_tag;
}
// ***************************************************************

static struct boundary_tag* allocate_new_tag( unsigned int size )
{
	unsigned int pages;
	unsigned int usage;
	struct boundary_tag *tag;

	// This is how much space is required.
	usage = size + sizeof(struct boundary_tag);

	// Perfect amount of space.
	pages = usage / l_pageSize;
	if ( (usage % l_pageSize) != 0 ) pages += 1;

	// Make sure it's >= the minimum size.
	if ( pages < (unsigned int)l_pageCount ) pages = l_pageCount;

	tag = (struct boundary_tag*)liballoc_alloc( pages );

	if ( tag == NULL ) return NULL;	// uh oh, we ran out of memory.

	tag->magic     = LIBALLOC_MAGIC;
	tag->size      = size;
	tag->real_size = pages * l_pageSize;
	tag->index     = -1;

	tag->next = NULL;
	tag->prev = NULL;
	tag->split_left  = NULL;
	tag->split_right = NULL;

	#ifdef DEBUG
	printf("Resource allocated %x of %i pages (%i bytes) for %i size.\n", tag, pages, pages * l_pageSize, size );
	l_allocated += pages * l_pageSize;
	printf("Total memory usage = %i KB\n", (int)((l_allocated / (1024))) );
	#endif

	return tag;
}
void *malloc(size_t size)
{
	int index;
	void *ptr;
	struct boundary_tag *tag = NULL;

	liballoc_lock();

	if ( l_initialized == 0 )
	{
		#ifdef DEBUG
		printf("%s\n","liballoc initializing.");
		#endif
		for ( index = 0; index < MAXEXP; index++ )
		{
			l_freePages[index] = NULL;
			l_completePages[index] = 0;
		}
		l_initialized = 1;
	}

	index = getexp( size ) + MODE;
	if ( index < MINEXP ) index = MINEXP;

	// Find one big enough.
	tag = l_freePages[ index ];	// Start at the front of the list.
	while ( tag != NULL )
	{
		// If there's enough space in this tag.
		if ( (tag->real_size - sizeof(struct boundary_tag))
				>= (size + sizeof(struct boundary_tag) ) )
		{
			#ifdef DEBUG
			printf("Tag search found %i >= %i\n",(tag->real_size - sizeof(struct boundary_tag)), (size + sizeof(struct boundary_tag) ) );
			#endif
			break;
		}
		tag = tag->next;
	}

	// No page found. Make one.
	if ( tag == NULL )
	{
		if ( (tag = allocate_new_tag( size )) == NULL )
		{
			liballoc_unlock();
			return NULL;
		}
		index = getexp( tag->real_size - sizeof(struct boundary_tag) );
	}
	else
	{
		remove_tag( tag );
		if ( (tag->split_left == NULL) && (tag->split_right == NULL) )
			l_completePages[ index ] -= 1;
	}

	// We have a free page. Remove it from the free pages list.
	tag->size = size;

	// Removed... see if we can re-use the excess space.
	#ifdef DEBUG
	printf("Found tag with %i bytes available (requested %i bytes, leaving %i), which has exponent: %i (%i bytes)\n", tag->real_size - sizeof(struct boundary_tag), size, tag->real_size - size - sizeof(struct boundary_tag), index, 1<<index );
	#endif

	unsigned int remainder = tag->real_size - size - sizeof( struct boundary_tag ) * 2;	// Support a new tag + remainder

	if ( ((int)(remainder) > 0) /*&& ( (tag->real_size - remainder) >= (1<<MINEXP))*/ )
	{
		int childIndex = getexp( remainder );

		if ( childIndex >= 0 )
		{
			#ifdef DEBUG
			printf("Seems to be splittable: %i >= 2^%i .. %i\n", remainder, childIndex, (1<<childIndex) );
			#endif

			struct boundary_tag *new_tag = split_tag( tag );
			(void)new_tag;

			#ifdef DEBUG
			printf("Old tag has become %i bytes, new tag is now %i bytes (%i exp)\n", tag->real_size, new_tag->real_size, new_tag->index );
			#endif
		}
	}

	ptr = (void*)((uintptr_t)tag + sizeof( struct boundary_tag ) );

	#ifdef DEBUG
	l_inuse += size;
	printf("malloc: %x, %i, %i\n", ptr, (int)l_inuse / 1024, (int)l_allocated / 1024 );
	dump_array();
	#endif

	liballoc_unlock();
	return ptr;
}
void free(void *ptr)
{
	int index;
	struct boundary_tag *tag;

	if ( ptr == NULL ) return;

	liballoc_lock();

	tag = (struct boundary_tag*)((uintptr_t)ptr - sizeof( struct boundary_tag ));

	if ( tag->magic != LIBALLOC_MAGIC )
	{
		liballoc_unlock();	// release the lock
		return;
	}

	#ifdef DEBUG
	l_inuse -= tag->size;
	printf("free: %x, %i, %i\n", ptr, (int)l_inuse / 1024, (int)l_allocated / 1024 );
	#endif

	// MELT LEFT...
	while ( (tag->split_left != NULL) && (tag->split_left->index >= 0) )
	{
		#ifdef DEBUG
		printf("Melting tag left into available memory. Left was %i, becomes %i (%i)\n", tag->split_left->real_size, tag->split_left->real_size + tag->real_size, tag->split_left->real_size );
		#endif
		tag = melt_left( tag );
		remove_tag( tag );
	}

	// MELT RIGHT...
	while ( (tag->split_right != NULL) && (tag->split_right->index >= 0) )
	{
		#ifdef DEBUG
		printf("Melting tag right into available memory. This was %i, becomes %i (%i)\n", tag->real_size, tag->split_right->real_size + tag->real_size, tag->split_right->real_size );
		#endif
		tag = absorb_right( tag );
	}

	// Where is it going back to?
	index = getexp( tag->real_size - sizeof(struct boundary_tag) );
	if ( index < MINEXP ) index = MINEXP;

	// A whole, empty block?
	if ( (tag->split_left == NULL) && (tag->split_right == NULL) )
	{
		if ( l_completePages[ index ] == MAXCOMPLETE )
		{
			// Too many standing by to keep. Free this one.
			unsigned int pages = tag->real_size / l_pageSize;

			if ( (tag->real_size % l_pageSize) != 0 ) pages += 1;
			if ( pages < (unsigned int)l_pageCount ) pages = l_pageCount;

			liballoc_free( tag, pages );

			#ifdef DEBUG
			l_allocated -= pages * l_pageSize;
			printf("Resource freeing %x of %i pages\n", tag, pages );
			dump_array();
			#endif

			liballoc_unlock();
			return;
		}

		l_completePages[ index ] += 1;	// Increase the count of complete pages.
	}

	// ..........

	insert_tag( tag, index );

	#ifdef DEBUG
	printf("Returning tag with %i bytes (requested %i bytes), which has exponent: %i\n", tag->real_size, tag->size, index );
	dump_array();
	#endif

	liballoc_unlock();
}
void* calloc(size_t nobj, size_t size)
{
	int real_size;
	void *p;

	real_size = nobj * size;

	p = malloc( real_size );
	if ( p == NULL ) return NULL;	// don't memset through a failed allocation
	liballoc_memset( p, 0, real_size );

	return p;
}
void* realloc(void *p, size_t size)
{
	void *ptr;
	struct boundary_tag *tag;
	int real_size;

	if ( size == 0 )
	{
		free( p );
		return NULL;
	}
	if ( p == NULL ) return malloc( size );

	liballoc_lock();	// in this port the hooks always exist, no need to test their addresses
	tag = (struct boundary_tag*)((uintptr_t)p - sizeof( struct boundary_tag ));
	real_size = tag->size;
	liballoc_unlock();

	if ( (size_t)real_size > size ) real_size = size;

	ptr = malloc( size );
	if ( ptr == NULL ) return NULL;	// keep the old block intact on failure
	liballoc_memcpy( ptr, p, real_size );
	free( p );

	return ptr;
}

kernel/mm/liballoc.h Normal file (+99)

@@ -0,0 +1,99 @@
#ifndef _LIBALLOC_H
#define _LIBALLOC_H
#include <libk/std.h>
// If we are told to not define our own size_t, then we
// skip the define.
#ifndef _ALLOC_SKIP_DEFINE
#ifndef _HAVE_SIZE_T
#define _HAVE_SIZE_T
typedef unsigned int size_t;
#endif
#ifndef NULL
#define NULL 0
#endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
/** This is a boundary tag which is prepended to the
* page or section of a page which we have allocated. It is
* used to identify valid memory blocks that the
* application is trying to free.
*/
struct boundary_tag
{
	unsigned int magic;		//< It's a kind of ...
	unsigned int size;		//< Requested size.
	unsigned int real_size;		//< Actual size.
	int index;			//< Location in the page table.

	struct boundary_tag *split_left;	//< Linked-list info for broken pages.
	struct boundary_tag *split_right;	//< The same.

	struct boundary_tag *next;	//< Linked list info.
	struct boundary_tag *prev;	//< Linked list info.
};
/** This function is supposed to lock the memory data structures. It
* could be as simple as disabling interrupts or acquiring a spinlock.
* It's up to you to decide.
*
* \return 0 if the lock was acquired successfully. Anything else is
* failure.
*/
extern int liballoc_lock();
/** This function unlocks what was previously locked by the liballoc_lock
* function. If it disabled interrupts, it enables interrupts. If it
* had acquired a spinlock, it releases the spinlock, etc.
*
* \return 0 if the lock was successfully released.
*/
extern int liballoc_unlock();
/** This is the hook into the local system which allocates pages. It
* accepts an integer parameter which is the number of pages
* required. The page size was set up in the liballoc_init function.
*
* \return NULL if the pages were not allocated.
* \return A pointer to the allocated memory.
*/
extern void* liballoc_alloc(int);
/** This frees previously allocated memory. The void* parameter passed
* to the function is the exact same value returned from a previous
* liballoc_alloc call.
*
* The integer value is the number of pages to free.
*
* \return 0 if the memory was successfully freed.
*/
extern int liballoc_free(void*,int);
void *malloc(size_t); //< The standard function.
void *realloc(void *, size_t); //< The standard function.
void *calloc(size_t, size_t); //< The standard function.
void free(void *); //< The standard function.
#ifdef __cplusplus
}
#endif
#endif
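
Editor's note: to make the boundary-tag arithmetic concrete (illustration only; tag_of is a hypothetical helper, not part of the header): the pointer handed to the caller sits immediately after its tag, and free() walks back the same distance.

/*
 *   | struct boundary_tag | user data (tag->size bytes) | slack |
 *   ^                     ^
 *   tag                   ptr = (void*)((uintptr_t)tag + sizeof(struct boundary_tag))
 */
static inline struct boundary_tag *tag_of(void *ptr) {
	return (struct boundary_tag *)((uintptr_t)ptr - sizeof(struct boundary_tag));
}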

kernel/mm/pmm.c Normal file (+151)

@@ -0,0 +1,151 @@
#include <libk/std.h>
#include <libk/bm.h>
#include <libk/string.h>
#include <libk/align.h>
#include <sys/mm.h>
#include <sys/debug.h>
#include <sync/spin_lock.h>
#include <mm/types.h>
#include <mm/pmm.h>
#include <limine/limine.h>
#include <limine/requests.h>

static struct pmm pmm;

void pmm_init(void) {
	memset(&pmm, 0, sizeof(pmm));
	struct limine_memmap_response *memmap = limine_memmap_request.response;
	struct limine_hhdm_response *hhdm = limine_hhdm_request.response;
	size_t region = 0;
	for (size_t i = 0; i < memmap->entry_count; i++) {
		struct limine_memmap_entry *entry = memmap->entries[i];
		static const char *entry_strings[] = {
			"usable", "reserved", "acpi reclaimable", "acpi nvs",
			"bad memory", "bootloader reclaimable", "executable and modules",
			"framebuffer", "acpi tables"
		};
		DEBUG("memmap entry: %-25s %p (%zu bytes)\n", entry_strings[entry->type], entry->base, entry->length);
		if (entry->type == LIMINE_MEMMAP_USABLE && region < PMM_REGIONS_MAX) {
			struct pmm_region *pmm_region = &pmm.regions[region];
			/*
			 * We need to calculate sizes for the pmm region and the bitmap. The bitmap
			 * MUSTN'T include its own backing storage within the bit range, so it is
			 * carved out of the start of the region and the managed pages begin after it.
			 */
			size_t size = align_down(entry->length, PAGE_SIZE);
			physaddr_t start = align_up(entry->base, PAGE_SIZE);
			/*
			 * Each managed page costs PAGE_SIZE bytes of data plus one bit of bitmap:
			 * pages * (8 * PAGE_SIZE + 1) <= 8 * size, so pages <= 8 * size / (8 * PAGE_SIZE + 1).
			 */
			size_t max_pages = (size * 8) / (PAGE_SIZE * 8 + 1);
			size_t bm_nbits = max_pages;
			size_t bm_size = align_up(bm_nbits, 8) / 8;
			physaddr_t bm_base = start;
			physaddr_t data_base = align_up(bm_base + bm_size, PAGE_SIZE);
			if (bm_base + bm_size >= start + size)
				continue;
			size_t available = (start + size) - data_base;
			size_t final_pages = available / PAGE_SIZE;
			if (final_pages < max_pages) {
				bm_nbits = final_pages;
				bm_size = align_up(bm_nbits, 8) / 8;
				data_base = align_up(bm_base + bm_size, PAGE_SIZE);
			}
			size_t managed_size = final_pages * PAGE_SIZE;
			/* The bitmap itself is written through the HHDM. */
			uint8_t *bm_base1 = (uint8_t *)(bm_base + hhdm->offset);
			/* Init the pmm region. */
			pmm_region->lock = SPIN_LOCK_INIT;
			pmm_region->membase = data_base;
			pmm_region->size = managed_size;
			bm_init(&pmm_region->bm, bm_base1, bm_nbits);
			bm_clear_region(&pmm_region->bm, 0, bm_nbits);
			pmm_region->flags |= PMM_REGION_ACTIVE; /* mark as active */
			region++;
		}
	}
}
/*
 * Find free space for a block range. For every bit of the bitmap, we test nblks bits forward.
 * bm_test_region helps us out, because it automatically does range checks. See comments there.
 */
static size_t pmm_find_free_space(struct pmm_region *pmm_region, size_t nblks) {
	for (size_t bit = 0; bit < pmm_region->bm.nbits; bit++) {
		if (bm_test_region(&pmm_region->bm, bit, nblks))
			continue;
		return bit;
	}
	return (size_t)-1;
}
physaddr_t pmm_alloc(size_t nblks) {
	for (size_t region = 0; region < PMM_REGIONS_MAX; region++) {
		struct pmm_region *pmm_region = &pmm.regions[region];
		/* Inactive region, so don't bother with it. */
		if (!(pmm_region->flags & PMM_REGION_ACTIVE))
			continue;
		spin_lock(&pmm_region->lock);
		/* Find the starting bit of a free bit range. */
		size_t bit = pmm_find_free_space(pmm_region, nblks);
		/* Found a free range? */
		if (bit != (size_t)-1) {
			/* Mark it. */
			bm_set_region(&pmm_region->bm, bit, nblks);
			spin_unlock(&pmm_region->lock);
			return pmm_region->membase + bit * PAGE_SIZE;
		}
		spin_unlock(&pmm_region->lock);
	}
	return PMM_ALLOC_ERR;
}
void pmm_free(physaddr_t p_addr, size_t nblks) {
	/* Round down to the nearest page boundary. */
	physaddr_t aligned_p_addr = align_down(p_addr, PAGE_SIZE);
	for (size_t region = 0; region < PMM_REGIONS_MAX; region++) {
		struct pmm_region *pmm_region = &pmm.regions[region];
		/* Inactive region, so don't bother with it. */
		if (!(pmm_region->flags & PMM_REGION_ACTIVE))
			continue;
		/* If aligned_p_addr is within the range of this region, it belongs to it. */
		if (aligned_p_addr >= pmm_region->membase &&
		    aligned_p_addr < pmm_region->membase + pmm_region->size) {
			physaddr_t addr = aligned_p_addr - pmm_region->membase;
			size_t bit = addr / PAGE_SIZE;	/* addr is page-aligned, so this is exact */
			spin_lock(&pmm_region->lock);
			bm_clear_region(&pmm_region->bm, bit, nblks);
			spin_unlock(&pmm_region->lock);
			break;
		}
	}
}
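
Editor's note: a worked example of the sizing math in pmm_init (mine, not from the commit), for a page-aligned 1 MiB usable entry:

/* size = 1048576, PAGE_SIZE = 4096
 * max_pages  = (1048576 * 8) / (4096 * 8 + 1) = 8388608 / 32769 = 255
 * bm_size    = align_up(255, 8) / 8 = 32 bytes of bitmap
 * data_base  = align_up(start + 32, 4096) = start + 4096
 * available  = 1048576 - 4096 = 1044480 -> final_pages = 255
 * So 255 pages are managed, and the first page holds the 32-byte bitmap.
 */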

kernel/mm/pmm.h Normal file (+31)

@@ -0,0 +1,31 @@
#ifndef _KERNEL_MM_PMM_H
#define _KERNEL_MM_PMM_H
#include <libk/std.h>
#include <libk/bm.h>
#include <sync/spin_lock.h>
#include <mm/types.h>

#define PMM_ALLOC_ERR ((physaddr_t)-1)
#define PMM_REGIONS_MAX 32
#define PMM_REGION_ACTIVE (1 << 0)

struct pmm_region {
	spin_lock_t lock;
	struct bm bm;
	physaddr_t membase;	/* physical base of the managed pages (bitmap excluded) */
	size_t size;		/* managed bytes, a multiple of PAGE_SIZE */
	uint32_t flags;
};

struct pmm {
	struct pmm_region regions[PMM_REGIONS_MAX];
};

void pmm_init(void);
physaddr_t pmm_alloc(size_t nblks);
void pmm_free(physaddr_t p_addr, size_t nblks);

#endif // _KERNEL_MM_PMM_H
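
Editor's note: a minimal allocation round-trip (sketch, not from the commit; assumes pmm_init() has run and the HHDM response is present):

#include <mm/pmm.h>
#include <limine/requests.h>

void pmm_example(void) {
	physaddr_t pa = pmm_alloc(2);	/* two physical pages */
	if (pa == PMM_ALLOC_ERR)
		return;

	/* Physical memory is only accessible through the higher-half direct map. */
	uint8_t *va = (uint8_t *)(pa + limine_hhdm_request.response->offset);
	va[0] = 0xAA;

	pmm_free(pa, 2);
}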

kernel/mm/src.mk Normal file (+5)

@@ -0,0 +1,5 @@
c += mm/pmm.c \
	mm/liballoc.c
o += mm/pmm.o \
	mm/liballoc.o

kernel/mm/types.h Normal file (+8)

@@ -0,0 +1,8 @@
#ifndef _KERNEL_MM_TYPES_H
#define _KERNEL_MM_TYPES_H
#include <libk/std.h>
typedef uintptr_t physaddr_t;
#endif // _KERNEL_MM_TYPES_H

kernel/src.mk Normal file (+5)

@@ -0,0 +1,5 @@
include $(platform)/src.mk
include libk/src.mk
include sync/src.mk
include mm/src.mk
include limine/src.mk

kernel/sync/.gitignore vendored Normal file (+1)

@@ -0,0 +1 @@
*.o

kernel/sync/spin_lock.c Normal file (+12)

@@ -0,0 +1,12 @@
#include <libk/std.h>
#include <sys/spin_lock.h>
#include <sync/spin_lock.h>

void spin_lock(spin_lock_t *sl) {
	/* Acquire semantics: accesses after the lock can't be reordered before it. */
	while (atomic_flag_test_and_set_explicit(sl, memory_order_acquire))
		spin_lock_relax();
}

void spin_unlock(spin_lock_t *sl) {
	/* Release semantics: everything before the unlock is visible to the next owner. */
	atomic_flag_clear_explicit(sl, memory_order_release);
}

kernel/sync/spin_lock.h Normal file (+13)

@@ -0,0 +1,13 @@
#ifndef _KERNEL_SYNC_SPIN_LOCK_H
#define _KERNEL_SYNC_SPIN_LOCK_H
#include <libk/std.h>

typedef atomic_flag spin_lock_t;
#define SPIN_LOCK_INIT ATOMIC_FLAG_INIT

void spin_lock(spin_lock_t *sl);
void spin_unlock(spin_lock_t *sl);

#endif // _KERNEL_SYNC_SPIN_LOCK_H
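
Editor's note: usage follows the usual pattern (sketch, not from the commit):

#include <sync/spin_lock.h>

static spin_lock_t counter_lock = SPIN_LOCK_INIT;
static int counter;

void counter_inc(void) {
	spin_lock(&counter_lock);
	counter++;		/* critical section */
	spin_unlock(&counter_lock);
}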

kernel/sync/src.mk Normal file (+3)

@@ -0,0 +1,3 @@
c += sync/spin_lock.c
o += sync/spin_lock.o

kernel/sys/debug.h

@@ -4,7 +4,7 @@
 void debugprintf(const char *fmt, ...);
 
 #define DEBUG(fmt, ...) do { \
-	debugprintf("%s: " fmt, __PRETTY_FUNCTION__, ##__VA_ARGS__); \
+	debugprintf("%s: " fmt, __func__, ##__VA_ARGS__); \
 } while(0)
 
 #endif // _KERNEL_SYS_DEBUG_H

kernel/sys/mm.h Normal file (+8)

@@ -0,0 +1,8 @@
#ifndef _KERNEL_SYS_MM_H
#define _KERNEL_SYS_MM_H
#if defined(__x86_64__)
#include <amd64/mm.h>
#endif
#endif // _KERNEL_SYS_MM_H

kernel/sys/spin_lock.h Normal file (+6)

@@ -0,0 +1,6 @@
#ifndef _KERNEL_SYS_SPIN_LOCK_H
#define _KERNEL_SYS_SPIN_LOCK_H

/* Arch-specific busy-wait hint, e.g. the amd64 "pause" instruction. */
void spin_lock_relax(void);

#endif // _KERNEL_SYS_SPIN_LOCK_H