Use clang-format
@@ -1,52 +1,51 @@
-#include <libk/std.h>
-#include <libk/bm.h>
-#include <libk/string.h>
-#include <libk/align.h>
-#include <sys/mm.h>
-#include <sys/debug.h>
-#include <sync/spin_lock.h>
-#include <mm/types.h>
-#include <mm/pmm.h>
+#include <libk/bm.h>
+#include <libk/std.h>
+#include <libk/string.h>
+#include <limine/limine.h>
+#include <limine/requests.h>
+#include <mm/pmm.h>
+#include <mm/types.h>
+#include <sync/spin_lock.h>
+#include <sys/debug.h>
+#include <sys/mm.h>
 
 static struct pmm pmm;
 
-void pmm_init(void) {
-  memset(&pmm, 0, sizeof(pmm));
+void pmm_init (void) {
+  memset (&pmm, 0, sizeof (pmm));
 
-  struct limine_memmap_response *memmap = limine_memmap_request.response;
-  struct limine_hhdm_response *hhdm = limine_hhdm_request.response;
+  struct limine_memmap_response* memmap = limine_memmap_request.response;
+  struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
 
   size_t region = 0;
   for (size_t i = 0; i < memmap->entry_count; i++) {
-    struct limine_memmap_entry *entry = memmap->entries[i];
-    static const char *entry_strings[] = {
-      "usable", "reserved", "acpi reclaimable", "acpi nvs",
-      "bad memory", "bootloader reclaimable", "executable and modules",
-      "framebuffer", "acpi tables"
-    };
+    struct limine_memmap_entry* entry = memmap->entries[i];
+    static const char* entry_strings[] = {"usable", "reserved",
+        "acpi reclaimable", "acpi nvs", "bad memory", "bootloader reclaimable",
+        "executable and modules", "framebuffer", "acpi tables"};
 
-    DEBUG("memmap entry: %-25s %p (%zu bytes)\n", entry_strings[entry->type], entry->base, entry->length);
+    DEBUG ("memmap entry: %-25s %p (%zu bytes)\n", entry_strings[entry->type],
+        entry->base, entry->length);
 
     if (entry->type == LIMINE_MEMMAP_USABLE && region < PMM_REGIONS_MAX) {
-      struct pmm_region *pmm_region = &pmm.regions[region];
+      struct pmm_region* pmm_region = &pmm.regions[region];
 
       /*
        * We need to calculate sizes for the pmm region and the bitmap. The
        * bitmap MUSTN'T include its own region within the bit range.
        */
 
-      size_t size = align_down(entry->length, PAGE_SIZE);
-      physaddr_t start = align_up(entry->base, PAGE_SIZE);
+      size_t size = align_down (entry->length, PAGE_SIZE);
+      physaddr_t start = align_up (entry->base, PAGE_SIZE);
 
       size_t max_pages = (size * 8) / (PAGE_SIZE * 8 + 1);
 
       size_t bm_nbits = max_pages;
-      size_t bm_size = align_up(bm_nbits, 8) / 8;
+      size_t bm_size = align_up (bm_nbits, 8) / 8;
 
       physaddr_t bm_base = start;
-      physaddr_t data_base = align_up(bm_base + bm_size, PAGE_SIZE);
+      physaddr_t data_base = align_up (bm_base + bm_size, PAGE_SIZE);
 
       if (bm_base + bm_size >= start + size)
         continue;
@@ -57,22 +56,22 @@ void pmm_init(void) {
 
       if (final_pages < max_pages) {
         bm_nbits = final_pages;
-        bm_size = align_up(bm_nbits, 8) / 8;
-        data_base = align_up(bm_base + bm_size, PAGE_SIZE);
+        bm_size = align_up (bm_nbits, 8) / 8;
+        data_base = align_up (bm_base + bm_size, PAGE_SIZE);
       }
 
       size_t managed_size = final_pages * PAGE_SIZE;
 
-      uint8_t *bm_base1 = (uint8_t *)(bm_base + hhdm->offset);
+      uint8_t* bm_base1 = (uint8_t*)(bm_base + hhdm->offset);
 
       /* Init the pm region. */
       pmm_region->lock = SPIN_LOCK_INIT;
       pmm_region->membase = data_base;
       pmm_region->size = managed_size;
-      bm_init(&pmm_region->bm, bm_base1, bm_nbits);
-      bm_clear_region(&pmm_region->bm, 0, bm_nbits);
+      bm_init (&pmm_region->bm, bm_base1, bm_nbits);
+      bm_clear_region (&pmm_region->bm, 0, bm_nbits);
       pmm_region->flags |= PMM_REGION_ACTIVE; /* mark as active */
 
       region++;
     }
   }
@@ -82,9 +81,10 @@ void pmm_init(void) {
  * Find free space for a block range. For every bit of the bitmap, we test nblks bits forward.
  * bm_test_region helps us out, because it automatically does range checks. See comments there.
  */
-static size_t pmm_find_free_space(struct pmm_region *pmm_region, size_t nblks) {
+static size_t pmm_find_free_space (
+    struct pmm_region* pmm_region, size_t nblks) {
   for (size_t bit = 0; bit < pmm_region->bm.nbits; bit++) {
-    if (bm_test_region(&pmm_region->bm, bit, nblks)) {
+    if (bm_test_region (&pmm_region->bm, bit, nblks)) {
       continue;
     }
 
@@ -94,56 +94,57 @@ static size_t pmm_find_free_space(struct pmm_region *pmm_region, size_t nblks) {
   return (size_t)-1;
 }
 
-physaddr_t pmm_alloc(size_t nblks) {
+physaddr_t pmm_alloc (size_t nblks) {
   for (size_t region = 0; region < PMM_REGIONS_MAX; region++) {
-    struct pmm_region *pmm_region = &pmm.regions[region];
+    struct pmm_region* pmm_region = &pmm.regions[region];
 
     /* Inactive region, so don't bother with it. */
     if (!(pmm_region->flags & PMM_REGION_ACTIVE))
       continue;
 
-    spin_lock(&pmm_region->lock);
+    spin_lock (&pmm_region->lock);
 
     /* Find starting bit of the free bit range */
-    size_t bit = pmm_find_free_space(pmm_region, nblks);
+    size_t bit = pmm_find_free_space (pmm_region, nblks);
 
     /* Found a free range? */
     if (bit != (size_t)-1) {
       /* Mark it */
-      bm_set_region(&pmm_region->bm, bit, nblks);
-      spin_unlock(&pmm_region->lock);
+      bm_set_region (&pmm_region->bm, bit, nblks);
+      spin_unlock (&pmm_region->lock);
 
       return pmm_region->membase + bit * PAGE_SIZE;
     }
 
-    spin_unlock(&pmm_region->lock);
+    spin_unlock (&pmm_region->lock);
   }
 
   return PMM_ALLOC_ERR;
 }
 
-void pmm_free(physaddr_t p_addr, size_t nblks) {
+void pmm_free (physaddr_t p_addr, size_t nblks) {
   /* Round down to nearest page boundary */
-  physaddr_t aligned_p_addr = align_down(p_addr, PAGE_SIZE);
+  physaddr_t aligned_p_addr = align_down (p_addr, PAGE_SIZE);
 
   for (size_t region = 0; region < PMM_REGIONS_MAX; region++) {
-    struct pmm_region *pmm_region = &pmm.regions[region];
+    struct pmm_region* pmm_region = &pmm.regions[region];
 
     /* Inactive region, so don't bother with it. */
     if (!(pmm_region->flags & PMM_REGION_ACTIVE))
       continue;
 
     /* If aligned_p_addr is within the range of this region, it belongs to it. */
-    if (aligned_p_addr >= pmm_region->membase && aligned_p_addr < pmm_region->size) {
+    if (aligned_p_addr >= pmm_region->membase &&
+        aligned_p_addr < pmm_region->size) {
       physaddr_t addr = aligned_p_addr - pmm_region->membase;
 
-      size_t bit = div_align_up(addr, PAGE_SIZE);
+      size_t bit = div_align_up (addr, PAGE_SIZE);
 
-      spin_lock(&pmm_region->lock);
-
-      bm_clear_region(&pmm_region->bm, bit, nblks);
-
-      spin_unlock(&pmm_region->lock);
+      spin_lock (&pmm_region->lock);
+
+      bm_clear_region (&pmm_region->bm, bit, nblks);
+
+      spin_unlock (&pmm_region->lock);
 
       break;
     }
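
A note on the sizing arithmetic reformatted above: every page a region manages costs PAGE_SIZE * 8 data bits plus one bookkeeping bit in the bitmap, and both come out of the same region's size * 8 bits. The largest count satisfying max_pages * (PAGE_SIZE * 8 + 1) <= size * 8 is exactly the max_pages = (size * 8) / (PAGE_SIZE * 8 + 1) line in pmm_init. A minimal standalone sketch of that split (the 1 MiB region and 4 KiB PAGE_SIZE are illustrative assumptions, not values taken from this kernel):

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main (void) {
  size_t size = 1024UL * 1024UL; /* hypothetical 1 MiB usable region */

  /* One page needs PAGE_SIZE * 8 data bits plus 1 bitmap bit, so take the
   * largest page count that fits in the region's size * 8 bits. */
  size_t max_pages = (size * 8) / (PAGE_SIZE * 8 + 1);

  /* Bitmap bytes: bits rounded up to a whole byte (align_up (n, 8) / 8). */
  size_t bm_size = (max_pages + 7) / 8;

  /* Data begins at the next page boundary past the bitmap. */
  size_t data_off = (bm_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);

  printf ("pages %zu, bitmap %zu bytes, data at +%zu\n",
      max_pages, bm_size, data_off);
  /* Prints: pages 255, bitmap 32 bytes, data at +4096. */
  return 0;
}

For this hypothetical region the split yields 255 pages: one page is given up to the page-aligned bitmap, and the remaining 1044480 bytes hold exactly max_pages pages, so final_pages would equal max_pages here.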
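
And, for reference while reading pmm_alloc and pmm_free above, the caller-side contract as a hedged sketch (the helper name and the two-block request are hypothetical):

#include <mm/pmm.h>
#include <mm/types.h>

/* Hypothetical caller: grab two contiguous physical pages, then return them. */
static void pmm_usage_sketch (void) {
  physaddr_t p = pmm_alloc (2);
  if (p == PMM_ALLOC_ERR)
    return; /* no active region had two free contiguous blocks */

  /* ... use the pages, e.g. access p and p + PAGE_SIZE through the HHDM ... */

  pmm_free (p, 2);
}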