Redesign userspace memory management
All checks were successful
Build documentation / build-and-deploy (push) Successful in 44s
All checks were successful
Build documentation / build-and-deploy (push) Successful in 44s
This commit is contained in:
@@ -1,20 +0,0 @@
|
||||
#ifndef _KERNEL_PROC_KPPROC_FB_H
#define _KERNEL_PROC_KPPROC_FB_H

#include <aux/compiler.h>
#include <libk/std.h>

/* data to expose as a kpproc resource */
/*
 * struct kpproc_fb - snapshot of the boot framebuffer description.
 *
 * Filled from the Limine framebuffer response during kernel-pseudo-process
 * init and published through the kpproc procgroup as a PR_MEM resource, so
 * userspace can learn the framebuffer layout without talking to the
 * bootloader.
 */
struct kpproc_fb {
    uintptr_t paddr;      /* physical base address of the framebuffer */
    uint64_t w, h, pitch; /* width/height in pixels; pitch: bytes per scanline (Limine semantics) */
    uint16_t bpp;         /* bits per pixel */

    /* pixel format: for each channel, *_mask_size is the bit width and
     * *_mask_shift the bit offset of that channel inside a pixel value */
    uint8_t red_mask_size;
    uint8_t red_mask_shift;
    uint8_t green_mask_size;
    uint8_t green_mask_shift;
    uint8_t blue_mask_size;
    uint8_t blue_mask_shift;
};

#endif // _KERNEL_PROC_KPPROC_FB_H
|
||||
@@ -1,11 +0,0 @@
|
||||
#include <libk/std.h>
|
||||
#include <mm/pmm.h>
|
||||
#include <proc/mem.h>
|
||||
#include <proc/proc.h>
|
||||
#include <proc/resource.h>
|
||||
#include <sync/spin_lock.h>
|
||||
|
||||
void proc_cleanup_resource_mem (struct proc_resource* resource) {
|
||||
if (!resource->u.mem.managed)
|
||||
pmm_free (resource->u.mem.paddr, resource->u.mem.pages);
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
#ifndef _KERNEL_PROC_MEM_H
#define _KERNEL_PROC_MEM_H

#include <libk/std.h>

struct proc;
struct proc_resource;

/*
 * Bookkeeping for a PR_MEM (physical memory block) resource.
 */
struct proc_resource_mem {
    struct proc_resource* resource; /* back-pointer to the owning resource */

    uintptr_t paddr;        /* physical base of the block */
    size_t pages;           /* total pages backing the block */
    ptrdiff_t alive_pages;  /* initialized to `pages` on creation;
                             * NOTE(review): decrement semantics not visible
                             * here -- confirm against users of this field */
    bool managed;           /* true: pages owned/freed by someone else;
                             * false: pmm_alloc'ed by proc_create_resource_mem
                             * and freed by proc_cleanup_resource_mem */
};

/* PR_MEM cleanup hook: frees the backing pages unless `managed` is set. */
void proc_cleanup_resource_mem (struct proc_resource* resource);

#endif // _KERNEL_PROC_MEM_H
|
||||
@@ -9,7 +9,6 @@
|
||||
#include <limine/requests.h>
|
||||
#include <mm/liballoc.h>
|
||||
#include <mm/pmm.h>
|
||||
#include <proc/kpproc_fb.h>
|
||||
#include <proc/proc.h>
|
||||
#include <proc/procgroup.h>
|
||||
#include <proc/resource.h>
|
||||
@@ -34,104 +33,12 @@ static rw_spin_lock_t proc_tree_lock = RW_SPIN_LOCK_INIT;
|
||||
|
||||
static atomic_int sched_cycles = 0;
|
||||
|
||||
/* kernel pseudo process */
|
||||
static struct proc kpproc;
|
||||
|
||||
/* Return true iff the buffer begins with the ELF magic: 0x7F 'E' 'L' 'F'. */
static bool proc_check_elf (uint8_t* elf) {
    return (elf[0] == 0x7F) && (elf[1] == 'E') && (elf[2] == 'L') && (elf[3] == 'F');
}
|
||||
|
||||
bool proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages,
|
||||
uint32_t flags) {
|
||||
spin_lock_ctx_t ctxprpd;
|
||||
struct proc_mapping* mapping = malloc (sizeof (*mapping));
|
||||
|
||||
if (mapping == NULL)
|
||||
return false;
|
||||
|
||||
mapping->paddr = start_paddr;
|
||||
mapping->vaddr = start_vaddr;
|
||||
mapping->size = pages * PAGE_SIZE;
|
||||
|
||||
flags &= ~(MM_PD_LOCK | MM_PD_RELOAD); /* clear LOCK flag if present, because we lock manualy */
|
||||
|
||||
spin_lock (&proc->procgroup->pd.lock, &ctxprpd);
|
||||
|
||||
list_append (proc->procgroup->mappings, &mapping->proc_mappings_link);
|
||||
|
||||
for (uintptr_t vpage = start_vaddr, ppage = start_paddr; vpage < start_vaddr + pages * PAGE_SIZE;
|
||||
vpage += PAGE_SIZE, ppage += PAGE_SIZE) {
|
||||
mm_map_page (&proc->procgroup->pd, ppage, vpage, flags);
|
||||
}
|
||||
|
||||
spin_unlock (&proc->procgroup->pd.lock, &ctxprpd);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
 * Remove the virtual range [start_vaddr, start_vaddr + pages*PAGE_SIZE) from
 * the process group's address space. Overlapping mapping records are shrunk,
 * split in two, or removed entirely; the page-table entries for the whole
 * requested range are then dropped.
 *
 * NOTE: this does not pmm_free any physical pages -- ownership of the frames
 * stays with whoever mapped them.
 *
 * Returns false only if the split bookkeeping node cannot be allocated.
 */
bool proc_unmap (struct proc* proc, uintptr_t start_vaddr, size_t pages) {
    size_t unmap_size = pages * PAGE_SIZE;
    uintptr_t end_vaddr = start_vaddr + unmap_size;
    struct list_node_link *mapping_link, *mapping_link_tmp;
    bool used_tail_mapping = false;
    spin_lock_ctx_t ctxprpd;

    /* pre-allocate the node needed if a mapping must be split in two,
     * so we never have to allocate while holding the pd lock */
    struct proc_mapping* tail_mapping = malloc (sizeof (*tail_mapping));
    if (tail_mapping == NULL)
        return false;

    spin_lock (&proc->procgroup->pd.lock, &ctxprpd);

    list_foreach (proc->procgroup->mappings, mapping_link, mapping_link_tmp) {
        struct proc_mapping* mapping =
            list_entry (mapping_link, struct proc_mapping, proc_mappings_link);

        uintptr_t m_end = mapping->vaddr + mapping->size;

        /* check overlap */
        if ((start_vaddr < m_end) && (end_vaddr > mapping->vaddr)) {
            /* split in the middle */
            if ((start_vaddr > mapping->vaddr) && (end_vaddr < m_end)) {
                /* tail keeps the part after the hole... */
                tail_mapping->vaddr = end_vaddr;
                tail_mapping->paddr = mapping->paddr + (end_vaddr - mapping->vaddr);
                tail_mapping->size = m_end - end_vaddr;

                /* ...and the original is truncated to the part before it */
                mapping->size = start_vaddr - mapping->vaddr;

                list_insert_after (proc->procgroup->mappings, &mapping->proc_mappings_link,
                                   &tail_mapping->proc_mappings_link);

                used_tail_mapping = true;

                /* a hole strictly inside one mapping cannot touch any other */
                break;
            } else if ((start_vaddr <= mapping->vaddr) && (end_vaddr < m_end)) { /* shrink left */
                size_t diff = end_vaddr - mapping->vaddr;
                mapping->vaddr += diff;
                mapping->paddr += diff;
                mapping->size -= diff;
            } else if ((start_vaddr > mapping->vaddr) && (end_vaddr >= m_end)) { /* shrink right */
                mapping->size = start_vaddr - mapping->vaddr;
            } else { /* full overlap */
                list_remove (proc->procgroup->mappings, &mapping->proc_mappings_link);
                free (mapping);
            }
        }
    }

    if (!used_tail_mapping)
        free (tail_mapping);

    /* drop the page-table entries for the entire requested range */
    for (uintptr_t vpage = start_vaddr; vpage < end_vaddr; vpage += PAGE_SIZE) {
        mm_unmap_page (&proc->procgroup->pd, vpage, 0);
    }

    spin_unlock (&proc->procgroup->pd.lock, &ctxprpd);

    return true;
}
|
||||
|
||||
struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
|
||||
struct elf_aux aux;
|
||||
|
||||
@@ -156,23 +63,16 @@ struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf) {
|
||||
|
||||
size_t blks = div_align_up (phdr->p_memsz + off, PAGE_SIZE);
|
||||
|
||||
int rid = procgroup_get_sys_rid (proc->procgroup);
|
||||
struct proc_resource* r = proc_create_resource_mem (proc->procgroup, rid, blks, 0, false);
|
||||
if (r == NULL) {
|
||||
DEBUG ("pmm oom error while loading ELF segments! (tried to alloc %zu blks)\n", blks);
|
||||
}
|
||||
|
||||
uintptr_t p_addr = r->u.mem.paddr;
|
||||
|
||||
memset ((void*)((uintptr_t)hhdm->offset + p_addr), 0, blks * PAGE_SIZE);
|
||||
memcpy ((void*)((uintptr_t)hhdm->offset + p_addr + off),
|
||||
(void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
|
||||
|
||||
uint32_t pg_flags = MM_PG_USER | MM_PG_PRESENT;
|
||||
if (phdr->p_flags & PF_W)
|
||||
pg_flags |= MM_PG_RW;
|
||||
|
||||
proc_map (proc, p_addr, v_addr, blks, pg_flags);
|
||||
uintptr_t p_addr;
|
||||
procgroup_map (proc->procgroup, v_addr, blks, pg_flags, &p_addr);
|
||||
|
||||
memset ((void*)((uintptr_t)hhdm->offset + p_addr), 0, blks * PAGE_SIZE);
|
||||
memcpy ((void*)((uintptr_t)hhdm->offset + p_addr + off),
|
||||
(void*)((uintptr_t)elf + phdr->p_offset), phdr->p_filesz);
|
||||
} break;
|
||||
}
|
||||
}
|
||||
@@ -347,59 +247,12 @@ static void proc_irq_sched (void* arg, void* regs) {
|
||||
proc_sched ();
|
||||
}
|
||||
|
||||
static void proc_kpproc_init (void) {
|
||||
struct limine_hhdm_response* hhdm = limine_hhdm_request.response;
|
||||
|
||||
memset (&kpproc, 0, sizeof (kpproc));
|
||||
|
||||
kpproc.lock = SPIN_LOCK_INIT;
|
||||
kpproc.state = PROC_PSEUDO;
|
||||
kpproc.pid = 0;
|
||||
|
||||
kpproc.cpu = thiscpu;
|
||||
kpproc.procgroup = procgroup_create ();
|
||||
procgroup_attach (kpproc.procgroup, &kpproc);
|
||||
|
||||
rbtree_insert (struct proc, &proc_tree, &kpproc.proc_tree_link, proc_tree_link, pid);
|
||||
|
||||
/* prepare kernel resources */
|
||||
{
|
||||
/* frame buffer */
|
||||
|
||||
struct limine_framebuffer_response* fb = limine_framebuffer_request.response;
|
||||
struct kpproc_fb fb_info = {
|
||||
.paddr = (uintptr_t)fb->framebuffers[0]->address - (uintptr_t)hhdm->offset,
|
||||
.w = fb->framebuffers[0]->width,
|
||||
.h = fb->framebuffers[0]->height,
|
||||
.pitch = fb->framebuffers[0]->pitch,
|
||||
.bpp = fb->framebuffers[0]->bpp,
|
||||
.red_mask_size = fb->framebuffers[0]->red_mask_size,
|
||||
.red_mask_shift = fb->framebuffers[0]->red_mask_shift,
|
||||
.green_mask_size = fb->framebuffers[0]->green_mask_size,
|
||||
.green_mask_shift = fb->framebuffers[0]->green_mask_shift,
|
||||
.blue_mask_size = fb->framebuffers[0]->blue_mask_size,
|
||||
.blue_mask_shift = fb->framebuffers[0]->blue_mask_shift,
|
||||
};
|
||||
|
||||
DEBUG ("Framebuffer address %p\n", fb_info.paddr);
|
||||
|
||||
size_t pages = align_up (sizeof (fb_info), PAGE_SIZE) / PAGE_SIZE;
|
||||
uintptr_t fb_info_memblk_paddr = pmm_alloc (pages);
|
||||
memcpy ((struct kpproc_fb*)((uintptr_t)hhdm->offset + fb_info_memblk_paddr), &fb_info,
|
||||
sizeof (fb_info));
|
||||
|
||||
proc_create_resource_mem (kpproc.procgroup, 0, pages, fb_info_memblk_paddr, true);
|
||||
}
|
||||
}
|
||||
|
||||
void proc_init (void) {
|
||||
#if defined(__x86_64__)
|
||||
irq_attach (&proc_irq_sched, NULL, SCHED_PREEMPT_TIMER);
|
||||
irq_attach (&proc_irq_sched, NULL, CPU_REQUEST_SCHED);
|
||||
#endif
|
||||
|
||||
proc_kpproc_init ();
|
||||
|
||||
struct proc* spin_proc = proc_spawn_rd ("spin.exe");
|
||||
proc_register (spin_proc, thiscpu);
|
||||
|
||||
|
||||
@@ -22,7 +22,6 @@
|
||||
#define PROC_READY 0
|
||||
#define PROC_DEAD 1
|
||||
#define PROC_SUSPENDED 2
|
||||
#define PROC_PSEUDO 3
|
||||
|
||||
/* process flags */
|
||||
#define PROC_USTK_PREALLOC (1 << 0)
|
||||
@@ -53,9 +52,6 @@ struct proc {
|
||||
|
||||
void proc_sched (void);
|
||||
void proc_kill (struct proc* proc);
|
||||
bool proc_map (struct proc* proc, uintptr_t start_paddr, uintptr_t start_vaddr, size_t pages,
|
||||
uint32_t flags);
|
||||
bool proc_unmap (struct proc* proc, uintptr_t start_vaddr, size_t pages);
|
||||
struct elf_aux proc_load_segments (struct proc* proc, uint8_t* elf);
|
||||
void proc_register (struct proc* proc, struct cpu* cpu);
|
||||
struct proc* proc_find_pid (int pid);
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#include <libk/rbtree.h>
|
||||
#include <libk/std.h>
|
||||
#include <mm/liballoc.h>
|
||||
#include <mm/pmm.h>
|
||||
#include <proc/proc.h>
|
||||
#include <proc/procgroup.h>
|
||||
#include <sync/rw_spin_lock.h>
|
||||
@@ -12,6 +13,116 @@ static struct rb_node_link* procgroup_tree = NULL;
|
||||
static rw_spin_lock_t procgroup_tree_lock = RW_SPIN_LOCK_INIT;
|
||||
static atomic_int pgids = 0;
|
||||
|
||||
/*
 * Allocate `pages` physical pages and map them at `vaddr` (or at
 * PROC_MAP_BASE when vaddr == 0) in the procgroup's address space,
 * recording the region in the group's mapping list.
 *
 * On success the chosen virtual address is returned and, if out_paddr is
 * non-NULL, the physical base is stored through it. Returns 0 when either
 * the bookkeeping node or the physical pages cannot be allocated.
 */
uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
                         uintptr_t* out_paddr) {
    spin_lock_ctx_t ctxprpd;

    if (vaddr == 0)
        vaddr = PROC_MAP_BASE;

    struct proc_mapping* mapping = malloc (sizeof (*mapping));
    if (mapping == NULL)
        return 0;

    uintptr_t paddr = pmm_alloc (pages);
    if (paddr == PMM_ALLOC_ERR) {
        free (mapping);
        return 0;
    }

    if (out_paddr != NULL)
        *out_paddr = paddr;

    mapping->paddr = paddr;
    mapping->vaddr = vaddr;
    mapping->size  = pages * PAGE_SIZE;

    /* we take the pd lock ourselves below, so strip per-call lock/reload flags */
    flags &= ~(MM_PD_LOCK | MM_PD_RELOAD);

    spin_lock (&procgroup->pd.lock, &ctxprpd);

    list_append (procgroup->mappings, &mapping->proc_mappings_link);

    for (size_t i = 0; i < pages; i++) {
        mm_map_page (&procgroup->pd, paddr + i * PAGE_SIZE, vaddr + i * PAGE_SIZE, flags);
    }

    spin_unlock (&procgroup->pd.lock, &ctxprpd);

    return vaddr;
}
|
||||
|
||||
/*
 * Unmap [start_vaddr, start_vaddr + pages*PAGE_SIZE) from the procgroup's
 * address space AND return the backing physical pages of the unmapped
 * portion to the PMM (unlike the old proc_unmap, this owns the frames).
 * Overlapping mapping records are shrunk, split in two, or removed.
 *
 * Returns false only if the split bookkeeping node cannot be allocated.
 */
bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t pages) {
    size_t unmap_size = pages * PAGE_SIZE;
    uintptr_t end_vaddr = start_vaddr + unmap_size;
    struct list_node_link *mapping_link, *mapping_link_tmp;
    bool used_tail_mapping = false;
    spin_lock_ctx_t ctxprpd;

    /* pre-allocate the node needed if a mapping must be split in two,
     * so we never allocate while holding the pd lock */
    struct proc_mapping* tail_mapping = malloc (sizeof (*tail_mapping));
    if (tail_mapping == NULL)
        return false;

    spin_lock (&procgroup->pd.lock, &ctxprpd);

    list_foreach (procgroup->mappings, mapping_link, mapping_link_tmp) {
        struct proc_mapping* mapping =
            list_entry (mapping_link, struct proc_mapping, proc_mappings_link);

        uintptr_t m_start = mapping->vaddr;
        uintptr_t m_end = mapping->vaddr + mapping->size;

        /* check overlap */
        if ((start_vaddr < m_end) && (end_vaddr > mapping->vaddr)) {
            /* clamp the requested range to this mapping and free exactly
             * the physical pages backing the overlapping part */
            uintptr_t free_vstart = (start_vaddr > m_start) ? start_vaddr : m_start;
            uintptr_t free_vend = (end_vaddr < m_end) ? end_vaddr : m_end;
            size_t free_size = free_vend - free_vstart;

            uintptr_t ppage_to_free = mapping->paddr + (free_vstart - m_start);
            pmm_free (ppage_to_free, free_size / PAGE_SIZE);

            /* split in the middle */
            if ((start_vaddr > m_start) && (end_vaddr < m_end)) {
                /* tail keeps the part after the hole... */
                tail_mapping->vaddr = end_vaddr;
                tail_mapping->paddr = mapping->paddr + (end_vaddr - m_start);
                tail_mapping->size = m_end - end_vaddr;

                /* ...and the original is truncated to the part before it */
                mapping->size = start_vaddr - m_start;

                list_insert_after (procgroup->mappings, &mapping->proc_mappings_link,
                                   &tail_mapping->proc_mappings_link);

                used_tail_mapping = true;

                /* a hole strictly inside one mapping cannot touch any other */
                break;
            } else if ((start_vaddr <= m_start) && (end_vaddr < m_end)) { /* shrink left */
                size_t diff = end_vaddr - m_start;
                mapping->vaddr += diff;
                mapping->paddr += diff;
                mapping->size -= diff;
            } else if ((start_vaddr > m_start) && (end_vaddr >= m_end)) { /* shrink right */
                mapping->size = start_vaddr - m_start;
            } else { /* full overlap */
                list_remove (procgroup->mappings, &mapping->proc_mappings_link);
                free (mapping);
            }
        }
    }

    if (!used_tail_mapping)
        free (tail_mapping);

    /* drop the page-table entries for the entire requested range */
    for (uintptr_t vpage = start_vaddr; vpage < end_vaddr; vpage += PAGE_SIZE) {
        mm_unmap_page (&procgroup->pd, vpage, 0);
    }

    spin_unlock (&procgroup->pd.lock, &ctxprpd);

    return true;
}
|
||||
|
||||
struct procgroup* procgroup_create (void) {
|
||||
spin_lock_ctx_t ctxpgtr;
|
||||
|
||||
@@ -87,6 +198,17 @@ void procgroup_detach (struct procgroup* procgroup, struct proc* proc) {
|
||||
proc_resource_unlink (resource);
|
||||
}
|
||||
|
||||
struct list_node_link *mapping_link, *mapping_link_tmp;
|
||||
list_foreach (procgroup->mappings, mapping_link, mapping_link_tmp) {
|
||||
struct proc_mapping* mapping =
|
||||
list_entry (mapping_link, struct proc_mapping, proc_mappings_link);
|
||||
|
||||
pmm_free (mapping->paddr, mapping->size / PAGE_SIZE);
|
||||
free (mapping);
|
||||
}
|
||||
|
||||
pmm_free (procgroup->pd.cr3_paddr, 1);
|
||||
|
||||
free (procgroup);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -34,5 +34,8 @@ struct procgroup* procgroup_create (void);
|
||||
void procgroup_attach (struct procgroup* procgroup, struct proc* proc);
|
||||
void procgroup_detach (struct procgroup* procgroup, struct proc* proc);
|
||||
int procgroup_get_sys_rid (struct procgroup* procgroup);
|
||||
uintptr_t procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
|
||||
uintptr_t* out_paddr);
|
||||
bool procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t pages);
|
||||
|
||||
#endif // _KERNEL_PROC_PROCGROUP_H
|
||||
|
||||
@@ -24,50 +24,6 @@ struct proc_resource* proc_find_resource (struct procgroup* procgroup, int rid)
|
||||
return resource;
|
||||
}
|
||||
|
||||
/*
 * Create a PR_MEM resource under `rid` in the procgroup's resource tree.
 *
 * managed == true : the caller supplies `paddr` and retains ownership of the
 *                   frames; the cleanup hook will NOT pmm_free them.
 * managed == false: `paddr` is ignored; `pages` frames are pmm_alloc'ed here
 *                   and freed by proc_cleanup_resource_mem.
 *
 * If `rid` already exists, the existing resource is returned unchanged
 * (NOTE(review): even if it is not of type PR_MEM -- callers beware).
 * Returns NULL when pages == 0 or on allocation failure.
 */
struct proc_resource* proc_create_resource_mem (struct procgroup* procgroup, int rid, size_t pages,
                                                uintptr_t paddr, bool managed) {
    spin_lock_ctx_t ctxpg;
    struct proc_resource* resource;

    if (pages == 0)
        return NULL;

    /* rid already taken: hand back the existing resource */
    resource = proc_find_resource (procgroup, rid);
    if (resource != NULL)
        return resource;

    resource = malloc (sizeof (*resource));
    if (resource == NULL)
        return NULL;

    memset (resource, 0, sizeof (*resource));
    resource->lock = SPIN_LOCK_INIT;
    resource->ops.cleanup = &proc_cleanup_resource_mem;
    resource->rid = rid;
    resource->type = PR_MEM;
    resource->u.mem.resource = resource; /* back-pointer for users of u.mem */
    if (managed) {
        resource->u.mem.managed = true;
    } else {
        /* resource owns its frames: allocate them now */
        paddr = pmm_alloc (pages);
        if (paddr == PMM_ALLOC_ERR) {
            free (resource);
            return NULL;
        }
        resource->u.mem.managed = false;
    }
    resource->u.mem.paddr = paddr;
    resource->u.mem.pages = resource->u.mem.alive_pages = pages;
    resource->refs = 1; /* creator holds the initial reference */

    /* publish fully-initialized resource under the procgroup lock */
    spin_lock (&procgroup->lock, &ctxpg);
    rbtree_insert (struct proc_resource, &procgroup->resource_tree, &resource->resource_tree_link,
                   resource_tree_link, rid);
    spin_unlock (&procgroup->lock, &ctxpg);

    return resource;
}
|
||||
|
||||
struct proc_resource* proc_create_resource_mutex (struct procgroup* procgroup, int rid) {
|
||||
spin_lock_ctx_t ctxpg;
|
||||
struct proc_resource* resource;
|
||||
@@ -83,7 +39,7 @@ struct proc_resource* proc_create_resource_mutex (struct procgroup* procgroup, i
|
||||
memset (resource, 0, sizeof (*resource));
|
||||
resource->lock = SPIN_LOCK_INIT;
|
||||
resource->ops.cleanup = &proc_cleanup_resource_mutex;
|
||||
resource->u.mem.resource = resource;
|
||||
resource->u.mutex.resource = resource;
|
||||
resource->rid = rid;
|
||||
resource->type = PR_MUTEX;
|
||||
resource->refs = 1;
|
||||
|
||||
@@ -4,11 +4,9 @@
|
||||
#include <libk/list.h>
|
||||
#include <libk/rbtree.h>
|
||||
#include <libk/std.h>
|
||||
#include <proc/mem.h>
|
||||
#include <proc/mutex.h>
|
||||
#include <sync/spin_lock.h>
|
||||
|
||||
#define PR_MEM 0
|
||||
#define PR_MUTEX 1
|
||||
|
||||
struct proc;
|
||||
@@ -21,7 +19,6 @@ struct proc_resource {
|
||||
spin_lock_t lock;
|
||||
struct rb_node_link resource_tree_link;
|
||||
union {
|
||||
struct proc_resource_mem mem;
|
||||
struct proc_mutex mutex;
|
||||
} u;
|
||||
struct {
|
||||
@@ -30,8 +27,6 @@ struct proc_resource {
|
||||
};
|
||||
|
||||
struct proc_resource* proc_find_resource (struct procgroup* procgroup, int rid);
|
||||
struct proc_resource* proc_create_resource_mem (struct procgroup* procgroup, int rid, size_t pages,
|
||||
uintptr_t paddr, bool managed);
|
||||
struct proc_resource* proc_create_resource_mutex (struct procgroup* procgroup, int rid);
|
||||
void proc_resource_unlink (struct proc_resource* resource);
|
||||
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
c += proc/proc.c \
|
||||
proc/resource.c \
|
||||
proc/mutex.c \
|
||||
proc/mem.c \
|
||||
proc/procgroup.c
|
||||
|
||||
o += proc/proc.o \
|
||||
proc/resource.o \
|
||||
proc/mutex.o \
|
||||
proc/mem.o \
|
||||
proc/procgroup.o
|
||||
|
||||
Reference in New Issue
Block a user