#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
/* plus the kernel's own headers providing malloc/free, the mm/pmm paging APIs,
 * the proc/procgroup definitions, and the rbtree, list and spin-lock primitives */

static struct rb_node_link* procgroup_tree = NULL;
static rw_spin_lock_t procgroup_tree_lock = RW_SPIN_LOCK_INIT;
static atomic_int pgids = 0;

uintptr_t
procgroup_map (struct procgroup* procgroup, uintptr_t vaddr, size_t pages, uint32_t flags,
               uintptr_t* out_paddr)
{
    spin_lock_ctx_t ctxprpd;

    vaddr = (vaddr == 0) ? PROC_MAP_BASE : vaddr;

    struct proc_mapping* mapping = malloc (sizeof (*mapping));
    if (mapping == NULL)
        return 0;

    uintptr_t paddr = pmm_alloc (pages);
    if (paddr == PMM_ALLOC_ERR) {
        free (mapping);
        return 0;
    }

    if (out_paddr != NULL)
        *out_paddr = paddr;

    mapping->paddr = paddr;
    mapping->vaddr = vaddr;
    mapping->size  = pages * PAGE_SIZE;

    /* clear the LOCK and RELOAD flags if present, because we lock manually */
    flags &= ~(MM_PD_LOCK | MM_PD_RELOAD);

    spin_lock (&procgroup->pd.lock, &ctxprpd);

    list_append (procgroup->mappings, &mapping->proc_mappings_link);

    for (uintptr_t vpage = vaddr, ppage = paddr; vpage < vaddr + pages * PAGE_SIZE;
         vpage += PAGE_SIZE, ppage += PAGE_SIZE) {
        mm_map_page (&procgroup->pd, ppage, vpage, flags);
    }

    spin_unlock (&procgroup->pd.lock, &ctxprpd);

    return vaddr;
}

bool
procgroup_unmap (struct procgroup* procgroup, uintptr_t start_vaddr, size_t pages)
{
    size_t unmap_size = pages * PAGE_SIZE;
    uintptr_t end_vaddr = start_vaddr + unmap_size;
    struct list_node_link *mapping_link, *mapping_link_tmp;
    bool used_tail_mapping = false;
    spin_lock_ctx_t ctxprpd;

    struct proc_mapping* tail_mapping = malloc (sizeof (*tail_mapping));
    if (tail_mapping == NULL)
        return false;

    spin_lock (&procgroup->pd.lock, &ctxprpd);

    list_foreach (procgroup->mappings, mapping_link, mapping_link_tmp) {
        struct proc_mapping* mapping = list_entry (mapping_link, struct proc_mapping, proc_mappings_link);

        uintptr_t m_start = mapping->vaddr;
        uintptr_t m_end   = mapping->vaddr + mapping->size;

        /* check overlap */
        if ((start_vaddr < m_end) && (end_vaddr > m_start)) {
            uintptr_t free_vstart = (start_vaddr > m_start) ? start_vaddr : m_start;
            uintptr_t free_vend   = (end_vaddr < m_end) ?
                end_vaddr : m_end;
            size_t free_size = free_vend - free_vstart;
            uintptr_t ppage_to_free = mapping->paddr + (free_vstart - m_start);

            pmm_free (ppage_to_free, free_size / PAGE_SIZE);

            if ((start_vaddr > m_start) && (end_vaddr < m_end)) {
                /* split in the middle */
                tail_mapping->vaddr = end_vaddr;
                tail_mapping->paddr = mapping->paddr + (end_vaddr - m_start);
                tail_mapping->size  = m_end - end_vaddr;

                mapping->size = start_vaddr - m_start;

                list_insert_after (procgroup->mappings, &mapping->proc_mappings_link,
                                   &tail_mapping->proc_mappings_link);
                used_tail_mapping = true;
                break;
            } else if ((start_vaddr <= m_start) && (end_vaddr < m_end)) {
                /* shrink left */
                size_t diff = end_vaddr - m_start;
                mapping->vaddr += diff;
                mapping->paddr += diff;
                mapping->size  -= diff;
            } else if ((start_vaddr > m_start) && (end_vaddr >= m_end)) {
                /* shrink right */
                mapping->size = start_vaddr - m_start;
            } else {
                /* full overlap */
                list_remove (procgroup->mappings, &mapping->proc_mappings_link);
                free (mapping);
            }
        }
    }

    if (!used_tail_mapping)
        free (tail_mapping);

    for (uintptr_t vpage = start_vaddr; vpage < end_vaddr; vpage += PAGE_SIZE) {
        mm_unmap_page (&procgroup->pd, vpage, 0);
    }

    spin_unlock (&procgroup->pd.lock, &ctxprpd);

    return true;
}

struct procgroup*
procgroup_create (void)
{
    spin_lock_ctx_t ctxpgtr;

    struct procgroup* procgroup = malloc (sizeof (*procgroup));
    if (procgroup == NULL) {
        return NULL;
    }

    procgroup->refs = 0;
    procgroup->memb_proc_tree = NULL;
    procgroup->lock = SPIN_LOCK_INIT;
    procgroup->pgid = atomic_fetch_add (&pgids, 1);
    procgroup->pd.lock = SPIN_LOCK_INIT;
    procgroup->pd.cr3_paddr = mm_alloc_user_pd_phys ();

    rw_spin_write_lock (&procgroup_tree_lock, &ctxpgtr);
    rbtree_insert (struct procgroup, &procgroup_tree, &procgroup->procgroup_tree_link,
                   procgroup_tree_link, pgid);
    rw_spin_write_unlock (&procgroup_tree_lock, &ctxpgtr);

    return procgroup;
}

void
procgroup_attach (struct procgroup* procgroup, struct proc* proc)
{
    spin_lock_ctx_t ctxpg, ctxpr;

    spin_lock (&procgroup->lock, &ctxpg);
    spin_lock (&proc->lock, &ctxpr);

    rbtree_insert (struct proc, &procgroup->memb_proc_tree, &proc->procgroup_memb_tree_link,
                   procgroup_memb_tree_link, pid);
    atomic_fetch_add (&procgroup->refs, 1);

    DEBUG ("procgroup attach PID %d to PGID %d\n", proc->pid, procgroup->pgid);

    spin_unlock (&proc->lock, &ctxpr);
    spin_unlock (&procgroup->lock, &ctxpg);
}

void
procgroup_detach (struct procgroup* procgroup, struct proc* proc)
{
    spin_lock_ctx_t ctxpg, ctxpr, ctxpgtr;

    spin_lock (&procgroup->lock, &ctxpg);
    spin_lock (&proc->lock, &ctxpr);

    rbtree_delete (&procgroup->memb_proc_tree, &proc->procgroup_memb_tree_link);
    int refs = atomic_fetch_sub (&procgroup->refs, 1);

    DEBUG ("procgroup detach PID %d from PGID %d\n", proc->pid, procgroup->pgid);

    spin_unlock (&proc->lock, &ctxpr);
    spin_unlock (&procgroup->lock, &ctxpg);

    if (refs == 1) {
        /* last member detached: drop the group from the global tree */
        rw_spin_write_lock (&procgroup_tree_lock, &ctxpgtr);
        spin_lock (&procgroup->lock, &ctxpg);
        rbtree_delete (&procgroup_tree, &procgroup->procgroup_tree_link);
        spin_unlock (&procgroup->lock, &ctxpg);
        rw_spin_write_unlock (&procgroup_tree_lock, &ctxpgtr);

        /* delete resources */
        struct rb_node_link* rnode;
        rbtree_first (&procgroup->resource_tree, rnode);
        while (rnode) {
            struct rb_node_link* next;
            rbtree_next (rnode, next);
            struct proc_resource* resource = rbtree_entry (rnode, struct proc_resource, resource_tree_link);
            rnode = next;
            proc_delete_resource (resource);
        }

        struct list_node_link *mapping_link, *mapping_link_tmp;
        list_foreach (procgroup->mappings, mapping_link, mapping_link_tmp) {
            struct proc_mapping* mapping =
                list_entry (mapping_link, struct proc_mapping, proc_mappings_link);
            pmm_free (mapping->paddr, mapping->size / PAGE_SIZE);
            free (mapping);
        }

        pmm_free (procgroup->pd.cr3_paddr, 1);
        free (procgroup);
    }
}
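
/*
 * Usage sketch: a minimal, illustrative walk through the procgroup lifecycle,
 * assuming the caller already owns a live `struct proc*`. The wrapper function
 * and the page count below are hypothetical and not part of this file's API;
 * flags are left at 0 here, whereas real callers would pass the kernel's
 * MM_PD_* bits as appropriate.
 */
#if 0
static void
procgroup_lifecycle_example (struct proc* proc)
{
    struct procgroup* pg = procgroup_create ();
    if (pg == NULL)
        return;

    /* membership bumps the group's refcount */
    procgroup_attach (pg, proc);

    /* map 4 pages; passing vaddr == 0 selects the default PROC_MAP_BASE */
    uintptr_t paddr;
    uintptr_t vaddr = procgroup_map (pg, 0, 4, 0, &paddr);
    if (vaddr != 0)
        procgroup_unmap (pg, vaddr, 4);

    /* detaching the last member tears down mappings, resources and the page directory */
    procgroup_detach (pg, proc);
}
#endif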