From 6b811ad804a34996258ebd853e7758de5fcd64df Mon Sep 17 00:00:00 2001
From: Kevin Alavik
Date: Thu, 15 May 2025 17:08:47 +0200
Subject: [PATCH] fix/kernel: Fixed new VMM API

---
 kernel/src/arch/paging.h |   8 ++
 kernel/src/emk.c         |   9 +-
 kernel/src/mm/vmm.c      | 176 +++++++++++++++++++++++++--------------
 kernel/src/mm/vmm.h      |  32 +++++--
 4 files changed, 151 insertions(+), 74 deletions(-)

diff --git a/kernel/src/arch/paging.h b/kernel/src/arch/paging.h
index 2416356..b426e54 100644
--- a/kernel/src/arch/paging.h
+++ b/kernel/src/arch/paging.h
@@ -4,6 +4,14 @@
 
 #include
 #include
+#include
+
+// Macro to convert VALLOC_* flags to VMM_* page table flags
+#define VFLAGS_TO_PFLAGS(flags) \
+    (VMM_PRESENT | \
+     (((flags) & VALLOC_WRITE) ? VMM_WRITE : 0) | \
+     (((flags) & VALLOC_USER) ? VMM_USER : 0) | \
+     (((flags) & VALLOC_EXEC) ? 0 : VMM_NX))
 
 #define VMM_PRESENT (1ULL << 0)
 #define VMM_WRITE (1ULL << 1)
diff --git a/kernel/src/emk.c b/kernel/src/emk.c
index 5c18c15..316e7c6 100644
--- a/kernel/src/emk.c
+++ b/kernel/src/emk.c
@@ -68,7 +68,6 @@ void emk_entry(void)
     hhdm_offset = hhdm_request.response->offset;
     log_early("HHDM Offset: %llx", hhdm_offset);
     pmm_init();
-    log_early("Initialized physical page manager");
 
     /* Test allocate a single physical page */
     char *a = palloc(1, true);
@@ -78,6 +77,7 @@ void emk_entry(void)
     *a = 32;
     log_early("Allocated 1 physical page: %llx", (uint64_t)a);
     pfree(a, 1);
+    log_early("Initialized physical page manager");
 
     /* Setup virtual memory */
     if (!kernel_address_request.response)
@@ -91,14 +91,13 @@ void emk_entry(void)
     log_early("Initialized paging");
 
     /* Kernel Virtual Memory Context, not to be confused with KVM */
-    vpm_ctx_t *kvm_ctx = vmm_init(kernel_pagemap);
+    vctx_t *kvm_ctx = vinit(kernel_pagemap, 0x1000);
     if (!kvm_ctx)
     {
         kpanic(NULL, "Failed to create kernel VMM context");
     }
-    log_early("Initialized virtual page manager");
 
-    char *b = valloc(kvm_ctx, 1, VMM_PRESENT | VMM_WRITE);
+    char *b = valloc(kvm_ctx, 1, VALLOC_RW);
     if (!b)
     {
         kpanic(NULL, "Failed to allocate single virtual page");
@@ -106,6 +105,8 @@
 
     *b = 32;
     log_early("Allocated 1 virtual page: %llx", (uint64_t)b);
+    vfree(kvm_ctx, b);
+    log_early("Initialized virtual page manager");
 
     hlt();
 }
\ No newline at end of file
diff --git a/kernel/src/mm/vmm.c b/kernel/src/mm/vmm.c
index 86b5995..224979d 100644
--- a/kernel/src/mm/vmm.c
+++ b/kernel/src/mm/vmm.c
@@ -5,90 +5,144 @@
 #include
 #include
 #include
+#include
 
-vpm_ctx_t *vmm_init(uint64_t *pm)
+vctx_t *vinit(uint64_t *pm, uint64_t start)
 {
-    if (!pm || !IS_PAGE_ALIGNED(PHYSICAL(pm)))
-        return NULL;
-
-    vpm_ctx_t *ctx = (vpm_ctx_t *)palloc(1, true);
+    vctx_t *ctx = (vctx_t *)palloc(1, true);
     if (!ctx)
         return NULL;
+    memset(ctx, 0, sizeof(vctx_t));
+    ctx->root = (vregion_t *)palloc(1, true);
+    if (!ctx->root)
+    {
+        pfree(ctx, 1);
+        return NULL;
+    }
     ctx->pagemap = pm;
-    ctx->root = NULL; /* valloc creates the root if not present */
+    ctx->root->start = start;
+    ctx->root->pages = 0;
     return ctx;
 }
 
-void *valloc(vpm_ctx_t *ctx, size_t pages, uint64_t flags)
+void vdestroy(vctx_t *ctx)
 {
-    if (!ctx || !pages)
-        return NULL;
+    if (ctx->root == NULL || ctx->pagemap == NULL)
+        return;
 
-    /* Allocate physical pages */
-    void *phys = palloc(pages, true);
-    if (!phys)
-        return NULL;
-
-    uint64_t virt = VPM_MIN_ADDR;
-    vm_region_t *prev = NULL;
-    vm_region_t *curr = ctx->root;
-
-    while (curr)
+    vregion_t *region = ctx->root;
+    while (region != NULL)
     {
-        uint64_t curr_end = curr->start + (curr->pages * PAGE_SIZE);
-        if (virt + (pages * PAGE_SIZE) <= curr->start)
+        vregion_t *next = region->next;
+        pfree(region, 1);
+        region = next;
+    }
+    pfree(ctx, 1);
+}
+
+void *valloc(vctx_t *ctx, size_t pages, uint64_t flags)
+{
+    if (ctx == NULL || ctx->root == NULL || ctx->pagemap == NULL)
+        return NULL;
+
+    vregion_t *region = ctx->root;
+    vregion_t *new = NULL;
+    vregion_t *last = ctx->root;
+
+    while (region)
+    {
+
+        if (region->next == NULL || region->start + region->pages < region->next->start)
+        {
+            new = (vregion_t *)palloc(1, true);
+            if (!new)
+                return NULL;
+
+            memset(new, 0, sizeof(vregion_t));
+            new->pages = pages;
+            new->flags = VFLAGS_TO_PFLAGS(flags);
+            new->start = region->start + (region->pages * PAGE_SIZE);
+            new->next = region->next;
+            new->prev = region;
+            region->next = new;
+            for (uint64_t i = 0; i < pages; i++)
+            {
+                uint64_t page = (uint64_t)palloc(1, false);
+                if (page == 0)
+                    return NULL;
+
+                vmap(ctx->pagemap, new->start + i * PAGE_SIZE, page, new->flags);
+            }
+            return (void *)new->start;
+        }
+        region = region->next;
+    }
+
+    new = (vregion_t *)palloc(1, true);
+    if (!new)
+        return NULL;
+
+    memset(new, 0, sizeof(vregion_t));
+    last->next = new;
+    new->prev = last;
+    new->start = last->start + (last->pages * PAGE_SIZE);
+    new->pages = pages;
+    new->flags = VFLAGS_TO_PFLAGS(flags);
+    new->next = NULL;
+
+    for (uint64_t i = 0; i < pages; i++)
+    {
+        uint64_t page = (uint64_t)palloc(1, false);
+        if (page == 0)
+            return NULL;
+
+        vmap(ctx->pagemap, new->start + i * PAGE_SIZE, page, new->flags);
+    }
+    return (void *)new->start;
+}
+
+void vfree(vctx_t *ctx, void *ptr)
+{
+    if (ctx == NULL)
+        return;
+
+    vregion_t *region = ctx->root;
+    while (region != NULL)
+    {
+        if (region->start == (uint64_t)ptr)
         {
-            /* Found a gap */
             break;
         }
-        virt = curr_end;
-        prev = curr;
-        curr = curr->next;
+        region = region->next;
     }
 
-    /* Map the virtual to physical pages */
-    for (size_t i = 0; i < pages; i++)
+    if (region == NULL)
+        return;
+
+    vregion_t *prev = region->prev;
+    vregion_t *next = region->next;
+
+    for (uint64_t i = 0; i < region->pages; i++)
     {
-        uint64_t vaddr = virt + (i * PAGE_SIZE);
-        uint64_t paddr = (uint64_t)phys + (i * PAGE_SIZE);
-        if (vmap(ctx->pagemap, vaddr, paddr, flags) != 0)
+        uint64_t virt = region->start + i * PAGE_SIZE;
+        uint64_t phys = virt_to_phys(kernel_pagemap, virt);
+
+        if (phys != 0)
         {
-            /* Mapping failed, unmap any mapped pages and free physical memory */
-            for (size_t j = 0; j < i; j++)
-            {
-                vunmap(ctx->pagemap, virt + (j * PAGE_SIZE));
-            }
-            pfree(phys, pages);
-            return NULL;
+            pfree((void *)phys, 1);
+            vunmap(ctx->pagemap, virt);
        }
     }
 
-    /* Create new region */
-    vm_region_t *region = (vm_region_t *)palloc(1, true);
-    if (!region)
-    {
-        /* Region allocation failed, clean up */
-        for (size_t i = 0; i < pages; i++)
-        {
-            vunmap(ctx->pagemap, virt + (i * PAGE_SIZE));
-        }
-        pfree(phys, pages);
-        return NULL;
-    }
+    if (prev != NULL)
+        prev->next = next;
 
-    region->start = virt;
-    region->pages = pages;
-    region->next = curr;
+    if (next != NULL)
+        next->prev = prev;
 
-    if (prev)
-    {
-        prev->next = region;
-    }
-    else
-    {
-        ctx->root = region;
-    }
+    if (region == ctx->root)
+        ctx->root = next;
 
-    return (void *)virt;
+    pfree(region, 1);
 }
\ No newline at end of file
diff --git a/kernel/src/mm/vmm.h b/kernel/src/mm/vmm.h
index 5d1f1f8..0668393 100644
--- a/kernel/src/mm/vmm.h
+++ b/kernel/src/mm/vmm.h
@@ -9,21 +9,35 @@
 #define VPM_MIN_ADDR 0x1000
 #endif // VPM_MIN_ADDR
 
-typedef struct vm_region
+#define VALLOC_NONE 0x0
+#define VALLOC_READ (1 << 0)
+#define VALLOC_WRITE (1 << 1)
+#define VALLOC_EXEC (1 << 2)
+#define VALLOC_USER (1 << 3)
+
+#define VALLOC_RW (VALLOC_READ | VALLOC_WRITE)
+#define VALLOC_RX (VALLOC_READ | VALLOC_EXEC)
+#define VALLOC_RWX (VALLOC_READ | VALLOC_WRITE | VALLOC_EXEC)
+
+typedef struct vregion
 {
     uint64_t start;
     uint64_t pages;
-    struct vm_region *next;
-    /* TOOD: Maybe store flags */
-} vm_region_t;
+    uint64_t flags;
+    struct vregion *next;
+    struct vregion *prev;
+} vregion_t;
 
-typedef struct vpm_ctx
+typedef struct vctx
 {
-    vm_region_t *root;
+    vregion_t *root;
     uint64_t *pagemap;
-} vpm_ctx_t;
+    uint64_t start;
+} vctx_t;
 
-vpm_ctx_t *vmm_init(uint64_t *pm);
-void *valloc(vpm_ctx_t *ctx, size_t pages, uint64_t flags);
+vctx_t *vinit(uint64_t *pm, uint64_t start);
+void vdestroy(vctx_t *ctx);
+void *valloc(vctx_t *ctx, size_t pages, uint64_t flags);
+void vfree(vctx_t *ctx, void *ptr);
 
 #endif // VMM_H
\ No newline at end of file
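
---

A quick illustration, separate from the patch itself: VFLAGS_TO_PFLAGS, added to paging.h above, is the glue between the new VALLOC_* allocation flags and the VMM_* page-table bits. The standalone program below copies the flag constants and the macro verbatim from this patch and prints the page-table bits produced for two flag combinations. The bit positions used here for VMM_USER and VMM_NX are assumptions (their real definitions live in paging.h but are outside the hunk shown), and the main()/printf harness exists only for this sketch; it is not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Page-table bits, as in kernel/src/arch/paging.h. VMM_PRESENT and VMM_WRITE
   are taken from the hunk above; VMM_USER and VMM_NX use assumed positions. */
#define VMM_PRESENT (1ULL << 0)
#define VMM_WRITE (1ULL << 1)
#define VMM_USER (1ULL << 2)  /* assumed */
#define VMM_NX (1ULL << 63)   /* assumed */

/* Allocation flags, copied from kernel/src/mm/vmm.h */
#define VALLOC_READ (1 << 0)
#define VALLOC_WRITE (1 << 1)
#define VALLOC_EXEC (1 << 2)
#define VALLOC_USER (1 << 3)
#define VALLOC_RW (VALLOC_READ | VALLOC_WRITE)
#define VALLOC_RWX (VALLOC_READ | VALLOC_WRITE | VALLOC_EXEC)

/* Translation macro, copied from kernel/src/arch/paging.h */
#define VFLAGS_TO_PFLAGS(flags) \
    (VMM_PRESENT | \
     (((flags) & VALLOC_WRITE) ? VMM_WRITE : 0) | \
     (((flags) & VALLOC_USER) ? VMM_USER : 0) | \
     (((flags) & VALLOC_EXEC) ? 0 : VMM_NX))

int main(void)
{
    /* VALLOC_RW: present + writable; NX is set because VALLOC_EXEC is absent */
    printf("VALLOC_RW                -> %#llx\n",
           (unsigned long long)VFLAGS_TO_PFLAGS(VALLOC_RW));

    /* VALLOC_RWX | VALLOC_USER: present + writable + user; NX stays clear */
    printf("VALLOC_RWX | VALLOC_USER -> %#llx\n",
           (unsigned long long)VFLAGS_TO_PFLAGS(VALLOC_RWX | VALLOC_USER));

    return 0;
}

Note that VALLOC_READ maps to no dedicated bit (readability is implied by VMM_PRESENT), and execute permission is granted by leaving VMM_NX clear rather than by setting a bit, so any allocation made without VALLOC_EXEC, such as the VALLOC_RW mapping in emk.c, is non-executable by default.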