1
0
Fork 0

fix/kernel: Fixed new VMM API

This commit is contained in:
Kevin Alavik 2025-05-15 17:08:47 +02:00
parent 3aa5a1bb86
commit 6b811ad804
Signed by: cmpsb
GPG key ID: 10D1CC0526FDC6D7
4 changed files with 151 additions and 74 deletions

View file

@@ -4,6 +4,14 @@
#include <stdint.h>
#include <boot/emk.h>
#include <mm/vmm.h>
// Macro to convert VALLOC_* flags to VMM_* page table flags
#define VFLAGS_TO_PFLAGS(flags) \
(VMM_PRESENT | \
(((flags) & VALLOC_WRITE) ? VMM_WRITE : 0) | \
(((flags) & VALLOC_USER) ? VMM_USER : 0) | \
(((flags) & VALLOC_EXEC) ? 0 : VMM_NX))
#define VMM_PRESENT (1ULL << 0)
#define VMM_WRITE (1ULL << 1)

View file

@@ -68,7 +68,6 @@ void emk_entry(void)
hhdm_offset = hhdm_request.response->offset;
log_early("HHDM Offset: %llx", hhdm_offset);
pmm_init();
log_early("Initialized physical page manager");
/* Test allocate a single physical page */
char *a = palloc(1, true);
@@ -78,6 +77,7 @@ void emk_entry(void)
*a = 32;
log_early("Allocated 1 physical page: %llx", (uint64_t)a);
pfree(a, 1);
log_early("Initialized physical page manager");
/* Setup virtual memory */
if (!kernel_address_request.response)
@@ -91,14 +91,13 @@ void emk_entry(void)
log_early("Initialized paging");
/* Kernel Virtual Memory Context, not to be confused with KVM */
vpm_ctx_t *kvm_ctx = vmm_init(kernel_pagemap);
vctx_t *kvm_ctx = vinit(kernel_pagemap, 0x1000);
if (!kvm_ctx)
{
kpanic(NULL, "Failed to create kernel VMM context");
}
log_early("Initialized virtual page manager");
char *b = valloc(kvm_ctx, 1, VMM_PRESENT | VMM_WRITE);
char *b = valloc(kvm_ctx, 1, VALLOC_RW);
if (!b)
{
kpanic(NULL, "Failed to allocate single virtual page");
@@ -106,6 +105,8 @@ void emk_entry(void)
*b = 32;
log_early("Allocated 1 virtual page: %llx", (uint64_t)b);
vfree(kvm_ctx, b);
log_early("Initialized virtual page manager");
hlt();
}

View file

@@ -5,90 +5,144 @@
#include <util/align.h>
#include <boot/emk.h>
#include <arch/paging.h>
#include <lib/string.h>
vpm_ctx_t *vmm_init(uint64_t *pm)
vctx_t *vinit(uint64_t *pm, uint64_t start)
{
if (!pm || !IS_PAGE_ALIGNED(PHYSICAL(pm)))
return NULL;
vpm_ctx_t *ctx = (vpm_ctx_t *)palloc(1, true);
vctx_t *ctx = (vctx_t *)palloc(1, true);
if (!ctx)
return NULL;
memset(ctx, 0, sizeof(vctx_t));
ctx->root = (vregion_t *)palloc(1, true);
if (!ctx->root)
{
pfree(ctx, 1);
return NULL;
}
ctx->pagemap = pm;
ctx->root = NULL; /* valloc creates the root if not present */
ctx->root->start = start;
ctx->root->pages = 0;
return ctx;
}
void *valloc(vpm_ctx_t *ctx, size_t pages, uint64_t flags)
void vdestroy(vctx_t *ctx)
{
if (!ctx || !pages)
return NULL;
if (ctx->root == NULL || ctx->pagemap == NULL)
return;
/* Allocate physical pages */
void *phys = palloc(pages, true);
if (!phys)
return NULL;
uint64_t virt = VPM_MIN_ADDR;
vm_region_t *prev = NULL;
vm_region_t *curr = ctx->root;
while (curr)
vregion_t *region = ctx->root;
while (region != NULL)
{
uint64_t curr_end = curr->start + (curr->pages * PAGE_SIZE);
if (virt + (pages * PAGE_SIZE) <= curr->start)
vregion_t *next = region->next;
pfree(region, 1);
region = next;
}
pfree(ctx, 1);
}
void *valloc(vctx_t *ctx, size_t pages, uint64_t flags)
{
if (ctx == NULL || ctx->root == NULL || ctx->pagemap == NULL)
return NULL;
vregion_t *region = ctx->root;
vregion_t *new = NULL;
vregion_t *last = ctx->root;
while (region)
{
if (region->next == NULL || region->start + region->pages < region->next->start)
{
new = (vregion_t *)palloc(1, true);
if (!new)
return NULL;
memset(new, 0, sizeof(vregion_t));
new->pages = pages;
new->flags = VFLAGS_TO_PFLAGS(flags);
new->start = region->start + (region->pages * PAGE_SIZE);
new->next = region->next;
new->prev = region;
region->next = new;
for (uint64_t i = 0; i < pages; i++)
{
uint64_t page = (uint64_t)palloc(1, false);
if (page == 0)
return NULL;
vmap(ctx->pagemap, new->start + i * PAGE_SIZE, page, new->flags);
}
return (void *)new->start;
}
region = region->next;
}
new = (vregion_t *)palloc(1, true);
if (!new)
return NULL;
memset(new, 0, sizeof(vregion_t));
last->next = new;
new->prev = last;
new->start = last->start + (last->pages * PAGE_SIZE);
new->pages = pages;
new->flags = VFLAGS_TO_PFLAGS(flags);
new->next = NULL;
for (uint64_t i = 0; i < pages; i++)
{
uint64_t page = (uint64_t)palloc(1, false);
if (page == 0)
return NULL;
vmap(ctx->pagemap, new->start + i * PAGE_SIZE, page, new->flags);
}
return (void *)new->start;
}
void vfree(vctx_t *ctx, void *ptr)
{
if (ctx == NULL)
return;
vregion_t *region = ctx->root;
while (region != NULL)
{
if (region->start == (uint64_t)ptr)
{
/* Found a gap */
break;
}
virt = curr_end;
prev = curr;
curr = curr->next;
region = region->next;
}
/* Map the virtual to physical pages */
for (size_t i = 0; i < pages; i++)
if (region == NULL)
return;
vregion_t *prev = region->prev;
vregion_t *next = region->next;
for (uint64_t i = 0; i < region->pages; i++)
{
uint64_t vaddr = virt + (i * PAGE_SIZE);
uint64_t paddr = (uint64_t)phys + (i * PAGE_SIZE);
if (vmap(ctx->pagemap, vaddr, paddr, flags) != 0)
uint64_t virt = region->start + i * PAGE_SIZE;
uint64_t phys = virt_to_phys(kernel_pagemap, virt);
if (phys != 0)
{
/* Mapping failed, unmap any mapped pages and free physical memory */
for (size_t j = 0; j < i; j++)
{
vunmap(ctx->pagemap, virt + (j * PAGE_SIZE));
}
pfree(phys, pages);
return NULL;
pfree((void *)phys, 1);
vunmap(ctx->pagemap, virt);
}
}
/* Create new region */
vm_region_t *region = (vm_region_t *)palloc(1, true);
if (!region)
{
/* Region allocation failed, clean up */
for (size_t i = 0; i < pages; i++)
{
vunmap(ctx->pagemap, virt + (i * PAGE_SIZE));
}
pfree(phys, pages);
return NULL;
}
if (prev != NULL)
prev->next = next;
region->start = virt;
region->pages = pages;
region->next = curr;
if (next != NULL)
next->prev = prev;
if (prev)
{
prev->next = region;
}
else
{
ctx->root = region;
}
if (region == ctx->root)
ctx->root = next;
return (void *)virt;
pfree(region, 1);
}

View file

@@ -9,21 +9,35 @@
#define VPM_MIN_ADDR 0x1000
#endif // VPM_MIN_ADDR
typedef struct vm_region
#define VALLOC_NONE 0x0
#define VALLOC_READ (1 << 0)
#define VALLOC_WRITE (1 << 1)
#define VALLOC_EXEC (1 << 2)
#define VALLOC_USER (1 << 3)
#define VALLOC_RW (VALLOC_READ | VALLOC_WRITE)
#define VALLOC_RX (VALLOC_READ | VALLOC_EXEC)
#define VALLOC_RWX (VALLOC_READ | VALLOC_WRITE | VALLOC_EXEC)
typedef struct vregion
{
uint64_t start;
uint64_t pages;
struct vm_region *next;
/* TODO: Maybe store flags */
} vm_region_t;
uint64_t flags;
struct vregion *next;
struct vregion *prev;
} vregion_t;
typedef struct vpm_ctx
typedef struct vctx
{
vm_region_t *root;
vregion_t *root;
uint64_t *pagemap;
} vpm_ctx_t;
uint64_t start;
} vctx_t;
vpm_ctx_t *vmm_init(uint64_t *pm);
void *valloc(vpm_ctx_t *ctx, size_t pages, uint64_t flags);
vctx_t *vinit(uint64_t *pm, uint64_t start);
void vdestroy(vctx_t *ctx);
void *valloc(vctx_t *ctx, size_t pages, uint64_t flags);
void vfree(vctx_t *ctx, void *ptr);
#endif // VMM_H