mm: Changes

+ vmm: Rename to "paging"
+ vma: Start implementation
This commit is contained in:
RaphProductions 2025-05-19 07:25:31 +02:00
parent c658f738e4
commit 16246cc167
8 changed files with 125 additions and 51 deletions

View file

@ -1,3 +1,8 @@
# Soaplin (rewrite) # Soaplin (rewrite)
This is a carefully rewritten version of Soaplin, that should be held with super glue instead of This is a carefully rewritten version of Soaplin, that should be held with super glue instead of
0.99$ duct tape bought on TEMU. 0.99$ duct tape bought on TEMU.
## Features
* Support for compiling the kernel on armv8, RISC-V, and LoongArch64 (even though the kernel doesn't do anything on these architectures)
* Free list PMM with lazy loading
* x86_64 paging

View file

@ -7,7 +7,7 @@
#pragma once #pragma once
#include "mm/vmm.h" #include "mm/paging.h"
// Stage 1 initialization: Core components (such as the GDT & IDT on x86_64...) // Stage 1 initialization: Core components (such as the GDT & IDT on x86_64...)
void arch_init_stage1(); void arch_init_stage1();

View file

@ -5,13 +5,13 @@
* cpu.c - x86_64 CPU control implementation. * cpu.c - x86_64 CPU control implementation.
*/ */
#include "mm/vmm.h"
#if defined (__x86_64__) #if defined (__x86_64__)
#include <arch/x86_64/gdt.h> #include <arch/x86_64/gdt.h>
#include <arch/x86_64/idt.h> #include <arch/x86_64/idt.h>
#include <arch/cpu.h> #include <arch/cpu.h>
#include <mm/pmm.h> #include <mm/pmm.h>
#include "mm/paging.h"
void arch_init_stage1() { void arch_init_stage1() {
gdt_init(); gdt_init();

View file

@ -5,7 +5,6 @@
* main.c - Kernel entry point and initialization. * main.c - Kernel entry point and initialization.
*/ */
#include "mm/vmm.h"
#include <stdint.h> #include <stdint.h>
#include <stddef.h> #include <stddef.h>
#include <stdbool.h> #include <stdbool.h>
@ -19,6 +18,7 @@
#include <lib/logoutputs_sk.h> #include <lib/logoutputs_sk.h>
#include <mm/memop.h> #include <mm/memop.h>
#include <mm/pmm.h> #include <mm/pmm.h>
#include "mm/paging.h"
void kmain(void) { void kmain(void) {
tty_init(); tty_init();
@ -32,12 +32,8 @@ void kmain(void) {
arch_init_stage1(); arch_init_stage1();
pmm_init(); pmm_init();
vmm_init(); pg_init();
/*uint8_t* mem = pmm_alloc_page() + 0xFFFF800000000000;
memcpy(mem, "HelloWorld\0", 11);
trace("pmm: Read from allocated memory: %s\n", mem);
pmm_free_page(mem);
trace("pmm: Freed memory.\n");*/
// We're done, just hang... // We're done, just hang...
hcf(); hcf();

View file

@ -14,13 +14,13 @@
#include <lib/log.h> #include <lib/log.h>
#include <mm/memop.h> #include <mm/memop.h>
#include <mm/pmm.h> #include <mm/pmm.h>
#include <mm/vmm.h> #include <mm/paging.h>
#include <stdint.h> #include <stdint.h>
pagemap_t vmm_kernel_pm = NULL; pagemap_t pg_kernel_pm = NULL;
pagemap_t vmm_current_pm = NULL; pagemap_t pg_current_pm = NULL;
void vmm_init() { void pg_init() {
#if !defined(__x86_64__) #if !defined(__x86_64__)
fatal("vmm: not implemented\n"); fatal("vmm: not implemented\n");
hcf(); hcf();
@ -28,7 +28,7 @@ void vmm_init() {
// Our objective here is to recreate the // Our objective here is to recreate the
// kernel page map that Limine provide us // kernel page map that Limine provide us
vmm_kernel_pm = vmm_alloc_pm(); pg_kernel_pm = pg_alloc_pm();
uint64_t kvaddr = limine_get_kernel_vaddr(); uint64_t kvaddr = limine_get_kernel_vaddr();
uint64_t kpaddr = limine_get_kernel_paddr(); uint64_t kpaddr = limine_get_kernel_paddr();
@ -53,42 +53,42 @@ void vmm_init() {
size_t length = ALIGN_UP(cur_phdr->p_memsz, PMM_PAGE_SIZE); size_t length = ALIGN_UP(cur_phdr->p_memsz, PMM_PAGE_SIZE);
for (uint64_t i = 0; i < length; i += PMM_PAGE_SIZE) { for (uint64_t i = 0; i < length; i += PMM_PAGE_SIZE) {
vmm_map(vmm_kernel_pm, cur_phdr->p_vaddr + i, phys + i, flags); pg_map(pg_kernel_pm, cur_phdr->p_vaddr + i, phys + i, flags);
} }
trace("vmm: Mapped range: %p -> %p (length: %x)\n", phys, cur_phdr->p_vaddr, length); trace("vmm: Mapped range: %p -> %p (length: %x)\n", phys, cur_phdr->p_vaddr, length);
} }
for (uint64_t i = 0; i < 0x100000000; i += PMM_PAGE_SIZE) for (uint64_t i = 0; i < 0x100000000; i += PMM_PAGE_SIZE)
vmm_map(vmm_kernel_pm, higher_half(i), i, PTE_PRESENT | PTE_WRITE); pg_map(pg_kernel_pm, higher_half(i), i, PTE_PRESENT | PTE_WRITE);
trace("vmm: Mapped range: %p -> %p (length: %x)\n", 0x0, 0xFFFF800000000000, 0x100000000); trace("vmm: Mapped range: %p -> %p (length: %x)\n", 0x0, 0xFFFF800000000000, 0x100000000);
vmm_load_pm(vmm_kernel_pm); pg_load_pm(pg_kernel_pm);
trace("vmm: Initialized.\n"); trace("vmm: Initialized.\n");
} }
void vmm_load_pm(pagemap_t pm) { void pg_load_pm(pagemap_t pm) {
if (!pm) if (!pm)
return; return;
vmm_current_pm = pm; pg_current_pm = pm;
cpu_load_pm((pagemap_t)physical((uint64_t)pm)); cpu_load_pm((pagemap_t)physical((uint64_t)pm));
} }
pagemap_t vmm_alloc_pm() { pagemap_t pg_alloc_pm() {
pagemap_t pm = (pagemap_t)higher_half((uint64_t)pmm_alloc_page()); pagemap_t pm = (pagemap_t)higher_half((uint64_t)pmm_alloc_page());
memset((void*)pm, 0, PMM_PAGE_SIZE); memset((void*)pm, 0, PMM_PAGE_SIZE);
if (vmm_kernel_pm) if (pg_kernel_pm)
{ {
for (int i = 256; i < 512; i++) for (int i = 256; i < 512; i++)
pm[i] = vmm_kernel_pm[i]; pm[i] = pg_kernel_pm[i];
} }
return pm; return pm;
} }
void vmm_free_pm(pagemap_t pm) { void pg_free_pm(pagemap_t pm) {
if (pm == vmm_kernel_pm) if (pm == pg_kernel_pm)
{ {
warn("vmm: Who tried to free the kernel's pagemap?!\n"); warn("vmm: Who tried to free the kernel's pagemap?!\n");
return; return;
@ -96,7 +96,7 @@ void vmm_free_pm(pagemap_t pm) {
pmm_free_page((void*)pm); pmm_free_page((void*)pm);
} }
static uint64_t *__vmm_get_next_lvl(uint64_t *level, uint64_t entry, static uint64_t *__pg_get_next_lvl(uint64_t *level, uint64_t entry,
uint64_t flags, bool alloc) { uint64_t flags, bool alloc) {
if (level[entry] & PTE_PRESENT) if (level[entry] & PTE_PRESENT)
return (uint64_t *)higher_half(PTE_GET_ADDR(level[entry])); return (uint64_t *)higher_half(PTE_GET_ADDR(level[entry]));
@ -109,7 +109,7 @@ static uint64_t *__vmm_get_next_lvl(uint64_t *level, uint64_t entry,
return NULL; return NULL;
} }
void vmm_map(pagemap_t pm, uint64_t vaddr, uint64_t paddr, uint64_t flags) { void pg_map(pagemap_t pm, uint64_t vaddr, uint64_t paddr, uint64_t flags) {
if (!pm) return; if (!pm) return;
uint64_t pml4_entry = (vaddr >> 39) & 0x1ff; uint64_t pml4_entry = (vaddr >> 39) & 0x1ff;
@ -117,14 +117,14 @@ void vmm_map(pagemap_t pm, uint64_t vaddr, uint64_t paddr, uint64_t flags) {
uint64_t pml2_entry = (vaddr >> 21) & 0x1ff; uint64_t pml2_entry = (vaddr >> 21) & 0x1ff;
uint64_t pml1_entry = (vaddr >> 12) & 0x1ff; uint64_t pml1_entry = (vaddr >> 12) & 0x1ff;
uint64_t *pml3 = __vmm_get_next_lvl(pm , pml4_entry, PTE_PRESENT | PTE_WRITE, true); uint64_t *pml3 = __pg_get_next_lvl(pm , pml4_entry, PTE_PRESENT | PTE_WRITE, true);
uint64_t *pml2 = __vmm_get_next_lvl(pml3, pml3_entry, PTE_PRESENT | PTE_WRITE, true); uint64_t *pml2 = __pg_get_next_lvl(pml3, pml3_entry, PTE_PRESENT | PTE_WRITE, true);
uint64_t *pml1 = __vmm_get_next_lvl(pml2, pml2_entry, PTE_PRESENT | PTE_WRITE, true); uint64_t *pml1 = __pg_get_next_lvl(pml2, pml2_entry, PTE_PRESENT | PTE_WRITE, true);
pml1[pml1_entry] = paddr | flags; pml1[pml1_entry] = paddr | flags;
} }
void vmm_map_user(pagemap_t pm, uint64_t vaddr, uint64_t paddr, void pg_map_user(pagemap_t pm, uint64_t vaddr, uint64_t paddr,
uint64_t flags) { uint64_t flags) {
if (!pm) return; if (!pm) return;
@ -133,14 +133,14 @@ void vmm_map_user(pagemap_t pm, uint64_t vaddr, uint64_t paddr,
uint64_t pml2_entry = (vaddr >> 21) & 0x1ff; uint64_t pml2_entry = (vaddr >> 21) & 0x1ff;
uint64_t pml1_entry = (vaddr >> 12) & 0x1ff; uint64_t pml1_entry = (vaddr >> 12) & 0x1ff;
uint64_t *pml3 = __vmm_get_next_lvl(pm , pml4_entry, flags, true); uint64_t *pml3 = __pg_get_next_lvl(pm , pml4_entry, flags, true);
uint64_t *pml2 = __vmm_get_next_lvl(pml3, pml3_entry, flags, true); uint64_t *pml2 = __pg_get_next_lvl(pml3, pml3_entry, flags, true);
uint64_t *pml1 = __vmm_get_next_lvl(pml2, pml2_entry, flags, true); uint64_t *pml1 = __pg_get_next_lvl(pml2, pml2_entry, flags, true);
pml1[pml1_entry] = paddr | flags; pml1[pml1_entry] = paddr | flags;
} }
void vmm_unmap(pagemap_t pm, uint64_t vaddr) { void pg_unmap(pagemap_t pm, uint64_t vaddr) {
if (!pm) return; if (!pm) return;
uint64_t pml4_entry = (vaddr >> 39) & 0x1ff; uint64_t pml4_entry = (vaddr >> 39) & 0x1ff;
@ -148,18 +148,18 @@ void vmm_unmap(pagemap_t pm, uint64_t vaddr) {
uint64_t pml2_entry = (vaddr >> 21) & 0x1ff; uint64_t pml2_entry = (vaddr >> 21) & 0x1ff;
uint64_t pml1_entry = (vaddr >> 12) & 0x1ff; uint64_t pml1_entry = (vaddr >> 12) & 0x1ff;
uint64_t *pml3 = __vmm_get_next_lvl(pm , pml4_entry, 0, false); uint64_t *pml3 = __pg_get_next_lvl(pm , pml4_entry, 0, false);
if (!pml3) return; if (!pml3) return;
uint64_t *pml2 = __vmm_get_next_lvl(pml3, pml3_entry, 0, false); uint64_t *pml2 = __pg_get_next_lvl(pml3, pml3_entry, 0, false);
if (!pml2) return; if (!pml2) return;
uint64_t *pml1 = __vmm_get_next_lvl(pml2, pml2_entry, 0, false); uint64_t *pml1 = __pg_get_next_lvl(pml2, pml2_entry, 0, false);
if (!pml1) return; if (!pml1) return;
pml1[pml1_entry] = 0; pml1[pml1_entry] = 0;
cpu_invalidate_page(pm, vaddr); cpu_invalidate_page(pm, vaddr);
} }
void vmm_protect(pagemap_t pm, uint64_t vaddr, uint64_t flags) { void pg_protect(pagemap_t pm, uint64_t vaddr, uint64_t flags) {
if (!pm) return; if (!pm) return;
uint64_t pml4_entry = (vaddr >> 39) & 0x1ff; uint64_t pml4_entry = (vaddr >> 39) & 0x1ff;
@ -167,11 +167,11 @@ void vmm_protect(pagemap_t pm, uint64_t vaddr, uint64_t flags) {
uint64_t pml2_entry = (vaddr >> 21) & 0x1ff; uint64_t pml2_entry = (vaddr >> 21) & 0x1ff;
uint64_t pml1_entry = (vaddr >> 12) & 0x1ff; uint64_t pml1_entry = (vaddr >> 12) & 0x1ff;
uint64_t *pml3 = __vmm_get_next_lvl(pm , pml4_entry, 0, false); uint64_t *pml3 = __pg_get_next_lvl(pm , pml4_entry, 0, false);
if (!pml3) return; if (!pml3) return;
uint64_t *pml2 = __vmm_get_next_lvl(pml3, pml3_entry, 0, false); uint64_t *pml2 = __pg_get_next_lvl(pml3, pml3_entry, 0, false);
if (!pml2) return; if (!pml2) return;
uint64_t *pml1 = __vmm_get_next_lvl(pml2, pml2_entry, 0, false); uint64_t *pml1 = __pg_get_next_lvl(pml2, pml2_entry, 0, false);
if (!pml1) return; if (!pml1) return;
uint64_t paddr = pml1[pml1_entry] & PTE_ADDR_MASK; uint64_t paddr = pml1[pml1_entry] & PTE_ADDR_MASK;

View file

@ -2,7 +2,7 @@
* The Soaplin Kernel * The Soaplin Kernel
* Copyright (C) 2025 The SILD Project * Copyright (C) 2025 The SILD Project
* *
* vmm.c - Virtual memory manager * paging.h - Virtual memory manager
*/ */
#pragma once #pragma once
@ -21,13 +21,13 @@
typedef uint64_t *pagemap_t; typedef uint64_t *pagemap_t;
void vmm_init(); void pg_init();
pagemap_t vmm_alloc_pm(); pagemap_t pg_alloc_pm();
void vmm_free_pm(pagemap_t pm); void pg_free_pm(pagemap_t pm);
void vmm_load_pm(pagemap_t pm); void pg_load_pm(pagemap_t pm);
void vmm_map(pagemap_t pm, uint64_t vaddr, uint64_t paddr, uint64_t flags); void pg_map(pagemap_t pm, uint64_t vaddr, uint64_t paddr, uint64_t flags);
void vmm_map_user(pagemap_t pm, uint64_t vaddr, uint64_t paddr, void pg_map_user(pagemap_t pm, uint64_t vaddr, uint64_t paddr,
uint64_t flags); uint64_t flags);
void vmm_unmap(pagemap_t pm, uint64_t vaddr); void pg_unmap(pagemap_t pm, uint64_t vaddr);
void vmm_protect(pagemap_t pm, uint64_t vaddr, uint64_t flags); void pg_protect(pagemap_t pm, uint64_t vaddr, uint64_t flags);

45
kernel/src/mm/vma.c Normal file
View file

@ -0,0 +1,45 @@
/*
* The Soaplin Kernel
* Copyright (C) 2025 The SILD Project
*
* vma.c - Virtual memory allocator
*/
#include <stddef.h>
#include <stdint.h>
#include <mm/paging.h>
#include "mm/vma.h"
#include "mm/pmm.h"
/*
 * Allocate a new VMA context backed by the given pagemap.
 *
 * pm    - pagemap this allocator will manage (must be non-NULL).
 * start - first virtual address governed by the context; stored in a
 *         zero-length root region that anchors the region list.
 *
 * Returns the new context, or NULL if pm is NULL or a backing page
 * could not be allocated. The caller owns the context and must
 * release it with vma_free_ctx().
 */
vma_ctx_t *vma_alloc_ctx(pagemap_t pm, uint64_t start) {
    if (pm == NULL)
        return NULL;

    /* pmm_alloc_page() can fail; the original code dereferenced the
     * result unconditionally. */
    void *ctx_page = pmm_alloc_page();
    if (ctx_page == NULL)
        return NULL;
    vma_ctx_t *ctx = (vma_ctx_t *)higher_half((uint64_t)ctx_page);
    ctx->pm = pm;

    void *reg_page = pmm_alloc_page();
    if (reg_page == NULL) {
        /* Roll back the context page so a half-built context never leaks. */
        pmm_free_page(ctx_page);
        return NULL;
    }
    vma_region_t *root_reg = (vma_region_t *)higher_half((uint64_t)reg_page);
    root_reg->start = start;
    root_reg->length = 0;
    root_reg->pflags = 0; /* was left uninitialized before */
    root_reg->next = NULL;

    ctx->root = root_reg;
    return ctx;
}
/*
 * Destroy a VMA context: return every region node to the PMM, then the
 * context structure itself. Safe to call with NULL.
 *
 * NOTE(review): this only releases the allocator's bookkeeping pages;
 * it does not unmap or free the physical pages the regions described —
 * presumably the caller tears down the pagemap separately. Confirm once
 * vma_alloc/vma_free land.
 */
void vma_free_ctx(vma_ctx_t *ctx) {
    if (!ctx)
        return;

    /* Walk the singly-linked region list, capturing `next` before the
     * node is handed back to the PMM. Nodes live in the higher half, so
     * translate back to a physical address for pmm_free_page(). */
    vma_region_t *reg = ctx->root;
    while (reg) {
        vma_region_t *next = reg->next;
        pmm_free_page((void *)physical((uint64_t)reg));
        reg = next;
    }

    /* The old code returned early when pm/root was NULL and leaked the
     * context page; always release it once the list is drained. */
    pmm_free_page((void *)physical((uint64_t)ctx));
}

28
kernel/src/mm/vma.h Normal file
View file

@ -0,0 +1,28 @@
/*
* The Soaplin Kernel
* Copyright (C) 2025 The SILD Project
*
* vma.h - Virtual memory allocator
*/
#pragma once
#include "mm/paging.h"
// One node in the singly-linked list of virtual memory regions tracked
// by a VMA context.
typedef struct __vma_region {
// First virtual address covered by this region.
uint64_t start;
// Page-table flags for the region (PTE_* bits — assumed; TODO confirm
// once vma mapping code lands).
uint64_t pflags;
// Length is in pages, not in bytes.
uint64_t length;
// Next region in the list, or NULL for the tail.
struct __vma_region *next;
} vma_region_t;
// Per-pagemap allocator state: the pagemap being managed plus the head
// of its region list (a zero-length anchor created by vma_alloc_ctx).
typedef struct {
pagemap_t pm;
vma_region_t *root;
} vma_ctx_t;
// Create a context for `pm` whose managed range begins at `start`.
// Returns NULL on failure; free with vma_free_ctx().
vma_ctx_t *vma_alloc_ctx(pagemap_t pm, uint64_t start);
// Release a context and all of its region nodes. NULL-safe.
void vma_free_ctx (vma_ctx_t *ctx);