From fc4de346a30774c978f56e9c5010853fea2f191c Mon Sep 17 00:00:00 2001 From: Kevin Alavik Date: Wed, 14 May 2025 16:38:26 +0200 Subject: [PATCH] feat: Started on paging --- .vscode/c_cpp_properties.json | 18 ++++ .vscode/settings.json | 3 +- kernel/src/arch/paging.c | 184 ++++++++++++++++++++++++++++++++++ kernel/src/arch/paging.h | 28 ++++++ kernel/src/boot/emk.h | 7 +- kernel/src/emk.c | 14 ++- kernel/src/mm/pmm.c | 4 +- kernel/src/mm/pmm.h | 4 +- kernel/src/mm/vmm.h | 22 ++++ 9 files changed, 275 insertions(+), 9 deletions(-) create mode 100644 .vscode/c_cpp_properties.json create mode 100644 kernel/src/arch/paging.c create mode 100644 kernel/src/arch/paging.h create mode 100644 kernel/src/mm/vmm.h diff --git a/.vscode/c_cpp_properties.json b/.vscode/c_cpp_properties.json new file mode 100644 index 0000000..df35a0b --- /dev/null +++ b/.vscode/c_cpp_properties.json @@ -0,0 +1,18 @@ +{ + "configurations": [ + { + "name": "linux-gcc-x64", + "includePath": [ + "${workspaceFolder}/**" + ], + "compilerPath": "/usr/bin/gcc", + "cStandard": "${default}", + "cppStandard": "${default}", + "intelliSenseMode": "linux-gcc-x64", + "defines": [ + "LIMINE_API_REVISION=3" + ] + } + ], + "version": 4 +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index 6f862d5..394630d 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -4,6 +4,7 @@ "stdarg.h": "c", "idt.h": "c", "limine.h": "c", - "stddef.h": "c" + "stddef.h": "c", + "stdint.h": "c" } } \ No newline at end of file diff --git a/kernel/src/arch/paging.c b/kernel/src/arch/paging.c new file mode 100644 index 0000000..f034655 --- /dev/null +++ b/kernel/src/arch/paging.c @@ -0,0 +1,184 @@ +#include +#include +#include +#include +#include +#include +#include + +uint64_t *kernel_pagemap = 0; +extern char __limine_requests_start[]; +extern char __limine_requests_end[]; +extern char __text_start[]; +extern char __text_end[]; +extern char __rodata_start[]; +extern char 
__rodata_end[]; +extern char __data_start[]; +extern char __data_end[]; + +#define PRINT_SECTION(name, start, end) log_early("section=%s, start=0x%.16llx, end=0x%.16llx, size=%d", name, start, end, end - start) + +/* Helpers */ +static inline uint64_t page_index(uint64_t virt, uint64_t shift) +{ + return (virt & (uint64_t)0x1ff << shift) >> shift; +} + +static inline uint64_t *get_table(uint64_t *table, uint64_t index) +{ + return (uint64_t *)HIGHER_HALF(table[index] & PAGE_MASK); +} + +static inline uint64_t *get_or_alloc_table(uint64_t *table, uint64_t index, uint64_t flags) +{ + if (!(table[index] & VMM_PRESENT)) + { + uint64_t *pml = palloc(1, true); + memset(pml, 0, PAGE_SIZE); + table[index] = (uint64_t)PHYSICAL(pml) | 0b111; + } + table[index] |= flags & 0xFF; + return (uint64_t *)HIGHER_HALF(table[index] & PAGE_MASK); +} + +uint64_t virt_to_phys(uint64_t *pagemap, uint64_t virt) +{ + + uint64_t pml4_idx = page_index(virt, PML4_SHIFT); + if (!(pagemap[pml4_idx] & VMM_PRESENT)) + return 0; + + uint64_t *pml3 = get_table(pagemap, pml4_idx); + uint64_t pml3_idx = page_index(virt, PML3_SHIFT); + if (!(pml3[pml3_idx] & VMM_PRESENT)) + return 0; + + uint64_t *pml2 = get_table(pml3, pml3_idx); + uint64_t pml2_idx = page_index(virt, PML2_SHIFT); + if (!(pml2[pml2_idx] & VMM_PRESENT)) + return 0; + + uint64_t *pml1 = get_table(pml2, pml2_idx); + uint64_t pml1_idx = page_index(virt, PML1_SHIFT); + if (!(pml1[pml1_idx] & VMM_PRESENT)) + return 0; + + return pml1[pml1_idx] & PAGE_MASK; +} + +/* Pagemap set/get */ +void pmset(uint64_t *pagemap) +{ + __asm__ volatile("movq %0, %%cr3" ::"r"(PHYSICAL((uint64_t)pagemap))); +} + +uint64_t *pmget() +{ + uint64_t p; + __asm__ volatile("movq %%cr3, %0" : "=r"(p)); + return (uint64_t *)p; +} + +/* Mapping and unmapping */ +void vmap(uint64_t *pagemap, uint64_t virt, uint64_t phys, uint64_t flags) +{ + + uint64_t pml4_idx = page_index(virt, PML4_SHIFT); + uint64_t pml3_idx = page_index(virt, PML3_SHIFT); + uint64_t pml2_idx = 
page_index(virt, PML2_SHIFT); + uint64_t pml1_idx = page_index(virt, PML1_SHIFT); + + uint64_t *pml3 = get_or_alloc_table(pagemap, pml4_idx, flags); + uint64_t *pml2 = get_or_alloc_table(pml3, pml3_idx, flags); + uint64_t *pml1 = get_or_alloc_table(pml2, pml2_idx, flags); + + pml1[pml1_idx] = phys | flags; +} + +void vunmap(uint64_t *pagemap, uint64_t virt) +{ + uint64_t pml4_idx = page_index(virt, PML4_SHIFT); + if (!(pagemap[pml4_idx] & VMM_PRESENT)) + return; + + uint64_t *pml3 = get_table(pagemap, pml4_idx); + uint64_t pml3_idx = page_index(virt, PML3_SHIFT); + if (!(pml3[pml3_idx] & VMM_PRESENT)) + return; + + uint64_t *pml2 = get_table(pml3, pml3_idx); + uint64_t pml2_idx = page_index(virt, PML2_SHIFT); + if (!(pml2[pml2_idx] & VMM_PRESENT)) + return; + + uint64_t *pml1 = get_table(pml2, pml2_idx); + uint64_t pml1_idx = page_index(virt, PML1_SHIFT); + + pml1[pml1_idx] = 0; + __asm__ volatile("invlpg (%0)" ::"r"(virt) : "memory"); +} + +void paging_init() +{ + kernel_pagemap = (uint64_t *)palloc(1, true); + if (kernel_pagemap == NULL) + { + kpanic(NULL, "Failed to allocate page for kernel pagemap, halting"); + } + memset(kernel_pagemap, 0, PAGE_SIZE); + + PRINT_SECTION("text", __text_start, __text_end); + PRINT_SECTION("rodata", __rodata_start, __rodata_end); + PRINT_SECTION("data", __data_start, __data_end); + + kstack_top = ALIGN_UP(kstack_top, PAGE_SIZE); + for (uint64_t stack = kstack_top - (16 * 1024); stack < kstack_top; stack += PAGE_SIZE) + { + vmap(kernel_pagemap, stack, (uint64_t)PHYSICAL(stack), VMM_PRESENT | VMM_WRITE | VMM_NX); + } + log_early("Mapped kernel stack"); + + for (uint64_t reqs = ALIGN_DOWN(__limine_requests_start, PAGE_SIZE); reqs < ALIGN_UP(__limine_requests_end, PAGE_SIZE); reqs += PAGE_SIZE) + { + vmap(kernel_pagemap, reqs, reqs - kvirt + kphys, VMM_PRESENT | VMM_WRITE); + } + log_early("Mapped Limine Requests region."); + + for (uint64_t text = ALIGN_DOWN(__text_start, PAGE_SIZE); text < ALIGN_UP(__text_end, PAGE_SIZE); text += 
PAGE_SIZE) + { + vmap(kernel_pagemap, text, text - kvirt + kphys, VMM_PRESENT); + } + log_early("Mapped .text"); + + for (uint64_t rodata = ALIGN_DOWN(__rodata_start, PAGE_SIZE); rodata < ALIGN_UP(__rodata_end, PAGE_SIZE); rodata += PAGE_SIZE) + { + vmap(kernel_pagemap, rodata, rodata - kvirt + kphys, VMM_PRESENT | VMM_NX); + } + log_early("Mapped .rodata"); + + for (uint64_t data = ALIGN_DOWN(__data_start, PAGE_SIZE); data < ALIGN_UP(__data_end, PAGE_SIZE); data += PAGE_SIZE) + { + vmap(kernel_pagemap, data, data - kvirt + kphys, VMM_PRESENT | VMM_WRITE | VMM_NX); + } + log_early("Mapped .data"); + + for (uint64_t i = 0; i < memmap->entry_count; i++) + { + struct limine_memmap_entry *entry = memmap->entries[i]; + uint64_t base = ALIGN_DOWN(entry->base, PAGE_SIZE); + uint64_t end = ALIGN_UP(entry->base + entry->length, PAGE_SIZE); + + for (uint64_t addr = base; addr < end; addr += PAGE_SIZE) + { + vmap(kernel_pagemap, (uint64_t)HIGHER_HALF(addr), addr, VMM_PRESENT | VMM_WRITE | VMM_NX); + } + log_early("Mapped memory map entry %d: base=0x%.16llx, length=0x%.16llx, type=%d", i, entry->base, entry->length, entry->type); + } + + for (uint64_t gb4 = 0; gb4 < 0x100000000; gb4 += PAGE_SIZE) + { + vmap(kernel_pagemap, (uint64_t)HIGHER_HALF(gb4), gb4, VMM_PRESENT | VMM_WRITE); + } + log_early("Mapped HHDM"); + pmset(kernel_pagemap); +} \ No newline at end of file diff --git a/kernel/src/arch/paging.h b/kernel/src/arch/paging.h new file mode 100644 index 0000000..b123a66 --- /dev/null +++ b/kernel/src/arch/paging.h @@ -0,0 +1,28 @@ +#ifndef PAGING_H +#define PAGING_H + +#include + +#define VMM_PRESENT BIT(0) +#define VMM_WRITE BIT(1) +#define VMM_USER BIT(2) +#define VMM_NX BIT(63) + +#define PAGE_MASK 0x000FFFFFFFFFF000ULL +#define PAGE_INDEX_MASK 0x1FF + +#define PML1_SHIFT 12 +#define PML2_SHIFT 21 +#define PML3_SHIFT 30 +#define PML4_SHIFT 39 + +extern uint64_t *kernel_pagemap; + +void pmset(uint64_t *pagemap); +uint64_t *pmget(); +void vmap(uint64_t *pagemap, uint64_t 
virt, uint64_t phys, uint64_t flags); +void vunmap(uint64_t *pagemap, uint64_t virt); +uint64_t virt_to_phys(uint64_t *pagemap, uint64_t virt); +void paging_init(); + +#endif // PAGING_H \ No newline at end of file diff --git a/kernel/src/boot/emk.h b/kernel/src/boot/emk.h index 74cf3d4..f6eb930 100644 --- a/kernel/src/boot/emk.h +++ b/kernel/src/boot/emk.h @@ -6,9 +6,12 @@ extern uint64_t hhdm_offset; extern struct limine_memmap_response *memmap; +extern uint64_t kvirt; +extern uint64_t kphys; +extern uint64_t kstack_top; -#define HIGHER_HALF(ptr) ((void *)((uint64_t)ptr) + hhdm_offset) -#define PHYSICAL(ptr) ((void *)((uint64_t)ptr) - hhdm_offset) +#define HIGHER_HALF(ptr) ((void *)((uint64_t)(ptr) < hhdm_offset ? (uint64_t)(ptr) + hhdm_offset : (uint64_t)(ptr))) +#define PHYSICAL(ptr) ((void *)((uint64_t)(ptr) >= hhdm_offset ? (uint64_t)(ptr) - hhdm_offset : (uint64_t)(ptr))) #define BIT(x) (1ULL << (x)) diff --git a/kernel/src/emk.c b/kernel/src/emk.c index 4275b96..caf1905 100644 --- a/kernel/src/emk.c +++ b/kernel/src/emk.c @@ -10,6 +10,7 @@ #include #include #include +#include __attribute__((used, section(".limine_requests"))) static volatile LIMINE_BASE_REVISION(3); __attribute__((used, section(".limine_requests"))) static volatile struct limine_memmap_request memmap_request = { @@ -18,14 +19,21 @@ __attribute__((used, section(".limine_requests"))) static volatile struct limine __attribute__((used, section(".limine_requests"))) static volatile struct limine_hhdm_request hhdm_request = { .id = LIMINE_HHDM_REQUEST, .revision = 0}; +__attribute__((used, section(".limine_requests"))) volatile struct limine_executable_address_request kernel_address_request = { + .id = LIMINE_EXECUTABLE_ADDRESS_REQUEST, + .response = 0}; __attribute__((used, section(".limine_requests_start"))) static volatile LIMINE_REQUESTS_START_MARKER; __attribute__((used, section(".limine_requests_end"))) static volatile LIMINE_REQUESTS_END_MARKER; uint64_t hhdm_offset = 0; struct 
limine_memmap_response *memmap = NULL; +uint64_t kvirt = 0; +uint64_t kphys = 0; +uint64_t kstack_top = 0; void emk_entry(void) { + __asm__ volatile("movq %%rsp, %0" : "=r"(kstack_top)); if (serial_init(COM1) != 0) { /* Just halt and say nothing */ @@ -62,13 +70,15 @@ void emk_entry(void) log_early("Initialized PMM"); /* Test allocate a single physical page */ - char *a = pmm_request_pages(1, true); + char *a = palloc(1, true); if (!a) kpanic(NULL, "Failed to allocate single physical page"); *a = 32; log_early("Allocated 1 physical page: %llx", (uint64_t)a); - pmm_release_pages(a, 1); + pfree(a, 1); + + paging_init(); hlt(); } \ No newline at end of file diff --git a/kernel/src/mm/pmm.c b/kernel/src/mm/pmm.c index b53c855..dba7fc6 100644 --- a/kernel/src/mm/pmm.c +++ b/kernel/src/mm/pmm.c @@ -82,7 +82,7 @@ void pmm_init(void) } } -void *pmm_request_pages(size_t pages, bool higher_half) +void *palloc(size_t pages, bool higher_half) { if (pages == 0 || pages > free_pages) return NULL; @@ -137,7 +137,7 @@ void *pmm_request_pages(size_t pages, bool higher_half) return NULL; } -void pmm_release_pages(void *ptr, size_t pages) +void pfree(void *ptr, size_t pages) { if (!ptr || !is_aligned(ptr, MIN_ALIGN)) return; diff --git a/kernel/src/mm/pmm.h b/kernel/src/mm/pmm.h index e0542e8..9a5bdad 100644 --- a/kernel/src/mm/pmm.h +++ b/kernel/src/mm/pmm.h @@ -7,7 +7,7 @@ #define PAGE_SIZE 0x1000 void pmm_init(); -void *pmm_request_pages(size_t pages, bool higher_half); -void pmm_release_pages(void *ptr, size_t pages); +void *palloc(size_t pages, bool higher_half); +void pfree(void *ptr, size_t pages); #endif // PMM_H \ No newline at end of file diff --git a/kernel/src/mm/vmm.h b/kernel/src/mm/vmm.h new file mode 100644 index 0000000..79af11f --- /dev/null +++ b/kernel/src/mm/vmm.h @@ -0,0 +1,22 @@ +#ifndef VMM_H +#define VMM_H + +#include + +typedef struct vm_region +{ + uint64_t start; + uint64_t pages; + struct vm_region *next; + /* TODO: Maybe store flags */ +} vm_region_t; + 
+typedef struct vma_ctx +{ + vm_region_t *root; + uint64_t *pagemap; +} vma_ctx_t; + +void vmm_init(); + +#endif // VMM_H \ No newline at end of file