
feat: Started on paging

Kevin Alavik 2025-05-14 16:38:26 +02:00
parent 2bbc7dd70f
commit fc4de346a3
Signed by: cmpsb
GPG key ID: 10D1CC0526FDC6D7
9 changed files with 275 additions and 9 deletions

.vscode/c_cpp_properties.json vendored Normal file

@@ -0,0 +1,18 @@
{
"configurations": [
{
"name": "linux-gcc-x64",
"includePath": [
"${workspaceFolder}/**"
],
"compilerPath": "/usr/bin/gcc",
"cStandard": "${default}",
"cppStandard": "${default}",
"intelliSenseMode": "linux-gcc-x64",
"defines": [
"LIMINE_API_REVISION=3"
]
}
],
"version": 4
}

.vscode/settings.json

@@ -4,6 +4,7 @@
"stdarg.h": "c",
"idt.h": "c",
"limine.h": "c",
"stddef.h": "c"
"stddef.h": "c",
"stdint.h": "c"
}
}

kernel/src/arch/paging.c Normal file

@@ -0,0 +1,184 @@
#include <arch/paging.h>
#include <mm/pmm.h>
#include <lib/string.h>
#include <sys/kpanic.h>
#include <boot/emk.h>
#include <util/log.h>
#include <util/align.h>
uint64_t *kernel_pagemap = 0;
extern char __limine_requests_start[];
extern char __limine_requests_end[];
extern char __text_start[];
extern char __text_end[];
extern char __rodata_start[];
extern char __rodata_end[];
extern char __data_start[];
extern char __data_end[];
#define PRINT_SECTION(name, start, end) log_early("section=%s, start=0x%.16llx, end=0x%.16llx, size=%llu", name, (uint64_t)(start), (uint64_t)(end), (uint64_t)((end) - (start)))
/* Helpers */
static inline uint64_t page_index(uint64_t virt, uint64_t shift)
{
    /* Each level indexes with 9 bits of the virtual address */
    return (virt >> shift) & PAGE_INDEX_MASK;
}
static inline uint64_t *get_table(uint64_t *table, uint64_t index)
{
return (uint64_t *)HIGHER_HALF(table[index] & PAGE_MASK);
}
static inline uint64_t *get_or_alloc_table(uint64_t *table, uint64_t index, uint64_t flags)
{
    if (!(table[index] & VMM_PRESENT))
    {
        uint64_t *pml = palloc(1, true);
        if (!pml)
            kpanic(NULL, "Failed to allocate page table");
        memset(pml, 0, PAGE_SIZE);
        /* Present | writable | user: intermediate entries stay permissive;
         * the final PML1 entry carries the real permissions. */
        table[index] = (uint64_t)PHYSICAL(pml) | 0b111;
    }
    /* Only propagate the low attribute bits; NX (bit 63) is deliberately
     * never set on intermediate levels. */
    table[index] |= flags & 0xFF;
    return (uint64_t *)HIGHER_HALF(table[index] & PAGE_MASK);
}
uint64_t virt_to_phys(uint64_t *pagemap, uint64_t virt)
{
uint64_t pml4_idx = page_index(virt, PML4_SHIFT);
if (!(pagemap[pml4_idx] & VMM_PRESENT))
return 0;
uint64_t *pml3 = get_table(pagemap, pml4_idx);
uint64_t pml3_idx = page_index(virt, PML3_SHIFT);
if (!(pml3[pml3_idx] & VMM_PRESENT))
return 0;
uint64_t *pml2 = get_table(pml3, pml3_idx);
uint64_t pml2_idx = page_index(virt, PML2_SHIFT);
if (!(pml2[pml2_idx] & VMM_PRESENT))
return 0;
uint64_t *pml1 = get_table(pml2, pml2_idx);
uint64_t pml1_idx = page_index(virt, PML1_SHIFT);
if (!(pml1[pml1_idx] & VMM_PRESENT))
return 0;
    /* Include the in-page offset so a full physical address comes back */
    return (pml1[pml1_idx] & PAGE_MASK) | (virt & 0xFFF);
}
/* Pagemap set/get */
void pmset(uint64_t *pagemap)
{
__asm__ volatile("movq %0, %%cr3" ::"r"(PHYSICAL((uint64_t)pagemap)));
}
uint64_t *pmget(void)
{
    uint64_t p;
    __asm__ volatile("movq %%cr3, %0" : "=r"(p));
    return (uint64_t *)p; /* raw CR3 value, i.e. a physical address */
}
/* Mapping and unmapping */
void vmap(uint64_t *pagemap, uint64_t virt, uint64_t phys, uint64_t flags)
{
uint64_t pml4_idx = page_index(virt, PML4_SHIFT);
uint64_t pml3_idx = page_index(virt, PML3_SHIFT);
uint64_t pml2_idx = page_index(virt, PML2_SHIFT);
uint64_t pml1_idx = page_index(virt, PML1_SHIFT);
uint64_t *pml3 = get_or_alloc_table(pagemap, pml4_idx, flags);
uint64_t *pml2 = get_or_alloc_table(pml3, pml3_idx, flags);
uint64_t *pml1 = get_or_alloc_table(pml2, pml2_idx, flags);
pml1[pml1_idx] = phys | flags;
}
void vunmap(uint64_t *pagemap, uint64_t virt)
{
uint64_t pml4_idx = page_index(virt, PML4_SHIFT);
if (!(pagemap[pml4_idx] & VMM_PRESENT))
return;
uint64_t *pml3 = get_table(pagemap, pml4_idx);
uint64_t pml3_idx = page_index(virt, PML3_SHIFT);
if (!(pml3[pml3_idx] & VMM_PRESENT))
return;
uint64_t *pml2 = get_table(pml3, pml3_idx);
uint64_t pml2_idx = page_index(virt, PML2_SHIFT);
if (!(pml2[pml2_idx] & VMM_PRESENT))
return;
uint64_t *pml1 = get_table(pml2, pml2_idx);
uint64_t pml1_idx = page_index(virt, PML1_SHIFT);
pml1[pml1_idx] = 0;
__asm__ volatile("invlpg (%0)" ::"r"(virt) : "memory");
}
void paging_init(void)
{
kernel_pagemap = (uint64_t *)palloc(1, true);
if (kernel_pagemap == NULL)
{
kpanic(NULL, "Failed to allocate page for kernel pagemap, halting");
}
memset(kernel_pagemap, 0, PAGE_SIZE);
PRINT_SECTION("text", __text_start, __text_end);
PRINT_SECTION("rodata", __rodata_start, __rodata_end);
PRINT_SECTION("data", __data_start, __data_end);
kstack_top = ALIGN_UP(kstack_top, PAGE_SIZE);
    /* Map the 16 KiB kernel stack just below the page-aligned stack top */
    for (uint64_t stack = kstack_top - (16 * 1024); stack < kstack_top; stack += PAGE_SIZE)
{
vmap(kernel_pagemap, stack, (uint64_t)PHYSICAL(stack), VMM_PRESENT | VMM_WRITE | VMM_NX);
}
log_early("Mapped kernel stack");
    for (uint64_t reqs = ALIGN_DOWN((uint64_t)__limine_requests_start, PAGE_SIZE); reqs < ALIGN_UP((uint64_t)__limine_requests_end, PAGE_SIZE); reqs += PAGE_SIZE)
{
vmap(kernel_pagemap, reqs, reqs - kvirt + kphys, VMM_PRESENT | VMM_WRITE);
}
log_early("Mapped Limine Requests region.");
    for (uint64_t text = ALIGN_DOWN((uint64_t)__text_start, PAGE_SIZE); text < ALIGN_UP((uint64_t)__text_end, PAGE_SIZE); text += PAGE_SIZE)
{
vmap(kernel_pagemap, text, text - kvirt + kphys, VMM_PRESENT);
}
log_early("Mapped .text");
    for (uint64_t rodata = ALIGN_DOWN((uint64_t)__rodata_start, PAGE_SIZE); rodata < ALIGN_UP((uint64_t)__rodata_end, PAGE_SIZE); rodata += PAGE_SIZE)
{
vmap(kernel_pagemap, rodata, rodata - kvirt + kphys, VMM_PRESENT | VMM_NX);
}
log_early("Mapped .rodata");
    for (uint64_t data = ALIGN_DOWN((uint64_t)__data_start, PAGE_SIZE); data < ALIGN_UP((uint64_t)__data_end, PAGE_SIZE); data += PAGE_SIZE)
{
vmap(kernel_pagemap, data, data - kvirt + kphys, VMM_PRESENT | VMM_WRITE | VMM_NX);
}
log_early("Mapped .data");
for (uint64_t i = 0; i < memmap->entry_count; i++)
{
struct limine_memmap_entry *entry = memmap->entries[i];
uint64_t base = ALIGN_DOWN(entry->base, PAGE_SIZE);
uint64_t end = ALIGN_UP(entry->base + entry->length, PAGE_SIZE);
for (uint64_t addr = base; addr < end; addr += PAGE_SIZE)
{
vmap(kernel_pagemap, (uint64_t)HIGHER_HALF(addr), addr, VMM_PRESENT | VMM_WRITE | VMM_NX);
}
log_early("Mapped memory map entry %d: base=0x%.16llx, length=0x%.16llx, type=%d", i, entry->base, entry->length, entry->type);
}
    /* HHDM-map the first 4 GiB unconditionally, covering ranges (e.g. MMIO)
     * that the memory map loop may not have touched */
    for (uint64_t gb4 = 0; gb4 < 0x100000000; gb4 += PAGE_SIZE)
{
vmap(kernel_pagemap, (uint64_t)HIGHER_HALF(gb4), gb4, VMM_PRESENT | VMM_WRITE);
}
log_early("Mapped HHDM");
pmset(kernel_pagemap);
}
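
Not part of the commit, but for context: a minimal smoke test built only from the functions above. The function name and the virtual address are hypothetical, and it assumes paging_init() has already installed kernel_pagemap.

/* Hypothetical smoke test: map one fresh frame, verify the walk, unmap. */
void paging_smoke_test(void)
{
    uint64_t virt = 0xFFFF900000000000; /* assumed-free virtual slot */
    void *page = palloc(1, true);
    if (!page)
        kpanic(NULL, "paging smoke test: out of memory");
    uint64_t phys = (uint64_t)PHYSICAL(page);
    vmap(kernel_pagemap, virt, phys, VMM_PRESENT | VMM_WRITE | VMM_NX);
    if (virt_to_phys(kernel_pagemap, virt) != phys)
        kpanic(NULL, "paging smoke test: translation mismatch");
    vunmap(kernel_pagemap, virt);
    pfree(page, 1);
}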

kernel/src/arch/paging.h Normal file

@@ -0,0 +1,28 @@
#ifndef PAGING_H
#define PAGING_H
#include <boot/emk.h>
#define VMM_PRESENT BIT(0)
#define VMM_WRITE BIT(1)
#define VMM_USER BIT(2)
#define VMM_NX BIT(63)
#define PAGE_MASK 0x000FFFFFFFFFF000ULL
#define PAGE_INDEX_MASK 0x1FF
#define PML1_SHIFT 12
#define PML2_SHIFT 21
#define PML3_SHIFT 30
#define PML4_SHIFT 39
extern uint64_t *kernel_pagemap;
void pmset(uint64_t *pagemap);
uint64_t *pmget(void);
void vmap(uint64_t *pagemap, uint64_t virt, uint64_t phys, uint64_t flags);
void vunmap(uint64_t *pagemap, uint64_t virt);
uint64_t virt_to_phys(uint64_t *pagemap, uint64_t virt);
void paging_init(void);
#endif // PAGING_H
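
As a worked illustration (not in the commit) of how these shift constants decompose a canonical virtual address:

/* For virt = 0xFFFFFFFF80001000, a typical higher-half kernel address:
 *   (virt >> PML4_SHIFT) & PAGE_INDEX_MASK = 511  (PML4 index)
 *   (virt >> PML3_SHIFT) & PAGE_INDEX_MASK = 510  (PML3 index)
 *   (virt >> PML2_SHIFT) & PAGE_INDEX_MASK = 0    (PML2 index)
 *   (virt >> PML1_SHIFT) & PAGE_INDEX_MASK = 1    (PML1 index)
 * The low 12 bits (0x000) are the byte offset inside the 4 KiB page. */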

kernel/src/boot/emk.h

@@ -6,9 +6,12 @@
extern uint64_t hhdm_offset;
extern struct limine_memmap_response *memmap;
+extern uint64_t kvirt;
+extern uint64_t kphys;
+extern uint64_t kstack_top;
-#define HIGHER_HALF(ptr) ((void *)((uint64_t)ptr) + hhdm_offset)
-#define PHYSICAL(ptr) ((void *)((uint64_t)ptr) - hhdm_offset)
+#define HIGHER_HALF(ptr) ((void *)((uint64_t)(ptr) < hhdm_offset ? (uint64_t)(ptr) + hhdm_offset : (uint64_t)(ptr)))
+#define PHYSICAL(ptr) ((void *)((uint64_t)(ptr) >= hhdm_offset ? (uint64_t)(ptr) - hhdm_offset : (uint64_t)(ptr)))
#define BIT(x) (1ULL << (x))
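
Not in the commit, but worth noting what the reworked macros buy: both conversions are now idempotent. The HHDM base below is just a typical Limine value, assumed for illustration:

/* Assuming hhdm_offset == 0xFFFF800000000000:
 *   HIGHER_HALF(0x1000)             -> 0xFFFF800000001000
 *   HIGHER_HALF(0xFFFF800000001000) -> 0xFFFF800000001000 (already high)
 *   PHYSICAL(0xFFFF800000001000)    -> 0x1000
 *   PHYSICAL(0x1000)                -> 0x1000 (already physical)
 */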

(kernel entry source file; name not shown in this render)

@@ -10,6 +10,7 @@
#include <arch/idt.h>
#include <sys/kpanic.h>
#include <mm/pmm.h>
+#include <arch/paging.h>
__attribute__((used, section(".limine_requests"))) static volatile LIMINE_BASE_REVISION(3);
__attribute__((used, section(".limine_requests"))) static volatile struct limine_memmap_request memmap_request = {
@@ -18,14 +19,21 @@ __attribute__((used, section(".limine_requests"))) static volatile struct limine
__attribute__((used, section(".limine_requests"))) static volatile struct limine_hhdm_request hhdm_request = {
.id = LIMINE_HHDM_REQUEST,
.revision = 0};
+__attribute__((used, section(".limine_requests"))) volatile struct limine_executable_address_request kernel_address_request = {
+    .id = LIMINE_EXECUTABLE_ADDRESS_REQUEST,
+    .response = 0};
__attribute__((used, section(".limine_requests_start"))) static volatile LIMINE_REQUESTS_START_MARKER;
__attribute__((used, section(".limine_requests_end"))) static volatile LIMINE_REQUESTS_END_MARKER;
uint64_t hhdm_offset = 0;
struct limine_memmap_response *memmap = NULL;
+uint64_t kvirt = 0;
+uint64_t kphys = 0;
+uint64_t kstack_top = 0;
void emk_entry(void)
{
__asm__ volatile("movq %%rsp, %0" : "=r"(kstack_top));
if (serial_init(COM1) != 0)
{
/* Just halt and say nothing */
@@ -62,13 +70,15 @@ void emk_entry(void)
log_early("Initialized PMM");
/* Test allocate a single physical page */
-    char *a = pmm_request_pages(1, true);
+    char *a = palloc(1, true);
if (!a)
kpanic(NULL, "Failed to allocate single physical page");
*a = 32;
log_early("Allocated 1 physical page: %llx", (uint64_t)a);
-    pmm_release_pages(a, 1);
+    pfree(a, 1);
+    paging_init();
hlt();
}

kernel/src/mm/pmm.c

@@ -82,7 +82,7 @@ void pmm_init(void)
}
}
-void *pmm_request_pages(size_t pages, bool higher_half)
+void *palloc(size_t pages, bool higher_half)
{
if (pages == 0 || pages > free_pages)
return NULL;
@@ -137,7 +137,7 @@ void *pmm_request_pages(size_t pages, bool higher_half)
return NULL;
}
-void pmm_release_pages(void *ptr, size_t pages)
+void pfree(void *ptr, size_t pages)
{
if (!ptr || !is_aligned(ptr, MIN_ALIGN))
return;

kernel/src/mm/pmm.h

@@ -7,7 +7,7 @@
#define PAGE_SIZE 0x1000
void pmm_init();
-void *pmm_request_pages(size_t pages, bool higher_half);
-void pmm_release_pages(void *ptr, size_t pages);
+void *palloc(size_t pages, bool higher_half);
+void pfree(void *ptr, size_t pages);
#endif // PMM_H
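
A small usage sketch of the renamed API (mirrors the test in emk_entry above; assumes <lib/string.h> for memset):

/* Grab two HHDM-mapped pages, zero them, and return them to the PMM. */
void *buf = palloc(2, true);
if (buf)
{
    memset(buf, 0, 2 * PAGE_SIZE);
    pfree(buf, 2);
}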

kernel/src/mm/vmm.h Normal file

@@ -0,0 +1,22 @@
#ifndef VMM_H
#define VMM_H
#include <stdint.h>
typedef struct vm_region
{
uint64_t start;
uint64_t pages;
struct vm_region *next;
/* TODO: Maybe store flags */
} vm_region_t;
typedef struct vma_ctx
{
vm_region_t *root;
uint64_t *pagemap;
} vma_ctx_t;
void vmm_init(void);
#endif // VMM_H
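
vmm_init() is only declared so far; purely as a speculative sketch of where the region list could go, here is a first-fit hole search over a start-sorted list. vma_find_hole is a hypothetical name, not part of this commit, and PAGE_SIZE comes from <mm/pmm.h>:

#include <mm/vmm.h>
#include <mm/pmm.h> /* PAGE_SIZE */
#include <stddef.h>

/* Hypothetical first-fit walk: lowest address at or above `base` with
 * room for `pages` pages, given a list sorted by region start. */
static uint64_t vma_find_hole(vma_ctx_t *ctx, uint64_t base, uint64_t pages)
{
    uint64_t candidate = base;
    for (vm_region_t *r = ctx->root; r != NULL; r = r->next)
    {
        if (candidate + pages * PAGE_SIZE <= r->start)
            break; /* the gap before this region already fits */
        uint64_t r_end = r->start + r->pages * PAGE_SIZE;
        if (r_end > candidate)
            candidate = r_end; /* step past this region */
    }
    return candidate;
}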