Fixed ELF loading again; memory management needs to be redone properly

Jozef Nagy 2025-04-16 23:11:21 +02:00
parent 67f719c73f
commit 819a24ab8d
Signed by untrusted user who does not match committer: crz
GPG key ID: 459A4811CEAC7068
9 changed files with 68 additions and 28 deletions


@@ -28,45 +28,55 @@
 /* https://github.com/KevinAlavik/nekonix/blob/main/kernel/src/mm/vmm.c */
 /* Thanks, Kevin <3 */
-void map_pages(pagetable *pm, uintptr_t virt, uintptr_t phys, size_t size, uint64_t flags)
-{
-    for (size_t i = 0; i < ROUND_UP(size, PAGE_SIZE); i += PAGE_SIZE) {
-        map_page(pm, virt + i, phys + i, flags);
-    }
-}
-void map_page(pagetable *pm, uintptr_t virt, uintptr_t phys, uint64_t flags)
+static void _map(pagetable *pm, uintptr_t virt, uintptr_t phys, uint64_t flags)
 {
     uint64_t pml1_idx = (virt >> 12) & 0x1ff;
     uint64_t pml2_idx = (virt >> 21) & 0x1ff;
     uint64_t pml3_idx = (virt >> 30) & 0x1ff;
     uint64_t pml4_idx = (virt >> 39) & 0x1ff;
     flags |= VMM_PRESENT;
     if (!(pm->entries[pml4_idx] & 1)) {
         void *pml4 = mem_alloc(PAGE_SIZE);
         memset(pml4, 0, sizeof(pagetable));
-        pm->entries[pml4_idx] = (uint64_t)pml4 | VMM_PRESENT | VMM_WRITABLE | VMM_USER;
+        pm->entries[pml4_idx] = (uint64_t)pml4 | VMM_PRESENT | VMM_WRITABLE;
     }
     pagetable *pml3_table = (pagetable *)(pm->entries[pml4_idx] & 0x000FFFFFFFFFF000);
     if (!(pml3_table->entries[pml3_idx] & 1)) {
         void *pml3 = mem_alloc(PAGE_SIZE);
         memset(pml3, 0, sizeof(pagetable));
-        pml3_table->entries[pml3_idx] = (uint64_t)pml3 | VMM_PRESENT | VMM_WRITABLE | VMM_USER;
+        pml3_table->entries[pml3_idx] = (uint64_t)pml3 | VMM_PRESENT | VMM_WRITABLE;
     }
     pagetable *pml2_table = (pagetable *)(pml3_table->entries[pml3_idx] & 0x000FFFFFFFFFF000);
     if (!(pml2_table->entries[pml2_idx] & 1)) {
         void *pml2 = mem_alloc(PAGE_SIZE);
         memset(pml2, 0, sizeof(pagetable));
-        pml2_table->entries[pml2_idx] = (uint64_t)pml2 | VMM_PRESENT | VMM_WRITABLE | VMM_USER;
+        pml2_table->entries[pml2_idx] = (uint64_t)pml2 | VMM_PRESENT | VMM_WRITABLE;
     }
     pagetable *pml1_table = (pagetable *)(pml2_table->entries[pml2_idx] & 0x000FFFFFFFFFF000);
-    if (!(pml1_table->entries[pml1_idx] & 1)) {
-        pml1_table->entries[pml1_idx] = (phys | 0x000FFFFFFFFFF000) | flags;
+    if ((pml1_table->entries[pml1_idx] & 1)) {
+        debug("_map(): Remapping present page\n");
     }
+    pml1_table->entries[pml1_idx] = (phys & 0x000FFFFFFFFFF000) | flags;
 }
+void map_pages(pagetable *pm, uintptr_t virt, uintptr_t phys, size_t size, uint64_t flags)
+{
+    for (size_t i = 0; i < ROUND_UP(size, PAGE_SIZE); i += PAGE_SIZE) {
+        _map(pm, virt + i, phys + i, flags);
+    }
+    debug("map_pages(): Mapped 0x%llx-0x%llx -> 0x%llx-0x%llx\n", phys, phys + (size * PAGE_SIZE), virt, virt + (size * PAGE_SIZE));
+}
+void map_page(pagetable *pm, uintptr_t virt, uintptr_t phys, uint64_t flags)
+{
+    _map(pm, virt, phys, flags);
+    debug("map_page(): Mapped 0x%llx -> 0x%llx\n", phys, virt);
+}
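
For reference, a minimal standalone sketch (not part of this patch) of what the shift-and-mask lines in _map() compute: each of the four x86_64 paging levels indexes 512 entries, so each level consumes 9 bits of the virtual address starting at bit 12. The sample address and the main()/printf() wrapper are illustrative only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* typical higher-half kernel base address */
    uint64_t virt = 0xffffffff80000000ULL;

    /* same decomposition as _map(): 9 bits per level, starting at bit 12 */
    uint64_t pml4_idx = (virt >> 39) & 0x1ff; /* 511 */
    uint64_t pml3_idx = (virt >> 30) & 0x1ff; /* 510 */
    uint64_t pml2_idx = (virt >> 21) & 0x1ff; /* 0 */
    uint64_t pml1_idx = (virt >> 12) & 0x1ff; /* 0 */

    printf("PML4=%llu PML3=%llu PML2=%llu PML1=%llu\n",
           (unsigned long long)pml4_idx, (unsigned long long)pml3_idx,
           (unsigned long long)pml2_idx, (unsigned long long)pml1_idx);
    return 0;
}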
@@ -77,6 +87,7 @@ pagetable *create_pagemap()
debug("create_pagemap(): Failed to allocate memory for a new pm.\n");
return NULL;
}
pm = (pagetable *)ROUND_UP((uint64_t)pm, PAGE_SIZE);
memset(pm, 0, sizeof(pagetable));
debug("create_pagemap(): Created new pm at 0x%llx\n", (uint64_t)pm);


@@ -2,10 +2,23 @@
 .globl _aurix_handoff_end
 .globl aurix_handoff
-_aurix_handoff_start:
 aurix_handoff:
     cli
     movq %rsi, %rsp
+_aurix_handoff_start:
+.section _aurix_handoff
     movq %rdi, %cr3
-    jmpq *%rdx
+    xor %rax, %rax
+    xor %rbx, %rbx
+    xor %rcx, %rcx
+    xor %rdi, %rdi
+    xor %r8, %r8
+    xor %r9, %r9
+    xor %r10, %r10
+    xor %r11, %r11
+    xor %r12, %r12
+    xor %r13, %r13
+    xor %r14, %r14
+    xor %r15, %r15
+    callq *%rdx
 _aurix_handoff_end:
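
Read against the SysV AMD64 calling convention, the stub takes the new top-level page table in %rdi (first argument), the new stack top in %rsi (second) and the kernel entry point in %rdx (third): it disables interrupts, switches the stack and address space, clears most general-purpose registers and then calls the entry point. A hedged sketch of the calling side; the prototype and the boot_kernel() wrapper are illustrative, not taken from this repository:

#include <stdint.h>

/* %rdi = new CR3 (top-level page table), %rsi = new stack top, %rdx = entry point */
extern void aurix_handoff(uintptr_t pagemap_phys, uintptr_t stack_top, void *entry);

static void boot_kernel(uintptr_t pml4_phys, uintptr_t stack_top, void (*kernel_entry)(void))
{
    /* the kernel entry is not expected to return */
    aurix_handoff(pml4_phys, stack_top, (void *)kernel_entry);
}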