Fixed ELF loading again; memory management needs to be redone properly

Jozef Nagy 2025-04-16 23:11:21 +02:00
parent 67f719c73f
commit 819a24ab8d
Signed by untrusted user who does not match committer: crz
GPG key ID: 459A4811CEAC7068
9 changed files with 68 additions and 28 deletions


@@ -10,6 +10,8 @@ brew "make"
 brew "gptfdisk"
 brew "xorriso"
 brew "qemu"
+brew "llvm"
+brew "lld"
 brew "util-linux" if OS.mac?
 brew "gsed" if OS.mac?


@@ -28,45 +28,55 @@
 /* https://github.com/KevinAlavik/nekonix/blob/main/kernel/src/mm/vmm.c */
 /* Thanks, Kevin <3 */
-void map_pages(pagetable *pm, uintptr_t virt, uintptr_t phys, size_t size, uint64_t flags)
-{
-    for (size_t i = 0; i < ROUND_UP(size, PAGE_SIZE); i += PAGE_SIZE) {
-        map_page(pm, virt + i, phys + i, flags);
-    }
-}
-
-void map_page(pagetable *pm, uintptr_t virt, uintptr_t phys, uint64_t flags)
+static void _map(pagetable *pm, uintptr_t virt, uintptr_t phys, uint64_t flags)
 {
     uint64_t pml1_idx = (virt >> 12) & 0x1ff;
     uint64_t pml2_idx = (virt >> 21) & 0x1ff;
     uint64_t pml3_idx = (virt >> 30) & 0x1ff;
     uint64_t pml4_idx = (virt >> 39) & 0x1ff;
 
+    flags |= VMM_PRESENT;
+
     if (!(pm->entries[pml4_idx] & 1)) {
         void *pml4 = mem_alloc(PAGE_SIZE);
         memset(pml4, 0, sizeof(pagetable));
-        pm->entries[pml4_idx] = (uint64_t)pml4 | VMM_PRESENT | VMM_WRITABLE | VMM_USER;
+        pm->entries[pml4_idx] = (uint64_t)pml4 | VMM_PRESENT | VMM_WRITABLE;
     }
 
     pagetable *pml3_table = (pagetable *)(pm->entries[pml4_idx] & 0x000FFFFFFFFFF000);
     if (!(pml3_table->entries[pml3_idx] & 1)) {
         void *pml3 = mem_alloc(PAGE_SIZE);
         memset(pml3, 0, sizeof(pagetable));
-        pml3_table->entries[pml3_idx] = (uint64_t)pml3 | VMM_PRESENT | VMM_WRITABLE | VMM_USER;
+        pml3_table->entries[pml3_idx] = (uint64_t)pml3 | VMM_PRESENT | VMM_WRITABLE;
     }
 
     pagetable *pml2_table = (pagetable *)(pml3_table->entries[pml3_idx] & 0x000FFFFFFFFFF000);
     if (!(pml2_table->entries[pml2_idx] & 1)) {
         void *pml2 = mem_alloc(PAGE_SIZE);
         memset(pml2, 0, sizeof(pagetable));
-        pml2_table->entries[pml2_idx] = (uint64_t)pml2 | VMM_PRESENT | VMM_WRITABLE | VMM_USER;
+        pml2_table->entries[pml2_idx] = (uint64_t)pml2 | VMM_PRESENT | VMM_WRITABLE;
     }
 
     pagetable *pml1_table = (pagetable *)(pml2_table->entries[pml2_idx] & 0x000FFFFFFFFFF000);
-    if (!(pml1_table->entries[pml1_idx] & 1)) {
-        pml1_table->entries[pml1_idx] = (phys | 0x000FFFFFFFFFF000) | flags;
+    if ((pml1_table->entries[pml1_idx] & 1)) {
+        debug("_map(): Remapping present page\n");
     }
+    pml1_table->entries[pml1_idx] = (phys & 0x000FFFFFFFFFF000) | flags;
+}
+
+void map_pages(pagetable *pm, uintptr_t virt, uintptr_t phys, size_t size, uint64_t flags)
+{
+    for (size_t i = 0; i < ROUND_UP(size, PAGE_SIZE); i += PAGE_SIZE) {
+        _map(pm, virt + i, phys + i, flags);
+    }
+    debug("map_pages(): Mapped 0x%llx-0x%llx -> 0x%llx-0x%llx\n", phys, phys + (size * PAGE_SIZE), virt, virt + (size * PAGE_SIZE));
+}
+
+void map_page(pagetable *pm, uintptr_t virt, uintptr_t phys, uint64_t flags)
+{
+    _map(pm, virt, phys, flags);
     debug("map_page(): Mapped 0x%llx -> 0x%llx\n", phys, virt);
 }
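For reference, the _map() walk above decodes a virtual address as four 9-bit table indices sitting above a 12-bit page offset. A minimal standalone sketch of the same index math (the sample address is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uintptr_t virt = 0xffffffff80201000; /* hypothetical higher-half address */

        /* Same shifts and mask as _map(): 9 bits per level, offset below bit 12 */
        printf("pml4=%lu pml3=%lu pml2=%lu pml1=%lu offset=0x%lx\n",
               (unsigned long)((virt >> 39) & 0x1ff),
               (unsigned long)((virt >> 30) & 0x1ff),
               (unsigned long)((virt >> 21) & 0x1ff),
               (unsigned long)((virt >> 12) & 0x1ff),
               (unsigned long)(virt & 0xfff));
        /* prints: pml4=511 pml3=510 pml2=1 pml1=1 offset=0x0 */
        return 0;
    }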
@@ -77,6 +87,7 @@ pagetable *create_pagemap()
         debug("create_pagemap(): Failed to allocate memory for a new pm.\n");
         return NULL;
     }
+    pm = (pagetable *)ROUND_UP((uint64_t)pm, PAGE_SIZE);
     memset(pm, 0, sizeof(pagetable));
     debug("create_pagemap(): Created new pm at 0x%llx\n", (uint64_t)pm);


@@ -2,10 +2,23 @@
 .globl _aurix_handoff_end
 .globl aurix_handoff
 
+.section _aurix_handoff
+_aurix_handoff_start:
 aurix_handoff:
     cli
     movq %rsi, %rsp
-_aurix_handoff_start:
     movq %rdi, %cr3
-    jmpq *%rdx
+    xor %rax, %rax
+    xor %rbx, %rbx
+    xor %rcx, %rcx
+    xor %rdi, %rdi
+    xor %r8, %r8
+    xor %r9, %r9
+    xor %r10, %r10
+    xor %r11, %r11
+    xor %r12, %r12
+    xor %r13, %r13
+    xor %r14, %r14
+    xor %r15, %r15
+    callq *%rdx
 _aurix_handoff_end:
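For reference, the loader declares this stub as taking (pagemap, stack, entry, params), so under the System V AMD64 ABI it arrives with pagemap in %rdi, stack in %rsi, entry in %rdx and params in %rcx, which is why the stub loads %cr3 from %rdi, %rsp from %rsi and calls through %rdx. Note that the xor sequence also clears %rcx, so as written the params argument does not survive into the kernel. The C-side contract, as declared in the loader:

    #include <stdint.h>

    /* SysV register mapping for the stub's arguments:
     *   pagemap -> %rdi (new CR3), stack -> %rsi (new RSP),
     *   entry -> %rdx (call target), params -> %rcx (zeroed by the stub) */
    extern __attribute__((noreturn)) void aurix_handoff(void *pagemap, void *stack,
                                                        uint64_t entry, void *params);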


@@ -65,21 +65,21 @@ uintptr_t elf64_load(char *data, pagetable *pagemap)
             flags |= VMM_WRITABLE;
         if (!(ph[i].p_flags & PF_X))
             flags |= VMM_NX;
 
-        debug("elf64_load(): phys=0x%llx, virt=0x%llx, size=%lu\n", ph[i].p_paddr, ph[i].p_vaddr, ph[i].p_filesz);
-
         uint64_t phys = (uint64_t)mem_alloc(ph[i].p_memsz);
         if (!phys) {
             debug("elf64_load(): Out of memory\n");
             return 0;
         }
 
+        debug("elf64_load(): phys=0x%llx, virt=0x%llx, size=%lu\n", phys, ph[i].p_vaddr, ph[i].p_filesz);
+
         map_page(pagemap, ph[i].p_vaddr, phys, flags);
         memcpy((void*)ph[i].p_vaddr - lowest, data + ph[i].p_offset, ph[i].p_filesz);
     }
 
     debug("elf64_load(): ELF loaded successfully, entry: 0x%llx\n", header->e_entry);
-    return (uintptr_t)((uint8_t *)data + (header->e_entry - lowest));
+    return (uintptr_t)((uint8_t *)data + header->e_entry);
 }
 
 uintptr_t elf_load(char *data, pagetable *pagemap)
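The entry-point change at the bottom drops the "- lowest" adjustment: with the kernel now linked at 0x1000 and built -no-pie (see the Makefile and linker script changes below), e_entry is a small absolute address, so data + e_entry stays inside the load buffer. lowest itself is presumably the smallest PT_LOAD p_vaddr; a sketch of that computation using the standard <elf.h> definitions:

    #include <elf.h>    /* Elf64_Ehdr, Elf64_Phdr, PT_LOAD */
    #include <stdint.h>

    /* Smallest p_vaddr across all loadable segments; elf64_load() appears
     * to use this as the image's link base. */
    static uintptr_t lowest_load_vaddr(const char *data)
    {
        const Elf64_Ehdr *eh = (const Elf64_Ehdr *)data;
        const Elf64_Phdr *ph = (const Elf64_Phdr *)(data + eh->e_phoff);
        uintptr_t lowest = UINTPTR_MAX;

        for (uint16_t i = 0; i < eh->e_phnum; i++) {
            if (ph[i].p_type == PT_LOAD && ph[i].p_vaddr < lowest)
                lowest = ph[i].p_vaddr;
        }
        return lowest;
    }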


@@ -23,6 +23,9 @@
 #include <mm/vmm.h>
 #include <vfs/vfs.h>
 #include <print.h>
+#include <axboot.h>
+#include <efi.h>
+#include <efilib.h>
 
 extern __attribute__((noreturn)) void aurix_handoff(void *pagemap, void *stack, uint64_t entry, void *params);
 extern char _aurix_handoff_start[], _aurix_handoff_end[];
@@ -35,20 +38,24 @@ void aurix_load(char *kernel)
     // TODO: Do something with the kernel :p
     pagetable *pm = create_pagemap();
-    // __asm__ volatile("mov %%cr3, %0" : "=r"(pm));
     if (!pm) {
         debug("aurix_load(): Failed to create kernel pagemap! Halting...\n");
         // TODO: Halt
         while (1);
     }
 
-    map_pages(pm, (uintptr_t)_aurix_handoff_start, (uintptr_t)_aurix_handoff_start, (uint64_t)_aurix_handoff_end - (uint64_t)_aurix_handoff_start, VMM_PRESENT | VMM_USER | VMM_WRITABLE);
+    map_pages(pm, (uintptr_t)pm, (uintptr_t)pm, PAGE_SIZE, VMM_WRITABLE);
+    map_pages(pm, (uintptr_t)_aurix_handoff_start, (uintptr_t)_aurix_handoff_start, (uint64_t)_aurix_handoff_end - (uint64_t)_aurix_handoff_start, 0);
 
     void *stack = mem_alloc(16*1024); // 16 KiB stack should be well more than enough
     if (!stack) {
         debug("aurix_load(): Failed to allocate stack! Halting...\n");
         while (1);
     }
+    map_pages(pm, (uintptr_t)stack, (uintptr_t)stack, 16*1024, VMM_WRITABLE | VMM_NX);
 
     void *kernel_entry = (void *)elf_load(kbuf, pm);
     if (!kernel_entry) {
         debug("aurix_load(): Failed to load '%s'! Halting...\n", kernel);
@@ -60,10 +67,14 @@ void aurix_load(char *kernel)
     debug("aurix_load(): Handoff state: pm=0x%llx, stack=0x%llx, kernel_entry=0x%llx\n", pm, stack, kernel_entry);
 
-    aurix_handoff(pm, (void *)((uint8_t)stack + 16*1024), (uint64_t)kernel_entry, (void *)parameters);
+    // this triggers a #GP ????
+    // aurix_handoff(pm, stack, (uint64_t)kernel_entry, (void *)parameters);
+    // __builtin_unreachable();
 
-    // __asm__ volatile("movq %[pml4], %%cr3\n" :: [pml4]"r"(pm) : "memory");
+    __asm__ volatile("movq %[pml4], %%cr3\n"
+                     "movq %[stack], %%rsp\n"
+                     "callq *%[entry]\n"
+                     :: [pml4]"r"(pm), [stack]"r"(stack), [entry]"r"(kernel_entry) : "memory");
     // __asm__ volatile("callq *%[entry]\n"
     //                  :: [entry]"r"(kernel_entry));
 }


@@ -55,7 +55,7 @@ KERNEL_CFLAGS := $(foreach d, $(INCLUDE_DIRS), -I$d) \
 KERNEL_LDFLAGS := -Tarch/$(ARCH)/linker.ld \
                   -nostdlib \
                   -static \
-                  #-no-pie
+                  -no-pie
 
 ifeq ($(BUILD_TYPE),debug)
 KERNEL_CFLAGS += -O0 -g3


@@ -32,7 +32,7 @@ PHDRS
 SECTIONS
 {
     /* TODO: 0xffffffff80000000 */
-    . = 0xffffffff80000000;
+    . = 0x1000;
 
     _linker_start_text = .;


@@ -31,4 +31,6 @@ void _start(void)
     __asm__ volatile("wfe");
 #endif
     }
+
+    __builtin_unreachable();
 }

machine/riscv32/qemu.mk (new file)

@@ -0,0 +1 @@
+QEMU_MACHINE_FLAGS := -M virt -bios default -nographic