vmm: Use the kernel's ELF file to map the sections (and now works!)
parent 875dc2685b
commit c658f738e4
9 changed files with 250 additions and 59 deletions
@ -25,36 +25,29 @@ SECTIONS
    /* Define a section to contain the Limine requests and assign it to its own PHDR */
    .limine_requests : {
        reqs_start_ld = .;
        KEEP(*(.limine_requests_start))
        KEEP(*(.limine_requests))
        KEEP(*(.limine_requests_end))
        reqs_end_ld = .;
    } :limine_requests

    /* Move to the next memory page for .text */
    . = ALIGN(CONSTANT(MAXPAGESIZE));

    .text : {
        text_start_ld = .;
        *(.text .text.*)
        text_end_ld = .;
    } :text

    /* Move to the next memory page for .rodata */
    . = ALIGN(CONSTANT(MAXPAGESIZE));

    .rodata : {
        rodata_start_ld = .;
        *(.rodata .rodata.*)
        rodata_end_ld = .;
    } :rodata

    /* Move to the next memory page for .data */
    . = ALIGN(CONSTANT(MAXPAGESIZE));

    .data : {
        data_start_ld = .;
        *(.data .data.*)
    } :data

@ -65,7 +58,6 @@ SECTIONS
    .bss : {
        *(.bss .bss.*)
        *(COMMON)
        data_end_ld = .;
    } :data

    /* Discard .note.* and .eh_frame* since they may cause issues on some hosts. */
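The :limine_requests, :text, :rodata, and :data suffixes above assign each output section to a program header. The PHDRS block that declares those names is not part of this diff; a minimal sketch of what it presumably looks like:

PHDRS
{
    /* Assumed declarations; the real PHDRS block is outside the shown hunks. */
    limine_requests PT_LOAD;
    text            PT_LOAD;
    rodata          PT_LOAD;
    data            PT_LOAD;
}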
@ -14,6 +14,54 @@ __attribute__((aligned(0x10)))
static idt_entry_t idt[256];
static idtr_t idtr;

static void __panic_display_bt(registers_t *regs) {
    if (regs->cs == 0x43 || regs->cs == 0x3B) {
        fatal("The backtrace can't be dumped from a userspace process.\n");
        return; // Don't try to backtrace userspace
    }

    fatal("-- BACKTRACE --\n");

    // First print the current instruction pointer from the interrupt frame
    if (regs->rip) {
        fatal("* %p (current)\n", regs->rip);
    }

    uint64_t *frame = (uint64_t*)regs->rbp;
    if (!frame || (uint64_t)frame < 0xffffffff80000000) {
        fatal("No further stack frames available\n");
        return;
    }

    // Frame format in x86_64:
    //   [rbp]   -> previous rbp
    //   [rbp+8] -> return address
    int depth = 0;
    while (frame && depth < 16) { // Limit depth to avoid infinite loops
        // Validate both the frame and return address pointers
        uint64_t *ret_addr_ptr = frame + 1;
        if ((uint64_t)ret_addr_ptr < 0xffffffff80000000) {
            break;
        }

        uint64_t ret_addr = *ret_addr_ptr;
        if (ret_addr < 0xffffffff80000000 || ret_addr > 0xfffffffffffff000) {
            break;
        }

        fatal("* %p\n", ret_addr);

        uint64_t next_rbp = *frame;
        if (next_rbp < 0xffffffff80000000 || next_rbp > 0xfffffffffffff000) {
            break;
        }

        frame = (uint64_t*)next_rbp;
        depth++;
    }
    fatal("\n");
}

void idt_interrupt_handler(registers_t *regs) {
    fatal("Kernel panic: CPU exception %d\n", regs->int_no);
    fatal("rax: %p, rbx: %p, rbp: %p, rdx: %p\n", regs->rax, regs->rbx, regs->rbp, regs->rdx);

@ -23,6 +71,7 @@ void idt_interrupt_handler(registers_t *regs) {
    fatal("r14: %p, r15: %p\n", regs->r14, regs->r15);
    fatal("rip: %p, cs: %p, ss: %p\n", regs->rip, regs->cs, regs->ss);
    fatal("rflags: %p, err: %p, rsp: %p\n", regs->rflags, regs->err_code, regs->rsp);
    __panic_display_bt(regs);
    hcf();
}
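A note on the walker above: it follows the saved-rbp chain, which only exists if the kernel is built with frame pointers preserved (something like -fno-omit-frame-pointer in the build flags, which this diff does not show). A minimal sketch of the frame layout the loop relies on, expressed as a struct:

// Sketch only: the frame layout assumed by __panic_display_bt, valid when
// the kernel is compiled with frame pointers preserved.
typedef struct stack_frame {
    struct stack_frame *rbp; // saved caller rbp, read via *frame
    uint64_t rip;            // return address, read via *(frame + 1)
} stack_frame_t;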
@ -5,11 +5,11 @@
 * limine.c - Limine bootloader interface implementation.
 */

#include "deps/limine.h"
#include "limine.h"
#include <stdbool.h>
#include <stddef.h>
#include <boot/limine.h>
#include <deps/limine.h>
#include <exec/elf.h>

__attribute__((used, section(".limine_requests")))
static volatile LIMINE_BASE_REVISION(3);

@ -50,6 +50,19 @@ static volatile struct limine_executable_address_request kaddr_req = {
    .revision = 0
};

__attribute__((used, section(".limine_requests")))
static volatile struct limine_executable_file_request execfile_req = {
    .id = LIMINE_EXECUTABLE_FILE_REQUEST,
    .revision = 0
};

__attribute__((used, section(".limine_requests")))
static volatile struct limine_paging_mode_request pgmode_req = {
    .id = LIMINE_PAGING_MODE_REQUEST,
    .mode = LIMINE_PAGING_MODE_X86_64_4LVL,
    .revision = 0
};

__attribute__((used, section(".limine_requests_start")))
static volatile LIMINE_REQUESTS_START_MARKER;

@ -96,3 +109,4 @@ limine_bootinfo_t *limine_get_bootinfo() {
uint64_t limine_get_hhdm_offset() { return hhdm_req.response->offset; }
uint64_t limine_get_kernel_vaddr() { return kaddr_req.response->virtual_base; }
uint64_t limine_get_kernel_paddr() { return kaddr_req.response->physical_base; }
uint64_t limine_get_kernel_ehdr_addr() { return (uint64_t)execfile_req.response->executable_file->address; }
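limine_get_kernel_ehdr_addr() dereferences execfile_req.response without checking it. A sketch of a guard that could run during Limine initialization (the fatal/hcf calls are assumed to be available here, as they are elsewhere in the kernel):

// Sketch, not part of this commit: fail loudly if the bootloader did not
// answer the executable file request before anyone asks for the ELF header.
if (execfile_req.response == NULL ||
    execfile_req.response->executable_file == NULL) {
    fatal("limine: no executable file response; cannot locate the kernel ELF\n");
    hcf();
}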
@ -34,3 +34,4 @@ struct limine_memmap_response *limine_get_memmap();
uint64_t limine_get_hhdm_offset();
uint64_t limine_get_kernel_vaddr();
uint64_t limine_get_kernel_paddr();
uint64_t limine_get_kernel_ehdr_addr();
kernel/src/exec/elf.h (new file, 150 lines)
@ -0,0 +1,150 @@
/*
 * The Soaplin Kernel
 * Copyright (C) 2025 The SILD Project
 *
 * elf.h - ELF definitions
 */

#pragma once

#include <stdint.h>

// ELF magic.
#define EI_MAG0 0
#define ELFMAG0 0x7f

#define EI_MAG1 1
#define ELFMAG1 'E'

#define EI_MAG2 2
#define ELFMAG2 'L'

#define EI_MAG3 3
#define ELFMAG3 'F'

// ELF class
#define EI_CLASS 4
#define ELFCLASSNONE 0
#define ELFCLASS32 1
#define ELFCLASS64 2

// Is processor-specific data little-endian or big-endian?
#define EI_DATA 5
#define ELFDATANONE 0
#define ELFDATA2LSB 1
#define ELFDATA2MSB 2

// ELF version
#define EI_VERSION 6
#define EV_NONE 0
#define EV_CURRENT 1

// ELF ABI
#define EI_OSABI 7
#define ELFOSABI_NONE 0
#define ELFOSABI_SYSV 0
#define ELFOSABI_HPUX 1
#define ELFOSABI_NETBSD 2
#define ELFOSABI_LINUX 3
#define ELFOSABI_SOLARIS 6
#define ELFOSABI_IRIX 8
#define ELFOSABI_FREEBSD 9
#define ELFOSABI_TRU64 10
#define ELFOSABI_ARM 97
#define ELFOSABI_STANDALONE 255

// ABI version
#define EI_ABIVERSION 8

// Unused bytes.
#define EI_PAD 9

// Size of the e_ident array.
#define EI_NIDENT 16

// e_type
#define ET_NONE 0
#define ET_REL 1
#define ET_EXEC 2
#define ET_DYN 3
#define ET_CORE 4

// e_machine (only the machines supported by Soaplin are included.)
#define EM_X86_64 62

typedef uint64_t Elf64_Addr;
typedef uint64_t Elf64_Off;
typedef uint16_t Elf64_Section;
typedef uint16_t Elf64_Versym;
typedef uint8_t Elf_Byte;
typedef uint16_t Elf64_Half;
typedef int32_t Elf64_Sword;
typedef uint32_t Elf64_Word;
typedef int64_t Elf64_Sxword;
typedef uint64_t Elf64_Xword;

typedef struct {
    unsigned char e_ident[EI_NIDENT];
    uint16_t e_type;
    uint16_t e_machine;
    uint32_t e_version;
    Elf64_Addr e_entry;
    Elf64_Off e_phoff;
    Elf64_Off e_shoff;
    uint32_t e_flags;
    uint16_t e_ehsize;
    uint16_t e_phentsize;
    uint16_t e_phnum;
    uint16_t e_shentsize;
    uint16_t e_shnum;
    uint16_t e_shstrndx;
} Elf64_Ehdr;

#define SHN_UNDEF 0

#define PT_NULL 0
#define PT_LOAD 1
#define PT_DYNAMIC 2
#define PT_INTERP 3
#define PT_NOTE 4
#define PT_SHLIB 5

#define PF_X (1 << 0)
#define PF_W (1 << 1)
#define PF_R (1 << 2)

typedef struct {
    uint32_t p_type;
    uint32_t p_flags;
    Elf64_Off p_offset;
    Elf64_Addr p_vaddr;
    Elf64_Addr p_paddr;
    uint64_t p_filesz;
    uint64_t p_memsz;
    uint64_t p_align;
} Elf64_Phdr;

#define SHT_SYMTAB 2
#define SHT_STRTAB 3

typedef struct {
    uint32_t st_name;
    unsigned char st_info;
    unsigned char st_other;
    uint16_t st_shndx;
    Elf64_Addr st_value;
    uint64_t st_size;
} Elf64_Sym;

typedef struct {
    uint32_t sh_name;
    uint32_t sh_type;
    uint64_t sh_flags;
    Elf64_Addr sh_addr;
    Elf64_Off sh_offset;
    uint64_t sh_size;
    uint32_t sh_link;
    uint32_t sh_info;
    uint64_t sh_addralign;
    uint64_t sh_entsize;
} Elf64_Shdr;
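To show how these definitions fit together, here is a small sketch (the helper name and return convention are illustrative, not part of the commit) that validates the identification bytes and counts the PT_LOAD program headers, the same walk vmm_init() performs below:

#include <exec/elf.h>

// Illustrative helper, not part of this commit: returns the number of
// PT_LOAD segments, or -1 if the buffer does not look like an x86_64 ELF.
static int elf64_count_load_segments(const void *file) {
    const Elf64_Ehdr *ehdr = (const Elf64_Ehdr *)file;

    if (ehdr->e_ident[EI_MAG0] != ELFMAG0 || ehdr->e_ident[EI_MAG1] != ELFMAG1 ||
        ehdr->e_ident[EI_MAG2] != ELFMAG2 || ehdr->e_ident[EI_MAG3] != ELFMAG3)
        return -1;
    if (ehdr->e_ident[EI_CLASS] != ELFCLASS64 || ehdr->e_machine != EM_X86_64)
        return -1;

    int count = 0;
    for (uint16_t i = 0; i < ehdr->e_phnum; i++) {
        const Elf64_Phdr *phdr = (const Elf64_Phdr *)
            ((const char *)file + ehdr->e_phoff + (uint64_t)i * ehdr->e_phentsize);
        if (phdr->p_type == PT_LOAD)
            count++;
    }
    return count;
}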
@ -6,12 +6,12 @@
 */

#define NANOPRINTF_USE_FIELD_WIDTH_FORMAT_SPECIFIERS 1
#define NANOPRINTF_USE_PRECISION_FORMAT_SPECIFIERS 0
#define NANOPRINTF_USE_PRECISION_FORMAT_SPECIFIERS 1
#define NANOPRINTF_USE_LARGE_FORMAT_SPECIFIERS 1
#define NANOPRINTF_USE_SMALL_FORMAT_SPECIFIERS 1
#define NANOPRINTF_USE_FLOAT_FORMAT_SPECIFIERS 0
#define NANOPRINTF_USE_BINARY_FORMAT_SPECIFIERS 1
#define NANOPRINTF_USE_WRITEBACK_FORMAT_SPECIFIERS 1
#define NANOPRINTF_USE_WRITEBACK_FORMAT_SPECIFIERS 0

// Compile nanoprintf in this translation unit.
#define NANOPRINTF_IMPLEMENTATION
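Enabling NANOPRINTF_USE_PRECISION_FORMAT_SPECIFIERS makes precision fields such as %.*s available, while writeback (%n) support is dropped at the same time. Assuming the kernel's trace/fatal loggers forward their format strings to nanoprintf, a hypothetical call like this becomes valid:

// Hypothetical example: name and name_len are illustrative variables.
trace("module name: %.*s\n", (int)name_len, name); // prints at most name_len characters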
@ -27,7 +27,6 @@ void pmm_free_page(void *mem) {
    pmm_page_t *page = (pmm_page_t*)higher_half((uint64_t)mem);
    page->next = pmm_free_list_head ? (pmm_page_t*)higher_half((uint64_t)pmm_free_list_head) : 0x0;
    pmm_free_list_head = page;
    //trace("pmm: free: page free list head is now %p\n", page);

    pmm_available_pages++;
}

@ -67,9 +66,7 @@ void *pmm_alloc_page() {
    pmm_available_pages--;

    pmm_page_t *page = pmm_free_list_head;
    trace("pmm: alloc: page is %p\n", page);
    pmm_free_list_head = page->next;
    //trace("pmm: alloc: free page list head is now %p\n", page);

    memset(page, 0, PMM_PAGE_SIZE);
    return (void*)physical((uint64_t)page);
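higher_half() and physical() are used here but defined elsewhere; they presumably translate between physical addresses and Limine's higher-half direct map. A sketch under that assumption, using the getter this commit already exposes:

// Assumed helpers: convert between a physical address and its HHDM alias.
static inline uint64_t higher_half(uint64_t paddr) {
    return paddr + limine_get_hhdm_offset();
}

static inline uint64_t physical(uint64_t vaddr) {
    return vaddr - limine_get_hhdm_offset();
}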
@ -5,12 +5,13 @@
 * vmm.c - Virtual memory manager
 */

#include "boot/limine.h"
#include "lib/log.h"
#include <stdbool.h>
#include <stddef.h>

#include <arch/cpu.h>
#include <boot/limine.h>
#include <exec/elf.h>
#include <lib/log.h>
#include <mm/memop.h>
#include <mm/pmm.h>
#include <mm/vmm.h>

@ -31,36 +32,37 @@ void vmm_init() {

    uint64_t kvaddr = limine_get_kernel_vaddr();
    uint64_t kpaddr = limine_get_kernel_paddr();
    uint64_t reqs_start = ALIGN_DOWN((uint64_t)reqs_start_ld, PMM_PAGE_SIZE);
    uint64_t reqs_end = ALIGN_UP((uint64_t)reqs_end_ld, PMM_PAGE_SIZE);
    uint64_t text_start = ALIGN_DOWN((uint64_t)text_start_ld, PMM_PAGE_SIZE);
    uint64_t text_end = ALIGN_UP((uint64_t)text_end_ld, PMM_PAGE_SIZE);
    uint64_t rodata_start = ALIGN_DOWN((uint64_t)rodata_start_ld, PMM_PAGE_SIZE);
    uint64_t rodata_end = ALIGN_UP((uint64_t)rodata_end_ld, PMM_PAGE_SIZE);
    uint64_t data_start = ALIGN_DOWN((uint64_t)data_start_ld, PMM_PAGE_SIZE);
    uint64_t data_end = ALIGN_UP((uint64_t)data_end_ld, PMM_PAGE_SIZE);

    // Now, map the kernel's sections
    for (uint64_t i = reqs_start; i < reqs_end; i += PMM_PAGE_SIZE)
        vmm_map(vmm_kernel_pm, i, i - kvaddr + kpaddr, PTE_PRESENT | PTE_WRITE); // why would I write into Limine requests?
    trace("vmm: Mapped limine rqs: PW\n");
    for (uint64_t i = text_start; i < text_end; i += PMM_PAGE_SIZE)
        vmm_map(vmm_kernel_pm, i, i - kvaddr + kpaddr, PTE_PRESENT);
    trace("vmm: Mapped text: P\n");
    for (uint64_t i = rodata_start; i < rodata_end; i += PMM_PAGE_SIZE)
        vmm_map(vmm_kernel_pm, i, i - kvaddr + kpaddr, PTE_PRESENT | PTE_NX);
    trace("vmm: Mapped rodata: P NX\n");
    for (uint64_t i = data_start; i < data_end; i += PMM_PAGE_SIZE)
        vmm_map(vmm_kernel_pm, i, i - kvaddr + kpaddr, PTE_PRESENT | PTE_WRITE | PTE_NX);
    trace("vmm: Mapped data: PW NX\n");
    char *elf_addr = (char *)limine_get_kernel_ehdr_addr();
    Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elf_addr;

    for (uint16_t i = 0; i < ehdr->e_phnum; i++) {
        Elf64_Phdr *cur_phdr = (Elf64_Phdr*)(elf_addr + ehdr->e_phoff + (i * ehdr->e_phentsize));
        if (cur_phdr->p_type != PT_LOAD)
            continue;

        uintptr_t phys = (cur_phdr->p_vaddr - kvaddr) + kpaddr;
        uint64_t flags = PTE_PRESENT;
        if ((cur_phdr->p_flags & PF_X) == 0) {
            flags |= PTE_NX;
        }
        if (cur_phdr->p_flags & PF_W) {
            flags |= PTE_WRITE;
        }

        size_t length = ALIGN_UP(cur_phdr->p_memsz, PMM_PAGE_SIZE);

        for (uint64_t i = 0; i < length; i += PMM_PAGE_SIZE) {
            vmm_map(vmm_kernel_pm, cur_phdr->p_vaddr + i, phys + i, flags);
        }
        trace("vmm: Mapped range: %p -> %p (length: %x)\n", phys, cur_phdr->p_vaddr, length);
    }

    // Map the lower 4 GiB into the higher-half
    for (uint64_t i = 0; i < 0x100000000; i += PMM_PAGE_SIZE)
        vmm_map(vmm_kernel_pm, higher_half(i), i, PTE_PRESENT | PTE_WRITE);
    trace("vmm: Mapped lower 4gib to higher half with flags: PW\n");

    cpu_load_pm(vmm_kernel_pm);
    trace("vmm: Mapped range: %p -> %p (length: %x)\n", 0x0, 0xFFFF800000000000, 0x100000000);

    vmm_load_pm(vmm_kernel_pm);
    trace("vmm: Initialized.\n");
}
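ALIGN_UP and ALIGN_DOWN are used throughout vmm_init() but are not shown in this diff. The usual power-of-two formulations, given here as an assumption about what the kernel defines:

// Assumed definitions: round x down or up to a power-of-two boundary.
#define ALIGN_DOWN(x, align) ((uint64_t)(x) & ~((uint64_t)(align) - 1))
#define ALIGN_UP(x, align)   (((uint64_t)(x) + ((align) - 1)) & ~((uint64_t)(align) - 1))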
@ -96,7 +98,6 @@ void vmm_free_pm(pagemap_t pm) {

static uint64_t *__vmm_get_next_lvl(uint64_t *level, uint64_t entry,
                                    uint64_t flags, bool alloc) {
    //trace("level: %p, level[entry]: %p\n", level, level + entry);
    if (level[entry] & PTE_PRESENT)
        return (uint64_t *)higher_half(PTE_GET_ADDR(level[entry]));
    if (alloc) {

@ -111,7 +112,6 @@ static uint64_t *__vmm_get_next_lvl(uint64_t *level, uint64_t entry,
void vmm_map(pagemap_t pm, uint64_t vaddr, uint64_t paddr, uint64_t flags) {
    if (!pm) return;

    //trace("pm: %p, vaddr: %p, paddr: %p\n", pm, vaddr, paddr);
    uint64_t pml4_entry = (vaddr >> 39) & 0x1ff;
    uint64_t pml3_entry = (vaddr >> 30) & 0x1ff;
    uint64_t pml2_entry = (vaddr >> 21) & 0x1ff;
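The hunk is cut off before the last table level; on x86_64 with 4 KiB pages each level consumes 9 bits of the virtual address, so the remaining index presumably follows the same pattern just below the visible context:

uint64_t pml1_entry = (vaddr >> 12) & 0x1ff; // final level: bits 12..20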
@ -21,23 +21,11 @@

typedef uint64_t *pagemap_t;

// These are defined in the linker file.
extern char reqs_start_ld;
extern char reqs_end_ld;

extern char text_start_ld;
extern char text_end_ld;

extern char rodata_start_ld;
extern char rodata_end_ld;

extern char data_start_ld;
extern char data_end_ld;

void vmm_init();
pagemap_t vmm_alloc_pm();
void vmm_free_pm(pagemap_t pm);

void vmm_load_pm(pagemap_t pm);
void vmm_map(pagemap_t pm, uint64_t vaddr, uint64_t paddr, uint64_t flags);
void vmm_map_user(pagemap_t pm, uint64_t vaddr, uint64_t paddr,
                  uint64_t flags);