ugh

parent f27e0a240c
commit 287a7fb312
3 changed files with 162 additions and 150 deletions
@@ -60,10 +60,10 @@ program_t *elf_load(char *data) {
     uint64_t offset = phdr[i].p_offset;
 
     uint64_t flags = VMM_PRESENT;
-    if (phdr[i].p_flags & PF_W)
+    //if (phdr[i].p_flags & PF_W)
       flags |= VMM_WRITABLE;
-    if (!(phdr[i].p_flags & PF_X))
-      flags |= VMM_NX;
+    //if (!(phdr[i].p_flags & PF_X))
+    //  flags |= VMM_NX;
 
     flags |= VMM_USER;
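Note: with the PF_W check commented out, `flags |= VMM_WRITABLE` now runs unconditionally, and VMM_NX is never set, so every PT_LOAD segment ends up writable and executable. If the per-segment permissions come back, a minimal sketch of the translation as its own helper (hypothetical name, assuming the ELF PF_* constants and this repo's VMM_* bits):

  // Hypothetical helper: translate ELF segment permissions into page flags.
  // Preserves W^X: writable only with PF_W, executable only with PF_X.
  static uint64_t elf_to_vmm_flags(uint32_t p_flags) {
    uint64_t flags = VMM_PRESENT | VMM_USER;
    if (p_flags & PF_W)
      flags |= VMM_WRITABLE;
    if (!(p_flags & PF_X))
      flags |= VMM_NX;
    return flags;
  }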
@@ -1,23 +1,23 @@
-#include "mm/vmm.h"
 #include "limine.h"
-#include "mm/pmm.h"
+#include "mm/memop.h"
+#include "mm/pmm.h"
 #include "sys/log.h"
+#include "vmm.h"
 #include <stddef.h>
 
-__attribute__((
-    used,
-    section(
-        ".limine_requests"))) static volatile struct limine_paging_mode_request
-    pmrq = {.id = LIMINE_PAGING_MODE_REQUEST,
-            .revision = 3,
-            .mode = LIMINE_PAGING_MODE_X86_64_4LVL};
+__attribute__((used, section(".limine_requests")))
+static volatile struct limine_paging_mode_request pmrq = {
+    .id = LIMINE_PAGING_MODE_REQUEST,
+    .revision = 3,
+    .mode = LIMINE_PAGING_MODE_X86_64_4LVL
+};
 
-__attribute__((used, section(".limine_requests")))
-static volatile struct limine_executable_address_request karq = {
-    .id = LIMINE_EXECUTABLE_ADDRESS_REQUEST,
-    .revision = 0,
+__attribute__((used, section(".limine_requests"))) static volatile struct
+    limine_executable_address_request karq = {
+        .id = LIMINE_EXECUTABLE_ADDRESS_REQUEST,
+        .revision = 0,
 };
 
 pagemap_t *vmm_kernel_pm = NULL;
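Note: `vmm_init` below dereferences `pmrq.response` and `karq.response` without checking them, and Limine leaves a request's `response` pointer NULL when the bootloader does not honor it. A defensive sketch, in the style the file already uses:

  // Sketch: bail out early if the bootloader did not answer the requests.
  if (pmrq.response == NULL || karq.response == NULL) {
    log("vmm - missing Limine paging/address response!\n");
    asm("cli; hlt;");
  }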
@@ -25,179 +25,184 @@ pagemap_t *vmm_current_pm = NULL;
 int vmm_kernel_pm_exists = 0;
 
 pagemap_t *vmm_alloc_pm() {
   pagemap_t *pm = (pagemap_t *)HIGHER_HALF((uint64_t)pmm_request_page());
   memset(pm, 0, PMM_PAGE_SIZE);
 
   /*if (vmm_kernel_pm_exists) {
     pm->toplevel = (uint64_t*)HIGHER_HALF((uint64_t)pmm_request_page());
     memset(pm->toplevel, 0, PMM_PAGE_SIZE);
     for (int i = 256; i < 512; i++) {
       pm->toplevel[i] = vmm_kernel_pm->toplevel[i];
     }
   } else {
     __asm__ volatile("mov %%cr3, %0" : "=r"(pm->toplevel) : : "memory");
     pm->toplevel = HIGHER_HALF(pm->toplevel);
     logln(info, "vmm", "Limine-provided kernel PML4: %p", pm->toplevel);
   }*/
   pm->toplevel = (uint64_t *)HIGHER_HALF((uint64_t)pmm_request_page());
   memset(pm->toplevel, 0, PMM_PAGE_SIZE);
 
   if (vmm_kernel_pm_exists) {
     for (int i = 256; i < 512; i++) {
       pm->toplevel[i] = vmm_kernel_pm->toplevel[i];
     }
   }
 
   return pm;
 }
 
 void vmm_release_pm(pagemap_t *pm) {
   memset(pm->toplevel, 0, PMM_PAGE_SIZE);
   memset(pm, 0, PMM_PAGE_SIZE);
   pmm_free_page(pm->toplevel);
   pmm_free_page(pm);
 }
 
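Note on `vmm_release_pm`: `memset(pm, 0, PMM_PAGE_SIZE)` zeroes the `toplevel` field before `pmm_free_page(pm->toplevel)` runs, so the top-level table is freed as NULL and the page leaks. The frees also receive higher-half pointers, while the PMM presumably works in physical addresses (cf. the `PHYSICAL()` conversions elsewhere in this file). A reordered sketch under that assumption:

  // Sketch: free the top level before wiping the pagemap_t that points to it,
  // and hand the PMM physical addresses rather than higher-half ones.
  void vmm_release_pm(pagemap_t *pm) {
    memset(pm->toplevel, 0, PMM_PAGE_SIZE);
    pmm_free_page(PHYSICAL(pm->toplevel));
    memset(pm, 0, PMM_PAGE_SIZE);
    pmm_free_page(PHYSICAL(pm));
  }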
 void vmm_sanity_check() {
   uint64_t *my_memory = pmm_request_page();
   *my_memory = 0x40;
 
   pagemap_t *pm = vmm_alloc_pm();
   vmm_map(pm, 0x1000, (uint64_t)my_memory, VMM_PRESENT | VMM_WRITABLE);
 
   uint64_t *my_ptr = (uint64_t *)0x1000;
   uint64_t ptr_val = 0;
   vmm_load_pagemap(pm);
   ptr_val = *my_ptr;
   vmm_load_pagemap(vmm_kernel_pm);
 
   if (ptr_val != 0x40) {
     log("vmm - sanity check failed. system halted.\n");
     asm("cli");
     while (1)
       asm("hlt");
   }
 }
 
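Note: when the check passes, the probe pagemap, the test page, and the 0x1000 mapping are simply abandoned. Harmless this early in boot, but a cleanup sketch (using only helpers from this file, and assuming `pmm_free_page` accepts what `pmm_request_page` returned):

  // Sketch: tear down the probe after the check passes.
  vmm_unmap(pm, 0x1000);
  vmm_release_pm(pm);
  pmm_free_page(my_memory);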
 void vmm_init() {
   if (pmrq.response->mode != LIMINE_PAGING_MODE_X86_64_4LVL) {
     log("vmm - Soaplin only supports 4-level paging!\n");
     asm("cli; hlt;");
   }
   vmm_kernel_pm = vmm_alloc_pm();
   vmm_kernel_pm_exists = 1;
 
   uint64_t kphysaddr = karq.response->physical_base;
   uint64_t kvirtaddr = karq.response->virtual_base;
 
   uint64_t reqs_start = ALIGN_DOWN((uint64_t)reqs_start_ld, PMM_PAGE_SIZE);
   uint64_t reqs_end = ALIGN_UP((uint64_t)reqs_end_ld, PMM_PAGE_SIZE);
   uint64_t text_start = ALIGN_DOWN((uint64_t)text_start_ld, PMM_PAGE_SIZE);
   uint64_t text_end = ALIGN_UP((uint64_t)text_end_ld, PMM_PAGE_SIZE);
   uint64_t rodata_start = ALIGN_DOWN((uint64_t)rodata_start_ld, PMM_PAGE_SIZE);
   uint64_t rodata_end = ALIGN_UP((uint64_t)rodata_end_ld, PMM_PAGE_SIZE);
   uint64_t data_start = ALIGN_DOWN((uint64_t)data_start_ld, PMM_PAGE_SIZE);
   uint64_t data_end = ALIGN_UP((uint64_t)data_end_ld, PMM_PAGE_SIZE);
 
   log("vmm - mapping .requests section...\n");
   for (uint64_t req = reqs_start; req < reqs_end; req += PMM_PAGE_SIZE) {
     vmm_map(vmm_kernel_pm, req, req - kvirtaddr + kphysaddr,
             VMM_PRESENT | VMM_WRITABLE);
   }
   log("vmm - mapping .text section...\n");
   for (uint64_t text = text_start; text < text_end; text += PMM_PAGE_SIZE) {
     vmm_map(vmm_kernel_pm, text, text - kvirtaddr + kphysaddr, VMM_PRESENT);
   }
   log("vmm - mapping .rodata section...\n");
   for (uint64_t roData = rodata_start; roData < rodata_end;
        roData += PMM_PAGE_SIZE)
     vmm_map(vmm_kernel_pm, roData, roData - kvirtaddr + kphysaddr,
             VMM_PRESENT | VMM_NX);
   log("vmm - mapping .data section...\n");
   for (uint64_t data = data_start; data < data_end; data += PMM_PAGE_SIZE)
     vmm_map(vmm_kernel_pm, data, data - kvirtaddr + kphysaddr,
             VMM_PRESENT | VMM_WRITABLE | VMM_NX);
   log("vmm - mapping address from 0x0 to 0x100000000...\n");
   for (uint64_t gb4 = 0; gb4 < 0x100000000; gb4 += PMM_PAGE_SIZE) {
     vmm_map(vmm_kernel_pm, gb4, gb4, VMM_PRESENT | VMM_WRITABLE);
     vmm_map(vmm_kernel_pm, (uint64_t)HIGHER_HALF(gb4), gb4,
             VMM_PRESENT | VMM_WRITABLE);
   }
   vmm_load_pagemap(vmm_kernel_pm);
 
   vmm_sanity_check();
   log("vmm - initialized!\n");
 }
 
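Note: each kernel section is mapped with `virt - kvirtaddr + kphysaddr`, which relocates the higher-half link address onto wherever the bootloader physically loaded the kernel. A worked example with illustrative numbers only:

  // If the kernel is linked at virtual 0xffffffff80000000 and loaded at
  // physical 0x7e00000 (both values illustrative), a .text page maps as:
  //   phys = 0xffffffff80001000 - 0xffffffff80000000 + 0x7e00000
  //        = 0x7e01000

Also worth noting: the 0-4 GiB loop issues roughly a million `vmm_map` calls each for the identity and higher-half views at 4 KiB granularity; larger page sizes would cut that dramatically, if the VMM ever grows support for them.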
 void vmm_load_pagemap(pagemap_t *pm) {
   if (!pm)
     return;
 
   if (!pm->toplevel)
     return;
 
   vmm_current_pm = pm;
   __asm__ volatile("mov %0, %%cr3" : : "r"(PHYSICAL(pm->toplevel)) : "memory");
 }
 
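Note: writing CR3 flushes all non-global TLB entries, so every call to `vmm_load_pagemap` is also a full TLB flush. Callers that may pass the already-active pagemap could skip the write, as in this sketch:

  // Sketch: avoid the implicit full TLB flush when the pagemap is unchanged.
  void vmm_load_pagemap(pagemap_t *pm) {
    if (!pm || !pm->toplevel || pm == vmm_current_pm)
      return;
    vmm_current_pm = pm;
    __asm__ volatile("mov %0, %%cr3" : : "r"(PHYSICAL(pm->toplevel)) : "memory");
  }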
-static uint64_t *__vmm_get_next_lvl(uint64_t *level, uint64_t entry, uint64_t flags) {
-  (void)flags;
-  if (!(level[entry] & 1)){
-    uint64_t *pml = HIGHER_HALF(pmm_request_page());
-    memset(pml, 0, PMM_PAGE_SIZE);
-    level[entry] = ((uint64_t)PHYSICAL(pml) & 0x000FFFFFFFFFF000ULL) | 0b111;
-  }
-  return HIGHER_HALF(PTE_GET_ADDR(level[entry]));
+static uint64_t *__vmm_get_next_lvl(uint64_t *level, uint64_t entry,
+                                    uint64_t flags) {
+  if (!(level[entry] & 1)) {
+    uint64_t *pml = HIGHER_HALF(pmm_request_page());
+    memset(pml, 0, PMM_PAGE_SIZE);
+    level[entry] = (uint64_t)PHYSICAL(pml);
+  }
+  level[entry] |= (flags & 0xFFF); // only OR in the relevant flag bits
+  return HIGHER_HALF(PTE_GET_ADDR(level[entry]));
 }
 
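Note on this change: the old version hardcoded present|writable|user (0b111) on every new table; the new version stores only the table's physical address and then ORs the caller's low 12 flag bits into the entry on every walk. Two consequences: flags can only ever accumulate on intermediate entries, never clear, and on the flags == 0 lookup path a freshly allocated table entry is left without its present bit, so the CPU still treats it as not present even though it now holds an address. A guarded sketch:

  // Sketch: always mark a freshly allocated table present, then let the
  // caller's flags only widen the entry afterwards.
  if (!(level[entry] & 1)) {
    uint64_t *pml = HIGHER_HALF(pmm_request_page());
    memset(pml, 0, PMM_PAGE_SIZE);
    level[entry] = (uint64_t)PHYSICAL(pml) | VMM_PRESENT;
  }
  level[entry] |= (flags & 0xFFF);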
 uint64_t vmm_get_flags(pagemap_t *pm, uint64_t vaddr) {
   uint64_t pml4_entry = (vaddr >> 39) & 0x1ff;
   uint64_t pml3_entry = (vaddr >> 30) & 0x1ff;
   uint64_t pml2_entry = (vaddr >> 21) & 0x1ff;
   uint64_t pml1_entry = (vaddr >> 12) & 0x1ff;
 
   uint64_t *pml3 = __vmm_get_next_lvl(pm->toplevel, pml4_entry, 0);
   uint64_t *pml2 = __vmm_get_next_lvl(pml3, pml3_entry, 0);
   uint64_t *pml1 = __vmm_get_next_lvl(pml2, pml2_entry, 0);
 
   return pml1[pml1_entry] & 0x7000000000000FFF;
 }
 
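Note: the mask 0x7000000000000FFF keeps bits 0-11 and 60-62 but not bit 63, which is the NX bit on x86-64. Assuming VMM_NX is bit 63 here, it never survives a `vmm_get_flags` round trip; a sketch of the likely intent:

  // Sketch: include bit 63 (NX) so VMM_NX is reported back to the caller.
  return pml1[pml1_entry] & 0x8000000000000FFFULL;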
 uint64_t virt_to_phys(pagemap_t *pagemap, uint64_t virt) {
   uint64_t pml4_idx = (virt >> 39) & 0x1FF;
   uint64_t pml3_idx = (virt >> 30) & 0x1FF;
   uint64_t pml2_idx = (virt >> 21) & 0x1FF;
   uint64_t pml1_idx = (virt >> 12) & 0x1FF;
 
   uint64_t *pml3 = __vmm_get_next_lvl(pagemap->toplevel, pml4_idx, 0);
   uint64_t *pml2 = __vmm_get_next_lvl(pml3, pml3_idx, 0);
   uint64_t *pml1 = __vmm_get_next_lvl(pml2, pml2_idx, 0);
 
   uint64_t phys_addr =
       pml1[pml1_idx] &
       0x000FFFFFFFFFF000; // mask off the flag bits to get the physical address
 
   return phys_addr;
 }
 
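Note: because `__vmm_get_next_lvl` allocates a table whenever an entry is not present, calling `virt_to_phys` (or `vmm_get_flags`) on an unmapped address silently grows the page tables instead of failing. A non-allocating walk would make lookups side-effect free; a sketch with a hypothetical helper:

  // Sketch of a read-only walk: hand back NULL instead of allocating.
  static uint64_t *walk_existing(uint64_t *level, uint64_t entry) {
    if (!(level[entry] & 1))
      return NULL;
    return HIGHER_HALF(PTE_GET_ADDR(level[entry]));
  }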
 void vmm_map(pagemap_t *pm, uint64_t vaddr, uint64_t paddr, uint64_t flags) {
   uint64_t pml4_entry = (vaddr >> 39) & 0x1ff;
   uint64_t pml3_entry = (vaddr >> 30) & 0x1ff;
   uint64_t pml2_entry = (vaddr >> 21) & 0x1ff;
   uint64_t pml1_entry = (vaddr >> 12) & 0x1ff;
 
   uint64_t *pml3 = __vmm_get_next_lvl(pm->toplevel, pml4_entry, flags);
   uint64_t *pml2 = __vmm_get_next_lvl(pml3, pml3_entry, flags);
   uint64_t *pml1 = __vmm_get_next_lvl(pml2, pml2_entry, flags);
 
   pml1[pml1_entry] = paddr | flags;
 }
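Note: the four indices are consecutive 9-bit slices of the canonical address. A worked example for the usual higher-half kernel base:

  // vaddr = 0xffffffff80000000
  // pml4_entry = (vaddr >> 39) & 0x1ff = 511
  // pml3_entry = (vaddr >> 30) & 0x1ff = 510
  // pml2_entry = (vaddr >> 21) & 0x1ff = 0
  // pml1_entry = (vaddr >> 12) & 0x1ff = 0

So kernel mappings land in the upper PML4 slots (256-511), which is exactly the range `vmm_alloc_pm` copies into every new pagemap.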
 void vmm_unmap(pagemap_t *pm, uint64_t vaddr) {
   uint64_t pml1_entry = (vaddr >> 12) & 0x1ff;
@@ -206,13 +211,16 @@ void vmm_unmap(pagemap_t *pm, uint64_t vaddr) {
   uint64_t pml4_entry = (vaddr >> 39) & 0x1ff;
 
   uint64_t *pml3 = __vmm_get_next_lvl(pm->toplevel, pml4_entry, 0);
-  if (pml3 == NULL) return;
+  if (pml3 == NULL)
+    return;
   uint64_t *pml2 = __vmm_get_next_lvl(pml3, pml3_entry, 0);
-  if (pml2 == NULL) return;
+  if (pml2 == NULL)
+    return;
   uint64_t *pml1 = __vmm_get_next_lvl(pml2, pml2_entry, 0);
-  if (pml1 == NULL) return;
+  if (pml1 == NULL)
+    return;
 
   pml1[pml1_entry] = 0;
 
-  __asm__ volatile ("invlpg (%0)" : : "b"(vaddr) : "memory");
+  __asm__ volatile("invlpg (%0)" : : "b"(vaddr) : "memory");
 }
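Note: as written, the NULL checks never fire, since `__vmm_get_next_lvl` always returns a (possibly freshly allocated) table; they would only become meaningful with a non-allocating walk like the sketch after `virt_to_phys`. Also, `invlpg` only invalidates the TLB for the address space currently in CR3, so if `pm` is not the live pagemap the stale translation survives until the next CR3 load. A guard sketch:

  // Sketch: only invalidate when pm is the pagemap currently in CR3;
  // otherwise the next vmm_load_pagemap() reload flushes it anyway.
  if (vmm_current_pm == pm)
    __asm__ volatile("invlpg (%0)" : : "b"(vaddr) : "memory");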
@@ -150,5 +150,9 @@ void schedule(registers_t *regs)
   memcpy(regs, &curr_proc->regs, sizeof(registers_t));
 
   // Finally, load our pagemap
-  vmm_load_pagemap(curr_proc->pm);
+  //if (memcmp(curr_proc->name, "Init", 4) == 0) {
+  vmm_load_pagemap(curr_proc->pm);
+  //asm("cli");
+  //while (1)
+  //  asm("hlt");}
 }
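Note: `schedule()` reloads CR3 on every invocation even when the next process shares the outgoing pagemap, and since `vmm_load_pagemap` has no same-pagemap check, each context switch pays a full TLB flush. A guard here (or inside `vmm_load_pagemap`, as sketched earlier) avoids it:

  // Sketch: skip the CR3 write when the pagemap does not change.
  if (vmm_current_pm != curr_proc->pm)
    vmm_load_pagemap(curr_proc->pm);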