From 9c21f343bab84ba31daf3a66e3c55da746386a01 Mon Sep 17 00:00:00 2001 From: RaphProductions <81994075+RaphProductions@users.noreply.github.com> Date: Sun, 18 May 2025 17:24:30 +0200 Subject: [PATCH] pmm: implement a new form of allocator --- kernel/src/boot/limine.c | 11 ++++++ kernel/src/boot/limine.h | 4 ++- kernel/src/main.c | 8 ++++- kernel/src/mm/pmm.c | 57 ++++++++++++++++++++++------- kernel/src/mm/pmm.h | 25 +++++++++++++ kernel/src/mm/pmm.md | 78 ++++++++++++++++++++++++++++++++++++++++ 6 files changed, 169 insertions(+), 14 deletions(-) create mode 100644 kernel/src/mm/pmm.md diff --git a/kernel/src/boot/limine.c b/kernel/src/boot/limine.c index f913498..abcdf0c 100644 --- a/kernel/src/boot/limine.c +++ b/kernel/src/boot/limine.c @@ -38,6 +38,12 @@ static volatile struct limine_memmap_request memmap_req = { .revision = 0 }; +__attribute__((used, section(".limine_requests"))) +static volatile struct limine_hhdm_request hhdm_req = { + .id = LIMINE_HHDM_REQUEST, + .revision = 0 +}; + __attribute__((used, section(".limine_requests_start"))) static volatile LIMINE_REQUESTS_START_MARKER; @@ -79,4 +85,9 @@ limine_bootinfo_t *limine_get_bootinfo() { #endif return &__limine_bootinfo; +} + +uint64_t limine_get_hhdm_offset() +{ + return hhdm_req.response->offset; } \ No newline at end of file diff --git a/kernel/src/boot/limine.h b/kernel/src/boot/limine.h index 443cd44..3142f9d 100644 --- a/kernel/src/boot/limine.h +++ b/kernel/src/boot/limine.h @@ -29,4 +29,6 @@ limine_fb_t *limine_get_fb(int id); limine_bootinfo_t *limine_get_bootinfo(); // Get the memory map. 
-struct limine_memmap_response *limine_get_memmap(); \ No newline at end of file +struct limine_memmap_response *limine_get_memmap(); + +uint64_t limine_get_hhdm_offset(); \ No newline at end of file diff --git a/kernel/src/main.c b/kernel/src/main.c index 14c33ba..05437c7 100644 --- a/kernel/src/main.c +++ b/kernel/src/main.c @@ -16,6 +16,7 @@ #include #include #include +#include #include void kmain(void) { @@ -30,7 +31,12 @@ void kmain(void) { arch_init_stage1(); pmm_init(); - + uint8_t* mem = pmm_alloc_page() + 0xFFFF800000000000; + memcpy(mem, "HelloWorld\0", 11); + trace("pmm: Read from allocated memory: %s\n", mem); + pmm_free_page(mem); + trace("pmm: Freed memory.\n"); + // We're done, just hang... hcf(); } diff --git a/kernel/src/mm/pmm.c b/kernel/src/mm/pmm.c index da05a58..ae39604 100644 --- a/kernel/src/mm/pmm.c +++ b/kernel/src/mm/pmm.c @@ -17,29 +17,54 @@ uint64_t pmm_available_pages = 0; uint64_t pmm_total_pages = 0; -static pmm_page_t* pmm_free_list_head = NULL; +static pmm_region_t *pmm_region_list_head = NULL; +static pmm_page_t *pmm_free_list_head = NULL; void pmm_free_page(void *mem) { pmm_page_t *page = (pmm_page_t*)mem; - page->next = pmm_free_list_head; + pmm_page_t *page_hhalf = (pmm_page_t*)higher_half((uint64_t)page); + page_hhalf->next = pmm_free_list_head; pmm_free_list_head = page; pmm_available_pages++; } +static void __pmm_steal_pages_from_region_head(int pages) { + pmm_region_list_head->length -= PMM_PAGE_SIZE; + void *page = (void*)pmm_region_list_head->base + + pmm_region_list_head->length; + pmm_free_page(page); + + if (pmm_region_list_head->length == 0) + { + // If a region is totally consumed, + // we can turn it into a free page :) + // So our 4kb aren't really lost + void *mem = (void*)pmm_region_list_head; + pmm_region_list_head = pmm_region_list_head->next; + + pmm_free_page(mem); + } +} + void *pmm_alloc_page() { if (!pmm_free_list_head) { - fatal("pmm: out of memory!\n"); - hcf(); + if (!pmm_region_list_head) { + 
fatal("pmm: out of memory!\n"); + hcf(); + } + __pmm_steal_pages_from_region_head(4); + // et voila, we now have 4 free pages to allocate } pmm_available_pages--; pmm_page_t *page = pmm_free_list_head; - pmm_free_list_head = page->next; + pmm_page_t *page_hhalf = (pmm_page_t*)higher_half((uint64_t)page); + pmm_free_list_head = page_hhalf->next; - memset(page, 0, PMM_PAGE_SIZE); + //memset(page_hhalf, 0, PMM_PAGE_SIZE); return page; } @@ -49,15 +74,23 @@ void pmm_init() { for (uint64_t i = 0; i < mmap->entry_count; i++) { struct limine_memmap_entry *entry = mmap->entries[i]; - if (entry->type == LIMINE_MEMMAP_USABLE) + if (entry->type == LIMINE_MEMMAP_USABLE || + entry->type == LIMINE_MEMMAP_BOOTLOADER_RECLAIMABLE) { trace("pmm: found a usable memory block: %p-%p\n", entry->base, entry->base + entry->length); - uint64_t newlen = ALIGN_UP(entry->length, PMM_PAGE_SIZE); - for (uint64_t j = 0; j < newlen; j += PMM_PAGE_SIZE) { - pmm_free_page((void*)(entry->base + j)); - pmm_total_pages++; - } + uint64_t newlen = ALIGN_DOWN(entry->length, PMM_PAGE_SIZE); + + // Give a page to store the PMM region. 
+ // When the region is fully consumed, the + // page is freed so that it can be used (i love recycling) + pmm_region_t *reg = (pmm_region_t*)higher_half(entry->base); + reg->base = entry->base + PMM_PAGE_SIZE; + reg->length = newlen - PMM_PAGE_SIZE; + reg->next = pmm_region_list_head; + pmm_region_list_head = reg; + + pmm_available_pages += reg->length / PMM_PAGE_SIZE; } } diff --git a/kernel/src/mm/pmm.h b/kernel/src/mm/pmm.h index db52cc5..70d5fc7 100644 --- a/kernel/src/mm/pmm.h +++ b/kernel/src/mm/pmm.h @@ -8,6 +8,7 @@ #pragma once #include +#include #define DIV_ROUND_UP(x, y) \ (((uint64_t)(x) + ((uint64_t)(y) - 1)) / (uint64_t)(y)) @@ -20,4 +21,28 @@ typedef struct __pmm_page { struct __pmm_page *next; } pmm_page_t; +typedef struct __pmm_region { + uint64_t base; + uint64_t length; + struct __pmm_region *next; +} pmm_region_t; + +inline uint64_t higher_half(uint64_t addr) { + uint64_t hhdm_off = limine_get_hhdm_offset(); + if (addr > hhdm_off) + return addr; + + return addr + hhdm_off; +} + +inline uint64_t physical(uint64_t addr) { + uint64_t hhdm_off = limine_get_hhdm_offset(); + if (addr < hhdm_off) + return addr; + + return addr - hhdm_off; +} + +void pmm_free_page(void *mem); +void *pmm_alloc_page(); void pmm_init(); \ No newline at end of file diff --git a/kernel/src/mm/pmm.md b/kernel/src/mm/pmm.md new file mode 100644 index 0000000..a5bad46 --- /dev/null +++ b/kernel/src/mm/pmm.md @@ -0,0 +1,78 @@ +# Soaplin's Physical Memory Manager + +The Physical Memory Manager (PMM) in Soaplin uses a lazy-loading design that efficiently manages physical memory pages while minimizing boot time overhead. + +## Design Overview + +The PMM uses a two-level allocation strategy: +1. Region List - tracks large blocks of available physical memory +2. 
Free Page List - manages individual pages ready for immediate allocation + +### Memory Regions + +Each memory region is tracked by a `pmm_region_t` structure that contains: +- Base address of the available memory +- Length of remaining memory +- Pointer to next region + +The region structure is cleverly stored in the first page of the region itself, making the overhead minimal (just one 4KB page per region). +When the region has been totally consumed, its metadata page is turned +into a free page that can be allocated. + +### Free Page List + +The free page list is a singly-linked list of individual pages that are ready for immediate allocation. It gets refilled from regions only when needed. + +## Lazy Loading + +Instead of initializing all free pages at boot time, the PMM: +1. Only initializes region structures during boot +2. Adds pages to the free list on-demand +3. Consumes memory regions gradually as needed + +This approach provides several benefits: +- Very fast boot times regardless of RAM size +- Memory overhead proportional to number of regions, not total RAM +- No performance penalty during normal operation + +## Memory Organization + +Physical memory is organized as follows: +- Each region's first page contains the region metadata +- Remaining pages in each region are available for allocation +- Pages are standard 4KB size +- Free pages are linked together in the free list + +## Usage + +The PMM provides three main functions: +- `pmm_init()` - Initializes the PMM from the bootloader's memory map +- `pmm_alloc_page()` - Allocates a single 4KB page +- `pmm_free_page()` - Returns a page to the free list + +## Implementation Details + +### Region Initialization +During boot, the PMM: +1. Receives memory map from Limine +2. Identifies usable memory regions +3. Sets up region tracking structures +4. Calculates total available pages + +### Page Allocation +When allocating pages: +1. First tries the free list +2. 
If free list is empty: + - Takes 4 pages from current region + - Adds them to the free list + - Updates region metadata + - If the region has been consumed + - Let the next region take the head + - Free the region's metadata page. +3. Returns the page to the caller + +### Memory Tracking +The PMM maintains counters for: +- Total available pages +- Currently free pages +This allows for memory usage monitoring and OOM detection.