pmm: implement a new form of allocator

RaphProductions 2025-05-18 17:24:30 +02:00
parent 89bb8c8a4b
commit 9c21f343ba
6 changed files with 169 additions and 14 deletions

kernel/src/boot/limine.c

@@ -38,6 +38,12 @@ static volatile struct limine_memmap_request memmap_req = {
     .revision = 0
 };
 
+__attribute__((used, section(".limine_requests")))
+static volatile struct limine_hhdm_request hhdm_req = {
+    .id = LIMINE_HHDM_REQUEST,
+    .revision = 0
+};
+
 __attribute__((used, section(".limine_requests_start")))
 static volatile LIMINE_REQUESTS_START_MARKER;

@@ -80,3 +86,8 @@ limine_bootinfo_t *limine_get_bootinfo() {
     return &__limine_bootinfo;
 }
+
+uint64_t limine_get_hhdm_offset()
+{
+    return hhdm_req.response->offset;
+}

kernel/src/boot/limine.h

@@ -30,3 +30,5 @@ limine_bootinfo_t *limine_get_bootinfo();
 
 // Get the memory map.
 struct limine_memmap_response *limine_get_memmap();
+
+uint64_t limine_get_hhdm_offset();


@@ -16,6 +16,7 @@
 #include <lib/ansi.h>
 #include <lib/log.h>
 #include <lib/logoutputs_sk.h>
+#include <mm/memop.h>
 #include <mm/pmm.h>
 
 void kmain(void) {

@@ -30,6 +31,11 @@ void kmain(void) {
     arch_init_stage1();
     pmm_init();
 
+    // Smoke-test the allocator: write through the higher-half
+    // mapping, free by physical address.
+    uint8_t *mem = (uint8_t*)higher_half((uint64_t)pmm_alloc_page());
+    memcpy(mem, "HelloWorld\0", 11);
+    trace("pmm: Read from allocated memory: %s\n", mem);
+    pmm_free_page((void*)physical((uint64_t)mem));
+    trace("pmm: Freed memory.\n");
+
     // We're done, just hang...
     hcf();

kernel/src/mm/pmm.c

@@ -17,29 +17,54 @@
 uint64_t pmm_available_pages = 0;
 uint64_t pmm_total_pages = 0;
 
-static pmm_page_t* pmm_free_list_head = NULL;
+static pmm_region_t *pmm_region_list_head = NULL;
+static pmm_page_t *pmm_free_list_head = NULL;
 
 void pmm_free_page(void *mem) {
     pmm_page_t *page = (pmm_page_t*)mem;
-    page->next = pmm_free_list_head;
+    // The free list stores physical addresses, but the link must
+    // be written through the higher-half mapping.
+    pmm_page_t *page_hhalf = (pmm_page_t*)higher_half((uint64_t)page);
+    page_hhalf->next = pmm_free_list_head;
     pmm_free_list_head = page;
     pmm_available_pages++;
 }
 
+static void __pmm_steal_pages_from_region_head(int pages) {
+    // Carve pages off the tail of the head region and push them
+    // onto the free list, stopping early if the regions run out.
+    for (int i = 0; i < pages && pmm_region_list_head; i++) {
+        pmm_region_list_head->length -= PMM_PAGE_SIZE;
+        void *page = (void*)(pmm_region_list_head->base +
+                             pmm_region_list_head->length);
+        pmm_free_page(page);
+
+        if (pmm_region_list_head->length == 0)
+        {
+            // If a region is totally consumed,
+            // we can turn its metadata page into a free page :)
+            // So our 4kb aren't really lost
+            void *mem = (void*)physical((uint64_t)pmm_region_list_head);
+            pmm_region_list_head = pmm_region_list_head->next;
+            pmm_free_page(mem);
+        }
+    }
+}
+
 void *pmm_alloc_page() {
     if (!pmm_free_list_head)
     {
+        if (!pmm_region_list_head) {
             fatal("pmm: out of memory!\n");
             hcf();
         }
+
+        __pmm_steal_pages_from_region_head(4);
+        // et voila, we now have up to 4 free pages to allocate
+    }
 
     pmm_available_pages--;
 
     pmm_page_t *page = pmm_free_list_head;
-    pmm_free_list_head = page->next;
+    pmm_page_t *page_hhalf = (pmm_page_t*)higher_half((uint64_t)page);
+    pmm_free_list_head = page_hhalf->next;
 
-    memset(page, 0, PMM_PAGE_SIZE);
+    //memset(page_hhalf, 0, PMM_PAGE_SIZE);
     return page;
 }
@@ -49,15 +74,23 @@ void pmm_init() {
     for (uint64_t i = 0; i < mmap->entry_count; i++) {
         struct limine_memmap_entry *entry = mmap->entries[i];
 
-        if (entry->type == LIMINE_MEMMAP_USABLE)
+        if (entry->type == LIMINE_MEMMAP_USABLE ||
+            entry->type == LIMINE_MEMMAP_BOOTLOADER_RECLAIMABLE)
         {
             trace("pmm: found a usable memory block: %p-%p\n", entry->base, entry->base + entry->length);
-            uint64_t newlen = ALIGN_UP(entry->length, PMM_PAGE_SIZE);
+            uint64_t newlen = ALIGN_DOWN(entry->length, PMM_PAGE_SIZE);
+            if (newlen <= PMM_PAGE_SIZE)
+                continue; // too small for metadata plus a free page
 
-            for (uint64_t j = 0; j < newlen; j += PMM_PAGE_SIZE) {
-                pmm_free_page((void*)(entry->base + j));
-                pmm_total_pages++;
-            }
+            // Give a page to store the PMM region.
+            // When the region is fully consumed, the
+            // page is freed so that it can be used (i love recycling)
+            pmm_region_t *reg = (pmm_region_t*)higher_half(entry->base);
+            reg->base = entry->base + PMM_PAGE_SIZE;
+            reg->length = newlen - PMM_PAGE_SIZE;
+            reg->next = pmm_region_list_head;
+            pmm_region_list_head = reg;
+
+            pmm_available_pages += reg->length / PMM_PAGE_SIZE;
+            pmm_total_pages += reg->length / PMM_PAGE_SIZE;
         }
     }

kernel/src/mm/pmm.h

@@ -8,6 +8,7 @@
 #pragma once
 
 #include <stdint.h>
+#include <boot/limine.h>
 
 #define DIV_ROUND_UP(x, y) \
     (((uint64_t)(x) + ((uint64_t)(y) - 1)) / (uint64_t)(y))

@@ -20,4 +21,28 @@ typedef struct __pmm_page {
     struct __pmm_page *next;
 } pmm_page_t;
 
+typedef struct __pmm_region {
+    uint64_t base;
+    uint64_t length;
+    struct __pmm_region *next;
+} pmm_region_t;
+
+// Translate a physical address into its higher-half (HHDM) alias.
+static inline uint64_t higher_half(uint64_t addr) {
+    uint64_t hhdm_off = limine_get_hhdm_offset();
+    if (addr > hhdm_off)
+        return addr; // already higher-half
+    return addr + hhdm_off;
+}
+
+// Translate a higher-half (HHDM) address back into a physical one.
+static inline uint64_t physical(uint64_t addr) {
+    uint64_t hhdm_off = limine_get_hhdm_offset();
+    if (addr < hhdm_off)
+        return addr; // already physical
+    return addr - hhdm_off;
+}
+
 void pmm_free_page(void *mem);
 void *pmm_alloc_page();
 void pmm_init();

kernel/src/mm/pmm.md Normal file

@@ -0,0 +1,78 @@
# Soaplin's Physical Memory Manager
The Physical Memory Manager (PMM) in Soaplin uses a lazy-loading design that efficiently manages physical memory pages while minimizing boot time overhead.
## Design Overview
The PMM uses a two-level allocation strategy (the backing structures are sketched after this list):
1. Region List - tracks large blocks of available physical memory
2. Free Page List - manages individual pages ready for immediate allocation
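Both levels map directly onto the two structures this commit adds to `pmm.h` (the field comments here are editorial):

```c
// Describes one block of usable physical memory. The structure
// lives in the first page of the region it describes.
typedef struct __pmm_region {
    uint64_t base;               // first physical address still available
    uint64_t length;             // bytes not yet handed to the free list
    struct __pmm_region *next;
} pmm_region_t;

// Overlaid on every free page; only the link is stored.
typedef struct __pmm_page {
    struct __pmm_page *next;
} pmm_page_t;
```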
### Memory Regions
Each memory region is tracked by a `pmm_region_t` structure that contains:
- Base address of the available memory
- Length of remaining memory
- Pointer to next region
The region structure is cleverly stored in the first page of the region itself, making the overhead minimal (just one 4KB page per region).
When a region has been totally consumed, its metadata page is itself turned into a free page that can be allocated.
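That recycling step looks like this in the allocator's refill path (`__pmm_steal_pages_from_region_head` in `pmm.c`):

```c
if (pmm_region_list_head->length == 0) {
    // The region is fully consumed: unlink it and hand its
    // metadata page to the free list.
    void *mem = (void*)physical((uint64_t)pmm_region_list_head);
    pmm_region_list_head = pmm_region_list_head->next;
    pmm_free_page(mem);
}
```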
### Free Page List
The free page list is a singly-linked list of individual pages that are ready for immediate allocation. It gets refilled from regions only when needed.
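Freeing a page is a single list push. Note that the list stores physical addresses, so the link is written through the higher-half mapping:

```c
void pmm_free_page(void *mem) {
    pmm_page_t *page = (pmm_page_t*)mem;
    pmm_page_t *page_hhalf = (pmm_page_t*)higher_half((uint64_t)page);
    page_hhalf->next = pmm_free_list_head; // write via the HHDM alias
    pmm_free_list_head = page;             // list keeps the physical address
    pmm_available_pages++;
}
```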
## Lazy Loading
Instead of initializing all free pages at boot time, the PMM:
1. Only initializes region structures during boot
2. Adds pages to the free list on-demand
3. Consumes memory regions gradually as needed
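In code, the lazy path is just the refill branch at the top of `pmm_alloc_page()`:

```c
if (!pmm_free_list_head) {
    if (!pmm_region_list_head) {
        fatal("pmm: out of memory!\n"); // nothing left anywhere
        hcf();
    }
    // Refill the free list with a small batch of pages.
    __pmm_steal_pages_from_region_head(4);
}
```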
This approach provides several benefits:
- Very fast boot times regardless of RAM size
- Memory overhead proportional to number of regions, not total RAM
- No performance penalty during normal operation
## Memory Organization
Physical memory is organized as follows:
- Each region's first page contains the region metadata
- Remaining pages in each region are available for allocation
- Pages are standard 4KB size
- Free pages are linked together in the free list
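All of these structures are accessed through Limine's higher-half direct map (HHDM). `pmm.h` provides two small helpers for the translation, built on the `limine_get_hhdm_offset()` accessor this commit adds (restated here in condensed form):

```c
// Physical address -> higher-half alias.
static inline uint64_t higher_half(uint64_t addr) {
    uint64_t hhdm_off = limine_get_hhdm_offset();
    return addr > hhdm_off ? addr : addr + hhdm_off;
}

// Higher-half alias -> physical address.
static inline uint64_t physical(uint64_t addr) {
    uint64_t hhdm_off = limine_get_hhdm_offset();
    return addr < hhdm_off ? addr : addr - hhdm_off;
}
```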
## Usage
The PMM provides three main functions (a usage sketch follows the list):
- `pmm_init()` - Initializes the PMM from the bootloader's memory map
- `pmm_alloc_page()` - Allocates a single 4KB page
- `pmm_free_page()` - Returns a page to the free list
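A minimal round trip, mirroring the smoke test in `kmain()`. The returned address is physical and must be translated before use; `pmm_demo` is only an illustrative wrapper, not part of this commit:

```c
#include <lib/log.h>
#include <mm/memop.h>
#include <mm/pmm.h>

void pmm_demo(void) {
    // Allocate a physical page and access it through the HHDM.
    uint8_t *mem = (uint8_t*)higher_half((uint64_t)pmm_alloc_page());
    memcpy(mem, "HelloWorld\0", 11);
    trace("pmm: Read from allocated memory: %s\n", mem);

    // Free by physical address.
    pmm_free_page((void*)physical((uint64_t)mem));
}
```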
## Implementation Details
### Region Initialization
During boot, the PMM:
1. Receives memory map from Limine
2. Identifies usable memory regions
3. Sets up region tracking structures
4. Calculates total available pages
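The corresponding loop in `pmm_init()`, condensed from this commit:

```c
for (uint64_t i = 0; i < mmap->entry_count; i++) {
    struct limine_memmap_entry *entry = mmap->entries[i];
    if (entry->type != LIMINE_MEMMAP_USABLE &&
        entry->type != LIMINE_MEMMAP_BOOTLOADER_RECLAIMABLE)
        continue;

    uint64_t newlen = ALIGN_DOWN(entry->length, PMM_PAGE_SIZE);

    // The first page of the entry holds the region metadata.
    pmm_region_t *reg = (pmm_region_t*)higher_half(entry->base);
    reg->base = entry->base + PMM_PAGE_SIZE;
    reg->length = newlen - PMM_PAGE_SIZE;
    reg->next = pmm_region_list_head;
    pmm_region_list_head = reg;

    pmm_available_pages += reg->length / PMM_PAGE_SIZE;
}
```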
### Page Allocation
When allocating pages:
1. First tries the free list
2. If the free list is empty:
   - Takes up to 4 pages from the current region
   - Adds them to the free list
   - Updates the region metadata
   - If the region has been fully consumed:
     - Lets the next region take the head
     - Frees the consumed region's metadata page
3. Returns the page to the caller (see the sketch after this list)
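Putting the steps together, the allocation path (condensed from `pmm.c`):

```c
void *pmm_alloc_page() {
    if (!pmm_free_list_head) {
        if (!pmm_region_list_head) {
            fatal("pmm: out of memory!\n");
            hcf();
        }
        __pmm_steal_pages_from_region_head(4); // refill from the head region
    }

    pmm_available_pages--;

    // Pop the head of the free list; the link is read through the
    // higher-half mapping, the caller gets the physical address.
    pmm_page_t *page = pmm_free_list_head;
    pmm_page_t *page_hhalf = (pmm_page_t*)higher_half((uint64_t)page);
    pmm_free_list_head = page_hhalf->next;
    return page;
}
```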
### Memory Tracking
The PMM maintains two counters:
- `pmm_total_pages` - total usable pages discovered at boot
- `pmm_available_pages` - pages currently free, whether on the free list or still unconsumed in a region
This allows for memory usage monitoring and OOM detection.
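A hypothetical monitoring helper built on the two counters (`pmm_stats` is not part of this commit, and assumes `trace()` accepts `%llu`):

```c
void pmm_stats(void) {
    // Both counters are in units of 4KB pages.
    trace("pmm: %llu/%llu pages free\n",
          (unsigned long long)pmm_available_pages,
          (unsigned long long)pmm_total_pages);
}
```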