
feat/kernel: Added a physical page manager with caching for single-page allocations

This commit is contained in:
Kevin Alavik 2025-05-14 15:39:33 +02:00
parent e2d6cfceea
commit 2bbc7dd70f
Signed by: cmpsb
GPG key ID: 10D1CC0526FDC6D7
8 changed files with 309 additions and 1 deletion
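For reference, the new allocator is driven through three calls declared in kernel/src/mm/pmm.h: pmm_init, pmm_request_pages, and pmm_release_pages. The sketch below is not part of the diff; it mirrors the test in emk_entry, and the example() wrapper is hypothetical.

#include <mm/pmm.h>

static void example(void)
{
    pmm_init();                              /* requires memmap and hhdm_offset to be set, as in emk_entry */
    void *page = pmm_request_pages(1, true); /* one page, returned as a higher-half pointer */
    if (page)
        pmm_release_pages(page, 1);          /* single-page frees refill the allocation cache */
}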

15
kernel/src/boot/emk.h Normal file

@@ -0,0 +1,15 @@
#ifndef EMK_H
#define EMK_H
#include <boot/limine.h>
#include <stdint.h>
extern uint64_t hhdm_offset;
extern struct limine_memmap_response *memmap;
/* Translate a physical address to/from the Limine higher-half direct map */
#define HIGHER_HALF(ptr) ((void *)((uint64_t)(ptr) + hhdm_offset))
#define PHYSICAL(ptr) ((void *)((uint64_t)(ptr) - hhdm_offset))
#define BIT(x) (1ULL << (x))
#endif // EMK_H


@@ -1,5 +1,6 @@
/* EMK 1.0 Copyright (c) 2025 Piraterna */
#include <boot/limine.h>
#include <boot/emk.h>
#include <arch/cpu.h>
#include <arch/io.h>
#include <dev/serial.h>
@@ -7,11 +8,22 @@
#include <util/log.h>
#include <arch/gdt.h>
#include <arch/idt.h>
#include <sys/kpanic.h>
#include <mm/pmm.h>
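/* Limine boot protocol requests, placed in the .limine_requests* sections (presumably grouped by the linker script) */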
__attribute__((used, section(".limine_requests"))) static volatile LIMINE_BASE_REVISION(3);
__attribute__((used, section(".limine_requests"))) static volatile struct limine_memmap_request memmap_request = {
.id = LIMINE_MEMMAP_REQUEST,
.revision = 0};
__attribute__((used, section(".limine_requests"))) static volatile struct limine_hhdm_request hhdm_request = {
.id = LIMINE_HHDM_REQUEST,
.revision = 0};
__attribute__((used, section(".limine_requests_start"))) static volatile LIMINE_REQUESTS_START_MARKER;
__attribute__((used, section(".limine_requests_end"))) static volatile LIMINE_REQUESTS_END_MARKER;
uint64_t hhdm_offset = 0;
struct limine_memmap_response *memmap = NULL;
void emk_entry(void)
{
if (serial_init(COM1) != 0)
@@ -33,5 +45,30 @@ void emk_entry(void)
idt_init();
log_early("Initialized IDT");
if (!hhdm_request.response)
{
kpanic(NULL, "Failed to get HHDM request");
}
if (!memmap_request.response)
{
kpanic(NULL, "Failed to get memmap request");
}
memmap = memmap_request.response;
hhdm_offset = hhdm_request.response->offset;
log_early("HHDM Offset: %llx", hhdm_offset);
pmm_init();
log_early("Initialized PMM");
/* Test allocate a single physical page */
char *a = pmm_request_pages(1, true);
if (!a)
kpanic(NULL, "Failed to allocate single physical page");
*a = 32;
log_early("Allocated 1 physical page: %llx", (uint64_t)a);
pmm_release_pages(a, 1);
hlt();
}

21
kernel/src/lib/bitmap.h Normal file

@@ -0,0 +1,21 @@
#ifndef BITMAP_H
#define BITMAP_H
#include <stdint.h>
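/* Minimal bitmap helpers: byte index = bit / 8, mask = 1 << (bit % 8) */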
static inline void bitmap_set(uint8_t *bitmap, uint64_t bit)
{
bitmap[bit / 8] |= 1 << (bit % 8);
}
static inline void bitmap_clear(uint8_t *bitmap, uint64_t bit)
{
bitmap[bit / 8] &= ~(1 << (bit % 8));
}
static inline uint8_t bitmap_get(uint8_t *bitmap, uint64_t bit)
{
return (bitmap[bit / 8] & (1 << (bit % 8))) != 0;
}
#endif // BITMAP_H

170
kernel/src/mm/pmm.c Normal file

@@ -0,0 +1,170 @@
#include <mm/pmm.h>
#include <boot/emk.h>
#include <util/log.h>
#include <arch/cpu.h>
#include <util/align.h>
#include <lib/string.h>
#include <lib/bitmap.h>
#include <sys/spinlock.h>
#define PAGE_CACHE_SIZE 1024
#define MIN_ALIGN PAGE_SIZE
#define BITMAP_WORD_SIZE (sizeof(uint64_t) * 8)
uint64_t bitmap_pages;
uint64_t bitmap_size;
uint8_t *bitmap;
static uint64_t free_pages;
static spinlock_t pmm_lock;
static uint64_t page_cache[PAGE_CACHE_SIZE];
static size_t cache_size;
static size_t cache_index;
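/* LIFO cache of recently freed single pages, stored as page indices; pmm_request_pages(1, ...) pops from here before scanning the bitmap */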
static inline bool is_aligned(void *addr, size_t align)
{
return ((uintptr_t)addr % align) == 0;
}
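/* Build the allocator state from the Limine memory map: find the highest usable
 * address, carve the bitmap out of the first usable region large enough to hold
 * it, mark every page as used (0xFF), then clear the bits covering usable pages. */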
void pmm_init(void)
{
spinlock_init(&pmm_lock);
uint64_t high = 0;
free_pages = 0;
for (uint64_t i = 0; i < memmap->entry_count; i++)
{
struct limine_memmap_entry *e = memmap->entries[i];
if (e->type == LIMINE_MEMMAP_USABLE)
{
uint64_t top = e->base + e->length;
if (top > high)
high = top;
free_pages += e->length / PAGE_SIZE;
log_early("Usable memory region: 0x%.16llx -> 0x%.16llx", e->base, e->base + e->length);
}
}
bitmap_pages = high / PAGE_SIZE;
bitmap_size = ALIGN_UP(DIV_ROUND_UP(bitmap_pages, 8), PAGE_SIZE);
for (uint64_t i = 0; i < memmap->entry_count; i++)
{
struct limine_memmap_entry *e = memmap->entries[i];
if (e->type == LIMINE_MEMMAP_USABLE && e->length >= bitmap_size)
{
bitmap = (uint8_t *)(e->base + hhdm_offset);
memset(bitmap, 0xFF, bitmap_size);
e->base += bitmap_size;
e->length -= bitmap_size;
free_pages -= bitmap_size / PAGE_SIZE;
break;
}
}
cache_size = PAGE_CACHE_SIZE;
cache_index = 0;
memset(page_cache, 0, sizeof(page_cache));
for (uint64_t i = 0; i < memmap->entry_count; i++)
{
struct limine_memmap_entry *e = memmap->entries[i];
if (e->type == LIMINE_MEMMAP_USABLE)
{
for (uint64_t j = e->base; j < e->base + e->length; j += PAGE_SIZE)
{
if ((j / PAGE_SIZE) < bitmap_pages)
{
bitmap_clear(bitmap, j / PAGE_SIZE);
}
}
}
}
}
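/* First-fit scan over the bitmap, skipping 64-bit words that are completely full.
 * The run counter resets at each word, so runs straddling a word boundary are not
 * found. Single-page requests try the page cache before scanning. */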
void *pmm_request_pages(size_t pages, bool higher_half)
{
if (pages == 0 || pages > free_pages)
return NULL;
spinlock_acquire(&pmm_lock);
if (pages == 1)
{
/* Fast path: pop cached frames, skipping entries re-allocated by a bitmap scan since being cached */
while (cache_index > 0)
{
uint64_t page = page_cache[--cache_index];
if (bitmap_get(bitmap, page))
continue;
bitmap_set(bitmap, page);
free_pages--;
void *addr = (void *)(page * PAGE_SIZE);
spinlock_release(&pmm_lock);
return higher_half ? (void *)((uint64_t)addr + hhdm_offset) : addr;
}
}
uint64_t word_count = (bitmap_pages + BITMAP_WORD_SIZE - 1) / BITMAP_WORD_SIZE;
uint64_t *bitmap_words = (uint64_t *)bitmap;
for (uint64_t i = 0; i < word_count; i++)
{
if (bitmap_words[i] != UINT64_MAX)
{
uint64_t start_bit = i * BITMAP_WORD_SIZE;
uint64_t consecutive = 0;
for (uint64_t j = 0; j < BITMAP_WORD_SIZE && start_bit + j < bitmap_pages; j++)
{
if (!bitmap_get(bitmap, start_bit + j))
{
if (++consecutive == pages)
{
for (uint64_t k = 0; k < pages; k++)
{
bitmap_set(bitmap, start_bit + j - pages + 1 + k);
}
free_pages -= pages;
void *addr = (void *)((start_bit + j - pages + 1) * PAGE_SIZE);
spinlock_release(&pmm_lock);
return higher_half ? (void *)((uint64_t)addr + hhdm_offset) : addr;
}
}
else
{
consecutive = 0;
}
}
}
}
spinlock_release(&pmm_lock);
return NULL;
}
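/* Accepts either a higher-half or a physical pointer (the HHDM offset is stripped
 * when the address is above it), silently ignores unaligned or out-of-range pointers,
 * clears the matching bitmap bits, and pushes single-page frees back onto the cache. */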
void pmm_release_pages(void *ptr, size_t pages)
{
if (!ptr || !is_aligned(ptr, MIN_ALIGN))
return;
spinlock_acquire(&pmm_lock);
uint64_t start = ((uint64_t)ptr - (hhdm_offset * ((uint64_t)ptr >= hhdm_offset))) / PAGE_SIZE;
if (start + pages > bitmap_pages)
{
spinlock_release(&pmm_lock);
return;
}
for (size_t i = 0; i < pages; i++)
{
if (bitmap_get(bitmap, start + i))
{
bitmap_clear(bitmap, start + i);
free_pages++;
if (pages == 1 && cache_index < cache_size)
{
page_cache[cache_index++] = start;
}
}
}
spinlock_release(&pmm_lock);
}

13
kernel/src/mm/pmm.h Normal file

@@ -0,0 +1,13 @@
#ifndef PMM_H
#define PMM_H
#include <stddef.h>
#include <stdbool.h>
#define PAGE_SIZE 0x1000
void pmm_init(void);
void *pmm_request_pages(size_t pages, bool higher_half);
void pmm_release_pages(void *ptr, size_t pages);
#endif // PMM_H

40
kernel/src/sys/spinlock.h Normal file

@@ -0,0 +1,40 @@
#ifndef SPINLOCK_H
#define SPINLOCK_H
#include <stdbool.h>
#include <stdint.h>
typedef struct
{
volatile uint32_t lock; // 0 = unlocked, 1 = locked
} spinlock_t;
static inline void spinlock_init(spinlock_t *lock)
{
lock->lock = 0;
}
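/* Test-and-set spinlock; PAUSE relaxes the CPU while spinning. */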
static inline void spinlock_acquire(spinlock_t *lock)
{
while (__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE))
{
asm volatile("pause" ::: "memory");
}
}
static inline void spinlock_release(spinlock_t *lock)
{
__atomic_clear(&lock->lock, __ATOMIC_RELEASE);
}
static inline bool spinlock_try_acquire(spinlock_t *lock)
{
return !__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE);
}
static inline bool spinlock_held(spinlock_t *lock)
{
return __atomic_load_n(&lock->lock, __ATOMIC_RELAXED) != 0;
}
#endif // SPINLOCK_H

10
kernel/src/util/align.h Normal file

@@ -0,0 +1,10 @@
#ifndef ALIGN_H
#define ALIGN_H
#include <stdint.h>
#define DIV_ROUND_UP(x, y) (((uint64_t)(x) + ((uint64_t)(y) - 1)) / (uint64_t)(y))
#define ALIGN_UP(x, y) (DIV_ROUND_UP(x, y) * (uint64_t)(y))
#define ALIGN_DOWN(x, y) (((uint64_t)(x) / (uint64_t)(y)) * (uint64_t)(y))
#define IS_PAGE_ALIGNED(x) (((uintptr_t)(x) & (PAGE_SIZE - 1)) == 0)
#endif // ALIGN_H