#ifndef EMK_H
#define EMK_H

#include <stdint.h>
#include <stddef.h>

/* Higher-half direct-map (HHDM) offset, filled in from the Limine HHDM
 * response during boot (see emk.c). */
extern uint64_t hhdm_offset;

/* Limine memory-map response cached at boot (see emk.c).  Full type comes
 * from limine.h; a pointer to the incomplete struct is enough here. */
extern struct limine_memmap_response *memmap;

/*
 * Translate between physical addresses and their higher-half mappings.
 *
 * The argument is parenthesized and the offset is added/subtracted in
 * uint64_t arithmetic *before* the cast to void *:
 *  - `(uint64_t)(ptr)` (not `(uint64_t)ptr`) so expression arguments such
 *    as HIGHER_HALF(base + len) expand correctly (casts bind tighter
 *    than `+`), and
 *  - no arithmetic is ever performed on void * (a GNU extension, not ISO C).
 */
#define HIGHER_HALF(ptr) ((void *)((uint64_t)(ptr) + hhdm_offset))
#define PHYSICAL(ptr) ((void *)((uint64_t)(ptr) - hhdm_offset))

/* Single-bit mask; x must be in 0..63 (shifting by >= 64 is UB). */
#define BIT(x) (1ULL << (x))

#endif // EMK_H
#ifndef BITMAP_H
#define BITMAP_H

#include <stdint.h>

/*
 * Packed bit-array helpers: bit N lives in byte N/8, at LSB-first
 * position N%8.  All three helpers are branch-free and inline.
 */

/* Mark `bit` as set. */
static inline void bitmap_set(uint8_t *bitmap, uint64_t bit)
{
    uint64_t byte = bit >> 3;
    uint8_t mask = (uint8_t)(1u << (bit & 7u));
    bitmap[byte] |= mask;
}

/* Mark `bit` as clear. */
static inline void bitmap_clear(uint8_t *bitmap, uint64_t bit)
{
    uint64_t byte = bit >> 3;
    uint8_t mask = (uint8_t)(1u << (bit & 7u));
    bitmap[byte] &= (uint8_t)~mask;
}

/* Return 1 when `bit` is set, 0 otherwise. */
static inline uint8_t bitmap_get(uint8_t *bitmap, uint64_t bit)
{
    uint64_t byte = bit >> 3;
    uint8_t mask = (uint8_t)(1u << (bit & 7u));
    return (bitmap[byte] & mask) ? 1 : 0;
}

#endif // BITMAP_H
i < memmap->entry_count; i++) + { + struct limine_memmap_entry *e = memmap->entries[i]; + if (e->type == LIMINE_MEMMAP_USABLE) + { + uint64_t top = e->base + e->length; + if (top > high) + high = top; + free_pages += e->length / PAGE_SIZE; + log_early("Usable memory region: 0x%.16llx -> 0x%.16llx", e->base, e->base + e->length); + } + } + + bitmap_pages = high / PAGE_SIZE; + bitmap_size = ALIGN_UP(bitmap_pages / 8, PAGE_SIZE); + + for (uint64_t i = 0; i < memmap->entry_count; i++) + { + struct limine_memmap_entry *e = memmap->entries[i]; + if (e->type == LIMINE_MEMMAP_USABLE && e->length >= bitmap_size) + { + bitmap = (uint8_t *)(e->base + hhdm_offset); + memset(bitmap, 0xFF, bitmap_size); + e->base += bitmap_size; + e->length -= bitmap_size; + free_pages -= bitmap_size / PAGE_SIZE; + break; + } + } + + cache_size = PAGE_CACHE_SIZE; + cache_index = 0; + memset(page_cache, 0, sizeof(page_cache)); + + for (uint64_t i = 0; i < memmap->entry_count; i++) + { + struct limine_memmap_entry *e = memmap->entries[i]; + if (e->type == LIMINE_MEMMAP_USABLE) + { + for (uint64_t j = e->base; j < e->base + e->length; j += PAGE_SIZE) + { + if ((j / PAGE_SIZE) < bitmap_pages) + { + bitmap_clear(bitmap, j / PAGE_SIZE); + } + } + } + } +} + +void *pmm_request_pages(size_t pages, bool higher_half) +{ + if (pages == 0 || pages > free_pages) + return NULL; + + spinlock_acquire(&pmm_lock); + + if (pages == 1 && cache_index > 0) + { + void *addr = (void *)(page_cache[--cache_index] * PAGE_SIZE); + bitmap_set(bitmap, (uint64_t)addr / PAGE_SIZE); + free_pages--; + spinlock_release(&pmm_lock); + return higher_half ? 
(void *)((uint64_t)addr + hhdm_offset) : addr; + } + + uint64_t word_count = (bitmap_pages + BITMAP_WORD_SIZE - 1) / BITMAP_WORD_SIZE; + uint64_t *bitmap_words = (uint64_t *)bitmap; + + for (uint64_t i = 0; i < word_count; i++) + { + if (bitmap_words[i] != UINT64_MAX) + { + uint64_t start_bit = i * BITMAP_WORD_SIZE; + uint64_t consecutive = 0; + + for (uint64_t j = 0; j < BITMAP_WORD_SIZE && start_bit + j < bitmap_pages; j++) + { + if (!bitmap_get(bitmap, start_bit + j)) + { + if (++consecutive == pages) + { + for (uint64_t k = 0; k < pages; k++) + { + bitmap_set(bitmap, start_bit + j - pages + 1 + k); + } + free_pages -= pages; + + void *addr = (void *)((start_bit + j - pages + 1) * PAGE_SIZE); + spinlock_release(&pmm_lock); + return higher_half ? (void *)((uint64_t)addr + hhdm_offset) : addr; + } + } + else + { + consecutive = 0; + } + } + } + } + + spinlock_release(&pmm_lock); + return NULL; +} + +void pmm_release_pages(void *ptr, size_t pages) +{ + if (!ptr || !is_aligned(ptr, MIN_ALIGN)) + return; + + spinlock_acquire(&pmm_lock); + + uint64_t start = ((uint64_t)ptr - (hhdm_offset * ((uint64_t)ptr >= hhdm_offset))) / PAGE_SIZE; + + if (start + pages > bitmap_pages) + { + spinlock_release(&pmm_lock); + return; + } + + for (size_t i = 0; i < pages; i++) + { + if (bitmap_get(bitmap, start + i)) + { + bitmap_clear(bitmap, start + i); + free_pages++; + + if (pages == 1 && cache_index < cache_size) + { + page_cache[cache_index++] = start; + } + } + } + + spinlock_release(&pmm_lock); +} diff --git a/kernel/src/mm/pmm.h b/kernel/src/mm/pmm.h new file mode 100644 index 0000000..e0542e8 --- /dev/null +++ b/kernel/src/mm/pmm.h @@ -0,0 +1,13 @@ +#ifndef PMM_H +#define PMM_H + +#include +#include + +#define PAGE_SIZE 0x1000 + +void pmm_init(); +void *pmm_request_pages(size_t pages, bool higher_half); +void pmm_release_pages(void *ptr, size_t pages); + +#endif // PMM_H \ No newline at end of file diff --git a/kernel/src/sys/spinlock.h b/kernel/src/sys/spinlock.h new file 
#ifndef SPINLOCK_H
#define SPINLOCK_H

#include <stdint.h>
#include <stdbool.h>

/*
 * Minimal test-and-set spinlock for short kernel critical sections.
 * Not recursive: acquiring a lock the caller already holds deadlocks.
 */
typedef struct
{
    volatile uint32_t lock; // 0 = unlocked, 1 = locked
} spinlock_t;

/* Initialize (or reset) a lock to the unlocked state. */
static inline void spinlock_init(spinlock_t *lock)
{
    lock->lock = 0;
}

/* Spin until the lock is acquired (acquire ordering). */
static inline void spinlock_acquire(spinlock_t *lock)
{
    while (__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE))
    {
        /* Use __asm__/__volatile__ rather than the `asm` keyword so this
         * header also compiles under strict ISO modes (e.g. -std=c11),
         * where `asm` is not defined.  `pause` relaxes the pipeline while
         * spinning on x86. */
        __asm__ __volatile__("pause" ::: "memory");
    }
}

/* Release the lock (release ordering). */
static inline void spinlock_release(spinlock_t *lock)
{
    __atomic_clear(&lock->lock, __ATOMIC_RELEASE);
}

/* Try to take the lock once; returns true on success, false if held. */
static inline bool spinlock_try_acquire(spinlock_t *lock)
{
    return !__atomic_test_and_set(&lock->lock, __ATOMIC_ACQUIRE);
}

/* Return true when the lock is currently held (relaxed snapshot; for
 * assertions/diagnostics only, not for synchronization decisions). */
static inline bool spinlock_held(spinlock_t *lock)
{
    return __atomic_load_n(&lock->lock, __ATOMIC_RELAXED) != 0;
}

#endif // SPINLOCK_H