+ vmm: higher half should not be identity-mapped
+ panic: now displays current process
+ kernel: fix physical addresses (do Not Identity Map Higher Half)
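The vmm and kernel entries above concern dropping identity mappings of the higher half and instead reaching physical memory through a direct-map offset. As a rough illustration of that pattern only (not taken from this repository; the offset constant and helper names below are assumptions, and many kernels obtain the offset from their bootloader rather than hard-coding it), such translation helpers typically look like:

#include <stdint.h>

// Assumed higher-half direct-map base; a real kernel would usually
// receive this value from its bootloader instead of hard-coding it.
#define HHDM_OFFSET 0xffff800000000000ULL

// Return the higher-half virtual alias of a physical address.
static inline void *phys_to_virt(uint64_t phys) {
    return (void *)(uintptr_t)(phys + HHDM_OFFSET);
}

// Recover the physical address behind a higher-half virtual pointer.
static inline uint64_t virt_to_phys(const void *virt) {
    return (uint64_t)(uintptr_t)virt - HHDM_OFFSET;
}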
C · 31 lines · No EOL · 769 B
#pragma once

#include <stdbool.h>
#include <stdint.h>

// Minimal test-and-set spinlock.
typedef struct spinlock {
    volatile int locked;
} spinlock_t;

static inline void spinlock_acquire(spinlock_t *lock) {
    for (;;) {
        // Try to take the lock with an atomic exchange (acquire ordering).
        if (__atomic_exchange_n(&lock->locked, 1, __ATOMIC_ACQUIRE) == 0) {
            return;
        }

        // Lock is held: spin on a cheap relaxed load until it looks free,
        // then loop back and retry the exchange. A timeout that force-unlocks
        // after too many attempts was prototyped here but is disabled, since
        // forcibly clearing the lock would break mutual exclusion.
        while (__atomic_load_n(&lock->locked, __ATOMIC_RELAXED)) {
            __asm__ volatile("pause" ::: "memory");
        }
    }
}

static inline void spinlock_release(spinlock_t *lock) {
    __atomic_store_n(&lock->locked, 0, __ATOMIC_RELEASE);
}
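A minimal usage sketch of this header, assuming it is installed as spinlock.h; the protected counter and function name are illustrative, not taken from this repository:

#include <stdint.h>
#include "spinlock.h"

static spinlock_t counter_lock = { .locked = 0 };
static uint64_t counter;

void counter_increment(void) {
    spinlock_acquire(&counter_lock);
    counter++;                       // critical section
    spinlock_release(&counter_lock);
}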