// os-k/kaleid/kernel/mm/paging.c


#include <kernel.h>
#include <init/boot.h>
#include <ex/malloc.h>
#include <mm/mm.h>
#include <ke/idt.h>
#include <lib/buf.h>
#include <io/vga.h>
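
//
// Identity paging setup for the kernel.
//
// Layout built by the code below: MmPML4[0] -> MmPDP -> MmPD -> MmPT.
// The first NB_4K * 2 MB of physical memory are identity-mapped with 4 KB
// pages, so that the stack guard pages and the kernel sections can get
// individual flags; the rest of the RAM, up to RAM_MAX GiB, is
// identity-mapped with 2 MB huge pages.
//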
// x86-64 page table entry flag bits
enum
{
    MF_PRESENT   = 1 << 0,
    MF_READWRITE = 1 << 1,
    MF_USERMODE  = 1 << 2,
    MF_WRITETHR  = 1 << 3,
    MF_CACHEDIS  = 1 << 4,
    MF_ACCESSED  = 1 << 5,
    MF_DIRTY     = 1 << 6,
    MF_HUGE      = 1 << 7,
    MF_NX        = 1 << 31
};
#define RAM_MAX 32      // Max physical RAM covered by MmPD, in GiB (one PD per GiB)
#define NB_4K   150     // Number of page tables: NB_4K * 2 MB mapped with 4 KB pages
//-----------
volatile pdpe_t MmPML4[512]         __attribute__((__aligned__(KPAGESIZE)));   // top-level table
volatile pde_t  MmPDP[512]          __attribute__((__aligned__(KPAGESIZE)));   // one entry per GiB
volatile pde_t  MmPD[512 * RAM_MAX] __attribute__((__aligned__(KPAGESIZE)));   // one entry per 2 MiB
volatile pte_t  MmPT[512 * NB_4K]   __attribute__((__aligned__(KPAGESIZE)));   // one entry per 4 KiB
// Kernel section boundaries provided by the linker script
// (take &_symbol to get the section's address)
extern ulong _text;
extern ulong _text_end;
extern ulong _rodata;
extern ulong _rodata_end;
extern ulong _data;
extern ulong _data_end;

// Physical addresses of the two non-present guard pages (stack end, kernel end)
ulong MmStackGuards[2] = { 0 };
//
// Creates our new page table structure and loads it
//
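// Steps: clear all four tables, identity-map the low NB_4K * 2 MB with 4 KB
// pages (guard pages left non-present, .text/.rodata/.data mapped read-only,
// everything else read-write), map the remaining RAM with 2 MB huge pages,
// then load the new PML4.
//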
void MmInitPaging(void)
{
    extern MemoryMap_t memoryMap;
    ulong phRamSize = memoryMap.freeRamSize + memoryMap.nonfreeRamSize;

    // Start from a clean slate
    memzero((void *)&MmPML4[0], sizeof(MmPML4));
    memzero((void *)&MmPDP[0],  sizeof(MmPDP));
    memzero((void *)&MmPD[0],   sizeof(MmPD));
    memzero((void *)&MmPT[0],   sizeof(MmPT));
    // Map the first NB_4K * 2 MB with 4 KB pages
    for (volatile ulong i = 0; i < 512 * NB_4K; i++) {
        // STACK GUARD PAGE (left non-present)
        if ((ulong)(i*KPAGESIZE) == (ulong)BtLoaderInfo.stackEndAddr) {
            MmPT[i] = ((ulong)(i*KPAGESIZE));
            MmStackGuards[0] = ((ulong)(i*KPAGESIZE));
            continue;
        }

        // ENOMEM like
        if ((ulong)(i*KPAGESIZE) > (ulong)phRamSize) {
            break;
        }

        // STACK GUARD PAGE (left non-present)
        if ((ulong)(i*KPAGESIZE) == (ulong)BtLoaderInfo.kernelEndAddr) {
            MmPT[i] = ((ulong)(i*KPAGESIZE));
            MmStackGuards[1] = ((ulong)(i*KPAGESIZE));
            continue;
        }

        // TEXT (present, read-only)
        if ((ulong)(i*KPAGESIZE) >= (ulong)&_text && (ulong)(i*KPAGESIZE) <= (ulong)&_text_end) {
            MmPT[i] = ((ulong)(i*KPAGESIZE)) | MF_PRESENT;
            continue;
        }

        // RODATA (present, read-only)
        if ((ulong)(i*KPAGESIZE) >= (ulong)&_rodata && (ulong)(i*KPAGESIZE) <= (ulong)&_rodata_end) {
            MmPT[i] = ((ulong)(i*KPAGESIZE)) | MF_PRESENT;
            continue;
        }

        // DATA (present, read-only)
        if ((ulong)(i*KPAGESIZE) >= (ulong)&_data && (ulong)(i*KPAGESIZE) <= (ulong)&_data_end) {
            MmPT[i] = ((ulong)(i*KPAGESIZE)) | MF_PRESENT;
            continue;
        }

        // Everything else: present, read-write
        MmPT[i] = ((ulong)(i*KPAGESIZE)) | MF_PRESENT | MF_READWRITE;
    }
    // Point the first NB_4K PD entries to the 4 KB page tables
    for (volatile ulong i = 0; i < NB_4K; i++) {
        MmPD[i] = (ulong)(&MmPT[i*512]) | MF_PRESENT | MF_READWRITE;
    }

    // Map the rest of the RAM with 2 MB huge pages
    for (volatile ulong i = NB_4K; i < 512 * RAM_MAX; i++) {
        // ENOMEM like
        if ((ulong)(i * UPAGESIZE) > (ulong)phRamSize) {
            break;
        }

        MmPD[i] = ((ulong)(i * UPAGESIZE)) | MF_PRESENT | MF_READWRITE | MF_HUGE;
    }

    // Point the PDP entries to the page directories
    for (volatile int i = 0; i < RAM_MAX; i++) {
        MmPDP[i] = (ulong)(&MmPD[i*512]) | MF_PRESENT | MF_READWRITE;
    }
    MmPML4[0] = (ulong)(&MmPDP[0]) | MF_PRESENT | MF_READWRITE;

    MmLoadPML4((void *)MmPML4);

    //DebugLog("\tPaging tables initialized at %p, %p\n", &MmPD, &MmPT);
    //DebugLog("\tStack Guards at %p, %p\n", MmStackGuards[0], MmStackGuards[1]);
}
//
// Reloads the page tables
//
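// Rebuilds the identity mapping in place, without clearing the tables first
// and without re-applying the read-only section mappings: apart from the two
// guard pages, every 4 KB page is remapped read-write. Ends by loading the
// PML4 again.
//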
void MmReloadPaging(void)
{
    extern MemoryMap_t memoryMap;
    ulong phRamSize = memoryMap.freeRamSize + memoryMap.nonfreeRamSize;

    for (volatile ulong i = 0; i < 512 * NB_4K; i++) {
        // STACK GUARD PAGE
        if ((ulong)(i*KPAGESIZE) == (ulong)BtLoaderInfo.stackEndAddr) {
            MmPT[i] = ((ulong)(i*KPAGESIZE));
            MmStackGuards[0] = ((ulong)(i*KPAGESIZE));
            continue;
        }

        // ENOMEM like
        if ((ulong)(i*KPAGESIZE) > (ulong)phRamSize) {
            break;
        }

        // STACK GUARD PAGE
        if ((ulong)(i*KPAGESIZE) == (ulong)BtLoaderInfo.kernelEndAddr) {
            MmPT[i] = ((ulong)(i*KPAGESIZE));
            MmStackGuards[1] = ((ulong)(i*KPAGESIZE));
            continue;
        }

        MmPT[i] = ((ulong)(i*KPAGESIZE)) | MF_PRESENT | MF_READWRITE;
    }
    for (volatile ulong i = 0; i < NB_4K; i++) {
        MmPD[i] = (ulong)(&MmPT[i*512]) | MF_PRESENT | MF_READWRITE;
    }

    for (volatile ulong i = NB_4K; i < 512 * RAM_MAX; i++) {
        // ENOMEM like
        if ((ulong)(i * UPAGESIZE) > (ulong)phRamSize) {
            break;
        }

        MmPD[i] = ((ulong)(i * UPAGESIZE)) | MF_PRESENT | MF_READWRITE | MF_HUGE;
    }

    for (volatile int i = 0; i < RAM_MAX; i++) {
        MmPDP[i] = (ulong)(&MmPD[i*512]) | MF_PRESENT | MF_READWRITE;
    }
    MmPML4[0] = (ulong)(&MmPDP[0]) | MF_PRESENT | MF_READWRITE;

    MmLoadPML4((void *)MmPML4);

    DebugLog("\tPaging tables reloaded at %p, %p\n", &MmPD, &MmPT);
    DebugLog("\tStack Guards at %p, %p\n", MmStackGuards[0], MmStackGuards[1]);
}
// Returns the address of the stack guard page of the given rank (0 or 1)
void *MmGetStackGuards(char rank)
{
    return (void *)MmStackGuards[(int)rank];
}
// Returns the page table entry (physical address and flags) for the given PT rank
void *MmTranslateKPageToAddr(void *rank)
{
    return (void *)MmPT[(ulong)rank];
}
//
// Page fault handler
//
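// Panics in both cases: a fault in the area just above the stack guard page
// is reported as a kernel stack fault, anything else as a plain kernel page
// fault; the handler then dumps the registers and halts the CPU.
//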
static void PagingHandler(ISRFrame_t *regs)
{
    ulong StackGuardOne = (ulong)MmGetStackGuards(0);

    if (regs->cr2 >= StackGuardOne && (regs->rsp + 4*KB >= regs->cr2)) {
        // The faulting access hit the stack guard page
        bprintf(BStdOut,
            "\n\n%CPANIC\n[ISR 0xE] Irrecoverable Kernel Stack Underflow\n\n"
            " Page Fault Error code : %#x (%b)\n"
            " Stack Guard bypassed : %#x",
            VGA_COLOR_LIGHT_RED,
            regs->ErrorCode,
            regs->ErrorCode,
            StackGuardOne
        );
    } else {
        //XXX page fault
        bprintf(BStdOut, "\n\n%CPANIC\n[ISR 0x%x] Irrecoverable Kernel Page Fault at %p\n\n"
            " Error code : 0x%x (%b)",
            VGA_COLOR_LIGHT_RED,
            regs->intNo,
            regs->cr2,
            regs->ErrorCode,
            regs->ErrorCode
        );
    }

    KeBrkDumpRegisters(regs);

    BStdOut->flusher(BStdOut);

    KeHaltCPU();
}
//
// Registers the page fault handler
//
void MmActivatePageHandler(void)
{
    // Install PagingHandler on vector 0xE (#PF)
    KeRegisterISR(PagingHandler, 0xe);
    DebugLog("\tPage fault handler activated\n");
}
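
// Expected usage from the boot path (sketch only; the actual call sites are
// outside this file and the exact order is an assumption):
//
//     MmInitPaging();           // build and load the identity-mapped tables
//     MmActivatePageHandler();  // then install the page fault handler
//     ...
//     MmReloadPaging();         // later, if the tables need to be rebuilt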