// os-k/kaleid/kernel/mm/paging.c

#include <kernel.h>
#include <init/boot.h>
#include <ex/malloc.h>
#include <mm/mm.h>
#include <ke/idt.h>
#include <lib/buf.h>
#include <io/vga.h>

// Page directory pointer entry
typedef ulong pdpe_t;

// Page directory entry
typedef ulong pde_t;

// Page table entry
typedef ulong pte_t;

// Implemented in paging.asm
void MmLoadPML4(void *);
void MmEnableWriteProtect(void);
void MmDisableWriteProtect(void);

void *MmGetStackGuards(char rank);

// Page entry flag bits
enum
{
    MF_PRESENT   = 1 << 0,
    MF_READWRITE = 1 << 1,
    MF_USERMODE  = 1 << 2,
    MF_WRITETHR  = 1 << 3,
    MF_CACHEDIS  = 1 << 4,
    MF_ACCESSED  = 1 << 5,
    MF_DIRTY     = 1 << 6,
    MF_HUGE      = 1 << 7,
    MF_NX        = 1 << 31  // NB: the architectural no-execute bit is bit 63 of a long-mode entry
};
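
// For illustration only (this mirrors what MmInitPaging() does below, it is
// not additional mapping code): a present, writable, identity-mapped 4 KB
// kernel page for a physical address "phys" would be encoded as
//
//     MmPT[phys / KPAGESIZE] = phys | MF_PRESENT | MF_READWRITE;
//
// while a 2 MB mapping placed directly in the page directory would also
// carry MF_HUGE.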

#define RAM_MAX 32   // Maximum amount of RAM handled, in GB (one page directory per GB)
#define NB_4K   150  // Number of PD entries backed by 4 KB page tables (each entry covers 2 MB)

//-----------

volatile pdpe_t MmPML4[512]         __attribute__((__aligned__(KPAGESIZE)));

volatile pde_t  MmPDP[512]          __attribute__((__aligned__(KPAGESIZE)));

volatile pde_t  MmPD[512 * RAM_MAX] __attribute__((__aligned__(KPAGESIZE)));

volatile pte_t  MmPT[512 * NB_4K]   __attribute__((__aligned__(KPAGESIZE)));

ulong MmStackGuards[2] = { 0 };

//
// Creates our new page table structure and loads it
//
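// Layout sketch (inferred from the loops below, not from separate
// documentation; physical memory is identity-mapped):
//   - the first NB_4K * 2 MB of RAM are mapped with 4 KB pages through MmPT,
//     except the two stack guard pages, which are left non-present so that
//     any access to them faults;
//   - RAM beyond that, up to RAM_MAX GB, is mapped with 2 MB huge pages
//     directly in MmPD;
//   - MmPDP holds one entry per page directory (1 GB each) and MmPML4[0]
//     points to MmPDP.
//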
void MmInitPaging(void)
{
    extern MemoryMap_t memoryMap;
    ulong phRamSize = memoryMap.freeRamSize + memoryMap.nonfreeRamSize;

    memzero((void *)&MmPML4[0], sizeof(MmPML4));
    memzero((void *)&MmPDP[0],  sizeof(MmPDP));
    memzero((void *)&MmPD[0],   sizeof(MmPD));
    memzero((void *)&MmPT[0],   sizeof(MmPT));

    for (volatile ulong i = 0; i < 512 * NB_4K; i++) {
        // STACK GUARD PAGE (left non-present so any access faults)
        if ((ulong)(i * KPAGESIZE) == (ulong)BtLoaderInfo.stackEndAddr) {
            MmPT[i] = ((ulong)(i * KPAGESIZE));
            MmStackGuards[0] = ((ulong)(i * KPAGESIZE));
            continue;
        }

        // ENOMEM-like: stop once past the end of physical RAM
        if ((ulong)(i * KPAGESIZE) > (ulong)phRamSize) {
            break;
        }

        // STACK GUARD PAGE (left non-present so any access faults)
        if ((ulong)(i * KPAGESIZE) == (ulong)BtLoaderInfo.kernelEndAddr) {
            MmPT[i] = ((ulong)(i * KPAGESIZE));
            MmStackGuards[1] = ((ulong)(i * KPAGESIZE));
            continue;
        }

        MmPT[i] = ((ulong)(i * KPAGESIZE)) | MF_PRESENT | MF_READWRITE;
    }

    for (volatile ulong i = 0; i < NB_4K; i++) {
        MmPD[i] = (ulong)(&MmPT[i * 512]) | MF_PRESENT | MF_READWRITE;
    }

    for (volatile ulong i = NB_4K; i < 512 * RAM_MAX; i++) {
        // ENOMEM-like: stop once past the end of physical RAM
        if ((ulong)(i * UPAGESIZE) > (ulong)phRamSize) {
            break;
        }

        MmPD[i] = ((ulong)(i * UPAGESIZE)) | MF_PRESENT | MF_READWRITE | MF_HUGE;
    }

    for (volatile int i = 0; i < RAM_MAX; i++) {
        MmPDP[i] = (ulong)(&MmPD[i * 512]) | MF_PRESENT | MF_READWRITE;
    }

    MmPML4[0] = (ulong)(&MmPDP[0]) | MF_PRESENT | MF_READWRITE;

    MmLoadPML4((void *)MmPML4);

    DebugLog("\tPaging tables initialized at %p, %p\n", &MmPD, &MmPT);
    DebugLog("\tStack Guards at %p, %p\n", MmStackGuards[0], MmStackGuards[1]);
}

//
// Reloads the page tables
//
void MmReloadPaging(void)
{
    extern MemoryMap_t memoryMap;
    ulong phRamSize = memoryMap.freeRamSize + memoryMap.nonfreeRamSize;

    for (volatile ulong i = 0; i < 512 * NB_4K; i++) {
        // STACK GUARD PAGE (left non-present so any access faults)
        if ((ulong)(i * KPAGESIZE) == (ulong)BtLoaderInfo.stackEndAddr) {
            MmPT[i] = ((ulong)(i * KPAGESIZE));
            MmStackGuards[0] = ((ulong)(i * KPAGESIZE));
            continue;
        }

        // ENOMEM-like: stop once past the end of physical RAM
        if ((ulong)(i * KPAGESIZE) > (ulong)phRamSize) {
            break;
        }

        // STACK GUARD PAGE (left non-present so any access faults)
        if ((ulong)(i * KPAGESIZE) == (ulong)BtLoaderInfo.kernelEndAddr) {
            MmPT[i] = ((ulong)(i * KPAGESIZE));
            MmStackGuards[1] = ((ulong)(i * KPAGESIZE));
            continue;
        }

        MmPT[i] = ((ulong)(i * KPAGESIZE)) | MF_PRESENT | MF_READWRITE;
    }

    for (volatile ulong i = 0; i < NB_4K; i++) {
        MmPD[i] = (ulong)(&MmPT[i * 512]) | MF_PRESENT | MF_READWRITE;
    }

    for (volatile ulong i = NB_4K; i < 512 * RAM_MAX; i++) {
        // ENOMEM-like: stop once past the end of physical RAM
        if ((ulong)(i * UPAGESIZE) > (ulong)phRamSize) {
            break;
        }

        MmPD[i] = ((ulong)(i * UPAGESIZE)) | MF_PRESENT | MF_READWRITE | MF_HUGE;
    }

    for (volatile int i = 0; i < RAM_MAX; i++) {
        MmPDP[i] = (ulong)(&MmPD[i * 512]) | MF_PRESENT | MF_READWRITE;
    }

    MmPML4[0] = (ulong)(&MmPDP[0]) | MF_PRESENT | MF_READWRITE;

    MmLoadPML4((void *)MmPML4);

    DebugLog("\tPaging tables reloaded at %p, %p\n", &MmPD, &MmPT);
    DebugLog("\tStack Guards at %p, %p\n", MmStackGuards[0], MmStackGuards[1]);
}

// Returns the address of the Stack Guard page of the given rank
void *MmGetStackGuards(char rank)
{
    return (void *)MmStackGuards[(int)rank];
}

// Returns the address corresponding to the given kernel PT entry index
void *MmTranslateKPageToAddr(void *rank)
{
    return (void *)MmPT[(ulong)rank];
}
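
// Note (inferred from how MmPT is filled above, not from original
// documentation): for present pages the value returned by
// MmTranslateKPageToAddr() is the raw page table entry, so it still carries
// the low flag bits (MF_PRESENT | MF_READWRITE); callers wanting a pure
// physical address may need to mask them off, e.g. with ~(KPAGESIZE - 1).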

//
// Page fault handler
//
static void PagingHandler(ISRFrame_t *regs)
{
    ulong StackGuardOne = (ulong)MmGetStackGuards(0);

    // The faulting address lies at or above the first stack guard and no more
    // than 4 KB above RSP: report it as a kernel stack underflow
    if (regs->cr2 >= StackGuardOne && (regs->rsp + 4*KB >= regs->cr2)) {
        bprintf(BStdOut,
            "\n\n%CPANIC\n[ISR 0x8] Irrecoverable Kernel Stack Underflow\n\n"
            " Double Fault Error code : %#x (%b)\n"
            " Stack Guard bypassed : %#x",
            VGA_COLOR_LIGHT_RED,
            regs->ErrorCode,
            regs->ErrorCode,
            StackGuardOne
        );
    } else {
        bprintf(BStdOut,
            "\n\n%CPANIC\n[ISR 0x%x] Irrecoverable Kernel Page Fault at %p\n\n"
            " Error code : 0x%x (%b)",
            VGA_COLOR_LIGHT_RED,
            regs->intNo,
            regs->cr2,
            regs->ErrorCode,
            regs->ErrorCode
        );
    }

    KeBrkDumpRegisters(regs);

    BStdOut->flusher(BStdOut);
    KeHaltCPU();
}

//
// Registers the page fault handler on the page fault vector (0xE)
//
void MmActivatePageHandler(void)
{
    KeRegisterISR(PagingHandler, 0xe);
}
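
// Illustrative bring-up order only (a sketch, not taken from the kernel's
// actual init code): the functions above are meant to be used roughly as
//
//     MmInitPaging();           // build and load the identity-mapped tables
//     MmActivatePageHandler();  // install PagingHandler on vector 0xE
//
// so that kernel page faults are reported, with registers dumped, before the
// CPU is halted.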