2019-05-14 14:39:35 +02:00
|
|
|
#include <kernel.h>
|
2019-05-15 15:55:57 +02:00
|
|
|
#include <init/boot.h>
|
2019-05-15 19:11:47 +02:00
|
|
|
#include <ex/malloc.h>
|
2020-01-09 18:19:49 +01:00
|
|
|
#include <mm/heap.h>
|
2019-05-15 19:11:47 +02:00
|
|
|
#include <mm/mm.h>
|
2019-05-18 13:39:58 +02:00
|
|
|
#include <ke/idt.h>
|
2019-05-19 00:07:01 +02:00
|
|
|
#include <lib/buf.h>
|
|
|
|
#include <io/vga.h>
|
|
|
|
|
2020-01-09 20:58:57 +01:00
|
|
|
#define USERSPACE 0x80000000
|
2019-05-15 02:26:55 +02:00
|
|
|
|
2019-05-14 14:39:35 +02:00
|
|
|
//-----------
|
|
|
|
|
2020-01-10 13:36:33 +01:00
|
|
|
// Kernel top-level page table (PML4). Must be page-aligned because its
// physical address is loaded into CR3 by MmLoadPML4().
static pml4_t MmPageMapLevel4[512] __attribute__((__aligned__(KPAGESIZE)));

// Reverse map: physical page -> virtual address, used by MmTransPhyToVirtAddr().
// NOTE(review): its allocation in MmInitPaging() is commented out, and the
// code that would fill it is commented out too — confirm before relying on it.
static ulong *MmPhysicalPageTable;

// Linker-script symbols delimiting the kernel image sections.
// Only their *addresses* (&_text, &_text_end, ...) are meaningful.
extern ulong _text;
extern ulong _text_end;
extern ulong _rodata;
extern ulong _rodata_end;
extern ulong _data;
extern ulong _data_end;

// Virtual addresses of the two stack guard pages (filled by MmInitPaging):
// [0] guards against stack underflow, [1] against overflow.
ulong MmStackGuards[2] = { 0 };

// Highest mapped virtual address (physical RAM size + kernel/user gap),
// computed by MmInitPaging().
ulong MmVirtLastAddress = 0;
|
|
|
|
|
|
|
|
// x86-64 page table entry flag bits (Intel SDM Vol. 3A, §4.5).
// NOTE(review): NX = 1UL << 63 exceeds the range of int; C only guarantees
// int-sized enumerators, so this relies on a compiler extension — confirm
// it keeps its 64-bit value wherever these flags are OR'ed into entries.
enum
{
    PRESENT = 1 << 0,    // Entry maps a present page
    READWRITE = 1 << 1,  // Writable (otherwise read-only)
    USERMODE = 1 << 2,   // Accessible from user mode (CPL 3)
    WRITETHR = 1 << 3,   // Write-through caching
    CACHEDIS = 1 << 4,   // Cache disabled
    ACCESSED = 1 << 5,   // Set by CPU on access
    DIRTY = 1 << 6,      // Set by CPU on write
    HUGE = 1 << 7,       // Large page (2 MB / 1 GB) at PD/PDP level
    NX = 1UL << 63       // No-execute (requires EFER.NXE)
};
|
2019-05-15 15:55:57 +02:00
|
|
|
|
2020-01-09 22:34:38 +01:00
|
|
|
//-----------
|
|
|
|
|
2019-05-15 15:55:57 +02:00
|
|
|
//
|
|
|
|
// Creates our new page table structure and loads it
|
2019-05-18 13:39:58 +02:00
|
|
|
//
|
2019-05-14 14:39:35 +02:00
|
|
|
void MmInitPaging(void)
{
    // Boot-time memory map filled by the loader (defined elsewhere).
    extern MemoryMap_t memoryMap;

    // Cursors for the directory currently being built at each level.
    pdpe_t *MmPDP = NULL;
    pde_t *MmPD = NULL;
    pte_t *MmPT = NULL;

    ulong index;
    ulong lastKernelAddr = (ulong)(_heap_start + _heap_max);
    ulong firstDirectoryAddr = 0;
    ulong lastDirectoryAddr = 0;
    ulong phDirSize = 0;

    // Maximum PHYSICAL address in memory
    ulong phRamSize = memoryMap.freeRamSize + memoryMap.nonfreeRamSize;

    // Difference between the end of kernel and the begin of userspace
    ulong diffKernUsr = (ulong)USERSPACE - lastKernelAddr - KPAGESIZE;

    // Maximum VIRTUAL address in memory
    MmVirtLastAddress = phRamSize + diffKernUsr;

    //DebugLog("\tPaging gap : %u MB (%p)\n\tLast virtual address %p\n", diffKernUsr / MB, diffKernUsr, MmVirtLastAddress);

    memzero((void *)&MmPageMapLevel4[0], sizeof(MmPageMapLevel4));

    // Size of the (currently unused) physical->virtual reverse map:
    // one ulong per physical page.
    phDirSize = (phRamSize / KPAGESIZE)*sizeof(ulong);

    //MmPhysicalPageTable = (ulong*)malloc((phRamSize / KPAGESIZE)*sizeof(ulong));

    DebugLog("\t\tRam %u MB, pagesize %u KB, size %u MB\n", phRamSize / MB, KPAGESIZE / KB, phDirSize / MB);

    // Identity/offset-map the whole virtual address space, building the
    // four paging levels top-down.
    // NOTE(review): directories below are obtained with malloc(); hardware
    // requires them to be 4 KB-aligned — confirm the kernel allocator
    // guarantees that alignment.
    for (ulong curAddrPML4 = 0;
         curAddrPML4 < MmVirtLastAddress;
         curAddrPML4 += ((ulong)KPAGESIZE * 0x8000000)) {
        // Create an entry in PML4 each 512GB
        // 0x8000000 = 512 ^ 3

        MmPDP = (pdpe_t *)malloc(512*sizeof(pde_t));

        // Remember the first directory allocated, to report the total
        // page-table footprint at the end.
        if (!firstDirectoryAddr) {
            firstDirectoryAddr = (ulong)MmPDP;
        }

        index = (curAddrPML4 / ((ulong)KPAGESIZE * 0x8000000)) % 512;

        //DebugLog("\t\t\t\tPDP %d : %p\n", index, MmPDP);

        MmPageMapLevel4[index] = (pdpe_t *)((ulong)MmPDP | PRESENT | READWRITE);

        for (ulong curAddrPDP = curAddrPML4;
             curAddrPDP < (curAddrPML4 + ((ulong)KPAGESIZE * 0x8000000)) &&
             curAddrPDP < MmVirtLastAddress;
             curAddrPDP += ((ulong)KPAGESIZE * 0x40000)) {
            // Create an entry in PDP each 1GB
            // 0x40000 = 512 ^ 2

            MmPD = (pde_t *)malloc(512*sizeof(pde_t));

            index = (curAddrPDP / ((ulong)KPAGESIZE * 0x40000)) % 512;

            //DebugLog("\t\t\t\tPD %d : %p\n", index, MmPD);

            MmPDP[index] = (pde_t *)((ulong)MmPD | PRESENT | READWRITE);

            for (ulong curAddrPD = curAddrPDP;
                 curAddrPD < (curAddrPDP + ((ulong)KPAGESIZE * 0x40000)) &&
                 curAddrPD < MmVirtLastAddress;
                 curAddrPD += ((ulong)KPAGESIZE * 0x200)) {
                // Create an entry in PD each 2MB
                // 0x200 = 512

                MmPT = (pte_t *)malloc(512*sizeof(pte_t));

                index = (curAddrPD / ((ulong)KPAGESIZE * 0x200)) % 512;

                //DebugLog("\t\t\t\tPT %d : %p\n", index, MmPT);

                MmPD[index] = (pte_t *)((ulong)MmPT | PRESENT | READWRITE);

                for (ulong curAddrPT = curAddrPD;
                     curAddrPT < (curAddrPD + ((ulong)KPAGESIZE * 0x200)) &&
                     curAddrPT < MmVirtLastAddress;
                     curAddrPT += (ulong)KPAGESIZE) {
                    // Create an entry in PT each page of 4KB

                    index = (curAddrPT / ((ulong)KPAGESIZE)) % 512;

                    //DebugLog("\t\t\t\tPage %d : %p\n", index, curAddrPT);

                    // STACK GUARD PAGE: mapped read-only so any write
                    // (stack underflow) faults into PagingHandler.
                    if ((ulong)curAddrPT == (ulong)BtLoaderInfo.stackEndAddr) {
                        MmPT[index] = (ulong)curAddrPT | PRESENT;
                        MmStackGuards[0] = (ulong)curAddrPT;
                        //DebugLog("\tStack Guard at %p\n", curAddrPT);
                    }
                    // Second guard, read-only as well (stack overflow side).
                    else if ((ulong)curAddrPT == (ulong)BtLoaderInfo.kernelEndAddr) {
                        MmPT[index] = (ulong)curAddrPT | PRESENT;
                        MmStackGuards[1] = (ulong)curAddrPT;
                        //DebugLog("\tStack Guard at %p\n", curAddrPT);
                    }
                    // SECTION .TEXT PROTECTION: read-only, executable.
                    else if ((ulong)curAddrPT >= (ulong)&_text && (ulong)curAddrPT <= (ulong)&_text_end) {
                        MmPT[index] = (ulong)curAddrPT | PRESENT;
                        //DebugLog("\tSection .text at %p\n", curAddrPT);
                    }
                    // SECTION .DATA PROTECTION: writable, never executable.
                    else if ((ulong)curAddrPT >= (ulong)&_data && (ulong)curAddrPT <= (ulong)&_data_end) {
                        MmPT[index] = (ulong)curAddrPT | PRESENT | WRITETHR | READWRITE | NX;
                        //DebugLog("\tSection .data at %p\n", curAddrPT);
                    }
                    // SECTION .RODATA PROTECTION: read-only, never executable.
                    else if ((ulong)curAddrPT >= (ulong)&_rodata && (ulong)curAddrPT <= (ulong)&_rodata_end) {
                        MmPT[index] = (ulong)curAddrPT | PRESENT | WRITETHR | NX;
                        //DebugLog("\tSection .rodata at %p\n", curAddrPT);
                    }
                    // While we're inside the kernel pages: identity map,
                    // writable.
                    else if ((ulong)curAddrPT <= lastKernelAddr) {
                        MmPT[index] = (ulong)curAddrPT | PRESENT | READWRITE;

                        if ((ulong)curAddrPT == lastKernelAddr) {
                            //DebugLog("\tLast page of kernel at %p\n", curAddrPT);
                        }
                    }
                    // While we're inside the userspace pages: offset-map
                    // virtual USERSPACE+x onto the physical pages right
                    // after the kernel.
                    else if ((ulong)curAddrPT >= USERSPACE) {
                        MmPT[index] = ((ulong)curAddrPT - diffKernUsr) | PRESENT; // Not present for instance

                        //MmPhysicalPageTable[(ulong)curAddrPT - diffKernUsr] = curAddrPT;

                        if ((ulong)curAddrPT == USERSPACE) {
                            DebugLog("\tUserspace at %p:%p\n", curAddrPT, curAddrPT - diffKernUsr);
                        }
                    }
                    // Addresses in the kernel/userspace gap stay unmapped.
                    else {
                        MmPT[index] = 0;
                    }

                    //KeFlushTlbSingle(curAddrPT);
                }
            }
        }
    }

    lastDirectoryAddr = (ulong)MmPT;

    // Load CR3 with the new PML4 and enable CR0.WP so the kernel honors
    // read-only pages (stack guards, .text, .rodata).
    MmLoadPML4((void *)MmPageMapLevel4);

    MmEnableWriteProtect();

    DebugLog("\tPage table size : %u MB\n", (lastDirectoryAddr - firstDirectoryAddr + phDirSize)/MB);
}
|
|
|
|
|
2020-01-09 20:58:57 +01:00
|
|
|
//
|
2020-01-09 22:34:38 +01:00
|
|
|
// Get a page from an address
|
2020-01-09 20:58:57 +01:00
|
|
|
//
|
2020-01-09 23:01:00 +01:00
|
|
|
static pte_t *MmGetPageDescriptorFromVirtual(void *virtualAddr)
|
2020-01-09 20:58:57 +01:00
|
|
|
{
|
|
|
|
ulong virtAddrPage = (ulong)virtualAddr & ( ~(KPAGESIZE - 1));
|
|
|
|
|
|
|
|
if (virtAddrPage > MmVirtLastAddress) {
|
2020-01-09 22:34:38 +01:00
|
|
|
KeStartPanic("MmSetPage() Out of bound of the address space !");
|
2020-01-09 20:58:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
pdpe_t *pdp = (pdpe_t*)((ulong)MmPageMapLevel4[(virtAddrPage / ((ulong)KPAGESIZE * 0x8000000)) % 512] & ~(KPAGESIZE - 1));
|
2020-01-09 22:34:38 +01:00
|
|
|
//DebugLog("pdp\t: %p\n", pdp);
|
2020-01-09 20:58:57 +01:00
|
|
|
pde_t *pd = (pde_t*)( (ulong)pdp[(virtAddrPage / ((ulong)KPAGESIZE * 0x40000)) % 512] & ~(KPAGESIZE - 1));
|
2020-01-09 22:34:38 +01:00
|
|
|
//DebugLog("pd\t: %p\n", pd);
|
2020-01-09 20:58:57 +01:00
|
|
|
pte_t *pt = (pte_t*)( (ulong)pd[(virtAddrPage / ((ulong)KPAGESIZE * 0x200)) % 512] & ~(KPAGESIZE - 1));
|
2020-01-09 22:34:38 +01:00
|
|
|
//DebugLog("pt\t: %p\n", pt);
|
2020-01-09 20:58:57 +01:00
|
|
|
|
2020-01-09 23:01:00 +01:00
|
|
|
pte_t *page = &pt[(virtAddrPage / ((ulong)KPAGESIZE)) % 512];
|
2020-01-09 22:34:38 +01:00
|
|
|
//DebugLog("page (with flags): %p\n", page);
|
|
|
|
|
|
|
|
return page;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
//
|
|
|
|
// Translates a virtual address to its physical equivalent
|
|
|
|
//
|
|
|
|
void *MmTransVirtToPhyAddr(void* virtualAddr)
|
|
|
|
{
|
|
|
|
ulong virtAddrPage = (ulong)virtualAddr & ( ~(KPAGESIZE - 1));
|
2020-01-09 23:01:00 +01:00
|
|
|
pte_t *page = MmGetPageDescriptorFromVirtual(virtualAddr);
|
2020-01-09 20:58:57 +01:00
|
|
|
|
2020-01-09 23:01:00 +01:00
|
|
|
if (*page == (*page & ~(KPAGESIZE - 1))) {
|
2020-01-09 20:58:57 +01:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2020-01-09 23:01:00 +01:00
|
|
|
return (void*)((*page & ~(KPAGESIZE - 1))+ ((ulong)virtualAddr - (ulong)virtAddrPage));
|
2020-01-09 20:58:57 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Translates a physical address back to its virtual mapping via the
// reverse-map table.
// NOTE(review): MmPhysicalPageTable is indexed here by the raw physical
// address, but it is sized (phDirSize) as one entry per *page* — and its
// allocation in MmInitPaging() is commented out, so this dereferences a
// NULL table. Confirm the intended indexing (addr vs addr / KPAGESIZE)
// and restore the allocation before using this function.
void *MmTransPhyToVirtAddr(void* physicalAddr)
{
    return (void*)MmPhysicalPageTable[(ulong)physicalAddr];
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Add flags to a page
|
|
|
|
//
|
|
|
|
//
// Add flags to a page
//
void MmSetPage(void* virtualAddr, ulong flags)
{
    pte_t *page = MmGetPageDescriptorFromVirtual(virtualAddr);

    *page |= flags;

    // BUGFIX: invlpg must be given the *virtual address* being remapped,
    // not the PTE content (physical frame | flags) — flushing *page left
    // the stale translation for virtualAddr in the TLB.
    KeFlushTlbSingle((ulong)virtualAddr);
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Remove flags of a page
|
|
|
|
//
|
|
|
|
//
// Remove flags of a page
//
void MmUnsetPage(void* virtualAddr, ulong flags)
{
    pte_t *page = MmGetPageDescriptorFromVirtual(virtualAddr);

    *page &= (~flags);

    // BUGFIX: flush the virtual address whose mapping changed; the old
    // code passed the PTE content, invalidating an unrelated address.
    KeFlushTlbSingle((ulong)virtualAddr);
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Map a page in memory
|
|
|
|
//
|
|
|
|
//
// Map a page in memory
//
void MmMapPage(void* virtualAddr, void* physicalAddr, ulong flags)
{
    pte_t *page = MmGetPageDescriptorFromVirtual(virtualAddr);

    // Install the (page-aligned) physical frame with the requested flags.
    *page = ((ulong)physicalAddr & ~(KPAGESIZE - 1)) | flags;

    // BUGFIX: flush the mapping's virtual address, not the new PTE value,
    // so the CPU cannot keep using the previous translation.
    KeFlushTlbSingle((ulong)virtualAddr);
}
|
2020-01-09 20:58:57 +01:00
|
|
|
|
2020-01-09 22:34:38 +01:00
|
|
|
//
|
|
|
|
// Unmap a page in memory
|
|
|
|
//
|
|
|
|
void MmUnmapPage(void* virtualAddr)
|
|
|
|
{
|
2020-01-09 23:01:00 +01:00
|
|
|
pte_t *page = MmGetPageDescriptorFromVirtual(virtualAddr);
|
2020-01-09 20:58:57 +01:00
|
|
|
|
2020-01-09 23:01:00 +01:00
|
|
|
*page = 0;
|
2020-01-09 22:42:41 +01:00
|
|
|
|
2020-01-09 23:01:00 +01:00
|
|
|
KeFlushTlbSingle(*page);
|
2020-01-09 20:58:57 +01:00
|
|
|
}
|
|
|
|
|
2020-01-10 13:36:33 +01:00
|
|
|
//
|
|
|
|
// Kernel Page allocator
|
|
|
|
//
|
|
|
|
// WIP stub: fetches the descriptor of the starting page, but the
// allocation loop is not implemented yet — always returns NULL.
void *MmKAllocPageBlock(void *start) {
    pte_t *startPage = MmGetPageDescriptorFromVirtual(start);

    //for (ulong curPage = 0; curPage < )

    return NULL;
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// User page allocator
|
|
|
|
//
|
|
|
|
// WIP stub: same skeleton as MmKAllocPageBlock, intended for userspace
// pages; the allocation loop is not implemented — always returns NULL.
void *MmUAllocPageBlock(void *start) {
    pte_t *startPage = MmGetPageDescriptorFromVirtual(start);

    //for (ulong curPage = 0; curPage < )

    return NULL;
}
|
|
|
|
|
2020-01-09 22:34:38 +01:00
|
|
|
//-----------
|
|
|
|
|
2020-01-09 20:58:57 +01:00
|
|
|
//
|
2019-05-22 08:11:50 +02:00
|
|
|
// Returns the rank of the Stack Guards
|
2020-01-09 20:58:57 +01:00
|
|
|
//
|
2019-05-22 08:11:50 +02:00
|
|
|
void *MmGetStackGuards(char rank)
|
|
|
|
{
|
|
|
|
return (void *)MmStackGuards[(int)rank];
|
|
|
|
}
|
|
|
|
|
2019-05-18 13:39:58 +02:00
|
|
|
//
|
|
|
|
// Page fault handler
|
|
|
|
//
|
|
|
|
//
// Page fault handler (interrupt vector 0xE). Classifies the faulting
// address (CR2) — stack guard hit, null dereference, out of address
// space, or generic page fault — prints a panic report, dumps the
// registers and halts the CPU. Never returns.
//
static void PagingHandler(ISRFrame_t *regs)
{
    ulong StackGuardOne = (ulong)MmGetStackGuards(0);
    ulong StackGuardTwo = (ulong)MmGetStackGuards(1);

    if ((regs->cr2 >= StackGuardOne) && (regs->cr2 <= StackGuardOne + KPAGESIZE) && (regs->rsp <= regs->cr2)) {
        // Fault inside the first guard page with RSP below it: underflow.
        // BUGFIX: banners said "[ISR 0x8]" (double fault); this handler is
        // registered on vector 0xE (page fault).
        bprintf(BStdOut,
                "\n\n%CPANIC\n[ISR 0xE] Irrecoverable Kernel Stack Underflow\n\n"
                " Page Fault Error code : %#x (%b)\n"
                " Stack Guard bypassed : %#x",

                VGA_COLOR_LIGHT_RED,
                regs->ErrorCode,
                regs->ErrorCode,
                StackGuardOne
        );
    } else if ((regs->cr2 >= StackGuardTwo) && (regs->cr2 <= StackGuardTwo + KPAGESIZE) && (regs->rsp >= regs->cr2)) {
        // Fault inside the second guard page with RSP above it: overflow.
        bprintf(BStdOut,
                "\n\n%CPANIC\n[ISR 0xE] Irrecoverable Kernel Stack Overflow\n\n"
                " Page Fault Error code : %#x (%b)\n"
                " Stack Guard bypassed : %#x",

                VGA_COLOR_LIGHT_RED,
                regs->ErrorCode,
                regs->ErrorCode,
                StackGuardTwo
        );
    } else if (regs->cr2 == 0) {
        // BUGFIX: this bprintf passed four varargs (including regs->intNo)
        // for only three format specifiers (%C, %#x, %b), shifting every
        // printed value; the spurious argument is removed.
        bprintf(BStdOut,
                "\n\n%CPANIC\n[ISR 0xE] Null vector exception !\n\n"
                " Page Fault Error code : %#x (%b)\n",

                VGA_COLOR_LIGHT_RED,
                regs->ErrorCode,
                regs->ErrorCode
        );
    } else if (regs->cr2 >= MmVirtLastAddress || regs->cr2 <= 0) {
        bprintf(BStdOut,
                "\n\n%CPANIC\n[ISR 0xE] Out of bound of the address space at %p !\n\n"
                " End of the address space : %p\n"
                " Page Fault Error code : %#x (%b)\n",

                VGA_COLOR_LIGHT_RED,
                regs->cr2,
                MmVirtLastAddress,
                regs->ErrorCode,
                regs->ErrorCode
        );
    } else {
        //XXX page fault
        bprintf(BStdOut, "\n\n%CPANIC\n[ISR 0xE] Irrecoverable Page Fault at %p\n\n"
                " Error code : 0x%x (%b)",

                VGA_COLOR_LIGHT_RED,
                regs->cr2,
                regs->ErrorCode,
                regs->ErrorCode
        );
    }

    // Decode the page-fault error code bits (Intel SDM Vol. 3A, §4.7).
    bprintf(BStdOut, "\n Description : ");

    if (regs->ErrorCode & PRESENT) {
        bprintf(BStdOut, "Page-protection violation ");
    } else {
        bprintf(BStdOut, "Non present page ");
    }

    if (regs->ErrorCode & READWRITE) {
        bprintf(BStdOut, "during write access ");
    } else {
        bprintf(BStdOut, "during read access ");
    }

    // BUGFIX: the U/S bit of the page-fault error code is bit 2, not
    // bit 3 (bit 3 is the reserved-bit violation flag).
    if (regs->ErrorCode & (1 << 2))
        bprintf(BStdOut, "from userspace ");

    // Bit 4: fault caused by an instruction fetch (NX).
    if (regs->ErrorCode & (1 << 4))
        bprintf(BStdOut, "after instruction fetching ");

    KeBrkDumpRegisters(regs);

    BStdOut->flusher(BStdOut);

    KeHaltCPU();
}
|
|
|
|
|
|
|
|
void MmActivatePageHandler(void)
|
|
|
|
{
|
|
|
|
KeRegisterISR(PagingHandler, 0xe);
|
2019-11-16 22:41:46 +01:00
|
|
|
DebugLog("\tPaging activated\n");
|
2019-05-15 02:26:55 +02:00
|
|
|
}
|