//----------------------------------------------------------------------------//
//                                OS on Kaleid                                //
//                                                                            //
//    Desc: Paging memory related functions                                   //
//                                                                            //
//                                                                            //
//    Copyright © 2018-2020 The OS/K Team                                     //
//                                                                            //
//    This file is part of OS/K.                                              //
//                                                                            //
//    OS/K is free software: you can redistribute it and/or modify            //
//    it under the terms of the GNU General Public License as published by    //
//    the Free Software Foundation, either version 3 of the License, or       //
//    any later version.                                                      //
//                                                                            //
//    OS/K is distributed in the hope that it will be useful,                 //
//    but WITHOUT ANY WARRANTY; without even the implied warranty of          //
//    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the            //
//    GNU General Public License for more details.                            //
//                                                                            //
//    You should have received a copy of the GNU General Public License       //
//    along with OS/K. If not, see <https://www.gnu.org/licenses/>.           //
//----------------------------------------------------------------------------//

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

//-----------

static pml4_t MmPageMapLevel4[512] __attribute__((__aligned__(KPAGESIZE)));
static ulong *MmPhysicalPageTable __attribute__((__aligned__(KPAGESIZE)));

extern ulong _text;
extern ulong _text_end;
extern ulong _rodata;
extern ulong _rodata_end;
extern ulong _data;
extern ulong _data_end;

extern MemoryMap_t memoryMap;

static ulong MmStackGuards[2] = { 0 };

ulong MmVirtLastAddress = 0;
ulong MmPhysLastKernAddress = 0;

//-----------

//
// Creates our new page table structure and loads it
//
void MmInitPaging(void)
{
    pdpe_t *MmPDP = NULL;
    pde_t *MmPD = NULL;
    pte_t *MmPT = NULL;

    ulong index, xedni;

    ulong curAddrPML4;
    ulong curAddrPDP;
    ulong curAddrPD;
    ulong curAddrPT;

    ulong firstDirectoryAddr = 0;
    ulong lastDirectoryAddr = 0;

    ulong phDirSize = 0;

    DebugLog("Activating paging...\n");

    // Maximum PHYSICAL address in memory
    ulong phRamSize = memoryMap.freeRamSize + memoryMap.nonfreeRamSize;

    // Last physical address used by the kernel (end of the kernel heap)
    MmPhysLastKernAddress = (ulong)(_heap_start + _heap_max);

    // Size of the physical page table
    phDirSize = (((phRamSize + KPAGESIZE) / KPAGESIZE) * sizeof(ulong));

    // Maximum VIRTUAL address in memory
    MmVirtLastAddress = phRamSize;

    // Alloc structures
    memzero((void *)&MmPageMapLevel4[0], 512 * sizeof(ulong));
    MmPhysicalPageTable = memalign(phDirSize, KPAGESIZE);
    //DebugLog("\t\t\t\tPhysical map addr : %p\n", MmPhysicalPageTable);

    for (curAddrPML4 = 0;
         curAddrPML4 < ((ulong)512 * KPAGESIZE * 0x8000000);
         curAddrPML4 += ((ulong)KPAGESIZE * 0x8000000)) {

        // Create an entry in the PML4 every 512 GB
        // 0x8000000 = 512 ^ 3
        index = (curAddrPML4 / ((ulong)KPAGESIZE * 0x8000000)) % 512;

        if (curAddrPML4 > MmPhysLastKernAddress) {
            MmPageMapLevel4[index] = (pdpe_t *)0;
            ////DebugLog("PML4 %d\n", index);
            continue;
        }

        MmPDP = memalign(512 * sizeof(pde_t), KPAGESIZE);

        if (!firstDirectoryAddr) {
            firstDirectoryAddr = (ulong)MmPDP;
        }

        //DebugLog("\t\t\t\tPDP %d : %p\n", index, MmPDP);

        MmPageMapLevel4[index] = (pdpe_t *)((ulong)MmPDP | PRESENT | READWRITE);

        for (curAddrPDP = curAddrPML4;
             curAddrPDP < (curAddrPML4 + ((ulong)KPAGESIZE * 0x8000000));
             curAddrPDP += ((ulong)KPAGESIZE * 0x40000)) {

            // Create an entry in the PDP every 1 GB
            // 0x40000 = 512 ^ 2
            index = (curAddrPDP / ((ulong)KPAGESIZE * 0x40000)) % 512;

            if (curAddrPDP > MmPhysLastKernAddress) {
                MmPDP[index] = (pde_t *)0;
                //DebugLog("PDP %d\n", index);
                continue;
            }

            MmPD = memalign(512 * sizeof(pde_t), KPAGESIZE);

            //DebugLog("\t\t\t\tPD %d : %p\n", index, MmPD);

            MmPDP[index] = (pde_t *)((ulong)MmPD | PRESENT | READWRITE);

            for (curAddrPD = curAddrPDP;
                 curAddrPD < (curAddrPDP + ((ulong)KPAGESIZE * 0x40000));
                 curAddrPD += ((ulong)KPAGESIZE * 0x200)) {

                // Create an entry in the PD every 2 MB
                // 0x200 = 512
                index = (curAddrPD / ((ulong)KPAGESIZE * 0x200)) % 512;

                if (curAddrPD > MmPhysLastKernAddress) {
                    MmPD[index] = (pte_t *)0;
                    //DebugLog("PD %d\n", index);
                    continue;
                }

                MmPT = memalign(512 * sizeof(pte_t), KPAGESIZE);

                //DebugLog("\t\t\t\tPT %d : %p\n", index, MmPT);

                MmPD[index] = (pte_t *)((ulong)MmPT | PRESENT | READWRITE);

                for (curAddrPT = curAddrPD;
                     curAddrPT < (curAddrPD + ((ulong)KPAGESIZE * 0x200));
                     curAddrPT += (ulong)KPAGESIZE) {

                    // Create an entry in the PT for each 4 KB page
                    index = (curAddrPT / ((ulong)KPAGESIZE)) % 512;
                    xedni = (curAddrPT / ((ulong)KPAGESIZE));

                    // STACK GUARD PAGE
                    if ((ulong)curAddrPT == (ulong)BtLoaderInfo.stackEndAddr) {
                        MmPT[index] = (ulong)curAddrPT | PRESENT;
                        MmPhysicalPageTable[xedni] = (ulong)curAddrPT;
                        MmStackGuards[0] = (ulong)curAddrPT;
                        DebugLog("Stack Guard at %p\n", curAddrPT);
                    }
                    else if ((ulong)curAddrPT == (ulong)BtLoaderInfo.kernelEndAddr) {
                        MmPT[index] = (ulong)curAddrPT | PRESENT;
                        MmPhysicalPageTable[xedni] = (ulong)curAddrPT;
                        MmStackGuards[1] = (ulong)curAddrPT;
                        DebugLog("Stack Guard at %p\n", curAddrPT);
                    }
                    // SECTION .TEXT PROTECTION
                    else if ((ulong)curAddrPT >= (ulong)&_text
                             && (ulong)curAddrPT <= (ulong)&_text_end) {
                        MmPT[index] = (ulong)curAddrPT | PRESENT;
                        MmPhysicalPageTable[xedni] = (ulong)curAddrPT;
                        DebugLog("Section .text at %p\n", curAddrPT);
                    }
                    // SECTION .DATA PROTECTION
                    else if ((ulong)curAddrPT >= (ulong)&_data
                             && (ulong)curAddrPT <= (ulong)&_data_end) {
                        MmPT[index] = (ulong)curAddrPT | PRESENT | WRITETHR | READWRITE | NX;
                        MmPhysicalPageTable[xedni] = (ulong)curAddrPT;
                        DebugLog("Section .data at %p\n", curAddrPT);
                    }
                    // SECTION .RODATA PROTECTION
                    else if ((ulong)curAddrPT >= (ulong)&_rodata
                             && (ulong)curAddrPT <= (ulong)&_rodata_end) {
                        MmPT[index] = (ulong)curAddrPT | PRESENT | NX;
                        MmPhysicalPageTable[xedni] = (ulong)curAddrPT;
                        DebugLog("Section .rodata at %p\n", curAddrPT);
                    }
                    // While we're inside the kernel pages
                    else if ((ulong)curAddrPT <= MmPhysLastKernAddress) {
                        MmPT[index] = (ulong)curAddrPT | PRESENT | READWRITE;
                        MmPhysicalPageTable[xedni] = (ulong)curAddrPT;
                    }
                }
            }
        }
    }

    lastDirectoryAddr = (ulong)MmPT;

    MmLoadPML4((void *)MmPageMapLevel4);
    MmEnableWriteProtect();

    DebugLog("Page table size : %u MB\n",
            (lastDirectoryAddr - firstDirectoryAddr + phDirSize) / MB);
}
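
//
// Illustrative sketch (not part of the kernel proper): how a virtual address
// splits into the four table indices used by the structures built above and
// walked by MmGetPageDescriptorFromVirtual() below. The helper name and the
// unused attribute are ours, added purely for documentation.
//
__attribute__((__unused__))
static void MmExampleSplitVirtAddr(void *virtualAddr,
        ulong *pml4Index, ulong *pdpIndex, ulong *pdIndex, ulong *ptIndex)
{
    // Each index is 9 bits wide (512 entries per table)
    *pml4Index = ((ulong)virtualAddr >> 39) & 0x1FF;   // bits 39-47, 512 GB each
    *pdpIndex  = ((ulong)virtualAddr >> 30) & 0x1FF;   // bits 30-38, 1 GB each
    *pdIndex   = ((ulong)virtualAddr >> 21) & 0x1FF;   // bits 21-29, 2 MB each
    *ptIndex   = ((ulong)virtualAddr >> 12) & 0x1FF;   // bits 12-20, 4 KB each
}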

//
// Returns a pointer to the page descriptor (PT entry) mapping a virtual
// address, allocating the intermediate paging structures if needed
//
ulong *MmGetPageDescriptorFromVirtual(void *virtualAddr)
{
    // Bits 39 to 47 : PML4 index
    register ulong pml4Index = ((ulong)virtualAddr & 0xFF8000000000) >> 39;
    // Bits 30 to 38 : PDP index
    register ulong pdpIndex = ((ulong)virtualAddr & 0x7FC0000000) >> 30;
    // Bits 21 to 29 : PD index
    register ulong pdIndex = ((ulong)virtualAddr & 0x3FE00000) >> 21;
    // Bits 12 to 20 : PT index
    register ulong ptIndex = ((ulong)virtualAddr & 0x1FF000) >> 12;

    pdpe_t *pdp = NULL;
    pde_t *pd = NULL;
    pte_t *pt = NULL;

    //DebugLog("PML4[%d], PDP[%d], PD[%d], PT[%d]\n",
    //        pml4Index, pdpIndex, pdIndex, ptIndex);

    // Bits 12 to 51 hold the physical address of the next-level table
    if (!((ulong)MmPageMapLevel4[pml4Index] & 0xFFFFFFFFFF000)) {
        // Alloc space
        MmPageMapLevel4[pml4Index] = memalign(512*sizeof(pdpe_t), KPAGESIZE);

        // Set present
        MmPageMapLevel4[pml4Index] =
            (pml4_t)((ulong)MmPageMapLevel4[pml4Index] | PRESENT | READWRITE);

        pdp = (pdpe_t *)((ulong)MmPageMapLevel4[pml4Index] & 0xFFFFFFFFFF000);

        //DebugLog("\tCreate PDP at %p\n", MmPageMapLevel4[pml4Index]);
    } else {
        pdp = (pdpe_t *)((ulong)MmPageMapLevel4[pml4Index] & 0xFFFFFFFFFF000);
    }

    //DebugLog("\tPDP[%d] = %p\n", pdpIndex, pdp[pdpIndex]);

    // Bits 12 to 51 hold the physical address of the next-level table
    if (!((ulong)pdp[pdpIndex] & 0xFFFFFFFFFF000)) {
        pdp[pdpIndex] = memalign(512*sizeof(pde_t), KPAGESIZE);
        pdp[pdpIndex] = (pdpe_t)((ulong)pdp[pdpIndex] | PRESENT | READWRITE);

        pd = (pde_t *)((ulong)pdp[pdpIndex] & 0xFFFFFFFFFF000);

        //DebugLog("\tCreate PD at %p\n", (ulong)pdp[pdpIndex]);
    } else {
        pd = (pde_t *)((ulong)pdp[pdpIndex] & 0xFFFFFFFFFF000);
    }

    //DebugLog("\tPD[%d] = %p\n", pdIndex, pd[pdIndex]);

    // Bits 12 to 51 hold the physical address of the next-level table
    if (!((ulong)pd[pdIndex] & 0xFFFFFFFFFF000)) {
        pd[pdIndex] = memalign(512*sizeof(pte_t), KPAGESIZE);
        pd[pdIndex] = (pde_t)((ulong)pd[pdIndex] | PRESENT | READWRITE);

        pt = (pte_t *)((ulong)pd[pdIndex] & 0xFFFFFFFFFF000);

        //DebugLog("\tCreate PT at %p\n", (ulong)pd[pdIndex]);
    } else {
        pt = (pte_t *)((ulong)pd[pdIndex] & 0xFFFFFFFFFF000);
    }

    //DebugLog("\tPT[%d] = %p\n", ptIndex, pt[ptIndex]);

    MmLoadPML4((void *)MmPageMapLevel4);

    return &pt[ptIndex];
}

//
// Translates a virtual address to its physical equivalent
//
void *MmTransVirtToPhyAddr(void* virtualAddr)
{
    ulong virtAddrPage = (ulong)virtualAddr & (~(KPAGESIZE - 1));
    ulong *page = MmGetPageDescriptorFromVirtual(virtualAddr);

    if (!(page)) {
        return NULL;
    }

    return (void*)(((ulong)*page & 0xFFFFFFFFFF000)
            + ((ulong)virtualAddr - (ulong)virtAddrPage));
}

//
// Translates a physical address back to the virtual address it is mapped at
// (reverse lookup through MmPhysicalPageTable)
//
void *MmTransPhyToVirtAddr(void* physicalAddr)
{
    ulong phyAddrPage = (ulong)physicalAddr & (~((KPAGESIZE - 1) | NX));

    return (void*)(MmPhysicalPageTable[(ulong)physicalAddr / ((ulong)KPAGESIZE)]
            + ((ulong)physicalAddr - phyAddrPage));
}
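
//
// Usage sketch (illustrative only): round trip through the two translation
// helpers above. Assumes the address was mapped either by MmInitPaging() or
// by MmMapPage(); the helper name is ours and is never called by the kernel.
//
__attribute__((__unused__))
static void MmExampleTranslateRoundTrip(void *virtualAddr)
{
    // Virtual -> physical : walks the page tables
    void *phys = MmTransVirtToPhyAddr(virtualAddr);

    if (phys) {
        // Physical -> virtual : looks up MmPhysicalPageTable; for pages
        // identity-mapped by MmInitPaging() this yields the same address
        void *virt = MmTransPhyToVirtAddr(phys);
        (void)virt;
    }
}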

//
// Add flags to a page
//
void MmSetPage(void* virtualAddr, ulong flags)
{
    ulong *page = MmGetPageDescriptorFromVirtual(virtualAddr);

    *page |= flags;

    KeFlushTlbSingle((ulong)virtualAddr);
}

//
// Remove flags of a page
//
void MmUnsetPage(void* virtualAddr, ulong flags)
{
    ulong *page = MmGetPageDescriptorFromVirtual(virtualAddr);

    *page &= ~flags;

    KeFlushTlbSingle((ulong)virtualAddr);
}

//
// Map a page in memory
//
void MmMapPage(void* virtualAddr, void* physicalAddr, ulong flags)
{
    //DebugLog("Request %p:%p with %lu\n", virtualAddr, physicalAddr, flags);
    ulong *page = MmGetPageDescriptorFromVirtual(virtualAddr);

    *page = (ulong)physicalAddr | flags;
    MmPhysicalPageTable[(ulong)physicalAddr / ((ulong)KPAGESIZE)] = (ulong)virtualAddr;

    KeFlushTlbSingle((ulong)virtualAddr);
    //DebugLog("Done %p at page %p\n", *page, page);

    if ((ulong)virtualAddr > MmVirtLastAddress)
        MmVirtLastAddress = (ulong)virtualAddr + KPAGESIZE;
}

//
// Unmap a page in memory
//
void MmUnmapPage(void* virtualAddr)
{
    ulong *page = MmGetPageDescriptorFromVirtual(virtualAddr);
    //DebugLog("Request %p\n", virtualAddr);

    MmPhysicalPageTable[(ulong)MmTransVirtToPhyAddr(virtualAddr) / ((ulong)KPAGESIZE)] = (ulong)0;
    *page = (ulong)0;
    //DebugLog("Done %p at page %p\n", *page, page);

    if ((ulong)virtualAddr == MmVirtLastAddress)
        MmVirtLastAddress = (ulong)virtualAddr - KPAGESIZE;

    KeFlushTlbSingle((ulong)virtualAddr);
}
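
//
// Usage sketch (illustrative only): mapping a physical frame at a virtual
// address, then tearing the mapping down. The flag combination is only an
// example; the helper name is ours and is never called by the kernel.
//
__attribute__((__unused__))
static void MmExampleMapUnmap(void *virtualAddr, void *physicalAddr)
{
    // Install the translation with kernel read/write access
    MmMapPage(virtualAddr, physicalAddr, PRESENT | READWRITE);

    // ... the frame is now reachable through virtualAddr ...

    // Clear the descriptor and invalidate the TLB entry
    MmUnmapPage(virtualAddr);
}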

//-----------

//
// Returns the address of the Stack Guard of the given rank
//
void *MmGetStackGuards(char rank)
{
    return (void *)MmStackGuards[(int)rank];
}

//
// Page fault handler
//
static void PagingHandler(ISRFrame_t *regs)
{
    ulong StackGuardOne = (ulong)MmGetStackGuards(0);
    ulong StackGuardTwo = (ulong)MmGetStackGuards(1);

    if ((regs->cr2 >= StackGuardOne)
            && (regs->cr2 <= StackGuardOne + KPAGESIZE)
            && (regs->rsp <= regs->cr2)) {
        bprintf(BStdOut,
            "\n\n%CPANIC\n[ISR 0xE] Irrecoverable Kernel Stack Underflow\n\n"
            " Page Fault Error code : %#x (%b)\n"
            " Stack Guard bypassed : %#x",
            VGA_COLOR_LIGHT_RED,
            regs->ErrorCode,
            regs->ErrorCode,
            StackGuardOne
        );
    } else if ((regs->cr2 >= StackGuardTwo)
            && (regs->cr2 <= StackGuardTwo + KPAGESIZE)
            && (regs->rsp >= regs->cr2)) {
        bprintf(BStdOut,
            "\n\n%CPANIC\n[ISR 0xE] Irrecoverable Kernel Stack Overflow\n\n"
            " Page Fault Error code : %#x (%b)\n"
            " Stack Guard bypassed : %#x",
            VGA_COLOR_LIGHT_RED,
            regs->ErrorCode,
            regs->ErrorCode,
            StackGuardTwo
        );
    } else if (regs->cr2 == 0) {
        bprintf(BStdOut,
            "\n\n%CPANIC\n[ISR 0xE] Null vector exception !\n\n"
            " Page Fault Error code : %#x (%b)\n",
            VGA_COLOR_LIGHT_RED,
            regs->ErrorCode,
            regs->ErrorCode
        );
    } else if (regs->cr2 >= MmVirtLastAddress || regs->cr2 <= 0) {
        bprintf(BStdOut,
            "\n\n%CPANIC\n[ISR 0xE] Out of bound of the address space at %p !\n\n"
            " End of the address space : %p\n"
            " Page Fault Error code : %#x (%b)\n",
            VGA_COLOR_LIGHT_RED,
            regs->cr2,
            MmVirtLastAddress,
            regs->ErrorCode,
            regs->ErrorCode
        );
    } else {
        //XXX page fault
        bprintf(BStdOut,
            "\n\n%CPANIC\n[ISR 0xE] Irrecoverable Page Fault at %p\n\n"
            " Error code : 0x%x (%b)",
            VGA_COLOR_LIGHT_RED,
            regs->cr2,
            regs->ErrorCode,
            regs->ErrorCode
        );
    }

    bprintf(BStdOut, "\n Description : ");

    if (regs->ErrorCode & PRESENT) {
        bprintf(BStdOut, "Page-protection violation ");
    } else {
        bprintf(BStdOut, "Non present page ");
    }

    if (regs->ErrorCode & READWRITE) {
        bprintf(BStdOut, "during write access ");
    } else {
        bprintf(BStdOut, "during read access ");
    }

    if (regs->ErrorCode & (1 << 2))
        bprintf(BStdOut, "from userspace ");

    if (regs->ErrorCode & (1 << 4))
        bprintf(BStdOut, "after instruction fetching ");

    KeBrkDumpRegisters(regs);

    BStdOut->flusher(BStdOut);

    KeHaltCPU();
}

void MmActivatePageHandler(void)
{
    KeRegisterISR(PagingHandler, 0xe);
    DebugLog("Page handler activated\n");
}
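
//
// Illustrative sketch (never called by the kernel): decoding the page fault
// error code bits tested by PagingHandler() above, following the x86-64
// convention (bit 0: present, bit 1: write, bit 2: user, bit 4: instruction
// fetch). The helper name is ours, added purely for documentation.
//
__attribute__((__unused__))
static void MmExampleDecodePageFaultError(ulong errorCode)
{
    int protectionViolation = (errorCode & PRESENT)   != 0;  // else non-present page
    int writeAccess         = (errorCode & READWRITE) != 0;  // else read access
    int fromUserspace       = (errorCode & (1 << 2))  != 0;
    int instructionFetch    = (errorCode & (1 << 4))  != 0;

    (void)protectionViolation;
    (void)writeAccess;
    (void)fromUserspace;
    (void)instructionFetch;
}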