Bug with MmGetPageDescriptorFromVirtual()

Adrien Bourmault 2020-01-19 01:44:29 +01:00
parent 77f4895d48
commit 9e033fa441
3 changed files with 80 additions and 55 deletions

View File

@@ -31,7 +31,7 @@ global newStackEnd
global GDT64
[section .text]
KERNEL_STACK equ 16 * 1024 ; 16KB of stack
KERNEL_STACK equ 64 * 1024 ; 64KB of stack
newKernelEnd dq 0x0
newStackEnd dq 0x0

View File

@@ -78,13 +78,12 @@ void MmInitPaging(void)
// Difference between the end of kernel and the begin of userspace
MmPhysLastKernAddress = (ulong)(_heap_start + _heap_max);
ulong diffKernUsr = (ulong)USERSPACE - MmPhysLastKernAddress - KPAGESIZE;
// Size of physical table
phDirSize = (((phRamSize + KPAGESIZE) / KPAGESIZE)*sizeof(ulong));
// Maximum VIRTUAL address in memory
MmVirtLastAddress = phRamSize + diffKernUsr;
MmVirtLastAddress = phRamSize;
// Alloc structures
memzero((void *)&MmPageMapLevel4[0], 512*sizeof(ulong));
@@ -100,13 +99,13 @@ void MmInitPaging(void)
index = (curAddrPML4 / ((ulong)KPAGESIZE * 0x8000000)) % 512;
if (curAddrPML4 > phRamSize) {
if (curAddrPML4 > MmPhysLastKernAddress) {
MmPageMapLevel4[index] = (pdpe_t *)0;
//DebugLog("PML4 %d\n", index);
////DebugLog("PML4 %d\n", index);
continue;
}
MmPDP = (pdpe_t *)malloc(512*sizeof(pde_t));
KalAllocMemoryEx((void**)&MmPDP, 512*sizeof(pde_t), M_ZEROED, KPAGESIZE);
if (!firstDirectoryAddr) {
firstDirectoryAddr = (ulong)MmPDP;
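
For readers of these loops: the divisors in the index computations are the x86-64 four-level paging strides. With KPAGESIZE equal to 4096, KPAGESIZE * 0x8000000 is 2^39 (one PML4 entry covers 512 GiB), KPAGESIZE * 0x40000 is 2^30 (one PDP entry, 1 GiB), KPAGESIZE * 0x200 is 2^21 (one PD entry, 2 MiB), and KPAGESIZE itself is 2^12. A minimal sketch of the same index math written as shifts; the helper names are illustrative and not part of the project:

    #include <stdint.h>

    #define KPAGESIZE 4096UL  /* assumption: 4 KiB pages, consistent with the divisors used in the diff */

    /* Each paging level selects 9 bits of the virtual address (512 entries per table). */
    static inline unsigned Pml4Index(uint64_t va) { return (va >> 39) & 0x1FF; } /* (va / (KPAGESIZE * 0x8000000)) % 512 */
    static inline unsigned PdpIndex (uint64_t va) { return (va >> 30) & 0x1FF; } /* (va / (KPAGESIZE * 0x40000))   % 512 */
    static inline unsigned PdIndex  (uint64_t va) { return (va >> 21) & 0x1FF; } /* (va / (KPAGESIZE * 0x200))     % 512 */
    static inline unsigned PtIndex  (uint64_t va) { return (va >> 12) & 0x1FF; } /* (va / KPAGESIZE)               % 512 */
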
@@ -123,13 +122,13 @@ void MmInitPaging(void)
index = (curAddrPDP / ((ulong)KPAGESIZE * 0x40000)) % 512;
if (curAddrPDP > phRamSize) {
if (curAddrPDP > MmPhysLastKernAddress) {
MmPDP[index] = (pde_t *)0;
//DebugLog("PDP %d\n", index);
continue;
}
MmPD = (pde_t *)malloc(512*sizeof(pde_t));
KalAllocMemoryEx((void**)&MmPD, 512*sizeof(pde_t), M_ZEROED, KPAGESIZE);
index = (curAddrPDP / ((ulong)KPAGESIZE * 0x40000)) % 512;
@@ -144,16 +143,13 @@ void MmInitPaging(void)
index = (curAddrPD / ((ulong)KPAGESIZE * 0x200)) % 512;
if (curAddrPD > phRamSize) {
if (curAddrPD > MmPhysLastKernAddress) {
MmPD[index] = (pte_t *)0;
//DebugLog("PD %d\n", index);
continue;
}
if (index == 0x447c0ffe4dbf9e55)
KeStartPanic("ERROR");
MmPT = (pte_t *)malloc(512*sizeof(pte_t));
KalAllocMemoryEx((void**)&MmPT, 512*sizeof(pte_t), M_ZEROED, KPAGESIZE);
//DebugLog("\t\t\t\tPT %d : %p\n", index, MmPT);
MmPD[index] = (pte_t *)((ulong)MmPT | PRESENT | READWRITE);
@@ -171,31 +167,31 @@ void MmInitPaging(void)
MmPT[index] = (ulong)curAddrPT | PRESENT;
MmPhysicalPageTable[xedni] = (ulong)curAddrPT;
MmStackGuards[0] = (ulong)curAddrPT;
DebugLog("\tStack Guard at %p\n", curAddrPT);
//DebugLog("\tStack Guard at %p\n", curAddrPT);
}
else if ((ulong)curAddrPT == (ulong)BtLoaderInfo.kernelEndAddr) {
MmPT[index] = (ulong)curAddrPT | PRESENT;
MmPhysicalPageTable[xedni] = (ulong)curAddrPT;
MmStackGuards[1] = (ulong)curAddrPT;
DebugLog("\tStack Guard at %p\n", curAddrPT);
//DebugLog("\tStack Guard at %p\n", curAddrPT);
}
// SECTION .TEXT PROTECTION
else if ((ulong)curAddrPT >= (ulong)&_text && (ulong)curAddrPT <= (ulong)&_text_end) {
MmPT[index] = (ulong)curAddrPT | PRESENT;
MmPhysicalPageTable[xedni] = (ulong)curAddrPT;
DebugLog("\tSection .text at %p\n", curAddrPT);
//DebugLog("\tSection .text at %p\n", curAddrPT);
}
// SECTION .DATA PROTECTION
else if ((ulong)curAddrPT >= (ulong)&_data && (ulong)curAddrPT <= (ulong)&_data_end) {
MmPT[index] = (ulong)curAddrPT | PRESENT | WRITETHR | READWRITE | NX;
MmPhysicalPageTable[xedni] = (ulong)curAddrPT;
DebugLog("\tSection .data at %p\n", curAddrPT);
//DebugLog("\tSection .data at %p\n", curAddrPT);
}
// SECTION .RODATA PROTECTION
else if ((ulong)curAddrPT >= (ulong)&_rodata && (ulong)curAddrPT <= (ulong)&_rodata_end) {
MmPT[index] = (ulong)curAddrPT | PRESENT | NX;
MmPhysicalPageTable[xedni] = (ulong)curAddrPT;
DebugLog("\tSection .rodata at %p\n", curAddrPT);
//DebugLog("\tSection .rodata at %p\n", curAddrPT);
}
// While we're inside the kernel pages
else if ((ulong)curAddrPT <= MmPhysLastKernAddress) {
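
The section-protection branches above apply distinct flag sets: .text stays read-only and executable (PRESENT only), .data is writable but never executable (PRESENT | WRITETHR | READWRITE | NX), and .rodata is read-only and non-executable (PRESENT | NX). The flag names follow the architectural x86-64 page-entry bits; a sketch of the values they presumably correspond to, the authoritative definitions being in the project's headers:

    /* Assumed to mirror the standard x86-64 PTE bits; check the project's mm headers. */
    #define PRESENT   (1UL << 0)   /* entry is valid                          */
    #define READWRITE (1UL << 1)   /* writable (clear = read-only)            */
    #define USERMODE  (1UL << 2)   /* reachable from user mode (CPL 3)        */
    #define WRITETHR  (1UL << 3)   /* page-level write-through caching        */
    #define NX        (1UL << 63)  /* no-execute (requires EFER.NXE)          */
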
@@ -221,27 +217,54 @@ void MmInitPaging(void)
//
// Get a page from an address
//
static ulong *MmGetPageDescriptorFromVirtual(void *virtualAddr) //XXX MUST CREATE NEW ENTRIES IN TREE
static ulong *MmGetPageDescriptorFromVirtual(void *virtualAddr)
{
ulong virtAddrPage = (ulong)virtualAddr & ( ~((KPAGESIZE - 1) | NX));
volatile ulong virtAddrPage;
volatile pdpe_t *pdp;
volatile pde_t *pd;
volatile pte_t *pt;
volatile ulong *page;
volatile ulong index;
if (virtAddrPage > MmVirtLastAddress) {
KeStartPanic("MmSetPage() Out of bound of the address space !");
DebugLog("Get virtual descriptor %p\n", virtualAddr);
while (virtualAddr) {
virtAddrPage = (ulong)virtualAddr & ( ~((KPAGESIZE - 1) | NX));
index = (virtAddrPage / ((ulong)KPAGESIZE * 0x8000000)) % 512;
pdp = (pdpe_t*)((ulong)MmPageMapLevel4[index] & ( ~(KPAGESIZE - 1)) );
DebugLog("pdp at %p\t: %p\n", &pdp, pdp);
if (!pdp) {
KalAllocMemoryEx((void**)&pdp, 512*sizeof(pdpe_t), M_ZEROED, KPAGESIZE);
MmPageMapLevel4[index] = (pdpe_t *)((ulong)pdp | PRESENT | READWRITE);
//DebugLog("Created pdp\t: %p\n", pdp);
continue;
}
index = (virtAddrPage / ((ulong)KPAGESIZE * 0x40000)) % 512;
pd = (pde_t*)( (ulong)pdp[index] & ( ~(KPAGESIZE - 1)) );
DebugLog("pd at %p\t: %p\n", &pd, pd);
if (!pd) {
KalAllocMemoryEx((void**)&pd, 512*sizeof(pde_t), M_ZEROED, KPAGESIZE);
pdp[index] = (pde_t *)((ulong)pd | PRESENT | READWRITE);
//DebugLog("Created pd\t: %p\n", pd);
continue;
}
index = (virtAddrPage / ((ulong)KPAGESIZE * 0x200)) % 512;
pt = (pte_t*)( (ulong)pd[index] & ( ~(KPAGESIZE - 1)) );
DebugLog("pt at %p\t: %p\n", &pt, pt);
if (!pt) {
KalAllocMemoryEx((void**)&pt, 512*sizeof(pte_t), M_ZEROED, KPAGESIZE);
pd[index] = (pte_t *)((ulong)pt | PRESENT | READWRITE);
//DebugLog("Created pt\t: %p\n", pt);
continue;
}
break;
}
pdpe_t *pdp = (pdpe_t*)((ulong)MmPageMapLevel4[(virtAddrPage / ((ulong)KPAGESIZE * 0x8000000)) % 512]);
DebugLog("pdp\t: %p\n", pdp);
pde_t *pd = (pde_t*)( (ulong)pdp[(virtAddrPage / ((ulong)KPAGESIZE * 0x40000)) % 512] );
DebugLog("pd\t: %p\n", pd);
pte_t *pt = (pte_t*)( (ulong)pd[(virtAddrPage / ((ulong)KPAGESIZE * 0x200)) % 512] );
DebugLog("pt\t: %p\n", pt);
ulong index = ((ulong)virtualAddr / ((ulong)KPAGESIZE)) % 512;
ulong *page = &(pt[index]);
DebugLog("page (with flags): %p\n", page);
KeSleep(6000);
index = ((ulong)virtualAddr / ((ulong)KPAGESIZE)) % 512;
page = &(pt[index]);
DebugLog("page (with flags): %p\n", *page);
return page;
}
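
The rewritten MmGetPageDescriptorFromVirtual() above repeats one step per paging level: mask the entry to recover the next table's base and, if the entry is empty, allocate a zeroed, page-aligned table, install it, and retry the walk. A condensed sketch of that per-level step; the helper is illustrative and not part of the project, while KalAllocMemoryEx() and the flags are used exactly as in the hunk:

    /* Sketch: make sure `slot` points at a 512-entry table, allocating one if
     * it is empty, then return the table's flag-free base address. */
    static ulong *EnsureTable(volatile ulong *slot)
    {
        ulong *table = (ulong *)(*slot & ~(KPAGESIZE - 1));

        if (!table) {
            /* zeroed, KPAGESIZE-aligned allocation, as in the hunk above */
            KalAllocMemoryEx((void **)&table, 512 * sizeof(ulong), M_ZEROED, KPAGESIZE);
            *slot = (ulong)table | PRESENT | READWRITE;
        }

        return table;
    }
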
@@ -250,10 +273,10 @@ static ulong *MmGetPageDescriptorFromVirtual(void *virtualAddr) //XXX MUST CREAT
//
void *MmTransVirtToPhyAddr(void* virtualAddr)
{
ulong virtAddrPage = (ulong)virtualAddr & ( ~((KPAGESIZE - 1) | NX));
ulong virtAddrPage = (ulong)virtualAddr & ( ~(KPAGESIZE - 1));
ulong *page = MmGetPageDescriptorFromVirtual(virtualAddr);
if (*page == (*page & ~((KPAGESIZE - 1) | NX))) {
if (!(*page)) {
return NULL;
}
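
With the new check, an all-zero descriptor is treated as "not mapped" and the translation returns NULL. The remainder of MmTransVirtToPhyAddr() lies outside this hunk; presumably it combines the frame base stored in the descriptor with the offset inside the page, along these lines (a sketch under that assumption, not the project's actual code):

    /* Sketch only: how the translation presumably completes once the descriptor
     * has been fetched and found non-zero. The flag mask matches the one used
     * elsewhere in this file. */
    static void *TransVirtToPhySketch(void *virtualAddr, ulong *page)
    {
        ulong frameBase  = *page & ~((KPAGESIZE - 1) | NX);      /* strip flag bits    */
        ulong pageOffset = (ulong)virtualAddr & (KPAGESIZE - 1); /* offset within page */

        return (void *)(frameBase + pageOffset);
    }
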
@@ -299,9 +322,9 @@ void MmMapPage(void* virtualAddr, void* physicalAddr, ulong flags)
{
ulong *page = MmGetPageDescriptorFromVirtual(virtualAddr);
DebugLog("Request %p:%p with %lu\n", virtualAddr, physicalAddr, flags);
//DebugLog("Request %p:%p with %lu, at page %p\n", virtualAddr, physicalAddr, flags, page);
page[0] = (ulong)physicalAddr | flags;
*page = (ulong)physicalAddr | flags;
MmPhysicalPageTable[(ulong)physicalAddr
/ ((ulong)KPAGESIZE)
@@ -309,8 +332,10 @@ void MmMapPage(void* virtualAddr, void* physicalAddr, ulong flags)
KeFlushTlbSingle(*page);
//DebugLog("Done %p at page %p\n", *page, page);
if ((ulong)virtualAddr > MmVirtLastAddress)
MmVirtLastAddress = (ulong)virtualAddr;
MmVirtLastAddress = (ulong)virtualAddr + KPAGESIZE;
}
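
Besides writing the descriptor, MmMapPage() keeps MmPhysicalPageTable up to date: it is indexed by physical frame number (physicalAddr / KPAGESIZE) and, judging from its use in MmInitPaging(), holds the virtual address the frame is currently mapped at. Under that assumption, the reverse translation used by the frame allocator in the next file could be as simple as the following sketch (the real MmTransPhyToVirtAddr() may differ):

    /* Sketch of a reverse (physical -> virtual) lookup. Assumes
     * MmPhysicalPageTable[] holds the mapped virtual address, or 0 when the
     * frame is not mapped. */
    static void *TransPhyToVirtSketch(void *physicalAddr)
    {
        ulong entry = MmPhysicalPageTable[(ulong)physicalAddr / KPAGESIZE];

        return entry ? (void *)entry : NULL;
    }
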
//

View File

@@ -157,13 +157,13 @@ ulong MmAllocPageFrameEx(void ***frameListPtr, size_t *pageNumber, size_t size,
// Maximum PHYSICAL address in memory
ulong phRamSize = memoryMap.freeRamSize + memoryMap.nonfreeRamSize;
//DebugLog("Allocating %d pages...\n", *pageNumber);
////DebugLog("Allocating %d pages...\n", *pageNumber);
if (contiguous) {
for (void *curPage = (void*)(MmPhysLastKernAddress + KPAGESIZE); curPage < (void*)phRamSize; curPage += KPAGESIZE) {
if (!isPageBusy(curPage)) {
(*frameListPtr)[curNumber] = curPage;
inBlock = true;
//DebugLog("Select page : %p\n", curPage);
////DebugLog("Select page : %p\n", curPage);
if (++curNumber >= *pageNumber) {
break;
}
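
The contiguous branch above selects free frames one after another and must restart its run whenever a busy frame interrupts it; that reset lives in the part of the loop this hunk does not show. A sketch of the usual shape of such a first-fit scan, where the reset-on-busy step and the helper name are assumptions, while isPageBusy() and KPAGESIZE are the ones used above:

    /* Sketch: first-fit scan for `wanted` contiguous free frames in [start, end).
     * The reset-on-busy step is an assumption about code outside this hunk. */
    static size_t FindContiguousRun(void **frames, size_t wanted, void *start, void *end)
    {
        size_t found = 0;

        for (void *cur = start; cur < end && found < wanted; cur += KPAGESIZE) {
            if (!isPageBusy(cur))
                frames[found++] = cur;   /* extend the current run                 */
            else
                found = 0;               /* run broken by a busy frame: start over */
        }

        return found;                    /* equals `wanted` on success */
    }
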
@@ -178,7 +178,7 @@ ulong MmAllocPageFrameEx(void ***frameListPtr, size_t *pageNumber, size_t size,
for (void *curPage = (void*)(MmPhysLastKernAddress + KPAGESIZE); curPage < (void*)phRamSize; curPage += KPAGESIZE) {
if (!isPageBusy(curPage)) {
(*frameListPtr)[curNumber] = curPage;
//DebugLog("Select page : %p\n", curPage);
////DebugLog("Select page : %p\n", curPage);
if (++curNumber >= *pageNumber) {
break;
}
@@ -192,7 +192,7 @@ ulong MmAllocPageFrameEx(void ***frameListPtr, size_t *pageNumber, size_t size,
for (size_t i = 0; i < *pageNumber; i++) {
addPageToBusyList((*frameListPtr)[i], id);
//DebugLog("Allocating page : %p\n", *frameListPtr[i]);
////DebugLog("Allocating page : %p\n", *frameListPtr[i]);
}
NSuccessfulAlloc++;
@@ -240,14 +240,14 @@ error_t MmMapPageFrame(ulong id, void *virtAddr, ulong flags)
while(busyPage->next) {
busyPage = busyPage->next;
//DebugLog("Physical : %p is %p\n", busyPage->phyAddress, MmTransPhyToVirtAddr(busyPage->phyAddress));
////DebugLog("Physical : %p is %p\n", busyPage->phyAddress, MmTransPhyToVirtAddr(busyPage->phyAddress));
if (MmTransPhyToVirtAddr(busyPage->phyAddress)) {
return EADDRINUSE;
}
if (id == busyPage->id) {
DebugLog("Map %p at %p\n", busyPage->phyAddress, virtAddr + offset);
//DebugLog("Map %p at %p\n", busyPage->phyAddress, virtAddr + offset);
MmMapPage((void*)((ulong)virtAddr + offset), busyPage->phyAddress, flags);
offset += KPAGESIZE;
}
@@ -265,10 +265,10 @@ error_t MmUnmapPageFrame(ulong id)
busyPage = busyPage->next;
actualPhys = MmTransPhyToVirtAddr(busyPage->phyAddress);
//DebugLog("Physical : %p is %p\n", busyPage->phyAddress, actualPhys);
////DebugLog("Physical : %p is %p\n", busyPage->phyAddress, actualPhys);
if (actualPhys && id == busyPage->id) {
//DebugLog("Unmap %p from %p\n", busyPage->phyAddress, MmTransPhyToVirtAddr(busyPage->phyAddress));
////DebugLog("Unmap %p from %p\n", busyPage->phyAddress, MmTransPhyToVirtAddr(busyPage->phyAddress));
MmUnmapPage(MmTransPhyToVirtAddr(busyPage->phyAddress));
}
}
@@ -292,11 +292,11 @@ error_t MmTestBusyPage(void)
/* } else { */
/* MmFreePageFrame(tab[rand() % (j+1)]); */
/* } */
/* DebugLog("Alloc : %d; Free : %d; Count : %lu Mo\n", NSuccessfulAlloc, NSuccessfulFree, MmBusyPagesSpace() / MB); */
/* //DebugLog("Alloc : %d; Free : %d; Count : %lu Mo\n", NSuccessfulAlloc, NSuccessfulFree, MmBusyPagesSpace() / MB); */
/* } */
ulong a = KeGetTicks();
DebugLog("Start alloc : %lu s\n", a/1000);
DebugLog("Start alloc 30 MB: %lu s\n", a/1000);
tab[j++] = MmAllocPageFrame(30*MB, NORMAL);
ulong b = KeGetTicks();
DebugLog("End alloc : %lu s\n", b/1000);
@@ -304,8 +304,8 @@ error_t MmTestBusyPage(void)
DebugLog("Alloc : %d; Free : %d; Count : %lu Mo\n", NSuccessfulAlloc, NSuccessfulFree, MmBusyPagesSpace() / MB);
a = KeGetTicks();
DebugLog("Start alloc : %lu s\n", a/1000);
tab[j++] = MmAllocPageFrame(20*MB, NORMAL);
DebugLog("Start alloc 30MB : %lu s\n", a/1000);
tab[j++] = MmAllocPageFrame(5*MB, NORMAL);
b = KeGetTicks();
DebugLog("End alloc : %lu s\n", b/1000);
DebugLog("Alloc time : %lu s\n", (b-a)/1000);
@@ -322,14 +322,14 @@ error_t MmTestBusyPage(void)
DebugLog("Alloc : %d; Free : %d; Count : %lu Mo\n", NSuccessfulAlloc, NSuccessfulFree, MmBusyPagesSpace() / MB);
a = KeGetTicks();
DebugLog("Start map : %lu ms\n", a);
MmMapPageFrame(tab[1], (void*)USERSPACE, PRESENT | USERMODE | READWRITE);
DebugLog("Start map at %p: %lu ms\n", USERSPACE, a);
MmMapPageFrame(tab[1], (void*)USERSPACE, PRESENT | READWRITE);
b = KeGetTicks();
DebugLog("End map : %lu ms\n", b);
DebugLog("Map time : %lu ms\n", (b-a));
//printBusyPages();
DebugLog("Finished !\n");
//DebugLog("Finished !\n");
return EOK;
}
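
Taken together, the calls exercised by MmTestBusyPage() reduce to an allocate / map / unmap sequence. A usage sketch built only from the functions visible in this commit; the 30 MB size and the USERSPACE target mirror the test, and error handling is omitted:

    /* Usage sketch: reserve a 30 MB frame block, map it at USERSPACE,
     * then drop the mapping again. */
    static error_t ExampleAllocMapUnmap(void)
    {
        /* reserve a block of physical frames; the returned id identifies it */
        ulong id = MmAllocPageFrame(30 * MB, NORMAL);

        /* map every frame of the block starting at USERSPACE */
        MmMapPageFrame(id, (void *)USERSPACE, PRESENT | READWRITE);

        /* ... the memory is now usable at USERSPACE ... */

        /* and release the mapping */
        return MmUnmapPageFrame(id);
    }
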