Heap smashing, human readability and stack security

Adrien Bourmault 2019-05-13 02:00:02 +02:00
parent 2623fc2d77
commit 7e3d6c4817
8 changed files with 49 additions and 24 deletions

@@ -169,7 +169,6 @@ _loader64:
     mov esi, GoKernel
     call write
-    mov qword [newKernelEnd], KERNEL_STACK
     mov rdi, [mbInfo]
     mov rsi, [mbMagic]
     mov rdx, GDT64.code

@@ -126,9 +126,13 @@ InitStack:
     ;; Begin address to fill and length
     mov qword [realKernelEnd], kernelEnd
-    mov rdi, kernelEnd
-    mov rcx, KERNEL_STACK - kernelEnd - 16
-    ; stop before the return address
+    mov qword [newKernelEnd], KERNEL_STACK
+    mov qword [kernelEnd], qword 0xbad0bad
+    mov rdi, kernelEnd + 16
+    mov rcx, KERNEL_STACK - kernelEnd - 16 * 2
+    ; The Stack can begin at kernelEnd + 16 in order to not overwrite the
+    ; kernel by pushing
+    ; We must stop before the return address in the current stack so -16 in rcx
     ;; If bit 0 is on, fill one byte
     sar rcx, 1                  ; Shift bit 0 into CY
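
For reference, the arithmetic in this hunk can be restated in C. This is a hedged sketch, not part of the commit: kernel_end stands in for the kernelEnd symbol, and the constants mirror the assembly above. The fill starts 16 bytes above the kernel image so that pushes cannot clobber it, and stops 16 bytes below KERNEL_STACK so the return address already sitting on the current stack survives.

#include <stddef.h>
#include <stdint.h>

#define GUARD_GAP  16UL                    /* bytes reserved above the kernel image */
#define STACK_SIZE (4096UL * 2 * 1024)     /* 8MB, as in the KERNEL_STACK equate    */

uintptr_t stack_top(uintptr_t kernel_end)
{
    return kernel_end + GUARD_GAP + STACK_SIZE;  /* KERNEL_STACK equ kernelEnd + 16 + ... */
}

uintptr_t fill_start(uintptr_t kernel_end)
{
    return kernel_end + GUARD_GAP;               /* start filling above the guard gap */
}

size_t fill_length(uintptr_t kernel_end)
{
    /* KERNEL_STACK - kernelEnd - 16 * 2: skip the guard gap below and stop
     * before the return address at the top of the current stack */
    return stack_top(kernel_end) - kernel_end - 2 * GUARD_GAP;
}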

@@ -30,7 +30,7 @@ global newKernelEnd
 global realKernelEnd
 [section .text]
-KERNEL_STACK equ kernelEnd + 4096 * 2 * 1024        ; 8MB of stack
+KERNEL_STACK equ kernelEnd + 16 + 4096 * 2 * 1024   ; 8MB of stack
 newKernelEnd dq 0x0
 realKernelEnd dq 0x0

@@ -48,6 +48,7 @@ error_t MmSetMaxHeapSize(size_t);
 error_t MmGrowHeap(size_t);
 error_t MmShrinkHeap(size_t);
+void MmIsHeapSmashed(void);
 
 //----------------------------------------------------------------------------//
 
 #endif

@@ -28,6 +28,6 @@ ulong __stack_chk_guard = 0x447c0ffe4dbf9e55;
 noreturn void __stack_chk_fail(void)
 {
-    KeStartPanic("Stack smashed!");
+    KeStartPanic("Stack has been smashed!\n");
 }
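
The two symbols in this hunk are the hooks GCC's -fstack-protector uses: the compiler plants a copy of __stack_chk_guard in each protected frame and calls __stack_chk_fail() if that copy has changed before the function returns. A loose, hand-written illustration of the idea follows; it is not from the commit, and in practice the prologue and epilogue are emitted by the compiler rather than written by hand.

extern unsigned long __stack_chk_guard;
extern void __stack_chk_fail(void);

void greet(const char *name)
{
    unsigned long canary = __stack_chk_guard;   /* prologue: plant the canary     */
    char buf[16];
    unsigned int i;

    for (i = 0; name[i] != '\0'; i++)           /* unbounded copy: a long name    */
        buf[i] = name[i];                       /* can overrun buf into the rest  */
    buf[i] = '\0';                              /* of the frame, canary included  */

    if (canary != __stack_chk_guard)            /* epilogue: verify the canary    */
        __stack_chk_fail();                     /* panics via KeStartPanic above  */
}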

@@ -25,6 +25,7 @@
 #include <kernel/mm.h>
 #include <kernel/heap.h>
 #include <extras/locks.h>
+#include <kernel/boot.h>
 
 // Start address of the heap
 void *_heap_start;
@@ -44,6 +45,7 @@ static Lock_t _heap_lock = ExINITLOCK(KLOCK_SPINLOCK);
 void MmInitHeap(void)
 {
     assert(_heap_end == NULL);
+    MmIsHeapSmashed();
 
     // Get the first available zone address
     _heap_start = MmGetFirstAvailZone((void*)0);
@@ -80,6 +82,7 @@ void MmUnlockHeap(void)
 //
 size_t MmGetHeapSize(void)
 {
+    MmIsHeapSmashed();
     return (size_t)_heap_end - (size_t)_heap_start;
 }
@@ -88,6 +91,7 @@ size_t MmGetHeapSize(void)
 //
 size_t MmGetMaxHeapSize(void)
 {
+    MmIsHeapSmashed();
     return _heap_max;
 }
@@ -96,6 +100,7 @@ size_t MmGetMaxHeapSize(void)
 //
 error_t MmSetMaxHeapSize(size_t new)
 {
+    MmIsHeapSmashed();
     if (new > MmGetAvailZoneSize((void *)_heap_start)) {
         return ENOMEM;
     }
@@ -114,6 +119,7 @@ error_t MmSetMaxHeapSize(size_t new)
 //
 error_t MmGrowHeap(size_t req)
 {
+    MmIsHeapSmashed();
     assert(req % alignof(QWORD) == 0);
 
     if ((size_t)_heap_end + req > (size_t)_heap_start + _heap_max) {
@@ -131,6 +137,7 @@ error_t MmGrowHeap(size_t req)
 //
 error_t MmShrinkHeap(size_t req)
 {
+    MmIsHeapSmashed();
     assert(req % alignof(QWORD) == 0);
 
     if (req > (size_t)_heap_end - (size_t)_heap_start) {
@@ -142,3 +149,13 @@ error_t MmShrinkHeap(size_t req)
     return EOK;
 }
+
+//
+// The incarnate paranoia
+//
+void MmIsHeapSmashed(void)
+{
+    ulong *heapStart = BtLoaderInfo.kernelEndAddr + 8;
+    if (*heapStart != 0xbad00badbad00bad)
+        KeStartPanic("Heap has been smashed !\n");
+}
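
Together with the InitMemoryMap change in the next file, the mechanism is: a magic value is written at BtLoaderInfo.kernelEndAddr + 8, inside the 16-byte gap the commit now keeps between kernelEndAddr and the first heap zone, and every heap entry point calls MmIsHeapSmashed() to verify it is still intact. A minimal sketch of that plant/check pair, assuming a generic kernel_end pointer in place of BtLoaderInfo.kernelEndAddr (not the kernel's actual code):

#include <stdint.h>

#define HEAP_CANARY 0xbad00badbad00badULL

static uint64_t *canary_slot(void *kernel_end)
{
    /* 8 bytes into the reserved 16-byte gap, just below the lowest
     * address MmGetFirstAvailZone will now hand out for the heap */
    return (uint64_t *)((char *)kernel_end + 8);
}

void plant_heap_canary(void *kernel_end)        /* done once in InitMemoryMap */
{
    *canary_slot(kernel_end) = HEAP_CANARY;
}

int heap_is_smashed(void *kernel_end)           /* what MmIsHeapSmashed tests */
{
    return *canary_slot(kernel_end) != HEAP_CANARY;   /* nonzero means panic  */
}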

@@ -106,6 +106,10 @@ static error_t InitMemoryMap(void)
     /*DebugLog("[InitMemoryMap] Physical Ram Size : %d Mio\n\n",
             (memoryMap.freeRamSize + memoryMap.nonfreeRamSize) / MB);*/
 
+    // Magic value in memory to prevent smashing
+    ulong *heapStart = BtLoaderInfo.kernelEndAddr + 8;
+    *heapStart = 0xbad00badbad00bad;
+
     return EOK;
 }
@@ -113,7 +117,7 @@ size_t MmGetAvailZoneSize(void *start) {
     uint i;
 
     // Because the kernel is the kernel
-    if (start < BtLoaderInfo.kernelEndAddr)
+    if (start < BtLoaderInfo.kernelEndAddr + 16)
         return 0;
 
     // Search the zone where the start address is
@@ -138,8 +142,8 @@ void *MmGetFirstAvailZone(void *start) {
     void *current = 0;
 
     // Because the kernel is the kernel
-    if ((ulong)start < (ulong)BtLoaderInfo.kernelEndAddr) {
-        return MmGetFirstAvailZone(BtLoaderInfo.kernelEndAddr+1);
+    if ((ulong)start < (ulong)BtLoaderInfo.kernelEndAddr+16) {
+        return MmGetFirstAvailZone(BtLoaderInfo.kernelEndAddr+16);
     }
 
     // Search the zone where the start address is
@@ -195,7 +199,7 @@ void MmPrintMemoryMap(void) {
     ulong len = memoryMap.entry[i].length;
 
-    KernLog("mem zone: %lp\t%s\twith length: %04luMB + %04luKB + %04luB\n",
+    KernLog("mem zone: %lp\t%s\twith length: %4luMB + %4luKB + %4luB\n",
             memoryMap.entry[i].addr, avStr,
             _ADDR_TO_MB(len), _ADDR_TO_KB(len), _ADDR_TO_B(len)
     );
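
The format-string change above is the "human readability" part of the commit: %04lu pads the value to width 4 with leading zeros, while %4lu pads with spaces. A quick standalone illustration with plain printf (hypothetical values, not actual kernel output):

#include <stdio.h>

int main(void)
{
    unsigned long mb = 7;

    printf("%04luMB\n", mb);   /* zero-padded:  "0007MB" */
    printf("%4luMB\n", mb);    /* space-padded: "   7MB" */
    return 0;
}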

@@ -98,24 +98,24 @@ error_t CmdMemUsage(int argc, char **argv, char *cmdline)
     KeRestoreIRQs(flags);
 
     img_diff = realKernelEnd - (size_t)BtLoaderInfo.kernelAddr;
-    stack_diff = (size_t)BtLoaderInfo.kernelEndAddr - (realKernelEnd + 1);
+    stack_diff = (size_t)BtLoaderInfo.kernelEndAddr - (realKernelEnd + 16);
     heap_diff = (size_t)heap_end - (size_t)heap_start;
 
     KernLog("Kernel image\n");
-    KernLog("\tstarts at:\t\t%p (%04luMB + %04luKB + %04luB)\n",
+    KernLog("\tstarts at:\t\t%p (%4luMB + %4luKB + %4luB)\n",
             BtLoaderInfo.kernelAddr,
             _ADDR_TO_MB((size_t)BtLoaderInfo.kernelAddr),
             _ADDR_TO_KB((size_t)BtLoaderInfo.kernelAddr),
             _ADDR_TO_B((size_t)BtLoaderInfo.kernelAddr));
-    KernLog("\tends at:\t\t%p (%04luMB + %04luKB + %04luB)\n",
+    KernLog("\tends at:\t\t%p (%4luMB + %4luKB + %4luB)\n",
             realKernelEnd,
             _ADDR_TO_MB(realKernelEnd),
             _ADDR_TO_KB(realKernelEnd),
             _ADDR_TO_B(realKernelEnd));
-    KernLog("\tsize:\t\t\t%04luMB + %04luKB + %04luB (%p)\n",
+    KernLog("\tsize:\t\t\t%4luMB + %4luKB + %4luB (%p)\n",
             _ADDR_TO_MB(img_diff),
             _ADDR_TO_KB(img_diff),
             _ADDR_TO_B(img_diff),
@@ -123,7 +123,7 @@ error_t CmdMemUsage(int argc, char **argv, char *cmdline)
     KernLog("Kernel stack\n");
-    KernLog("\tstarts at:\t\t%p (%04luMB + %04luKB + %04luB)\n",
+    KernLog("\tstarts at:\t\t%p (%4luMB + %4luKB + %4luB)\n",
             BtLoaderInfo.kernelEndAddr,
             _ADDR_TO_MB((size_t)BtLoaderInfo.kernelEndAddr),
             _ADDR_TO_KB((size_t)BtLoaderInfo.kernelEndAddr),
@@ -135,25 +135,25 @@ error_t CmdMemUsage(int argc, char **argv, char *cmdline)
     stack_cur = (size_t)BtLoaderInfo.kernelEndAddr - (size_t)&var;
 
-    KernLog("\tends at:\t\t%p (%04luMB + %04luKB + %04luB)\n",
+    KernLog("\tends at:\t\t%p (%4luMB + %4luKB + %4luB)\n",
             stack_cur,
             _ADDR_TO_MB(stack_cur),
             _ADDR_TO_KB(stack_cur),
             _ADDR_TO_B(stack_cur));
-    KernLog("\tmin addr:\t\t%p (%04luMB + %04luKB + %04luB)\n",
+    KernLog("\tmin addr:\t\t%p (%4luMB + %4luKB + %4luB)\n",
             realKernelEnd+1,
             _ADDR_TO_MB(realKernelEnd+1),
             _ADDR_TO_KB(realKernelEnd+1),
             _ADDR_TO_B(realKernelEnd+1));
-    KernLog("\tsize (cur):\t\t%04luMB + %04luKB + %04luB (%p)\n",
+    KernLog("\tsize (cur):\t\t%4luMB + %4luKB + %4luB (%p)\n",
             _ADDR_TO_MB(stack_cur),
             _ADDR_TO_KB(stack_cur),
             _ADDR_TO_B(stack_cur),
             stack_cur);
-    KernLog("\tsize (max):\t\t%04luMB + %04luKB + %04luB (%p)\n",
+    KernLog("\tsize (max):\t\t%4luMB + %4luKB + %4luB (%p)\n",
             _ADDR_TO_MB(stack_diff),
             _ADDR_TO_KB(stack_diff),
             _ADDR_TO_B(stack_diff),
@@ -161,29 +161,29 @@ error_t CmdMemUsage(int argc, char **argv, char *cmdline)
     KernLog("Kernel heap\n");
-    KernLog("\tstarts at:\t\t%p (%04luMB + %04luKB + %04luB)\n",
+    KernLog("\tstarts at:\t\t%p (%4luMB + %4luKB + %4luB)\n",
             heap_start, _ADDR_TO_MB(heap_start),
             _ADDR_TO_KB(heap_start),
             _ADDR_TO_B(heap_start));
-    KernLog("\tends at:\t\t%p (%04luMB + %04luKB + %04luB)\n",
+    KernLog("\tends at:\t\t%p (%4luMB + %4luKB + %4luB)\n",
             heap_end, _ADDR_TO_MB(heap_end),
             _ADDR_TO_KB(heap_end),
             _ADDR_TO_B(heap_end));
-    KernLog("\tmax addr:\t\t%p (%04luMB + %04luKB + %04luB)\n",
+    KernLog("\tmax addr:\t\t%p (%4luMB + %4luKB + %4luB)\n",
             heap_start + heap_max,
             _ADDR_TO_MB(heap_start + heap_max),
             _ADDR_TO_KB(heap_start + heap_max),
             _ADDR_TO_B(heap_start + heap_max));
-    KernLog("\tsize (cur):\t\t%04luMB + %04luKB + %04luB (%p)\n",
+    KernLog("\tsize (cur):\t\t%4luMB + %4luKB + %4luB (%p)\n",
             _ADDR_TO_MB(heap_diff),
             _ADDR_TO_KB(heap_diff),
             _ADDR_TO_B(heap_diff),
             heap_diff);
-    KernLog("\tsize (max):\t\t%04luMB + %04luKB + %04luB (%p)\n",
+    KernLog("\tsize (max):\t\t%4luMB + %4luKB + %4luB (%p)\n",
             _ADDR_TO_MB(heap_max),
             _ADDR_TO_KB(heap_max),
             _ADDR_TO_B(heap_max),