Main Page | Class Hierarchy | Class List | File List | Class Members | File Members

heappage.c

Go to the documentation of this file.
00001 /*++ 00002 00003 Copyright (c) 1994-2000 Microsoft Corporation 00004 00005 Module Name: 00006 00007 heappage.c 00008 00009 Abstract: 00010 00011 Implementation of NT RtlHeap family of APIs for debugging 00012 applications with heap usage bugs. Each allocation returned to 00013 the calling app is placed at the end of a virtual page such that 00014 the following virtual page is protected (ie, NO_ACCESS). 00015 So, when the errant app attempts to reference or modify memory 00016 beyond the allocated portion of a heap block, an access violation 00017 is immediately caused. This facilitates debugging the app 00018 because the access violation occurs at the exact point in the 00019 app where the heap corruption or abuse would occur. Note that 00020 significantly more memory (pagefile) is required to run an app 00021 using this heap implementation as opposed to the retail heap 00022 manager. 00023 00024 Author: 00025 00026 Tom McGuire (TomMcg) 06-Jan-1995 00027 Silviu Calinoiu (SilviuC) 22-Feb-2000 00028 00029 Revision History: 00030 00031 --*/ 00032 00033 #include "ntrtlp.h" 00034 #include "heappage.h" // external interface (hooks) to debug heap manager 00035 #include "heappagi.h" 00036 #include "heappriv.h" 00037 00038 int __cdecl sprintf(char *, const char *, ...); 00039 00040 // 00041 // Remainder of entire file is wrapped with #ifdef DEBUG_PAGE_HEAP so that 00042 // it will compile away to nothing if DEBUG_PAGE_HEAP is not defined in 00043 // heappage.h 00044 // 00045 00046 #ifdef DEBUG_PAGE_HEAP 00047 00048 // 00049 // Page size 00050 // 00051 00052 #if defined(_X86_) 00053 #ifndef PAGE_SIZE 00054 #define PAGE_SIZE 0x1000 00055 #endif 00056 #define USER_ALIGNMENT 8 00057 #elif defined(_MIPS_) 00058 #ifndef PAGE_SIZE 00059 #define PAGE_SIZE 0x1000 00060 #endif 00061 #define USER_ALIGNMENT 8 00062 #elif defined(_PPC_) 00063 #ifndef PAGE_SIZE 00064 #define PAGE_SIZE 0x1000 00065 #endif 00066 #define USER_ALIGNMENT 8 00067 #elif defined(_IA64_) 00068 #ifndef 
PAGE_SIZE 00069 #define PAGE_SIZE 0x2000 00070 #endif 00071 #define USER_ALIGNMENT 16 00072 #elif defined(_AXP64_) 00073 #ifndef PAGE_SIZE 00074 #define PAGE_SIZE 0x2000 00075 #endif 00076 #define USER_ALIGNMENT 16 00077 #elif defined(_ALPHA_) 00078 #ifndef PAGE_SIZE 00079 #define PAGE_SIZE 0x2000 00080 #endif 00081 #define USER_ALIGNMENT 8 00082 #else 00083 #error // platform not defined 00084 #endif 00085 00086 // 00087 // Few constants 00088 // 00089 00090 #define DPH_HEAP_SIGNATURE 0xFFEEDDCC 00091 #define FILL_BYTE 0xEE 00092 #define HEAD_FILL_SIZE 0x10 00093 #define RESERVE_SIZE 0x100000 00094 #define VM_UNIT_SIZE 0x10000 00095 #define POOL_SIZE 0x4000 00096 #define INLINE __inline 00097 #define MIN_FREE_LIST_LENGTH 8 00098 00099 // 00100 // Few macros 00101 // 00102 00103 #define ROUNDUP2( x, n ) ((( x ) + (( n ) - 1 )) & ~(( n ) - 1 )) 00104 00105 #if INTERNAL_DEBUG 00106 #define DEBUG_CODE( a ) a 00107 #else 00108 #define DEBUG_CODE( a ) 00109 #endif 00110 00111 #define RETAIL_ASSERT( a ) ( (a) ? 
TRUE : \ 00112 RtlpDebugPageHeapAssert( "Page heap: assert: (" #a ")\n" )) 00113 00114 #define DEBUG_ASSERT( a ) DEBUG_CODE( RETAIL_ASSERT( a )) 00115 00116 #define HEAP_HANDLE_FROM_ROOT( HeapRoot ) \ 00117 ((PVOID)(((PCHAR)(HeapRoot)) - PAGE_SIZE )) 00118 00119 #define IF_GENERATE_EXCEPTION( Flags, Status ) { \ 00120 if (( Flags ) & HEAP_GENERATE_EXCEPTIONS ) \ 00121 RtlpDebugPageHeapException((ULONG)(Status)); \ 00122 } 00123 00124 #define OUT_OF_VM_BREAK( Flags, szText ) { \ 00125 if (( Flags ) & HEAP_BREAK_WHEN_OUT_OF_VM ) \ 00126 RtlpDebugPageHeapBreak(( szText )); \ 00127 } 00128 00129 // 00130 // List manipulation macros 00131 // 00132 00133 #define ENQUEUE_HEAD( Node, Head, Tail ) { \ 00134 (Node)->pNextAlloc = (Head); \ 00135 if ((Head) == NULL ) \ 00136 (Tail) = (Node); \ 00137 (Head) = (Node); \ 00138 } 00139 00140 #define ENQUEUE_TAIL( Node, Head, Tail ) { \ 00141 if ((Tail) == NULL ) \ 00142 (Head) = (Node); \ 00143 else \ 00144 (Tail)->pNextAlloc = (Node); \ 00145 (Tail) = (Node); \ 00146 } 00147 00148 #define DEQUEUE_NODE( Node, Prev, Head, Tail ) { \ 00149 PVOID Next = (Node)->pNextAlloc; \ 00150 if ((Head) == (Node)) \ 00151 (Head) = Next; \ 00152 if ((Tail) == (Node)) \ 00153 (Tail) = (Prev); \ 00154 if ((Prev) != (NULL)) \ 00155 (Prev)->pNextAlloc = Next; \ 00156 } 00157 00158 // 00159 // Bias/unbias pointer 00160 // 00161 00162 #define BIAS_POINTER(p) ((PVOID)((ULONG_PTR)(p) | (ULONG_PTR)0x01)) 00163 #define UNBIAS_POINTER(p) ((PVOID)((ULONG_PTR)(p) & ~((ULONG_PTR)0x01))) 00164 #define IS_BIASED_POINTER(p) ((PVOID)((ULONG_PTR)(p) & (ULONG_PTR)0x01)) 00165 00166 // 00167 // Protect/Unprotect heap structures macros 00168 // 00169 00170 #define PROTECT_HEAP_STRUCTURES( HeapRoot ) { \ 00171 if ((HeapRoot)->HeapFlags & HEAP_PROTECTION_ENABLED ) \ 00172 RtlpDebugPageHeapProtectStructures( (HeapRoot) ); \ 00173 } 00174 00175 #define UNPROTECT_HEAP_STRUCTURES( HeapRoot ) { \ 00176 if ((HeapRoot)->HeapFlags & HEAP_PROTECTION_ENABLED ) \ 00177 
RtlpDebugPageHeapUnProtectStructures( (HeapRoot) ); \ 00178 } 00179 00180 // 00181 // RtlpDebugPageHeap 00182 // 00183 // Global variable that marks that page heap is enabled. It is set 00184 // in \nt\base\ntdll\ldrinit.c by reading the GlobalFlag registry 00185 // value (system wide or per process one) and checking if the 00186 // FLG_HEAP_PAGE_ALLOCS is set. 00187 // 00188 00189 BOOLEAN RtlpDebugPageHeap; 00190 00191 // 00192 // Internal version used to figure out what are people running 00193 // in various VBLs. 00194 // 00195 00196 PCHAR RtlpDphVersion = "03/14/2000"; 00197 00198 // 00199 // Page heaps list manipulation. 00200 // 00201 // We maintain a list of all page heaps in the process to support 00202 // APIs like GetProcessHeaps. The list is also useful for debug 00203 // extensions that need to iterate the heaps. The list is protected 00204 // by RtlpDphHeapListCriticalSection lock. 00205 // 00206 00207 BOOLEAN RtlpDphHeapListHasBeenInitialized; 00208 RTL_CRITICAL_SECTION RtlpDphHeapListCriticalSection; 00209 PDPH_HEAP_ROOT RtlpDphHeapListHead; 00210 PDPH_HEAP_ROOT RtlpDphHeapListTail; 00211 ULONG RtlpDphHeapListCount; 00212 00213 // 00214 // `RtlpDebugPageHeapGlobalFlags' stores the global page heap flags. 00215 // The value of this variable is copied into the per heap 00216 // flags (ExtraFlags field) during heap creation. 00217 // 00218 // The initial value is so that by default we use page heap only with 00219 // normal allocations. This way if system wide global flag for page 00220 // heap is set the machine will still boot. After that we can enable 00221 // page heap with "sudden death" for specific processes. The most useful 00222 // flags for this case would be: 00223 // 00224 // PAGE_HEAP_ENABLE_PAGE_HEAP | 00225 // PAGE_HEAP_COLLECT_STACK_TRACES ; 00226 // 00227 // If no flags specified the default is page heap light with 00228 // stack trace collection. 
00229 // 00230 00231 ULONG RtlpDphGlobalFlags = PAGE_HEAP_COLLECT_STACK_TRACES; 00232 00233 // 00234 // Page heap global flags. 00235 // 00236 // These values are read from registry in \nt\base\ntdll\ldrinit.c. 00237 // 00238 00239 ULONG RtlpDphSizeRangeStart; 00240 ULONG RtlpDphSizeRangeEnd; 00241 ULONG RtlpDphDllRangeStart; 00242 ULONG RtlpDphDllRangeEnd; 00243 ULONG RtlpDphRandomProbability; 00244 WCHAR RtlpDphTargetDlls [512]; 00245 UNICODE_STRING RtlpDphTargetDllsUnicode; 00246 00247 // 00248 // `RtlpDphDebugLevel' controls debug messages in the code. 00249 // 00250 // (SilviuC): The value should always be zero for the retail bits. 00251 // 00252 00253 #define DPH_DEBUG_INTERNAL_VALIDATION 0x0001 00254 #define DPH_DEBUG_RESERVED_2 0x0002 00255 #define DPH_DEBUG_RESERVED_4 0x0004 00256 #define DPH_DEBUG_RESERVED_8 0x0008 00257 #define DPH_DEBUG_DECOMMIT_RANGES 0x0010 00258 #define DPH_DEBUG_BREAK_FOR_SIZE_ZERO 0x0020 00259 #define DPH_DEBUG_BREAK_FOR_NULL_FREE 0x0040 00260 #define DPH_DEBUG_NEVER_FREE 0x0080 00261 #define DPH_DEBUG_SLOW_CHECKS 0x0100 00262 00263 ULONG RtlpDphDebugLevel; 00264 00265 // 00266 // `RtlpDphGlobalCounter' contains process wide counters. 00267 // The definition of counters is in `rtl\heappagi.h' 00268 // 00269 // `RtlpDphSizeCounter' contains size distribution for allocations 00270 // using 128 bytes granularity. 
00271 // 00272 00273 #define BUMP_GLOBAL_COUNTER(n) InterlockedIncrement(&(RtlpDphGlobalCounter[n])) 00274 00275 #define BUMP_SIZE_COUNTER(Size) if (Size/128 <= MAX_SIZE_COUNTER_INDEX) { \ 00276 InterlockedIncrement(&(RtlpDphSizeCounter[Size/128])); \ 00277 } \ 00278 else { \ 00279 InterlockedIncrement(&(RtlpDphSizeCounter[MAX_SIZE_COUNTER_INDEX])); \ 00280 } 00281 00282 #define MAX_GLOBAL_COUNTER_INDEX 15 00283 #define MAX_SIZE_COUNTER_INDEX 64 00284 00285 ULONG RtlpDphGlobalCounter[MAX_GLOBAL_COUNTER_INDEX + 1]; 00286 ULONG RtlpDphSizeCounter[MAX_SIZE_COUNTER_INDEX + 1]; 00287 00288 // 00289 // Threshold for delaying a free operation in the normal heap. 00290 // If we get over this limit we start actually freeing blocks. 00291 // 00292 00293 SIZE_T RtlpDphDelayedFreeCacheSize = 256 * PAGE_SIZE; 00294 00295 // 00296 // Process wide trace database and the maximum size it can 00297 // grow to. 00298 // 00299 00300 SIZE_T RtlpDphTraceDatabaseMaximumSize = 256 * PAGE_SIZE; 00301 PRTL_TRACE_DATABASE RtlpDphTraceDatabase; 00302 00303 // 00304 // Support for normal heap allocations 00305 // 00306 // In order to make better use of memory available page heap will 00307 // allocate some of the block into a normal NT heap that it manages. 00308 // We will call these blocks "normal blocks" as opposed to "page blocks". 00309 // 00310 // All normal blocks have the requested size increased by DPH_BLOCK_INFORMATION. 00311 // The address returned is of course of the first byte after the block 00312 // info structure. Upon free, blocks are checked for corruption and 00313 // then released into the normal heap. 00314 // 00315 // All these normal heap functions are called with the page heap 00316 // lock acquired. 
00317 // 00318 00319 PVOID 00320 RtlpDphNormalHeapAllocate ( 00321 PDPH_HEAP_ROOT Heap, 00322 ULONG Flags, 00323 SIZE_T Size 00324 ); 00325 00326 BOOLEAN 00327 RtlpDphNormalHeapFree ( 00328 PDPH_HEAP_ROOT Heap, 00329 ULONG Flags, 00330 PVOID Block 00331 ); 00332 00333 PVOID 00334 RtlpDphNormalHeapReAllocate ( 00335 PDPH_HEAP_ROOT Heap, 00336 ULONG Flags, 00337 PVOID OldBlock, 00338 SIZE_T Size 00339 ); 00340 00341 SIZE_T 00342 RtlpDphNormalHeapSize ( 00343 PDPH_HEAP_ROOT Heap, 00344 ULONG Flags, 00345 PVOID Block 00346 ); 00347 00348 BOOLEAN 00349 RtlpDphNormalHeapSetUserFlags( 00350 IN PDPH_HEAP_ROOT Heap, 00351 IN ULONG Flags, 00352 IN PVOID Address, 00353 IN ULONG UserFlagsReset, 00354 IN ULONG UserFlagsSet 00355 ); 00356 00357 BOOLEAN 00358 RtlpDphNormalHeapSetUserValue( 00359 IN PDPH_HEAP_ROOT Heap, 00360 IN ULONG Flags, 00361 IN PVOID Address, 00362 IN PVOID UserValue 00363 ); 00364 00365 BOOLEAN 00366 RtlpDphNormalHeapGetUserInfo( 00367 IN PDPH_HEAP_ROOT Heap, 00368 IN ULONG Flags, 00369 IN PVOID Address, 00370 OUT PVOID* UserValue, 00371 OUT PULONG UserFlags 00372 ); 00373 00374 BOOLEAN 00375 RtlpDphNormalHeapValidate( 00376 IN PDPH_HEAP_ROOT Heap, 00377 IN ULONG Flags, 00378 IN PVOID Address 00379 ); 00380 00381 // 00382 // Support for DPH_BLOCK_INFORMATION management 00383 // 00384 // This header information prefixes both the normal and page heap 00385 // blocks. 
00386 // 00387 00388 VOID 00389 RtlpDphReportCorruptedBlock ( 00390 PVOID Block, 00391 ULONG Reason 00392 ); 00393 00394 BOOLEAN 00395 RtlpDphIsNormalHeapBlock ( 00396 PDPH_HEAP_ROOT Heap, 00397 PVOID Block, 00398 PULONG Reason, 00399 BOOLEAN CheckPattern 00400 ); 00401 00402 BOOLEAN 00403 RtlpDphIsNormalFreeHeapBlock ( 00404 PVOID Block, 00405 PULONG Reason, 00406 BOOLEAN CheckPattern 00407 ); 00408 00409 BOOLEAN 00410 RtlpDphIsPageHeapBlock ( 00411 PDPH_HEAP_ROOT Heap, 00412 PVOID Block, 00413 PULONG Reason, 00414 BOOLEAN CheckPattern 00415 ); 00416 00417 BOOLEAN 00418 RtlpDphWriteNormalHeapBlockInformation ( 00419 PDPH_HEAP_ROOT Heap, 00420 PVOID Block, 00421 SIZE_T RequestedSize, 00422 SIZE_T ActualSize 00423 ); 00424 00425 BOOLEAN 00426 RtlpDphWritePageHeapBlockInformation ( 00427 PDPH_HEAP_ROOT Heap, 00428 PVOID Block, 00429 SIZE_T RequestedSize, 00430 SIZE_T ActualSize 00431 ); 00432 00433 // 00434 // Delayed free queue (of normal heap allocations) management 00435 // 00436 00437 VOID 00438 RtlpDphInitializeDelayedFreeQueue ( 00439 ); 00440 00441 VOID 00442 RtlpDphAddToDelayedFreeQueue ( 00443 PDPH_BLOCK_INFORMATION Info 00444 ); 00445 00446 BOOLEAN 00447 RtlpDphNeedToTrimDelayedFreeQueue ( 00448 PSIZE_T TrimSize 00449 ); 00450 00451 VOID 00452 RtlpDphTrimDelayedFreeQueue ( 00453 SIZE_T TrimSize, 00454 ULONG Flags 00455 ); 00456 00457 VOID 00458 RtlpDphFreeDelayedBlocksFromHeap ( 00459 PVOID PageHeap, 00460 PVOID NormalHeap 00461 ); 00462 00463 // 00464 // Decision normal heap vs. page heap 00465 // 00466 00467 RtlpDphShouldAllocateInPageHeap ( 00468 PDPH_HEAP_ROOT Heap, 00469 SIZE_T Size 00470 ); 00471 00472 // 00473 // Stack trace detection for trace database. 
00474 // 00475 00476 PRTL_TRACE_BLOCK 00477 RtlpDphLogStackTrace ( 00478 ULONG FramesToSkip 00479 ); 00480 00481 // 00482 // Page heap general support functions 00483 // 00484 00485 VOID 00486 RtlpDebugPageHeapBreak( 00487 IN PCH Text 00488 ); 00489 00490 BOOLEAN 00491 RtlpDebugPageHeapAssert( 00492 IN PCH Text 00493 ); 00494 00495 VOID 00496 RtlpDebugPageHeapEnterCritSect( 00497 IN PDPH_HEAP_ROOT HeapRoot, 00498 IN ULONG Flags 00499 ); 00500 00501 INLINE 00502 VOID 00503 RtlpDebugPageHeapLeaveCritSect( 00504 IN PDPH_HEAP_ROOT HeapRoot 00505 ); 00506 00507 VOID 00508 RtlpDebugPageHeapException( 00509 IN ULONG ExceptionCode 00510 ); 00511 00512 PVOID 00513 RtlpDebugPageHeapPointerFromHandle( 00514 IN PVOID HeapHandle 00515 ); 00516 00517 PCCH 00518 RtlpDebugPageHeapProtectionText( 00519 IN ULONG Access, 00520 IN OUT PCHAR Buffer 00521 ); 00522 00523 // 00524 // Virtual memory manipulation functions 00525 // 00526 00527 BOOLEAN 00528 RtlpDebugPageHeapRobustProtectVM( 00529 IN PVOID VirtualBase, 00530 IN SIZE_T VirtualSize, 00531 IN ULONG NewAccess, 00532 IN BOOLEAN Recursion 00533 ); 00534 00535 INLINE 00536 BOOLEAN 00537 RtlpDebugPageHeapProtectVM( 00538 IN PVOID VirtualBase, 00539 IN SIZE_T VirtualSize, 00540 IN ULONG NewAccess 00541 ); 00542 00543 INLINE 00544 PVOID 00545 RtlpDebugPageHeapAllocateVM( 00546 IN SIZE_T nSize 00547 ); 00548 00549 INLINE 00550 BOOLEAN 00551 RtlpDebugPageHeapReleaseVM( 00552 IN PVOID pVirtual 00553 ); 00554 00555 INLINE 00556 BOOLEAN 00557 RtlpDebugPageHeapCommitVM( 00558 IN PVOID pVirtual, 00559 IN SIZE_T nSize 00560 ); 00561 00562 INLINE 00563 BOOLEAN 00564 RtlpDebugPageHeapDecommitVM( 00565 IN PVOID pVirtual, 00566 IN SIZE_T nSize 00567 ); 00568 00569 // 00570 // Target dlls logic 00571 // 00572 // RtlpDphTargetDllsLoadCallBack is called in ntdll\ldrapi.c 00573 // (LdrpLoadDll) whenever a new dll is loaded in the process 00574 // space. 
00575 // 00576 00577 VOID 00578 RtlpDphTargetDllsLogicInitialize ( 00579 ); 00580 00581 VOID 00582 RtlpDphTargetDllsLoadCallBack ( 00583 PUNICODE_STRING Name, 00584 PVOID Address, 00585 ULONG Size 00586 ); 00587 00588 const WCHAR * 00589 RtlpDphIsDllTargeted ( 00590 const WCHAR * Name 00591 ); 00592 00593 // 00594 // Internal heap validation 00595 // 00596 00597 VOID 00598 RtlpDphInternalValidatePageHeap ( 00599 PDPH_HEAP_ROOT Heap, 00600 PUCHAR ExemptAddress, 00601 SIZE_T ExemptSize 00602 ); 00603 00607 00608 VOID 00609 RtlpDebugPageHeapBreak( 00610 IN PCH Text 00611 ) 00612 { 00613 DbgPrint( Text ); 00614 DbgBreakPoint(); 00615 } 00616 00617 BOOLEAN 00618 RtlpDebugPageHeapAssert( 00619 IN PCH Text 00620 ) 00621 { 00622 RtlpDebugPageHeapBreak( Text ); 00623 return FALSE; 00624 } 00625 00626 VOID 00627 RtlpDebugPageHeapEnterCritSect( 00628 IN PDPH_HEAP_ROOT HeapRoot, 00629 IN ULONG Flags 00630 ) 00631 { 00632 if (Flags & HEAP_NO_SERIALIZE) { 00633 00634 if (! RtlTryEnterCriticalSection( HeapRoot->HeapCritSect )) { 00635 00636 if (HeapRoot->nRemoteLockAcquired == 0) { 00637 00638 // 00639 // Another thread owns the CritSect. This is an application 00640 // bug since multithreaded access to heap was attempted with 00641 // the HEAP_NO_SERIALIZE flag specified. 00642 // 00643 00644 RtlpDebugPageHeapBreak( "Page heap: Multithreaded access with HEAP_NO_SERIALIZE\n" ); 00645 00646 // 00647 // In the interest of allowing the errant app to continue, 00648 // we'll force serialization and continue. 
00649 // 00650 00651 HeapRoot->HeapFlags &= ~HEAP_NO_SERIALIZE; 00652 00653 } 00654 00655 RtlEnterCriticalSection( HeapRoot->HeapCritSect ); 00656 00657 } 00658 } 00659 else { 00660 RtlEnterCriticalSection( HeapRoot->HeapCritSect ); 00661 } 00662 } 00663 00664 INLINE 00665 VOID 00666 RtlpDebugPageHeapLeaveCritSect( 00667 IN PDPH_HEAP_ROOT HeapRoot 00668 ) 00669 { 00670 RtlLeaveCriticalSection( HeapRoot->HeapCritSect ); 00671 } 00672 00673 VOID 00674 RtlpDebugPageHeapException( 00675 IN ULONG ExceptionCode 00676 ) 00677 { 00678 EXCEPTION_RECORD ER; 00679 00680 ER.ExceptionCode = ExceptionCode; 00681 ER.ExceptionFlags = 0; 00682 ER.ExceptionRecord = NULL; 00683 ER.ExceptionAddress = RtlpDebugPageHeapException; 00684 ER.NumberParameters = 0; 00685 RtlRaiseException( &ER ); 00686 } 00687 00688 PVOID 00689 RtlpDebugPageHeapPointerFromHandle( 00690 IN PVOID HeapHandle 00691 ) 00692 { 00693 try { 00694 if (((PHEAP)(HeapHandle))->ForceFlags & HEAP_FLAG_PAGE_ALLOCS) { 00695 00696 PDPH_HEAP_ROOT HeapRoot = (PVOID)(((PCHAR)(HeapHandle)) + PAGE_SIZE ); 00697 00698 if (HeapRoot->Signature == DPH_HEAP_SIGNATURE) { 00699 return HeapRoot; 00700 } 00701 } 00702 } 00703 except( EXCEPTION_EXECUTE_HANDLER ) { 00704 } 00705 00706 RtlpDebugPageHeapBreak( "Page heap: Bad heap handle\n" ); 00707 return NULL; 00708 } 00709 00710 PCCH 00711 RtlpDebugPageHeapProtectionText( 00712 IN ULONG Access, 00713 IN OUT PCHAR Buffer 00714 ) 00715 { 00716 switch (Access) { 00717 case PAGE_NOACCESS: return "PAGE_NOACCESS"; 00718 case PAGE_READONLY: return "PAGE_READONLY"; 00719 case PAGE_READWRITE: return "PAGE_READWRITE"; 00720 case PAGE_WRITECOPY: return "PAGE_WRITECOPY"; 00721 case PAGE_EXECUTE: return "PAGE_EXECUTE"; 00722 case PAGE_EXECUTE_READ: return "PAGE_EXECUTE_READ"; 00723 case PAGE_EXECUTE_READWRITE: return "PAGE_EXECUTE_READWRITE"; 00724 case PAGE_EXECUTE_WRITECOPY: return "PAGE_EXECUTE_WRITECOPY"; 00725 case PAGE_GUARD: return "PAGE_GUARD"; 00726 case 0: return "UNKNOWN"; 00727 default: 
sprintf( Buffer, "0x%08X", Access ); 00728 return Buffer; 00729 } 00730 } 00731 00735 00736 BOOLEAN 00737 RtlpDebugPageHeapRobustProtectVM( 00738 IN PVOID VirtualBase, 00739 IN SIZE_T VirtualSize, 00740 IN ULONG NewAccess, 00741 IN BOOLEAN Recursion 00742 ) 00743 { 00744 PVOID CopyOfVirtualBase = VirtualBase; 00745 SIZE_T CopyOfVirtualSize = VirtualSize; 00746 ULONG OldAccess; 00747 NTSTATUS Status; 00748 00749 Status = ZwProtectVirtualMemory( 00750 NtCurrentProcess(), 00751 &CopyOfVirtualBase, 00752 &CopyOfVirtualSize, 00753 NewAccess, 00754 &OldAccess 00755 ); 00756 00757 if (NT_SUCCESS( Status )) 00758 return TRUE; 00759 00760 if (! Recursion) { 00761 00762 // 00763 // We failed to change the protection on a range of memory. 00764 // This can happen if if the range of memory spans more than 00765 // one adjancent blocks allocated by separate calls to 00766 // ZwAllocateVirtualMemory. It also seems fails occasionally 00767 // for reasons unknown to me, but always when attempting to 00768 // change the protection on more than one page in a single call. 00769 // So, fall back to changing pages individually in this range. 00770 // This should be rare, so it should not be a performance problem. 00771 // 00772 00773 PCHAR VirtualExtent = (PCHAR)ROUNDUP2((ULONG_PTR)((PCHAR)VirtualBase + VirtualSize ), PAGE_SIZE ); 00774 PCHAR VirtualPage = (PCHAR)((ULONG_PTR)VirtualBase & ~( PAGE_SIZE - 1 )); 00775 BOOLEAN SuccessAll = TRUE; 00776 BOOLEAN SuccessOne; 00777 00778 while (VirtualPage < VirtualExtent) { 00779 00780 SuccessOne = RtlpDebugPageHeapRobustProtectVM( 00781 VirtualPage, 00782 PAGE_SIZE, 00783 NewAccess, 00784 TRUE 00785 ); 00786 00787 if (! 
SuccessOne) { 00788 SuccessAll = FALSE; 00789 } 00790 00791 VirtualPage += PAGE_SIZE; 00792 00793 } 00794 00795 return SuccessAll; // TRUE if all succeeded, FALSE if any failed 00796 } 00797 00798 else { 00799 00800 MEMORY_BASIC_INFORMATION mbi; 00801 CHAR OldProtectionText[ 12 ]; // big enough for "0x12345678" 00802 CHAR NewProtectionText[ 12 ]; // big enough for "0x12345678" 00803 00804 mbi.Protect = 0; // in case ZwQueryVirtualMemory fails 00805 00806 ZwQueryVirtualMemory( 00807 NtCurrentProcess(), 00808 VirtualBase, 00809 MemoryBasicInformation, 00810 &mbi, 00811 sizeof( mbi ), 00812 NULL 00813 ); 00814 00815 DbgPrint( 00816 "Page heap: Failed changing VM at %08X size 0x%X\n" 00817 " from %s to %s (Status %08X)\n", 00818 VirtualBase, 00819 VirtualSize, 00820 RtlpDebugPageHeapProtectionText( mbi.Protect, OldProtectionText ), 00821 RtlpDebugPageHeapProtectionText( NewAccess, NewProtectionText ), 00822 Status 00823 ); 00824 } 00825 00826 return FALSE; 00827 } 00828 00829 INLINE 00830 BOOLEAN 00831 RtlpDebugPageHeapProtectVM( 00832 IN PVOID VirtualBase, 00833 IN SIZE_T VirtualSize, 00834 IN ULONG NewAccess 00835 ) 00836 { 00837 return RtlpDebugPageHeapRobustProtectVM( VirtualBase, VirtualSize, NewAccess, FALSE ); 00838 } 00839 00840 INLINE 00841 PVOID 00842 RtlpDebugPageHeapAllocateVM( 00843 IN SIZE_T nSize 00844 ) 00845 { 00846 NTSTATUS Status; 00847 PVOID pVirtual; 00848 00849 pVirtual = NULL; 00850 00851 Status = ZwAllocateVirtualMemory( NtCurrentProcess(), 00852 &pVirtual, 00853 0, 00854 &nSize, 00855 MEM_COMMIT, 00856 PAGE_NOACCESS ); 00857 00858 return NT_SUCCESS( Status ) ? 
pVirtual : NULL; 00859 } 00860 00861 INLINE 00862 BOOLEAN 00863 RtlpDebugPageHeapReleaseVM( 00864 IN PVOID pVirtual 00865 ) 00866 { 00867 SIZE_T nSize = 0; 00868 00869 return NT_SUCCESS( ZwFreeVirtualMemory( NtCurrentProcess(), 00870 &pVirtual, 00871 &nSize, 00872 MEM_RELEASE )); 00873 } 00874 00875 INLINE 00876 BOOLEAN 00877 RtlpDebugPageHeapCommitVM( 00878 IN PVOID pVirtual, 00879 IN SIZE_T nSize 00880 ) 00881 { 00882 PCHAR pStart, pEnd, pCurrent; 00883 NTSTATUS Status; 00884 SIZE_T CommitSize; 00885 BOOLEAN Failed = FALSE; 00886 00887 pStart = (PCHAR)((ULONG_PTR)pVirtual & ~(PAGE_SIZE - 1)); 00888 pEnd = (PCHAR)(((ULONG_PTR)pVirtual + nSize) & ~(PAGE_SIZE - 1)); 00889 00890 for (pCurrent = pStart; pCurrent < pEnd; pCurrent += PAGE_SIZE) { 00891 00892 CommitSize = PAGE_SIZE; 00893 00894 Status = ZwAllocateVirtualMemory( 00895 NtCurrentProcess(), 00896 &pCurrent, 00897 0, 00898 &CommitSize, 00899 MEM_COMMIT, 00900 PAGE_NOACCESS); 00901 00902 if (! NT_SUCCESS(Status)) { 00903 00904 // 00905 // The call can fail in low memory conditions. In this case we 00906 // try to recover and will probably fail the original allocation. 00907 // 00908 00909 if ((RtlpDphDebugLevel & DPH_DEBUG_DECOMMIT_RANGES)) { 00910 DbgPrint ("Page heap: Commit (%p) failed with %08X\n", pCurrent, Status); 00911 DbgBreakPoint(); 00912 } 00913 00914 Failed = TRUE; 00915 break; 00916 } 00917 } 00918 00919 00920 if (Failed) { 00921 00922 // 00923 // We need to roll back whatever succeeded. 00924 // 00925 00926 for (pCurrent -= PAGE_SIZE; pCurrent >= pStart && pCurrent < pEnd; pCurrent -= PAGE_SIZE) { 00927 00928 CommitSize = PAGE_SIZE; 00929 00930 Status = ZwFreeVirtualMemory( 00931 NtCurrentProcess(), 00932 &pCurrent, 00933 &CommitSize, 00934 MEM_DECOMMIT); 00935 00936 if (! NT_SUCCESS(Status)) { 00937 00938 // 00939 // There is now valid reason known to me for a correct free operation 00940 // failure. So, in this case we make a little bit of fuss about it. 
00941 // 00942 00943 DbgPrint ("Page heap: Decommit (%p) failed with %08X\n", pCurrent, Status); 00944 00945 if ((RtlpDphDebugLevel & DPH_DEBUG_DECOMMIT_RANGES)) { 00946 DbgBreakPoint(); 00947 } 00948 } 00949 } 00950 } 00951 00952 if (Failed) { 00953 return FALSE; 00954 } 00955 else { 00956 return TRUE; 00957 } 00958 } 00959 00960 INLINE 00961 BOOLEAN 00962 RtlpDebugPageHeapDecommitVM( 00963 IN PVOID pVirtual, 00964 IN SIZE_T nSize 00965 ) 00966 { 00967 PCHAR pStart, pEnd, pCurrent; 00968 NTSTATUS Status; 00969 SIZE_T DecommitSize; 00970 BOOLEAN Failed = FALSE; 00971 00972 pStart = (PCHAR)((ULONG_PTR)pVirtual & ~(PAGE_SIZE - 1)); 00973 pEnd = (PCHAR)(((ULONG_PTR)pVirtual + nSize) & ~(PAGE_SIZE - 1)); 00974 00975 for (pCurrent = pStart; pCurrent < pEnd; pCurrent += PAGE_SIZE) { 00976 00977 DecommitSize = PAGE_SIZE; 00978 00979 Status = ZwFreeVirtualMemory( 00980 NtCurrentProcess(), 00981 &pCurrent, 00982 &DecommitSize, 00983 MEM_DECOMMIT); 00984 00985 if (! NT_SUCCESS(Status)) { 00986 00987 // 00988 // There is now valid reason known to me for a correct free operation 00989 // failure. So, in this case we make a little bit of fuss about it. 00990 // 00991 00992 DbgPrint ("Page heap: Decommit (%p) failed with %08X\n", pCurrent, Status); 00993 00994 if ((RtlpDphDebugLevel & DPH_DEBUG_DECOMMIT_RANGES)) { 00995 DbgBreakPoint(); 00996 } 00997 00998 Failed = TRUE; 00999 } 01000 } 01001 01002 if (Failed) { 01003 return FALSE; 01004 } 01005 else { 01006 return TRUE; 01007 } 01008 } 01009 01013 01014 PDPH_HEAP_BLOCK 01015 RtlpDebugPageHeapTakeNodeFromUnusedList( 01016 IN PDPH_HEAP_ROOT pHeap 01017 ) 01018 { 01019 PDPH_HEAP_BLOCK pNode = pHeap->pUnusedNodeListHead; 01020 PDPH_HEAP_BLOCK pPrev = NULL; 01021 01022 // 01023 // UnusedNodeList is LIFO with most recent entry at head of list. 
01024 // 01025 01026 if (pNode) { 01027 01028 DEQUEUE_NODE( pNode, pPrev, pHeap->pUnusedNodeListHead, pHeap->pUnusedNodeListTail ); 01029 01030 pHeap->nUnusedNodes -= 1; 01031 01032 } 01033 01034 return pNode; 01035 } 01036 01037 VOID 01038 RtlpDebugPageHeapReturnNodeToUnusedList( 01039 IN PDPH_HEAP_ROOT pHeap, 01040 IN PDPH_HEAP_BLOCK pNode 01041 ) 01042 { 01043 // 01044 // UnusedNodeList is LIFO with most recent entry at head of list. 01045 // 01046 01047 ENQUEUE_HEAD( pNode, pHeap->pUnusedNodeListHead, pHeap->pUnusedNodeListTail ); 01048 01049 pHeap->nUnusedNodes += 1; 01050 } 01051 01052 PDPH_HEAP_BLOCK 01053 RtlpDebugPageHeapFindBusyMem( 01054 IN PDPH_HEAP_ROOT pHeap, 01055 IN PVOID pUserMem, 01056 OUT PDPH_HEAP_BLOCK *pPrevAlloc 01057 ) 01058 { 01059 PDPH_HEAP_BLOCK pNode = pHeap->pBusyAllocationListHead; 01060 PDPH_HEAP_BLOCK pPrev = NULL; 01061 01062 while (pNode != NULL) { 01063 01064 if (pNode->pUserAllocation == pUserMem) { 01065 01066 if (pPrevAlloc) 01067 *pPrevAlloc = pPrev; 01068 01069 return pNode; 01070 } 01071 01072 pPrev = pNode; 01073 pNode = pNode->pNextAlloc; 01074 } 01075 01076 return NULL; 01077 } 01078 01079 VOID 01080 RtlpDebugPageHeapRemoveFromAvailableList( 01081 IN PDPH_HEAP_ROOT pHeap, 01082 IN PDPH_HEAP_BLOCK pNode, 01083 IN PDPH_HEAP_BLOCK pPrev 01084 ) 01085 { 01086 DEQUEUE_NODE( pNode, pPrev, pHeap->pAvailableAllocationListHead, pHeap->pAvailableAllocationListTail ); 01087 01088 pHeap->nAvailableAllocations -= 1; 01089 pHeap->nAvailableAllocationBytesCommitted -= pNode->nVirtualBlockSize; 01090 } 01091 01092 VOID 01093 RtlpDebugPageHeapPlaceOnFreeList( 01094 IN PDPH_HEAP_ROOT pHeap, 01095 IN PDPH_HEAP_BLOCK pAlloc 01096 ) 01097 { 01098 // 01099 // FreeAllocationList is stored FIFO to enhance finding 01100 // reference-after-freed bugs by keeping previously freed 01101 // allocations on the free list as long as possible. 
01102 // 01103 01104 pAlloc->pNextAlloc = NULL; 01105 01106 ENQUEUE_TAIL( pAlloc, pHeap->pFreeAllocationListHead, pHeap->pFreeAllocationListTail ); 01107 01108 pHeap->nFreeAllocations += 1; 01109 pHeap->nFreeAllocationBytesCommitted += pAlloc->nVirtualBlockSize; 01110 } 01111 01112 VOID 01113 RtlpDebugPageHeapRemoveFromFreeList( 01114 IN PDPH_HEAP_ROOT pHeap, 01115 IN PDPH_HEAP_BLOCK pNode, 01116 IN PDPH_HEAP_BLOCK pPrev 01117 ) 01118 { 01119 DEQUEUE_NODE( pNode, pPrev, pHeap->pFreeAllocationListHead, pHeap->pFreeAllocationListTail ); 01120 01121 pHeap->nFreeAllocations -= 1; 01122 pHeap->nFreeAllocationBytesCommitted -= pNode->nVirtualBlockSize; 01123 01124 pNode->StackTrace = NULL; 01125 } 01126 01127 VOID 01128 RtlpDebugPageHeapPlaceOnVirtualList( 01129 IN PDPH_HEAP_ROOT pHeap, 01130 IN PDPH_HEAP_BLOCK pNode 01131 ) 01132 { 01133 // 01134 // VirtualStorageList is LIFO so that releasing VM blocks will 01135 // occur in exact reverse order. 01136 // 01137 01138 ENQUEUE_HEAD( pNode, pHeap->pVirtualStorageListHead, pHeap->pVirtualStorageListTail ); 01139 01140 pHeap->nVirtualStorageRanges += 1; 01141 pHeap->nVirtualStorageBytes += pNode->nVirtualBlockSize; 01142 } 01143 01144 VOID 01145 RtlpDebugPageHeapPlaceOnBusyList( 01146 IN PDPH_HEAP_ROOT pHeap, 01147 IN PDPH_HEAP_BLOCK pNode 01148 ) 01149 { 01150 // 01151 // BusyAllocationList is LIFO to achieve better temporal locality 01152 // of reference (older allocations are farther down the list). 
    // NOTE(review): the head of this function lies before this chunk; the
    // statements below are its visible tail.  It links pNode into the busy
    // list (ENQUEUE_HEAD macro -- presumably head insertion; defined in the
    // private heap headers, confirm) and bumps the busy-side statistics.
    //

    ENQUEUE_HEAD( pNode, pHeap->pBusyAllocationListHead, pHeap->pBusyAllocationListTail );

    pHeap->nBusyAllocations += 1;
    pHeap->nBusyAllocationBytesCommitted += pNode->nVirtualBlockSize;
    pHeap->nBusyAllocationBytesAccessible += pNode->nVirtualAccessSize;
}

//
// Unlinks pNode from the heap's busy list.  pPrev is pNode's predecessor
// (or NULL when pNode is the list head) so DEQUEUE_NODE can relink without
// rescanning the list.  Reverses exactly the statistics added when the
// node was enqueued.
//

VOID
RtlpDebugPageHeapRemoveFromBusyList(
    IN PDPH_HEAP_ROOT pHeap,
    IN PDPH_HEAP_BLOCK pNode,
    IN PDPH_HEAP_BLOCK pPrev
    )
{
    DEQUEUE_NODE( pNode, pPrev, pHeap->pBusyAllocationListHead, pHeap->pBusyAllocationListTail );

    pHeap->nBusyAllocations -= 1;
    pHeap->nBusyAllocationBytesCommitted -= pNode->nVirtualBlockSize;
    pHeap->nBusyAllocationBytesAccessible -= pNode->nVirtualAccessSize;
}

//
// Scans the available list for the smallest block that can hold nSize
// bytes (best fit).  Returns the winning node, or NULL if none is large
// enough, and writes the winner's predecessor through pPrevAvailNode so
// the caller can unlink it cheaply.  Does not modify the list.
//

PDPH_HEAP_BLOCK
RtlpDebugPageHeapSearchAvailableMemListForBestFit(
    IN PDPH_HEAP_ROOT pHeap,
    IN SIZE_T nSize,
    OUT PDPH_HEAP_BLOCK *pPrevAvailNode
    )
{
    PDPH_HEAP_BLOCK pAvail, pFound, pAvailPrev, pFoundPrev;
    SIZE_T nAvail, nFound;

    //
    // NOTE(review): 0x7FFFFFFF is the "no fit yet" seed.  Requests are
    // capped at 0x7FFF0000 by the allocate path, so the seed is always
    // larger than any legal nSize; on 64-bit SIZE_T this also silently
    // assumes no request exceeds 2GB -- confirm for Win64 builds.
    //

    nFound = 0x7FFFFFFF;
    pFound = NULL;
    pFoundPrev = NULL;
    pAvailPrev = NULL;
    pAvail = pHeap->pAvailableAllocationListHead;

    //
    // Stop early once an exact fit is found (nFound == nSize makes the
    // loop condition false).
    //

    while (( pAvail != NULL ) && ( nFound > nSize )) {

        nAvail = pAvail->nVirtualBlockSize;

        if (( nAvail >= nSize ) && ( nAvail < nFound )) {
            nFound = nAvail;
            pFound = pAvail;
            pFoundPrev = pAvailPrev;
        }

        pAvailPrev = pAvail;
        pAvail = pAvail->pNextAlloc;
    }

    *pPrevAvailNode = pFoundPrev;
    return pFound;
}

//
// Inserts pNode into the available list, which is kept sorted by virtual
// address, merging it with the preceding and/or following entry when the
// virtual ranges are exactly adjacent.  Merged-away tracking nodes are
// returned to the unused-node pool.  Also maintains the available-list
// statistics (bytes first, count adjusted down again on each merge).
//

VOID
RtlpDebugPageHeapCoalesceNodeIntoAvailable(
    IN PDPH_HEAP_ROOT pHeap,
    IN PDPH_HEAP_BLOCK pNode
    )
{
    PDPH_HEAP_BLOCK pPrev;
    PDPH_HEAP_BLOCK pNext;
    PUCHAR pVirtual;
    SIZE_T nVirtual;

    pPrev = NULL;
    pNext = pHeap->pAvailableAllocationListHead;

    pVirtual = pNode->pVirtualBlock;
    nVirtual = pNode->nVirtualBlockSize;

    pHeap->nAvailableAllocationBytesCommitted += nVirtual;
    pHeap->nAvailableAllocations += 1;

    //
    // Walk list to insertion point.
    //

    while (( pNext ) && ( pNext->pVirtualBlock < pVirtual )) {
        pPrev = pNext;
        pNext = pNext->pNextAlloc;
    }

    if (pPrev) {

        if (( pPrev->pVirtualBlock + pPrev->nVirtualBlockSize ) == pVirtual) {

            //
            // pPrev and pNode are adjacent, so simply add size of
            // pNode entry to pPrev entry.
            //

            pPrev->nVirtualBlockSize += nVirtual;

            RtlpDebugPageHeapReturnNodeToUnusedList( pHeap, pNode );

            pHeap->nAvailableAllocations--;

            // Continue below with the merged entry, so a possible second
            // merge with pNext also works.
            pNode = pPrev;
            pVirtual = pPrev->pVirtualBlock;
            nVirtual = pPrev->nVirtualBlockSize;

        }

        else {

            //
            // pPrev and pNode are not adjacent, so insert the pNode
            // block into the list after pPrev.
            //

            pNode->pNextAlloc = pPrev->pNextAlloc;
            pPrev->pNextAlloc = pNode;

        }
    }

    else {

        //
        // pNode should be inserted at head of list.
        //

        pNode->pNextAlloc = pHeap->pAvailableAllocationListHead;
        pHeap->pAvailableAllocationListHead = pNode;

    }


    if (pNext) {

        if (( pVirtual + nVirtual ) == pNext->pVirtualBlock) {

            //
            // pNode and pNext are adjacent, so simply add size of
            // pNext entry to pNode entry and remove pNext entry
            // from the list.
            //

            pNode->nVirtualBlockSize += pNext->nVirtualBlockSize;

            pNode->pNextAlloc = pNext->pNextAlloc;

            if (pHeap->pAvailableAllocationListTail == pNext) {
                pHeap->pAvailableAllocationListTail = pNode;
            }

            RtlpDebugPageHeapReturnNodeToUnusedList( pHeap, pNext );

            pHeap->nAvailableAllocations--;

        }
    }

    else {

        //
        // pNode is tail of list.
        //

        pHeap->pAvailableAllocationListTail = pNode;

    }
}

//
// Moves entries from the head of the free list onto the (coalesced)
// available list until only nLeaveOnFreeList entries remain.  The entries
// left behind are the most recently freed blocks, preserved to catch
// reference-after-free bugs.
//

VOID
RtlpDebugPageHeapCoalesceFreeIntoAvailable(
    IN PDPH_HEAP_ROOT pHeap,
    IN ULONG nLeaveOnFreeList
    )
{
    PDPH_HEAP_BLOCK pNode = pHeap->pFreeAllocationListHead;
    SIZE_T nFree = pHeap->nFreeAllocations;
    PDPH_HEAP_BLOCK pNext;

    DEBUG_ASSERT( nFree >= nLeaveOnFreeList );

    while (( pNode ) && ( nFree-- > nLeaveOnFreeList )) {

        pNext = pNode->pNextAlloc; // preserve next pointer across shuffling

        RtlpDebugPageHeapRemoveFromFreeList( pHeap, pNode, NULL );

        RtlpDebugPageHeapCoalesceNodeIntoAvailable( pHeap, pNode );

        pNode = pNext;

    }

    // NOTE(review): the assignment to nFree lives inside DEBUG_ASSERT and
    // so disappears in free builds; nFree is not used afterwards, so that
    // is harmless, but do not add later code relying on it.
    DEBUG_ASSERT(( nFree = (volatile SIZE_T)( pHeap->nFreeAllocations )) >= nLeaveOnFreeList );
    DEBUG_ASSERT(( pNode != NULL ) || ( nFree == 0 ));

}

// forward
BOOLEAN
RtlpDebugPageHeapGrowVirtual(
    IN PDPH_HEAP_ROOT pHeap,
    IN SIZE_T nSize
    );

//
// Finds (or creates) an available-list block of at least nSize bytes.
// Escalation order: best-fit on the available list; coalesce 3/4 of the
// free list and retry; grow virtual memory (if bGrowVirtual) and retry;
// finally coalesce the entire free list.  Under the
// PAGE_HEAP_SMART_MEMORY_USAGE policy the returned range is committed
// here (protection is applied later by the caller); on commit failure
// the block goes back to the free list and NULL is returned.
//

PDPH_HEAP_BLOCK
RtlpDebugPageHeapFindAvailableMem(
    IN PDPH_HEAP_ROOT pHeap,
    IN SIZE_T nSize,
    OUT PDPH_HEAP_BLOCK *pPrevAvailNode,
    IN BOOLEAN bGrowVirtual
    )
{
    PDPH_HEAP_BLOCK pAvail;
    ULONG nLeaveOnFreeList;

    //
    // If we use uncommitted ranges it is really important to
    // call FindAvailableMemory only with page aligned sizes.
    //

    if ((pHeap->ExtraFlags & PAGE_HEAP_SMART_MEMORY_USAGE)) {
        DEBUG_ASSERT ((nSize & ~(PAGE_SIZE - 1)) == nSize);
    }

    //
    // First search existing AvailableList for a "best-fit" block
    // (the smallest block that will satisfy the request).
    //

    pAvail = RtlpDebugPageHeapSearchAvailableMemListForBestFit(
                 pHeap,
                 nSize,
                 pPrevAvailNode
                 );

    while (( pAvail == NULL ) && ( pHeap->nFreeAllocations > MIN_FREE_LIST_LENGTH )) {

        //
        // Failed to find sufficient memory on AvailableList.  Coalesce
        // 3/4 of the FreeList memory to the AvailableList and try again.
        // Continue this until we have sufficient memory in AvailableList,
        // or the FreeList length is reduced to MIN_FREE_LIST_LENGTH entries.
        // We don't shrink the FreeList length below MIN_FREE_LIST_LENGTH
        // entries to preserve the most recent MIN_FREE_LIST_LENGTH entries
        // for reference-after-freed purposes.
        //

        nLeaveOnFreeList = pHeap->nFreeAllocations / 4;

        if (nLeaveOnFreeList < MIN_FREE_LIST_LENGTH)
            nLeaveOnFreeList = MIN_FREE_LIST_LENGTH;

        RtlpDebugPageHeapCoalesceFreeIntoAvailable( pHeap, nLeaveOnFreeList );

        pAvail = RtlpDebugPageHeapSearchAvailableMemListForBestFit(
                     pHeap,
                     nSize,
                     pPrevAvailNode
                     );

    }


    if (( pAvail == NULL ) && ( bGrowVirtual )) {

        //
        // After coalescing FreeList into AvailableList, still don't have
        // enough memory (large enough block) to satisfy request, so we
        // need to allocate more VM.
        //

        if (RtlpDebugPageHeapGrowVirtual( pHeap, nSize )) {

            pAvail = RtlpDebugPageHeapSearchAvailableMemListForBestFit(
                         pHeap,
                         nSize,
                         pPrevAvailNode
                         );

            if (pAvail == NULL) {

                //
                // Failed to satisfy request with more VM.  If remainder
                // of free list combined with available list is larger
                // than the request, we might still be able to satisfy
                // the request by merging all of the free list onto the
                // available list.  Note we lose our MIN_FREE_LIST_LENGTH
                // reference-after-freed insurance in this case, but it
                // is a rare case, and we'd prefer to satisfy the allocation.
                //

                if (( pHeap->nFreeAllocationBytesCommitted +
                      pHeap->nAvailableAllocationBytesCommitted ) >= nSize) {

                    RtlpDebugPageHeapCoalesceFreeIntoAvailable( pHeap, 0 );

                    pAvail = RtlpDebugPageHeapSearchAvailableMemListForBestFit(
                                 pHeap,
                                 nSize,
                                 pPrevAvailNode
                                 );
                }
            }
        }
    }

    //
    // If we use uncommitted ranges we need to commit the memory
    // range now.  Note that the memory will be committed but
    // the protection on it will be N/A.
    //

    if (pAvail && (pHeap->ExtraFlags & PAGE_HEAP_SMART_MEMORY_USAGE)) {

        BOOLEAN Success;

        //
        // (SilviuC): The memory here might be already committed if we use
        // it for the first time.  Whenever we allocate virtual memory to grow
        // the heap we commit it.  This is the reason the consumption does not
        // decrease as spectacular as we expected.  We need to fix it.
        // It affects 0x43 flags.
        //

        Success = RtlpDebugPageHeapCommitVM (pAvail->pVirtualBlock, nSize);

        if (!Success) {

            //
            // We did not manage to commit memory for this block.  This
            // can happen in low memory conditions.  We need to return
            // the node back into free pool (together with virtual space
            // region taken) and fail the current call.
            //

            RtlpDebugPageHeapPlaceOnFreeList( pHeap, pAvail );

            return NULL;
        }
    }

    return pAvail;
}

//
// Appends pNode to the tail of the node-pool list (the list of memory
// regions that back the heap's tracking-node pools) and updates the
// pool statistics.
//

VOID
RtlpDebugPageHeapPlaceOnPoolList(
    IN PDPH_HEAP_ROOT pHeap,
    IN PDPH_HEAP_BLOCK pNode
    )
{

    //
    // NodePoolList is FIFO.
    //

    pNode->pNextAlloc = NULL;

    ENQUEUE_TAIL( pNode, pHeap->pNodePoolListHead, pHeap->pNodePoolListTail );

    pHeap->nNodePoolBytes += pNode->nVirtualBlockSize;
    pHeap->nNodePools += 1;

}

//
// Carves the committed region [pVirtual, pVirtual+nSize) into an array of
// DPH_HEAP_BLOCK tracking nodes, chains them together, and appends the
// chain to the unused-node list.  When bAddToPoolList is TRUE, one of the
// fresh nodes is consumed to record the region itself on the pool list.
//
// NOTE(review): if nSize < sizeof(DPH_HEAP_BLOCK), nCount is 0 and the
// ULONG expression (nCount - 1) wraps to a huge value, overrunning the
// region.  All visible callers pass at least a page, but confirm no
// caller can pass a smaller size.
//

VOID
RtlpDebugPageHeapAddNewPool(
    IN PDPH_HEAP_ROOT pHeap,
    IN PVOID pVirtual,
    IN SIZE_T nSize,
    IN BOOLEAN bAddToPoolList
    )
{
    PDPH_HEAP_BLOCK pNode, pFirst;
    ULONG n, nCount;

    //
    // Assume pVirtual points to committed block of nSize bytes.
    //

    pFirst = pVirtual;
    nCount = (ULONG)(nSize / sizeof( DPH_HEAP_BLOCK ));

    // Link nodes 0..nCount-2 each to its successor; the loop exits with
    // pNode addressing the final node, which is terminated below.
    for (n = nCount - 1, pNode = pFirst; n > 0; pNode++, n--)
        pNode->pNextAlloc = pNode + 1;

    pNode->pNextAlloc = NULL;

    //
    // Now link this list into the tail of the UnusedNodeList
    //

    ENQUEUE_TAIL( pFirst, pHeap->pUnusedNodeListHead, pHeap->pUnusedNodeListTail );

    pHeap->pUnusedNodeListTail = pNode;

    pHeap->nUnusedNodes += nCount;

    if (bAddToPoolList) {

        //
        // Now add an entry on the PoolList by taking a node from the
        // UnusedNodeList, which should be guaranteed to be non-empty
        // since we just added new nodes to it.
        //

        pNode = RtlpDebugPageHeapTakeNodeFromUnusedList( pHeap );

        DEBUG_ASSERT( pNode != NULL );

        pNode->pVirtualBlock = pVirtual;
        pNode->nVirtualBlockSize = nSize;

        RtlpDebugPageHeapPlaceOnPoolList( pHeap, pNode );

    }
}

//
// Returns one tracking node from the unused-node list, replenishing the
// pool first if it is empty.  Replenishment tries, in order: a POOL_SIZE
// block from the available list, a single PAGE_SIZE block, and finally
// fresh VM (RESERVE_SIZE, falling back to one VM_UNIT_SIZE unit).  Memory
// recycled from the available list is zeroed before reuse; fresh VM is
// assumed zero.  Returns NULL only if no memory can be found at all.
// The InsideAllocateNode flag is a checked-build-only re-entrancy guard
// (DEBUG_ASSERT / DEBUG_CODE).
//

PDPH_HEAP_BLOCK
RtlpDebugPageHeapAllocateNode(
    IN PDPH_HEAP_ROOT pHeap
    )
{
    PDPH_HEAP_BLOCK pNode, pPrev, pReturn;
    PUCHAR pVirtual;
    SIZE_T nVirtual;
    SIZE_T nRequest;

    DEBUG_ASSERT( ! pHeap->InsideAllocateNode );
    DEBUG_CODE( pHeap->InsideAllocateNode = TRUE );

    pReturn = NULL;

    if (pHeap->pUnusedNodeListHead == NULL) {

        //
        // We're out of nodes -- allocate new node pool
        // from AvailableList.  Set bGrowVirtual to FALSE
        // since growing virtual will require new nodes, causing
        // recursion.  Note that simply calling FindAvailableMem
        // might return some nodes to the pUnusedNodeList, even if
        // the call fails, so we'll check that the UnusedNodeList
        // is still empty before we try to use or allocate more
        // memory.
        //

        nRequest = POOL_SIZE;

        pNode = RtlpDebugPageHeapFindAvailableMem(
                    pHeap,
                    nRequest,
                    &pPrev,
                    FALSE
                    );

        if (( pHeap->pUnusedNodeListHead == NULL ) && ( pNode == NULL )) {

            //
            // Reduce request size to PAGE_SIZE and see if
            // we can find at least a page on the available
            // list.
            //

            nRequest = PAGE_SIZE;

            pNode = RtlpDebugPageHeapFindAvailableMem(
                        pHeap,
                        nRequest,
                        &pPrev,
                        FALSE
                        );

        }

        if (pHeap->pUnusedNodeListHead == NULL) {

            if (pNode == NULL) {

                //
                // Insufficient memory on Available list.  Try allocating a
                // new virtual block.
                //

                nRequest = POOL_SIZE;
                nVirtual = RESERVE_SIZE;
                pVirtual = RtlpDebugPageHeapAllocateVM( nVirtual );

                if (pVirtual == NULL) {

                    //
                    // Unable to allocate full RESERVE_SIZE block,
                    // so reduce request to single VM unit (64K)
                    // and try again.
                    //

                    nVirtual = VM_UNIT_SIZE;
                    pVirtual = RtlpDebugPageHeapAllocateVM( nVirtual );

                    if (pVirtual == NULL) {

                        //
                        // Can't allocate any VM.
                        //

                        goto EXIT;
                    }
                }
            }

            else {

                RtlpDebugPageHeapRemoveFromAvailableList( pHeap, pNode, pPrev );

                pVirtual = pNode->pVirtualBlock;
                nVirtual = pNode->nVirtualBlockSize;

            }

            //
            // We now have allocated VM referenced by pVirtual,nVirtual.
            // Make nRequest portion of VM accessible for new node pool.
            //

            if (! RtlpDebugPageHeapProtectVM( pVirtual, nRequest, PAGE_READWRITE )) {

                // Protection failed: give the memory back to wherever it
                // came from (fresh VM is released; recycled memory is
                // coalesced back onto the available list).
                if (pNode == NULL) {
                    RtlpDebugPageHeapReleaseVM( pVirtual );
                }
                else {
                    RtlpDebugPageHeapCoalesceNodeIntoAvailable( pHeap, pNode );
                }

                goto EXIT;
            }

            //
            // Now we have accessible memory for new pool.  Add the
            // new memory to the pool.  If the new memory came from
            // AvailableList versus fresh VM, zero the memory first.
            //

            if (pNode != NULL) {
                RtlZeroMemory( pVirtual, nRequest );
            }

            RtlpDebugPageHeapAddNewPool( pHeap, pVirtual, nRequest, TRUE );

            //
            // If any memory remaining, put it on available list.
            //

            if (pNode == NULL) {

                //
                // Memory came from new VM -- add appropriate list entries
                // for new VM and add remainder of VM to free list.
                //

                pNode = RtlpDebugPageHeapTakeNodeFromUnusedList( pHeap );
                DEBUG_ASSERT( pNode != NULL );
                pNode->pVirtualBlock = pVirtual;
                pNode->nVirtualBlockSize = nVirtual;
                RtlpDebugPageHeapPlaceOnVirtualList( pHeap, pNode );

                pNode = RtlpDebugPageHeapTakeNodeFromUnusedList( pHeap );
                DEBUG_ASSERT( pNode != NULL );
                pNode->pVirtualBlock = pVirtual + nRequest;
                pNode->nVirtualBlockSize = nVirtual - nRequest;

                RtlpDebugPageHeapCoalesceNodeIntoAvailable( pHeap, pNode );

            }

            else {

                if (pNode->nVirtualBlockSize > nRequest) {

                    // Trim the consumed prefix off the recycled block and
                    // return the remainder to the available list.
                    pNode->pVirtualBlock += nRequest;
                    pNode->nVirtualBlockSize -= nRequest;

                    RtlpDebugPageHeapCoalesceNodeIntoAvailable( pHeap, pNode );
                }

                else {

                    //
                    // Used up entire available block -- return node to
                    // unused list.
                    //

                    RtlpDebugPageHeapReturnNodeToUnusedList( pHeap, pNode );

                }
            }
        }
    }

    pReturn = RtlpDebugPageHeapTakeNodeFromUnusedList( pHeap );
    DEBUG_ASSERT( pReturn != NULL );

EXIT:

    DEBUG_CODE( pHeap->InsideAllocateNode = FALSE );
    return pReturn;
}

//
// Reserves a new virtual region of at least nSize bytes (rounded up to
// VM_UNIT_SIZE, preferring RESERVE_SIZE) and publishes it: one tracking
// node records the region on the virtual-storage list, a second places
// the whole range on the available list.  Both nodes are acquired up
// front so failure at any step can fully unwind.  Returns TRUE on
// success, FALSE if nodes or VM cannot be obtained.
//

BOOLEAN
RtlpDebugPageHeapGrowVirtual(
    IN PDPH_HEAP_ROOT pHeap,
    IN SIZE_T nSize
    )
{
    PDPH_HEAP_BLOCK pVirtualNode;
    PDPH_HEAP_BLOCK pAvailNode;
    PVOID pVirtual;
    SIZE_T nVirtual;

    pVirtualNode = RtlpDebugPageHeapAllocateNode( pHeap );

    if (pVirtualNode == NULL) {
        return FALSE;
    }

    pAvailNode = RtlpDebugPageHeapAllocateNode( pHeap );

    if (pAvailNode == NULL) {
        RtlpDebugPageHeapReturnNodeToUnusedList( pHeap, pVirtualNode );
        return FALSE;
    }

    nSize = ROUNDUP2( nSize, VM_UNIT_SIZE );
    nVirtual = ( nSize > RESERVE_SIZE ) ? nSize : RESERVE_SIZE;
    pVirtual = RtlpDebugPageHeapAllocateVM( nVirtual );

    // If the generous RESERVE_SIZE reservation failed, retry with just
    // the rounded request size.
    if (( pVirtual == NULL ) && ( nSize < RESERVE_SIZE )) {
        nVirtual = nSize;
        pVirtual = RtlpDebugPageHeapAllocateVM( nVirtual );
    }

    if (pVirtual == NULL) {
        RtlpDebugPageHeapReturnNodeToUnusedList( pHeap, pVirtualNode );
        RtlpDebugPageHeapReturnNodeToUnusedList( pHeap, pAvailNode );
        return FALSE;
    }

    pVirtualNode->pVirtualBlock = pVirtual;
    pVirtualNode->nVirtualBlockSize = nVirtual;
    RtlpDebugPageHeapPlaceOnVirtualList( pHeap, pVirtualNode );

    pAvailNode->pVirtualBlock = pVirtual;
    pAvailNode->nVirtualBlockSize = nVirtual;
    RtlpDebugPageHeapCoalesceNodeIntoAvailable( pHeap, pAvailNode );

    return TRUE;
}

//
// Drops one reference on the heap's unprotection count; when it reaches
// zero, re-protects every node-pool region as read-only so stray writes
// to the page heap's own bookkeeping fault immediately.  Pairs with
// RtlpDebugPageHeapUnProtectStructures.
//

VOID
RtlpDebugPageHeapProtectStructures(
    IN PDPH_HEAP_ROOT pHeap
    )
{
    PDPH_HEAP_BLOCK pNode;

    //
    // Assume CritSect is owned so we're the only thread twiddling
    // the protection.
    //

    DEBUG_ASSERT( pHeap->HeapFlags & HEAP_PROTECTION_ENABLED );

    if (--pHeap->nUnProtectionReferenceCount == 0) {

        pNode = pHeap->pNodePoolListHead;

        while (pNode != NULL) {

            RtlpDebugPageHeapProtectVM( pNode->pVirtualBlock,
                                        pNode->nVirtualBlockSize,
                                        PAGE_READONLY );

            pNode = pNode->pNextAlloc;

        }
    }
}

//
// Counterpart of RtlpDebugPageHeapProtectStructures: on the 0 -> 1
// transition of the unprotection count, makes every node-pool region
// writable again so the heap's internal structures can be updated.
// Caller is expected to hold the heap critical section.
//

VOID
RtlpDebugPageHeapUnProtectStructures(
    IN PDPH_HEAP_ROOT pHeap
    )
{
    PDPH_HEAP_BLOCK pNode;

    DEBUG_ASSERT( pHeap->HeapFlags & HEAP_PROTECTION_ENABLED );

    if (pHeap->nUnProtectionReferenceCount == 0) {

        pNode = pHeap->pNodePoolListHead;

        while (pNode != NULL) {

            RtlpDebugPageHeapProtectVM( pNode->pVirtualBlock,
                                        pNode->nVirtualBlockSize,
                                        PAGE_READWRITE );

            pNode = pNode->pNextAlloc;

        }
    }

    pHeap->nUnProtectionReferenceCount += 1;
}

#if INTERNAL_DEBUG

//
// Checked-build validation of one singly linked block list: detects
// cycles (pTest advances two links per iteration -- a tortoise/hare
// check), a stale tail pointer, and mismatched length / byte totals.
// Passing 0xFFFFFFFF for nExpectedLength or nExpectedVirtual skips that
// particular check (note the sentinel is compared against SIZE_T
// parameters; fine on 32-bit, review for Win64).
//

VOID
RtlpDebugPageHeapVerifyList(
    IN PDPH_HEAP_BLOCK pListHead,
    IN PDPH_HEAP_BLOCK pListTail,
    IN SIZE_T nExpectedLength,
    IN SIZE_T nExpectedVirtual,
    IN PCCH pListName
    )
{
    PDPH_HEAP_BLOCK pPrev = NULL;
    PDPH_HEAP_BLOCK pNode = pListHead;
    PDPH_HEAP_BLOCK pTest = pListHead ? pListHead->pNextAlloc : NULL;
    ULONG nNode = 0;
    SIZE_T nSize = 0;

    while (pNode) {

        if (pNode == pTest) {
            DbgPrint( "Page heap: Internal %s list is circular\n", pListName );
            RtlpDebugPageHeapBreak( "" );
            return;
        }

        nNode += 1;
        nSize += pNode->nVirtualBlockSize;

        if (pTest) {
            pTest = pTest->pNextAlloc;
            if (pTest) {
                pTest = pTest->pNextAlloc;
            }
        }

        pPrev = pNode;
        pNode = pNode->pNextAlloc;

    }

    if (pPrev != pListTail) {
        DbgPrint( "Page heap: Internal %s list has incorrect tail pointer\n", pListName );
        RtlpDebugPageHeapBreak( "" );
    }

    if (( nExpectedLength != 0xFFFFFFFF ) && ( nExpectedLength != nNode )) {
        DbgPrint( "Page heap: Internal %s list has incorrect length\n", pListName );
        RtlpDebugPageHeapBreak( "" );
    }

    if (( nExpectedVirtual != 0xFFFFFFFF ) && ( nExpectedVirtual != nSize )) {
        DbgPrint( "Page heap: Internal %s list has incorrect virtual size\n", pListName );
        RtlpDebugPageHeapBreak( "" );
    }

}

//
// Runs RtlpDebugPageHeapVerifyList over all six internal lists of the
// heap, checking each against its recorded count / byte statistics
// (0xFFFFFFFF where no byte total is tracked, e.g. the unused-node list).
//

VOID
RtlpDebugPageHeapVerifyIntegrity(
    IN PDPH_HEAP_ROOT pHeap
    )
{

    RtlpDebugPageHeapVerifyList(
        pHeap->pVirtualStorageListHead,
        pHeap->pVirtualStorageListTail,
        pHeap->nVirtualStorageRanges,
        pHeap->nVirtualStorageBytes,
        "VIRTUAL"
        );

    RtlpDebugPageHeapVerifyList(
        pHeap->pBusyAllocationListHead,
        pHeap->pBusyAllocationListTail,
        pHeap->nBusyAllocations,
        pHeap->nBusyAllocationBytesCommitted,
        "BUSY"
        );

    RtlpDebugPageHeapVerifyList(
        pHeap->pFreeAllocationListHead,
        pHeap->pFreeAllocationListTail,
        pHeap->nFreeAllocations,
        pHeap->nFreeAllocationBytesCommitted,
        "FREE"
        );

    RtlpDebugPageHeapVerifyList(
        pHeap->pAvailableAllocationListHead,
        pHeap->pAvailableAllocationListTail,
        pHeap->nAvailableAllocations,
        pHeap->nAvailableAllocationBytesCommitted,
        "AVAILABLE"
        );

    RtlpDebugPageHeapVerifyList(
        pHeap->pUnusedNodeListHead,
        pHeap->pUnusedNodeListTail,
        pHeap->nUnusedNodes,
        0xFFFFFFFF,
        "FREENODE"
        );

    RtlpDebugPageHeapVerifyList(
        pHeap->pNodePoolListHead,
        pHeap->pNodePoolListTail,
        pHeap->nNodePools,
        pHeap->nNodePoolBytes,
        "NODEPOOL"
        );
}

#endif // #if INTERNAL_DEBUG

//
// Here's where the exported interface functions are defined.
//

//silviuc: i think this pragma works only for the next function defined
#if (( DPH_CAPTURE_STACK_TRACE ) && ( i386 ) && ( FPO ))
#pragma optimize( "y", off ) // disable FPO for consistent stack traces
#endif

//
// Page-heap counterpart of RtlCreateHeap.  Reserves the heap's initial
// virtual region, lays out the fake retail HEAP page, the DPH_HEAP_ROOT,
// the initial node pool and the heap critical section, creates the
// companion normal heap (Parameters == -1 breaks the recursion), and
// links the new heap onto the process-wide page-heap list.  Returns the
// heap handle (the fake HEAP page) or NULL on failure.
//

PVOID
RtlpDebugPageHeapCreate(
    IN ULONG Flags,
    IN PVOID HeapBase OPTIONAL,
    IN SIZE_T ReserveSize OPTIONAL,
    IN SIZE_T CommitSize OPTIONAL,
    IN PVOID Lock OPTIONAL,
    IN PRTL_HEAP_PARAMETERS Parameters OPTIONAL
    )
{
    SYSTEM_BASIC_INFORMATION SystemInfo;
    PDPH_HEAP_BLOCK Node;
    PDPH_HEAP_ROOT HeapRoot;
    PVOID HeapHandle;
    PUCHAR pVirtual;
    SIZE_T nVirtual;
    SIZE_T Size;
    NTSTATUS Status;

    //
    // If `Parameters' is -1 then this is a recursive call to
    // RtlpDebugPageHeapCreate and we will return NULL so that
    // the normal heap manager will create a normal heap.
    // I agree this is a hack but we need this so that we maintain
    // a very loose dependency between the normal and page heap
    // manager.
    //

    if ((SIZE_T)Parameters == (SIZE_T)-1) {
        return NULL;
    }

    //
    // We don't handle heaps where HeapBase is already allocated
    // from user or where Lock is provided by user.
    //

    DEBUG_ASSERT( HeapBase == NULL );
    DEBUG_ASSERT( Lock == NULL );

    if (( HeapBase != NULL ) || ( Lock != NULL ))
        return NULL;

    //
    // Note that we simply ignore ReserveSize, CommitSize, and
    // Parameters as we always have a growable heap with our
    // own thresholds, etc.
    //
    // NOTE(review): the return status of ZwQuerySystemInformation is not
    // checked (the local `Status' is unused here); the asserts below rely
    // on SystemInfo having been filled in -- confirm this is intentional.
    //

    ZwQuerySystemInformation( SystemBasicInformation,
                              &SystemInfo,
                              sizeof( SystemInfo ),
                              NULL );

    RETAIL_ASSERT( SystemInfo.PageSize == PAGE_SIZE );
    RETAIL_ASSERT( SystemInfo.AllocationGranularity == VM_UNIT_SIZE );
    DEBUG_ASSERT(( PAGE_SIZE + POOL_SIZE + PAGE_SIZE ) < VM_UNIT_SIZE );

    nVirtual = RESERVE_SIZE;
    pVirtual = RtlpDebugPageHeapAllocateVM( nVirtual );

    if (pVirtual == NULL) {

        nVirtual = VM_UNIT_SIZE;
        pVirtual = RtlpDebugPageHeapAllocateVM( nVirtual );

        if (pVirtual == NULL) {
            OUT_OF_VM_BREAK( Flags, "Page heap: Insufficient memory to create heap\n" );
            IF_GENERATE_EXCEPTION( Flags, STATUS_NO_MEMORY );
            return NULL;
        }
    }

    if (! RtlpDebugPageHeapProtectVM( pVirtual, PAGE_SIZE + POOL_SIZE + PAGE_SIZE, PAGE_READWRITE )) {
        RtlpDebugPageHeapReleaseVM( pVirtual );
        IF_GENERATE_EXCEPTION( Flags, STATUS_NO_MEMORY );
        return NULL;
    }

    //
    // Out of our initial allocation, the initial page is the fake
    // retail HEAP structure.  The second page begins our DPH_HEAP
    // structure followed by (POOL_SIZE-sizeof(DPH_HEAP)) bytes for
    // the initial pool.  The next page contains out CRIT_SECT
    // variable, which must always be READWRITE.  Beyond that, the
    // remainder of the virtual allocation is placed on the available
    // list.
    //
    // |_____|___________________|_____|__ _ _ _ _ _ _ _ _ _ _ _ _ __|
    //
    // ^pVirtual
    //
    // ^FakeRetailHEAP
    //
    //       ^HeapRoot
    //
    //            ^InitialNodePool
    //
    //                           ^CRITICAL_SECTION
    //
    //                                 ^AvailableSpace
    //
    //
    //
    // Our DPH_HEAP structure starts at the page following the
    // fake retail HEAP structure pointed to by the "heap handle".
    // For the fake HEAP structure, we'll fill it with 0xEEEEEEEE
    // except for the Heap->Flags and Heap->ForceFlags fields,
    // which we must set to include our HEAP_FLAG_PAGE_ALLOCS flag,
    // and then we'll make the whole page read-only.
    //

    RtlFillMemory( pVirtual, PAGE_SIZE, FILL_BYTE );

    ((PHEAP)pVirtual)->Flags = Flags | HEAP_FLAG_PAGE_ALLOCS;
    ((PHEAP)pVirtual)->ForceFlags = Flags | HEAP_FLAG_PAGE_ALLOCS;

    if (! RtlpDebugPageHeapProtectVM( pVirtual, PAGE_SIZE, PAGE_READONLY )) {
        RtlpDebugPageHeapReleaseVM( pVirtual );
        IF_GENERATE_EXCEPTION( Flags, STATUS_NO_MEMORY );
        return NULL;
    }

    HeapRoot = (PDPH_HEAP_ROOT)( pVirtual + PAGE_SIZE );

    HeapRoot->Signature = DPH_HEAP_SIGNATURE;
    HeapRoot->HeapFlags = Flags;
    HeapRoot->HeapCritSect = (PVOID)((PCHAR)HeapRoot + POOL_SIZE );

    //
    // Copy the page heap global flags into per heap flags.
    //

    HeapRoot->ExtraFlags = RtlpDphGlobalFlags;

    //
    // If the PAGE_HEAP_UNALIGNED_ALLOCATIONS bit is set
    // in ExtraFlags we will set the HEAP_NO_ALIGNMENT flag
    // in the HeapFlags.  This last bit controls if allocations
    // will be aligned or not.  The reason we do this transfer is
    // that ExtraFlags can be set from the registry whereas the
    // normal HeapFlags cannot.
    //

    if ((HeapRoot->ExtraFlags & PAGE_HEAP_UNALIGNED_ALLOCATIONS)) {
        HeapRoot->HeapFlags |= HEAP_NO_ALIGNMENT;
    }

    //
    // Initialize the seed for the random generator used to decide
    // from where should we make allocations if random decision
    // flag is on.
    //

    {
        LARGE_INTEGER PerformanceCounter;

        // Pre-seed in case the query leaves the value untouched.
        PerformanceCounter.LowPart = 0xABCDDCBA;

        NtQueryPerformanceCounter (
            &PerformanceCounter,
            NULL);

        HeapRoot->Seed = PerformanceCounter.LowPart;
    }

    RtlZeroMemory (HeapRoot->Counter, sizeof(HeapRoot->Counter));

    //
    // Create the normal heap associated with the page heap.
    // The last parameter value (-1) is very important because
    // it stops the recursive call into page heap create.
    //

    HeapRoot->NormalHeap = RtlCreateHeap(

        Flags,
        HeapBase,
        ReserveSize,
        CommitSize,
        Lock,
        (PRTL_HEAP_PARAMETERS)-1 );

    //
    // Initialize heap lock.
    //

    RtlInitializeCriticalSection( HeapRoot->HeapCritSect );

    //
    // On the page that contains our DPH_HEAP structure, use
    // the remaining memory beyond the DPH_HEAP structure as
    // pool for allocating heap nodes.
    //

    RtlpDebugPageHeapAddNewPool( HeapRoot,
                                 HeapRoot + 1,
                                 POOL_SIZE - sizeof( DPH_HEAP_ROOT ),
                                 FALSE
                                 );

    //
    // Make initial PoolList entry by taking a node from the
    // UnusedNodeList, which should be guaranteed to be non-empty
    // since we just added new nodes to it.
    //

    Node = RtlpDebugPageHeapAllocateNode( HeapRoot );
    DEBUG_ASSERT( Node != NULL );
    Node->pVirtualBlock = (PVOID)HeapRoot;
    Node->nVirtualBlockSize = POOL_SIZE;
    RtlpDebugPageHeapPlaceOnPoolList( HeapRoot, Node );

    //
    // Make VirtualStorageList entry for initial VM allocation
    //

    Node = RtlpDebugPageHeapAllocateNode( HeapRoot );
    DEBUG_ASSERT( Node != NULL );
    Node->pVirtualBlock = pVirtual;
    Node->nVirtualBlockSize = nVirtual;
    RtlpDebugPageHeapPlaceOnVirtualList( HeapRoot, Node );

    //
    // Make AvailableList entry containing remainder of initial VM
    // and add to (create) the AvailableList.
    //

    Node = RtlpDebugPageHeapAllocateNode( HeapRoot );
    DEBUG_ASSERT( Node != NULL );
    Node->pVirtualBlock = pVirtual + ( PAGE_SIZE + POOL_SIZE + PAGE_SIZE );
    Node->nVirtualBlockSize = nVirtual - ( PAGE_SIZE + POOL_SIZE + PAGE_SIZE );
    RtlpDebugPageHeapCoalesceNodeIntoAvailable( HeapRoot, Node );

    //
    // Get heap creation stack trace.
    //

    HeapRoot->CreateStackTrace = RtlpDphLogStackTrace(1);

    //
    // Initialize heap internal structure protection.
    //

    HeapRoot->nUnProtectionReferenceCount = 1; // initialize

    //
    // If this is the first heap creation in this process, then we
    // need to initialize the process heap list critical section,
    // the global delayed free queue for normal blocks and the
    // trace database.
    //

    if (! RtlpDphHeapListHasBeenInitialized) {

        RtlpDphHeapListHasBeenInitialized = TRUE;

        RtlInitializeCriticalSection( &RtlpDphHeapListCriticalSection );
        RtlpDphInitializeDelayedFreeQueue ();

        //
        // Do not make fuss if the trace database creation fails.
        // This is something we can live with.
        //
        // The number of buckets is chosen to be a prime not too
        // close to a power of two (Knuth says so).  Three possible
        // values are: 1567, 3089, 6263.
        //

        RtlpDphTraceDatabase = RtlTraceDatabaseCreate (
            6263,
            RtlpDphTraceDatabaseMaximumSize,
            0,
            0,
            NULL);

#if DBG
        if (RtlpDphTraceDatabase == NULL) {
            DbgPrint ("Page heap: warning: failed to create trace database for %p",
                      HeapRoot);
        }
#endif
        //
        // Create the Unicode string containing the target dlls.
        // If no target dlls have been specified the string will
        // be initialized with the empty string.
        //

        RtlInitUnicodeString (
            &RtlpDphTargetDllsUnicode,
            RtlpDphTargetDlls);

        //
        // Initialize the target dlls logic
        //

        RtlpDphTargetDllsLogicInitialize ();
    }

    //
    // Add this heap entry to the process heap linked list.
    //

    RtlEnterCriticalSection( &RtlpDphHeapListCriticalSection );

    if (RtlpDphHeapListHead == NULL) {
        RtlpDphHeapListHead = HeapRoot;
        RtlpDphHeapListTail = HeapRoot;
    }
    else {
        HeapRoot->pPrevHeapRoot = RtlpDphHeapListTail;
        UNPROTECT_HEAP_STRUCTURES(RtlpDphHeapListTail);
        RtlpDphHeapListTail->pNextHeapRoot = HeapRoot;
        PROTECT_HEAP_STRUCTURES(RtlpDphHeapListTail);
        RtlpDphHeapListTail = HeapRoot;
    }

    PROTECT_HEAP_STRUCTURES( HeapRoot ); // now protected

    RtlpDphHeapListCount += 1;

    RtlLeaveCriticalSection( &RtlpDphHeapListCriticalSection );

    DEBUG_CODE( RtlpDebugPageHeapVerifyIntegrity( HeapRoot ));

    DbgPrint( "Page heap: process 0x%X created heap @ %p (%p, flags 0x%X)\n",
              NtCurrentTeb()->ClientId.UniqueProcess,
              HEAP_HANDLE_FROM_ROOT( HeapRoot ),
              HeapRoot->NormalHeap,
              HeapRoot->ExtraFlags);

    if ((RtlpDphDebugLevel & DPH_DEBUG_INTERNAL_VALIDATION)) {
        RtlpDphInternalValidatePageHeap (HeapRoot, NULL, 0);
    }

    return HEAP_HANDLE_FROM_ROOT( HeapRoot ); // same as pVirtual

}

//
// Page-heap counterpart of RtlAllocateHeap.  A biased handle forces the
// allocation into the page heap proper (used by ReAllocate); otherwise
// small/selected requests may be routed to the companion normal heap.
// (Function continues beyond this point.)
//

PVOID
RtlpDebugPageHeapAllocate(
    IN PVOID HeapHandle,
    IN ULONG Flags,
    IN SIZE_T Size
    )
{
    PDPH_HEAP_ROOT HeapRoot;
    PDPH_HEAP_BLOCK pAvailNode;
    PDPH_HEAP_BLOCK pPrevAvailNode;
    PDPH_HEAP_BLOCK pBusyNode;
    SIZE_T nBytesAllocate;
    SIZE_T nBytesAccess;
    SIZE_T nActual;
    PVOID pVirtual;
    PVOID pReturn;
    PUCHAR pBlockHeader;
    ULONG Reason;
    BOOLEAN ForcePageHeap = FALSE;

    if (IS_BIASED_POINTER(HeapHandle)) {
        HeapHandle = UNBIAS_POINTER(HeapHandle);
        ForcePageHeap = TRUE;
    }

    HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle );
    if (HeapRoot == NULL)
        return NULL;

    //
    // Is zero size allocation ?
    //

    if (Size == 0) {

        if ((RtlpDphDebugLevel & DPH_DEBUG_BREAK_FOR_SIZE_ZERO)) {

            DbgPrint ("Page heap: request for zero sized block \n");
            DbgBreakPoint();
        }
    }

    //
    // Get the heap lock.
    //

    RtlpDebugPageHeapEnterCritSect( HeapRoot, Flags );
    UNPROTECT_HEAP_STRUCTURES( HeapRoot );

    //
    // We cannot validate the heap when a forced allocation into page heap
    // is requested due to accounting problems.  Allocate is called in this way
    // from ReAllocate while the old node (just about to be freed) is in limbo
    // and is not accounted in any internal structure.
    //

    if ((RtlpDphDebugLevel & DPH_DEBUG_INTERNAL_VALIDATION) && !ForcePageHeap) {
        RtlpDphInternalValidatePageHeap (HeapRoot, NULL, 0);
    }

    Flags |= HeapRoot->HeapFlags;

    //
    // Compute alloc statistics.
Note that we need to 02399 // take the heap lock for this and unprotect the 02400 // heap structures. 02401 // 02402 02403 BUMP_GLOBAL_COUNTER (DPH_COUNTER_NO_OF_ALLOCS); 02404 BUMP_SIZE_COUNTER (Size); 02405 02406 HeapRoot->Counter[DPH_COUNTER_NO_OF_ALLOCS] += 1; 02407 02408 if (Size < 1024) { 02409 BUMP_GLOBAL_COUNTER (DPH_COUNTER_SIZE_BELOW_1K); 02410 HeapRoot->Counter[DPH_COUNTER_SIZE_BELOW_1K] += 1; 02411 } 02412 else if (Size < 4096) { 02413 BUMP_GLOBAL_COUNTER (DPH_COUNTER_SIZE_BELOW_4K); 02414 HeapRoot->Counter[DPH_COUNTER_SIZE_BELOW_4K] += 1; 02415 } 02416 else { 02417 BUMP_GLOBAL_COUNTER (DPH_COUNTER_SIZE_ABOVE_4K); 02418 HeapRoot->Counter[DPH_COUNTER_SIZE_ABOVE_4K] += 1; 02419 } 02420 02421 // 02422 // Figure out if we need to minimize memory impact. This 02423 // might trigger an allocation in the normal heap. 02424 // 02425 02426 if (! ForcePageHeap) { 02427 02428 if (! (RtlpDphShouldAllocateInPageHeap (HeapRoot, Size))) { 02429 02430 pReturn = RtlpDphNormalHeapAllocate ( 02431 HeapRoot, 02432 Flags, 02433 Size); 02434 02435 goto EXIT; 02436 } 02437 } 02438 02439 // 02440 // Check the heap a little bit on checked builds. 02441 // 02442 02443 DEBUG_CODE( RtlpDebugPageHeapVerifyIntegrity( HeapRoot )); 02444 02445 pReturn = NULL; 02446 02447 // 02448 // Validate requested size so we don't overflow 02449 // while rounding up size computations. We do this 02450 // after we've acquired the critsect so we can still 02451 // catch serialization problems. 02452 // 02453 02454 if (Size > 0x7FFF0000) { 02455 OUT_OF_VM_BREAK( Flags, "Page heap: Invalid allocation size\n" ); 02456 goto EXIT; 02457 } 02458 02459 // 02460 // Determine number of pages needed for READWRITE portion 02461 // of allocation and add an extra page for the NO_ACCESS 02462 // memory beyond the READWRITE page(s). 
02463 // 02464 02465 nBytesAccess = ROUNDUP2( Size + sizeof(DPH_BLOCK_INFORMATION), PAGE_SIZE ); 02466 nBytesAllocate = nBytesAccess + PAGE_SIZE; 02467 02468 // 02469 // RtlpDebugPageHeapFindAvailableMem will first attempt to satisfy 02470 // the request from memory on the Available list. If that fails, 02471 // it will coalesce some of the Free list memory into the Available 02472 // list and try again. If that still fails, new VM is allocated and 02473 // added to the Available list. If that fails, the function will 02474 // finally give up and return NULL. 02475 // 02476 02477 pAvailNode = RtlpDebugPageHeapFindAvailableMem( 02478 HeapRoot, 02479 nBytesAllocate, 02480 &pPrevAvailNode, 02481 TRUE 02482 ); 02483 02484 if (pAvailNode == NULL) { 02485 OUT_OF_VM_BREAK( Flags, "Page heap: Unable to allocate virtual memory\n" ); 02486 goto EXIT; 02487 } 02488 02489 // 02490 // Now can't call AllocateNode until pAvailNode is 02491 // adjusted and/or removed from Avail list since AllocateNode 02492 // might adjust the Avail list. 02493 // 02494 02495 pVirtual = pAvailNode->pVirtualBlock; 02496 02497 if (nBytesAccess > 0) { 02498 02499 if ((HeapRoot->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) { 02500 02501 if (! RtlpDebugPageHeapProtectVM( (PUCHAR)pVirtual + PAGE_SIZE, nBytesAccess, PAGE_READWRITE )) { 02502 goto EXIT; 02503 } 02504 } 02505 else { 02506 02507 if (! RtlpDebugPageHeapProtectVM( pVirtual, nBytesAccess, PAGE_READWRITE )) { 02508 goto EXIT; 02509 } 02510 } 02511 } 02512 02513 // 02514 // If we use uncommitted ranges we need to decommit the protection 02515 // page at the end. BAckward overruns flag disables smart memory 02516 // usage flag. 
02517 // 02518 02519 if ((HeapRoot->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) { 02520 02521 // nothing 02522 02523 } 02524 else { 02525 02526 if ((HeapRoot->ExtraFlags & PAGE_HEAP_SMART_MEMORY_USAGE)) { 02527 02528 RtlpDebugPageHeapDecommitVM ( 02529 (PCHAR)pVirtual + nBytesAccess, 02530 PAGE_SIZE); 02531 } 02532 } 02533 02534 // 02535 // pAvailNode (still on avail list) points to block large enough 02536 // to satisfy request, but it might be large enough to split 02537 // into two blocks -- one for request, remainder leave on 02538 // avail list. 02539 // 02540 02541 if (pAvailNode->nVirtualBlockSize > nBytesAllocate) { 02542 02543 // 02544 // Adjust pVirtualBlock and nVirtualBlock size of existing 02545 // node in avail list. The node will still be in correct 02546 // address space order on the avail list. This saves having 02547 // to remove and then re-add node to avail list. Note since 02548 // we're changing sizes directly, we need to adjust the 02549 // avail and busy list counters manually. 02550 // 02551 // Note: since we're leaving at least one page on the 02552 // available list, we are guaranteed that AllocateNode 02553 // will not fail. 02554 // 02555 02556 pAvailNode->pVirtualBlock += nBytesAllocate; 02557 pAvailNode->nVirtualBlockSize -= nBytesAllocate; 02558 HeapRoot->nAvailableAllocationBytesCommitted -= nBytesAllocate; 02559 02560 pBusyNode = RtlpDebugPageHeapAllocateNode( HeapRoot ); 02561 02562 DEBUG_ASSERT( pBusyNode != NULL ); 02563 02564 pBusyNode->pVirtualBlock = pVirtual; 02565 pBusyNode->nVirtualBlockSize = nBytesAllocate; 02566 02567 } 02568 02569 else { 02570 02571 // 02572 // Entire avail block is needed, so simply remove it from avail list. 02573 // 02574 02575 RtlpDebugPageHeapRemoveFromAvailableList( HeapRoot, pAvailNode, pPrevAvailNode ); 02576 02577 pBusyNode = pAvailNode; 02578 02579 } 02580 02581 // 02582 // Now pBusyNode points to our committed virtual block. 
02583 // 02584 02585 if (HeapRoot->HeapFlags & HEAP_NO_ALIGNMENT) 02586 nActual = Size; 02587 else 02588 nActual = ROUNDUP2( Size, USER_ALIGNMENT ); 02589 02590 pBusyNode->nVirtualAccessSize = nBytesAccess; 02591 pBusyNode->nUserRequestedSize = Size; 02592 pBusyNode->nUserActualSize = nActual; 02593 02594 if ((HeapRoot->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) { 02595 02596 pBusyNode->pUserAllocation = pBusyNode->pVirtualBlock 02597 + PAGE_SIZE; 02598 } 02599 else { 02600 02601 pBusyNode->pUserAllocation = pBusyNode->pVirtualBlock 02602 + pBusyNode->nVirtualAccessSize 02603 - nActual; 02604 } 02605 02606 pBusyNode->UserValue = NULL; 02607 pBusyNode->UserFlags = Flags & HEAP_SETTABLE_USER_FLAGS; 02608 02609 // 02610 // RtlpDebugPageHeapAllocate gets called from RtlDebugAllocateHeap, 02611 // which gets called from RtlAllocateHeapSlowly, which gets called 02612 // from RtlAllocateHeap. To keep from wasting lots of stack trace 02613 // storage, we'll skip the bottom 3 entries, leaving RtlAllocateHeap 02614 // as the first recorded entry. 02615 // 02616 02617 if ((HeapRoot->ExtraFlags & PAGE_HEAP_COLLECT_STACK_TRACES)) { 02618 02619 pBusyNode->StackTrace = RtlpDphLogStackTrace(3); 02620 02621 if (pBusyNode->StackTrace) { 02622 02623 RtlTraceDatabaseLock (RtlpDphTraceDatabase); 02624 pBusyNode->StackTrace->UserCount += 1; 02625 pBusyNode->StackTrace->UserSize += pBusyNode->nUserRequestedSize; 02626 pBusyNode->StackTrace->UserContext = HeapRoot; 02627 RtlTraceDatabaseUnlock (RtlpDphTraceDatabase); 02628 } 02629 } 02630 else { 02631 pBusyNode->StackTrace = NULL; 02632 } 02633 02634 RtlpDebugPageHeapPlaceOnBusyList( HeapRoot, pBusyNode ); 02635 02636 pReturn = pBusyNode->pUserAllocation; 02637 02638 // 02639 // For requests the specify HEAP_ZERO_MEMORY, we'll fill the 02640 // user-requested portion of the block with zeros. For requests 02641 // that don't specify HEAP_ZERO_MEMORY, we fill the whole user block 02642 // with DPH_PAGE_BLOCK_INFIX. 
02643 // 02644 02645 if ((Flags & HEAP_ZERO_MEMORY)) { 02646 02647 // 02648 // SilviuC: The call below can be saved if we have a way 02649 // to figure out if the memory for the block was freshly 02650 // virtual allocated (this is already zeroed). This has 02651 // an impact for large allocations. 02652 // 02653 02654 RtlZeroMemory( pBusyNode->pUserAllocation, Size ); 02655 } 02656 else { 02657 02658 RtlFillMemory( pBusyNode->pUserAllocation, Size, DPH_PAGE_BLOCK_INFIX); 02659 } 02660 02661 if ((HeapRoot->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) { 02662 02663 // nothing 02664 02665 } 02666 else { 02667 02668 RtlpDphWritePageHeapBlockInformation ( 02669 HeapRoot, 02670 pBusyNode->pUserAllocation, 02671 Size, 02672 nBytesAccess); 02673 } 02674 02675 EXIT: 02676 02677 if ((RtlpDphDebugLevel & DPH_DEBUG_INTERNAL_VALIDATION) && !ForcePageHeap) { 02678 RtlpDphInternalValidatePageHeap (HeapRoot, NULL, 0); 02679 } 02680 02681 PROTECT_HEAP_STRUCTURES( HeapRoot ); 02682 DEBUG_CODE( RtlpDebugPageHeapVerifyIntegrity( HeapRoot )); 02683 RtlpDebugPageHeapLeaveCritSect( HeapRoot ); 02684 02685 if (pReturn == NULL) { 02686 IF_GENERATE_EXCEPTION( Flags, STATUS_NO_MEMORY ); 02687 } 02688 02689 return pReturn; 02690 } 02691 02692 BOOLEAN 02693 RtlpDebugPageHeapFree( 02694 IN PVOID HeapHandle, 02695 IN ULONG Flags, 02696 IN PVOID Address 02697 ) 02698 { 02699 02700 PDPH_HEAP_ROOT HeapRoot; 02701 PDPH_HEAP_BLOCK Node, Prev; 02702 BOOLEAN Success; 02703 PCH p; 02704 ULONG Reason; 02705 02706 // 02707 // Check if null frees are of any concern. 02708 // 02709 02710 if (Address == NULL) { 02711 02712 if ((RtlpDphDebugLevel & DPH_DEBUG_BREAK_FOR_NULL_FREE)) { 02713 02714 DbgPrint ("Page heap: attempt to free null pointer \n"); 02715 DbgBreakPoint(); 02716 } 02717 02718 // 02719 // For C++ apps that delete NULL this is valid. 
02720 // 02721 02722 return TRUE; 02723 } 02724 02725 HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle ); 02726 if (HeapRoot == NULL) 02727 return FALSE; 02728 02729 // 02730 // Acquire heap lock and unprotect heap structures. 02731 // 02732 02733 RtlpDebugPageHeapEnterCritSect( HeapRoot, Flags ); 02734 DEBUG_CODE( RtlpDebugPageHeapVerifyIntegrity( HeapRoot )); 02735 UNPROTECT_HEAP_STRUCTURES( HeapRoot ); 02736 02737 if ((RtlpDphDebugLevel & DPH_DEBUG_INTERNAL_VALIDATION)) { 02738 RtlpDphInternalValidatePageHeap (HeapRoot, NULL, 0); 02739 } 02740 02741 Flags |= HeapRoot->HeapFlags; 02742 02743 // 02744 // Compute free statistics 02745 // 02746 02747 BUMP_GLOBAL_COUNTER (DPH_COUNTER_NO_OF_FREES); 02748 HeapRoot->Counter[DPH_COUNTER_NO_OF_FREES] += 1; 02749 02750 02751 Success = FALSE; 02752 02753 Node = RtlpDebugPageHeapFindBusyMem( HeapRoot, Address, &Prev ); 02754 02755 if (Node == NULL) { 02756 02757 // 02758 // No wonder we did not find the block in the page heap 02759 // structures because the block was probably allocated 02760 // from the normal heap. Or there is a real bug. 02761 // If there is a bug NormalHeapFree will break into debugger. 02762 // 02763 02764 Success = RtlpDphNormalHeapFree ( 02765 02766 HeapRoot, 02767 Flags, 02768 Address); 02769 02770 goto EXIT; 02771 } 02772 02773 // 02774 // If tail was allocated, make sure filler not overwritten 02775 // 02776 02777 if ((HeapRoot->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) { 02778 02779 if (Node->nVirtualAccessSize > 0) { 02780 RtlpDebugPageHeapProtectVM( Node->pVirtualBlock + PAGE_SIZE, 02781 Node->nVirtualAccessSize, 02782 PAGE_NOACCESS ); 02783 } 02784 } 02785 else { 02786 02787 // 02788 // (SilviuC): This can be done at the beginning of the function. 02789 // 02790 02791 if (! 
(RtlpDphIsPageHeapBlock (HeapRoot, Address, &Reason, TRUE))) { 02792 02793 RtlpDphReportCorruptedBlock (Address, Reason); 02794 } 02795 02796 if (Node->nVirtualAccessSize > 0) { 02797 02798 // 02799 // Mark the block as freed. The information is gone if we 02800 // will decommit the region but will remain if smart memory 02801 // flag is not set and can help debug failures. 02802 // 02803 02804 { 02805 PDPH_BLOCK_INFORMATION Info = (PDPH_BLOCK_INFORMATION)(Node->pUserAllocation); 02806 02807 Info -= 1; 02808 Info->StartStamp -= 1; 02809 Info->EndStamp -= 1; 02810 } 02811 02812 RtlpDebugPageHeapProtectVM( Node->pVirtualBlock, 02813 Node->nVirtualAccessSize, 02814 PAGE_NOACCESS ); 02815 } 02816 } 02817 02818 RtlpDebugPageHeapRemoveFromBusyList( HeapRoot, Node, Prev ); 02819 02820 // 02821 // If we use uncommitted ranges we need to decommit the memory 02822 // range now for the allocation. Note that the next page (guard) 02823 // was already decommitted when we allocated the block. 02824 // 02825 02826 if ((HeapRoot->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) { 02827 02828 // nothing 02829 02830 } 02831 else { 02832 02833 if ((HeapRoot->ExtraFlags & PAGE_HEAP_SMART_MEMORY_USAGE)) { 02834 02835 RtlpDebugPageHeapDecommitVM ( 02836 Node->pVirtualBlock, 02837 Node->nVirtualAccessSize); 02838 } 02839 } 02840 02841 02842 RtlpDebugPageHeapPlaceOnFreeList( HeapRoot, Node ); 02843 02844 // 02845 // RtlpDebugPageHeapFree gets called from RtlDebugFreeHeap, which 02846 // gets called from RtlFreeHeapSlowly, which gets called from 02847 // RtlFreeHeap. To keep from wasting lots of stack trace storage, 02848 // we'll skip the bottom 3 entries, leaving RtlFreeHeap as the 02849 // first recorded entry. 
02850 // 02851 02852 if ((HeapRoot->ExtraFlags & PAGE_HEAP_COLLECT_STACK_TRACES)) { 02853 02854 if (Node->StackTrace) { 02855 02856 RtlTraceDatabaseLock (RtlpDphTraceDatabase); 02857 02858 if (Node->StackTrace->UserCount > 0) { 02859 Node->StackTrace->UserCount -= 1; 02860 } 02861 02862 if (Node->StackTrace->UserSize >= Node->nUserRequestedSize) { 02863 Node->StackTrace->UserSize -= Node->nUserRequestedSize; 02864 } 02865 02866 RtlTraceDatabaseUnlock (RtlpDphTraceDatabase); 02867 } 02868 02869 Node->StackTrace = RtlpDphLogStackTrace(3); 02870 } 02871 else { 02872 Node->StackTrace = NULL; 02873 } 02874 02875 Success = TRUE; 02876 02877 EXIT: 02878 02879 if ((RtlpDphDebugLevel & DPH_DEBUG_INTERNAL_VALIDATION)) { 02880 RtlpDphInternalValidatePageHeap (HeapRoot, NULL, 0); 02881 } 02882 02883 PROTECT_HEAP_STRUCTURES( HeapRoot ); 02884 DEBUG_CODE( RtlpDebugPageHeapVerifyIntegrity( HeapRoot )); 02885 RtlpDebugPageHeapLeaveCritSect( HeapRoot ); 02886 02887 if (! Success) { 02888 IF_GENERATE_EXCEPTION( Flags, STATUS_ACCESS_VIOLATION ); 02889 } 02890 02891 return Success; 02892 } 02893 02894 PVOID 02895 RtlpDebugPageHeapReAllocate( 02896 IN PVOID HeapHandle, 02897 IN ULONG Flags, 02898 IN PVOID Address, 02899 IN SIZE_T Size 02900 ) 02901 { 02902 PDPH_HEAP_ROOT HeapRoot; 02903 PDPH_HEAP_BLOCK OldNode, OldPrev, NewNode; 02904 PVOID NewAddress; 02905 PUCHAR p; 02906 SIZE_T CopyDataSize; 02907 ULONG SaveFlags; 02908 BOOLEAN ReallocInNormalHeap = FALSE; 02909 ULONG Reason; 02910 BOOLEAN ForcePageHeap = FALSE; 02911 BOOLEAN OriginalAllocationInPageHeap = FALSE; 02912 02913 if (IS_BIASED_POINTER(HeapHandle)) { 02914 HeapHandle = UNBIAS_POINTER(HeapHandle); 02915 ForcePageHeap = TRUE; 02916 } 02917 02918 // 02919 // Is zero size allocation ? 
02920 // 02921 02922 if (Size == 0) { 02923 02924 if ((RtlpDphDebugLevel & DPH_DEBUG_BREAK_FOR_SIZE_ZERO)) { 02925 02926 DbgPrint ("Page heap: request for zero sized block \n"); 02927 DbgBreakPoint(); 02928 } 02929 } 02930 02931 HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle ); 02932 if (HeapRoot == NULL) 02933 return NULL; 02934 02935 // 02936 // Get heap lock and unprotect heap structures. 02937 // 02938 02939 RtlpDebugPageHeapEnterCritSect( HeapRoot, Flags ); 02940 DEBUG_CODE( RtlpDebugPageHeapVerifyIntegrity( HeapRoot )); 02941 UNPROTECT_HEAP_STRUCTURES( HeapRoot ); 02942 02943 if ((RtlpDphDebugLevel & DPH_DEBUG_INTERNAL_VALIDATION)) { 02944 RtlpDphInternalValidatePageHeap (HeapRoot, NULL, 0); 02945 } 02946 02947 Flags |= HeapRoot->HeapFlags; 02948 02949 // 02950 // Compute realloc statistics 02951 // 02952 02953 BUMP_GLOBAL_COUNTER (DPH_COUNTER_NO_OF_REALLOCS); 02954 BUMP_SIZE_COUNTER (Size); 02955 02956 HeapRoot->Counter[DPH_COUNTER_NO_OF_REALLOCS] += 1; 02957 02958 if (Size < 1024) { 02959 BUMP_GLOBAL_COUNTER (DPH_COUNTER_SIZE_BELOW_1K); 02960 HeapRoot->Counter[DPH_COUNTER_SIZE_BELOW_1K] += 1; 02961 } 02962 else if (Size < 4096) { 02963 BUMP_GLOBAL_COUNTER (DPH_COUNTER_SIZE_BELOW_4K); 02964 HeapRoot->Counter[DPH_COUNTER_SIZE_BELOW_4K] += 1; 02965 } 02966 else { 02967 BUMP_GLOBAL_COUNTER (DPH_COUNTER_SIZE_ABOVE_4K); 02968 HeapRoot->Counter[DPH_COUNTER_SIZE_ABOVE_4K] += 1; 02969 } 02970 02971 02972 NewAddress = NULL; 02973 02974 // 02975 // Check Flags for non-moveable reallocation and fail it 02976 // unconditionally. Apps that specify this flag should be 02977 // prepared to deal with failure anyway. 02978 // 02979 02980 if (Flags & HEAP_REALLOC_IN_PLACE_ONLY) { 02981 goto EXIT; 02982 } 02983 02984 // 02985 // Validate requested size so we don't overflow 02986 // while rounding up size computations. We do this 02987 // after we've acquired the critsect so we can still 02988 // catch serialization problems. 
02989 // 02990 02991 if (Size > 0x7FFF0000) { 02992 OUT_OF_VM_BREAK( Flags, "Page heap: Invalid allocation size\n" ); 02993 goto EXIT; 02994 } 02995 02996 OldNode = RtlpDebugPageHeapFindBusyMem( HeapRoot, Address, &OldPrev ); 02997 02998 if (OldNode) { 02999 OriginalAllocationInPageHeap = TRUE; 03000 } 03001 03002 if (OldNode == NULL) { 03003 03004 // 03005 // No wonder we did not find the block in the page heap 03006 // structures because the block was probably allocated 03007 // from the normal heap. Or there is a real bug. If there 03008 // is a bug NormalHeapReAllocate will break into debugger. 03009 // 03010 03011 NewAddress = RtlpDphNormalHeapReAllocate ( 03012 03013 HeapRoot, 03014 Flags, 03015 Address, 03016 Size); 03017 03018 goto EXIT; 03019 } 03020 03021 // 03022 // If tail was allocated, make sure filler not overwritten 03023 // 03024 03025 if ((HeapRoot->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) { 03026 03027 // nothing 03028 } 03029 else { 03030 03031 // 03032 // (SilviuC): This can be done at the beginning of the function. 03033 // 03034 03035 if (! (RtlpDphIsPageHeapBlock (HeapRoot, Address, &Reason, TRUE))) { 03036 03037 RtlpDphReportCorruptedBlock (Address, Reason); 03038 } 03039 } 03040 03041 // 03042 // Before allocating a new block, remove the old block from 03043 // the busy list. When we allocate the new block, the busy 03044 // list pointers will change, possibly leaving our acquired 03045 // Prev pointer invalid. 03046 // 03047 03048 RtlpDebugPageHeapRemoveFromBusyList( HeapRoot, OldNode, OldPrev ); 03049 03050 // 03051 // Allocate new memory for new requested size. Use try/except 03052 // to trap exception if Flags caused out-of-memory exception. 
03053 // 03054 03055 try { 03056 03057 if (!ForcePageHeap && !(RtlpDphShouldAllocateInPageHeap (HeapRoot, Size))) { 03058 03059 NewAddress = RtlpDphNormalHeapAllocate ( 03060 HeapRoot, 03061 Flags, 03062 Size); 03063 03064 ReallocInNormalHeap = TRUE; 03065 } 03066 else { 03067 03068 // 03069 // Force the allocation in page heap by biasing 03070 // the heap handle. Validate the heap here since when we use 03071 // biased pointers validation inside Allocate is disabled. 03072 // 03073 03074 if ((RtlpDphDebugLevel & DPH_DEBUG_INTERNAL_VALIDATION)) { 03075 RtlpDphInternalValidatePageHeap (HeapRoot, OldNode->pVirtualBlock, OldNode->nVirtualBlockSize); 03076 } 03077 03078 NewAddress = RtlpDebugPageHeapAllocate( 03079 BIAS_POINTER(HeapHandle), 03080 Flags, 03081 Size); 03082 03083 03084 if ((RtlpDphDebugLevel & DPH_DEBUG_INTERNAL_VALIDATION)) { 03085 RtlpDphInternalValidatePageHeap (HeapRoot, OldNode->pVirtualBlock, OldNode->nVirtualBlockSize); 03086 } 03087 03088 ReallocInNormalHeap = FALSE; 03089 } 03090 } 03091 except( EXCEPTION_EXECUTE_HANDLER ) { 03092 } 03093 03094 // 03095 // We managed to make a new allocation (normal or page heap). 03096 // Now we need to copy from old to new all sorts of stuff 03097 // (contents, user flags/values). 03098 // 03099 03100 if (NewAddress) { 03101 03102 // 03103 // Copy old block contents into the new node. 03104 // 03105 03106 CopyDataSize = OldNode->nUserRequestedSize; 03107 03108 if (CopyDataSize > Size) { 03109 CopyDataSize = Size; 03110 } 03111 03112 if (CopyDataSize > 0) { 03113 03114 RtlCopyMemory( 03115 NewAddress, 03116 Address, 03117 CopyDataSize 03118 ); 03119 } 03120 03121 // 03122 // If new allocation was done in page heap we need to detect the new node 03123 // and copy over user flags/values. 03124 // 03125 03126 if (! 
ReallocInNormalHeap) { 03127 03128 NewNode = RtlpDebugPageHeapFindBusyMem( HeapRoot, NewAddress, NULL ); 03129 03130 // 03131 // This block could not be in normal heap therefore from this 03132 // respect the call above should always succeed. 03133 // 03134 03135 DEBUG_ASSERT( NewNode != NULL ); 03136 03137 NewNode->UserValue = OldNode->UserValue; 03138 NewNode->UserFlags = ( Flags & HEAP_SETTABLE_USER_FLAGS ) ? 03139 ( Flags & HEAP_SETTABLE_USER_FLAGS ) : 03140 OldNode->UserFlags; 03141 03142 } 03143 03144 // 03145 // We need to cover the case where old allocation was in page heap. 03146 // In this case we still need to cleanup the old node and 03147 // insert it back in free list. Actually the way the code is written 03148 // we take this code path only if original allocation was in page heap. 03149 // This is the reason for the assert. 03150 // 03151 03152 03153 RETAIL_ASSERT (OriginalAllocationInPageHeap); 03154 03155 if (OriginalAllocationInPageHeap) { 03156 03157 if (OldNode->nVirtualAccessSize > 0) { 03158 RtlpDebugPageHeapProtectVM( OldNode->pVirtualBlock, 03159 OldNode->nVirtualAccessSize, 03160 PAGE_NOACCESS ); 03161 } 03162 03163 // 03164 // If we use uncommitted ranges we need to decommit the memory 03165 // range now. Note that the next page (guard) was already decommitted 03166 // when we made the allocation. 03167 // 03168 03169 if ((HeapRoot->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) { 03170 03171 // nothing 03172 03173 } 03174 else { 03175 03176 if ((HeapRoot->ExtraFlags & PAGE_HEAP_SMART_MEMORY_USAGE)) { 03177 03178 RtlpDebugPageHeapDecommitVM ( 03179 OldNode->pVirtualBlock, 03180 OldNode->nVirtualAccessSize); 03181 } 03182 } 03183 03184 RtlpDebugPageHeapPlaceOnFreeList( HeapRoot, OldNode ); 03185 03186 // 03187 // RtlpDebugPageHeapReAllocate gets called from RtlDebugReAllocateHeap, 03188 // which gets called from RtlReAllocateHeap. 
To keep from wasting 03189 // lots of stack trace storage, we'll skip the bottom 2 entries, 03190 // leaving RtlReAllocateHeap as the first recorded entry in the 03191 // freed stack trace. 03192 // 03193 // Note. For realloc we need to do the accounting for free in the 03194 // trace block. The accounting for alloc is done in the real 03195 // alloc operation which always happens for page heap reallocs. 03196 // 03197 03198 if ((HeapRoot->ExtraFlags & PAGE_HEAP_COLLECT_STACK_TRACES)) { 03199 03200 if (OldNode->StackTrace) { 03201 03202 RtlTraceDatabaseLock (RtlpDphTraceDatabase); 03203 03204 if (OldNode->StackTrace->UserCount > 0) { 03205 OldNode->StackTrace->UserCount -= 1; 03206 } 03207 03208 if (OldNode->StackTrace->UserSize >= OldNode->nUserRequestedSize) { 03209 OldNode->StackTrace->UserSize -= OldNode->nUserRequestedSize; 03210 } 03211 03212 RtlTraceDatabaseUnlock (RtlpDphTraceDatabase); 03213 } 03214 03215 OldNode->StackTrace = RtlpDphLogStackTrace(2); 03216 } 03217 else { 03218 OldNode->StackTrace = NULL; 03219 } 03220 } 03221 } 03222 03223 else { 03224 03225 // 03226 // Failed to allocate a new block. Return old block to busy list. 
03227 // 03228 03229 if (OriginalAllocationInPageHeap) { 03230 03231 RtlpDebugPageHeapPlaceOnBusyList( HeapRoot, OldNode ); 03232 } 03233 03234 } 03235 03236 EXIT: 03237 03238 if ((RtlpDphDebugLevel & DPH_DEBUG_INTERNAL_VALIDATION)) { 03239 RtlpDphInternalValidatePageHeap (HeapRoot, NULL, 0); 03240 } 03241 03242 PROTECT_HEAP_STRUCTURES( HeapRoot ); 03243 DEBUG_CODE( RtlpDebugPageHeapVerifyIntegrity( HeapRoot )); 03244 RtlpDebugPageHeapLeaveCritSect( HeapRoot ); 03245 03246 if (NewAddress == NULL) { 03247 IF_GENERATE_EXCEPTION( Flags, STATUS_NO_MEMORY ); 03248 } 03249 03250 return NewAddress; 03251 } 03252 03253 //silviuc: does this really work for all functions in between 03254 #if (( DPH_CAPTURE_STACK_TRACE ) && ( i386 ) && ( FPO )) 03255 #pragma optimize( "", on ) // restore original optimizations 03256 #endif 03257 03258 PVOID 03259 RtlpDebugPageHeapDestroy( 03260 IN PVOID HeapHandle 03261 ) 03262 { 03263 PDPH_HEAP_ROOT HeapRoot; 03264 PDPH_HEAP_ROOT PrevHeapRoot; 03265 PDPH_HEAP_ROOT NextHeapRoot; 03266 PDPH_HEAP_BLOCK Node; 03267 PDPH_HEAP_BLOCK Next; 03268 ULONG Flags; 03269 PUCHAR p; 03270 ULONG Reason; 03271 PVOID NormalHeap; 03272 03273 if (HeapHandle == RtlProcessHeap()) { 03274 RtlpDebugPageHeapBreak( "Page heap: Attempt to destroy process heap\n" ); 03275 return NULL; 03276 } 03277 03278 HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle ); 03279 if (HeapRoot == NULL) 03280 return NULL; 03281 03282 Flags = HeapRoot->HeapFlags | HEAP_NO_SERIALIZE; 03283 03284 RtlpDebugPageHeapEnterCritSect( HeapRoot, Flags ); 03285 DEBUG_CODE( RtlpDebugPageHeapVerifyIntegrity( HeapRoot )); 03286 UNPROTECT_HEAP_STRUCTURES( HeapRoot ); 03287 03288 // 03289 // Save normal heap pointer for later. 03290 // 03291 03292 NormalHeap = HeapRoot->NormalHeap; 03293 03294 // 03295 // Walk all busy allocations and check for tail fill corruption 03296 // 03297 03298 Node = HeapRoot->pBusyAllocationListHead; 03299 03300 while (Node) { 03301 03302 if (! 
(HeapRoot->ExtraFlags & PAGE_HEAP_CATCH_BACKWARD_OVERRUNS)) { 03303 if (! (RtlpDphIsPageHeapBlock (HeapRoot, Node->pUserAllocation, &Reason, TRUE))) { 03304 RtlpDphReportCorruptedBlock (Node->pUserAllocation, Reason); 03305 } 03306 } 03307 03308 Node = Node->pNextAlloc; 03309 } 03310 03311 // 03312 // Remove this heap entry from the process heap linked list. 03313 // 03314 03315 RtlEnterCriticalSection( &RtlpDphHeapListCriticalSection ); 03316 03317 if (HeapRoot->pPrevHeapRoot) { 03318 HeapRoot->pPrevHeapRoot->pNextHeapRoot = HeapRoot->pNextHeapRoot; 03319 } 03320 else { 03321 RtlpDphHeapListHead = HeapRoot->pNextHeapRoot; 03322 } 03323 03324 if (HeapRoot->pNextHeapRoot) { 03325 HeapRoot->pNextHeapRoot->pPrevHeapRoot = HeapRoot->pPrevHeapRoot; 03326 } 03327 else { 03328 RtlpDphHeapListTail = HeapRoot->pPrevHeapRoot; 03329 } 03330 03331 RtlpDphHeapListCount--; 03332 03333 RtlLeaveCriticalSection( &RtlpDphHeapListCriticalSection ); 03334 03335 03336 // 03337 // Must release critical section before deleting it; otherwise, 03338 // checked build Teb->CountOfOwnedCriticalSections gets out of sync. 03339 // 03340 03341 RtlLeaveCriticalSection( HeapRoot->HeapCritSect ); 03342 RtlDeleteCriticalSection( HeapRoot->HeapCritSect ); 03343 03344 // 03345 // This is weird. A virtual block might contain storage for 03346 // one of the nodes necessary to walk this list. In fact, 03347 // we're guaranteed that the root node contains at least one 03348 // virtual alloc node. 03349 // 03350 // Each time we alloc new VM, we make that the head of the 03351 // of the VM list, like a LIFO structure. I think we're ok 03352 // because no VM list node should be on a subsequently alloc'd 03353 // VM -- only a VM list entry might be on its own memory (as 03354 // is the case for the root node). We read pNode->pNextAlloc 03355 // before releasing the VM in case pNode existed on that VM. 
03356 // I think this is safe -- as long as the VM list is LIFO and 03357 // we don't do any list reorganization. 03358 // 03359 03360 Node = HeapRoot->pVirtualStorageListHead; 03361 03362 while (Node) { 03363 Next = Node->pNextAlloc; 03364 if (! RtlpDebugPageHeapReleaseVM( Node->pVirtualBlock )) { 03365 RtlpDebugPageHeapBreak( "Page heap: Unable to release virtual memory\n" ); 03366 } 03367 Node = Next; 03368 } 03369 03370 // 03371 // Free all blocks in the delayed free queue that belong to the 03372 // normal heap just about to be destroyed. Note that this is 03373 // not a bug. The application freed the blocks correctly but 03374 // we delayed the free operation. 03375 // 03376 03377 RtlpDphFreeDelayedBlocksFromHeap (HeapRoot, NormalHeap); 03378 03379 // 03380 // Destroy normal heap. Note that this will not make a recursive 03381 // call into this function because this is not a page heap and 03382 // code in NT heap manager will detect this. 03383 // 03384 03385 RtlDestroyHeap (NormalHeap); 03386 03387 // 03388 // That's it. All the VM, including the root node, should now 03389 // be released. RtlDestroyHeap always returns NULL. 
03390 // 03391 03392 DbgPrint( "Page heap: process 0x%X destroyed heap @ %p (%p)\n", 03393 NtCurrentTeb()->ClientId.UniqueProcess, 03394 HeapRoot, 03395 NormalHeap); 03396 03397 return NULL; 03398 } 03399 03400 SIZE_T 03401 RtlpDebugPageHeapSize( 03402 IN PVOID HeapHandle, 03403 IN ULONG Flags, 03404 IN PVOID Address 03405 ) 03406 { 03407 PDPH_HEAP_ROOT HeapRoot; 03408 PDPH_HEAP_BLOCK Node; 03409 SIZE_T Size; 03410 03411 Size = -1; 03412 03413 HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle ); 03414 if (HeapRoot == NULL) { 03415 return Size; 03416 } 03417 03418 Flags |= HeapRoot->HeapFlags; 03419 03420 RtlpDebugPageHeapEnterCritSect( HeapRoot, Flags ); 03421 UNPROTECT_HEAP_STRUCTURES( HeapRoot ); 03422 03423 Node = RtlpDebugPageHeapFindBusyMem( HeapRoot, Address, NULL ); 03424 03425 if (Node == NULL) { 03426 03427 // 03428 // No wonder we did not find the block in the page heap 03429 // structures because the block was probably allocated 03430 // from the normal heap. Or there is a real bug. If there 03431 // is a bug NormalHeapSize will break into debugger. 03432 // 03433 03434 Size = RtlpDphNormalHeapSize ( 03435 03436 HeapRoot, 03437 Flags, 03438 Address); 03439 03440 goto EXIT; 03441 } 03442 else { 03443 Size = Node->nUserRequestedSize; 03444 } 03445 03446 EXIT: 03447 PROTECT_HEAP_STRUCTURES( HeapRoot ); 03448 RtlpDebugPageHeapLeaveCritSect( HeapRoot ); 03449 03450 if (Size == -1) { 03451 IF_GENERATE_EXCEPTION( Flags, STATUS_ACCESS_VIOLATION ); 03452 } 03453 03454 return Size; 03455 } 03456 03457 ULONG 03458 RtlpDebugPageHeapGetProcessHeaps( 03459 ULONG NumberOfHeaps, 03460 PVOID *ProcessHeaps 03461 ) 03462 { 03463 PDPH_HEAP_ROOT HeapRoot; 03464 ULONG Count; 03465 03466 // 03467 // Although we'd expect GetProcessHeaps never to be called 03468 // before at least the very first heap creation, we should 03469 // still be safe and initialize the critical section if 03470 // necessary. 03471 // 03472 03473 if (! 
RtlpDphHeapListHasBeenInitialized) { 03474 RtlpDphHeapListHasBeenInitialized = TRUE; 03475 RtlInitializeCriticalSection( &RtlpDphHeapListCriticalSection ); 03476 } 03477 03478 RtlEnterCriticalSection( &RtlpDphHeapListCriticalSection ); 03479 03480 if (RtlpDphHeapListCount <= NumberOfHeaps) { 03481 03482 for (HeapRoot = RtlpDphHeapListHead, Count = 0; 03483 HeapRoot != NULL; 03484 HeapRoot = HeapRoot->pNextHeapRoot, Count += 1) { 03485 03486 *ProcessHeaps++ = HEAP_HANDLE_FROM_ROOT( HeapRoot ); 03487 } 03488 03489 if (Count != RtlpDphHeapListCount) { 03490 RtlpDebugPageHeapBreak( "Page heap: BUG: process heap list count wrong\n" ); 03491 } 03492 03493 } 03494 else { 03495 03496 // 03497 // User's buffer is too small. Return number of entries 03498 // necessary for subsequent call to succeed. Buffer 03499 // remains untouched. 03500 // 03501 03502 Count = RtlpDphHeapListCount; 03503 03504 } 03505 03506 RtlLeaveCriticalSection( &RtlpDphHeapListCriticalSection ); 03507 03508 return Count; 03509 } 03510 03511 ULONG 03512 RtlpDebugPageHeapCompact( 03513 IN PVOID HeapHandle, 03514 IN ULONG Flags 03515 ) 03516 { 03517 PDPH_HEAP_ROOT HeapRoot; 03518 03519 HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle ); 03520 if (HeapRoot == NULL) 03521 return 0; 03522 03523 Flags |= HeapRoot->HeapFlags; 03524 03525 RtlpDebugPageHeapEnterCritSect( HeapRoot, Flags ); 03526 03527 // 03528 // Don't do anything, but we did want to acquire the critsect 03529 // in case this was called with HEAP_NO_SERIALIZE while another 03530 // thread is in the heap code. 
03531 // 03532 03533 RtlpDebugPageHeapLeaveCritSect( HeapRoot ); 03534 03535 return 0; 03536 } 03537 03538 BOOLEAN 03539 RtlpDebugPageHeapValidate( 03540 IN PVOID HeapHandle, 03541 IN ULONG Flags, 03542 IN PVOID Address 03543 ) 03544 { 03545 PDPH_HEAP_ROOT HeapRoot; 03546 PDPH_HEAP_BLOCK Node; 03547 BOOLEAN Result = FALSE; 03548 03549 HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle ); 03550 if (HeapRoot == NULL) 03551 return FALSE; 03552 03553 Flags |= HeapRoot->HeapFlags; 03554 03555 RtlpDebugPageHeapEnterCritSect( HeapRoot, Flags ); 03556 DEBUG_CODE( RtlpDebugPageHeapVerifyIntegrity( HeapRoot )); 03557 UNPROTECT_HEAP_STRUCTURES( HeapRoot ); 03558 03559 Node = Address ? RtlpDebugPageHeapFindBusyMem( HeapRoot, Address, NULL ) : NULL; 03560 03561 if (Node == NULL) { 03562 03563 Result = RtlpDphNormalHeapValidate ( 03564 HeapRoot, 03565 Flags, 03566 Address); 03567 } 03568 03569 PROTECT_HEAP_STRUCTURES( HeapRoot ); 03570 RtlpDebugPageHeapLeaveCritSect( HeapRoot ); 03571 03572 if (Address) { 03573 if (Node) { 03574 return TRUE; 03575 } 03576 else { 03577 return Result; 03578 } 03579 } 03580 else { 03581 return TRUE; 03582 } 03583 } 03584 03585 NTSTATUS 03586 RtlpDebugPageHeapWalk( 03587 IN PVOID HeapHandle, 03588 IN OUT PRTL_HEAP_WALK_ENTRY Entry 03589 ) 03590 { 03591 return STATUS_NOT_IMPLEMENTED; 03592 } 03593 03594 BOOLEAN 03595 RtlpDebugPageHeapLock( 03596 IN PVOID HeapHandle 03597 ) 03598 { 03599 PDPH_HEAP_ROOT HeapRoot; 03600 03601 HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle ); 03602 03603 if (HeapRoot == NULL) { 03604 return FALSE; 03605 } 03606 03607 RtlpDebugPageHeapEnterCritSect( HeapRoot, HeapRoot->HeapFlags ); 03608 03609 return TRUE; 03610 } 03611 03612 BOOLEAN 03613 RtlpDebugPageHeapUnlock( 03614 IN PVOID HeapHandle 03615 ) 03616 { 03617 PDPH_HEAP_ROOT HeapRoot; 03618 03619 HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle ); 03620 03621 if (HeapRoot == NULL) { 03622 return FALSE; 03623 } 03624 03625 
RtlpDebugPageHeapLeaveCritSect( HeapRoot ); 03626 03627 return TRUE; 03628 } 03629 03630 BOOLEAN 03631 RtlpDebugPageHeapSetUserValue( 03632 IN PVOID HeapHandle, 03633 IN ULONG Flags, 03634 IN PVOID Address, 03635 IN PVOID UserValue 03636 ) 03637 { 03638 PDPH_HEAP_ROOT HeapRoot; 03639 PDPH_HEAP_BLOCK Node; 03640 BOOLEAN Success; 03641 03642 Success = FALSE; 03643 03644 HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle ); 03645 if ( HeapRoot == NULL ) 03646 return Success; 03647 03648 Flags |= HeapRoot->HeapFlags; 03649 03650 RtlpDebugPageHeapEnterCritSect( HeapRoot, Flags ); 03651 UNPROTECT_HEAP_STRUCTURES( HeapRoot ); 03652 03653 Node = RtlpDebugPageHeapFindBusyMem( HeapRoot, Address, NULL ); 03654 03655 if ( Node == NULL ) { 03656 03657 // 03658 // If we cannot find the node in page heap structures it might be 03659 // because it has been allocated from normal heap. 03660 // 03661 03662 Success = RtlpDphNormalHeapSetUserValue ( 03663 HeapRoot, 03664 Flags, 03665 Address, 03666 UserValue); 03667 03668 goto EXIT; 03669 } 03670 else { 03671 Node->UserValue = UserValue; 03672 Success = TRUE; 03673 } 03674 03675 EXIT: 03676 PROTECT_HEAP_STRUCTURES( HeapRoot ); 03677 RtlpDebugPageHeapLeaveCritSect( HeapRoot ); 03678 03679 return Success; 03680 } 03681 03682 BOOLEAN 03683 RtlpDebugPageHeapGetUserInfo( 03684 IN PVOID HeapHandle, 03685 IN ULONG Flags, 03686 IN PVOID Address, 03687 OUT PVOID* UserValue, 03688 OUT PULONG UserFlags 03689 ) 03690 { 03691 PDPH_HEAP_ROOT HeapRoot; 03692 PDPH_HEAP_BLOCK Node; 03693 BOOLEAN Success; 03694 03695 Success = FALSE; 03696 03697 HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle ); 03698 if ( HeapRoot == NULL ) 03699 return Success; 03700 03701 Flags |= HeapRoot->HeapFlags; 03702 03703 RtlpDebugPageHeapEnterCritSect( HeapRoot, Flags ); 03704 UNPROTECT_HEAP_STRUCTURES( HeapRoot ); 03705 03706 Node = RtlpDebugPageHeapFindBusyMem( HeapRoot, Address, NULL ); 03707 03708 if ( Node == NULL ) { 03709 03710 // 03711 // If we 
cannot find the node in page heap structures it might be 03712 // because it has been allocated from normal heap. 03713 // 03714 03715 Success = RtlpDphNormalHeapGetUserInfo ( 03716 HeapRoot, 03717 Flags, 03718 Address, 03719 UserValue, 03720 UserFlags); 03721 03722 goto EXIT; 03723 } 03724 else { 03725 if ( UserValue != NULL ) 03726 *UserValue = Node->UserValue; 03727 if ( UserFlags != NULL ) 03728 *UserFlags = Node->UserFlags; 03729 Success = TRUE; 03730 } 03731 03732 EXIT: 03733 PROTECT_HEAP_STRUCTURES( HeapRoot ); 03734 RtlpDebugPageHeapLeaveCritSect( HeapRoot ); 03735 03736 return Success; 03737 } 03738 03739 BOOLEAN 03740 RtlpDebugPageHeapSetUserFlags( 03741 IN PVOID HeapHandle, 03742 IN ULONG Flags, 03743 IN PVOID Address, 03744 IN ULONG UserFlagsReset, 03745 IN ULONG UserFlagsSet 03746 ) 03747 { 03748 PDPH_HEAP_ROOT HeapRoot; 03749 PDPH_HEAP_BLOCK Node; 03750 BOOLEAN Success; 03751 03752 Success = FALSE; 03753 03754 HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle ); 03755 if ( HeapRoot == NULL ) 03756 return Success; 03757 03758 Flags |= HeapRoot->HeapFlags; 03759 03760 RtlpDebugPageHeapEnterCritSect( HeapRoot, Flags ); 03761 UNPROTECT_HEAP_STRUCTURES( HeapRoot ); 03762 03763 Node = RtlpDebugPageHeapFindBusyMem( HeapRoot, Address, NULL ); 03764 03765 if ( Node == NULL ) { 03766 03767 // 03768 // If we cannot find the node in page heap structures it might be 03769 // because it has been allocated from normal heap. 
03770 // 03771 03772 Success = RtlpDphNormalHeapSetUserFlags ( 03773 HeapRoot, 03774 Flags, 03775 Address, 03776 UserFlagsReset, 03777 UserFlagsSet); 03778 03779 goto EXIT; 03780 } 03781 else { 03782 Node->UserFlags &= ~( UserFlagsReset ); 03783 Node->UserFlags |= UserFlagsSet; 03784 Success = TRUE; 03785 } 03786 03787 EXIT: 03788 PROTECT_HEAP_STRUCTURES( HeapRoot ); 03789 RtlpDebugPageHeapLeaveCritSect( HeapRoot ); 03790 03791 return Success; 03792 } 03793 03794 BOOLEAN 03795 RtlpDebugPageHeapSerialize( 03796 IN PVOID HeapHandle 03797 ) 03798 { 03799 PDPH_HEAP_ROOT HeapRoot; 03800 03801 HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle ); 03802 if ( HeapRoot == NULL ) 03803 return FALSE; 03804 03805 RtlpDebugPageHeapEnterCritSect( HeapRoot, 0 ); 03806 UNPROTECT_HEAP_STRUCTURES( HeapRoot ); 03807 03808 HeapRoot->HeapFlags &= ~HEAP_NO_SERIALIZE; 03809 03810 PROTECT_HEAP_STRUCTURES( HeapRoot ); 03811 RtlpDebugPageHeapLeaveCritSect( HeapRoot ); 03812 03813 return TRUE; 03814 } 03815 03816 NTSTATUS 03817 RtlpDebugPageHeapExtend( 03818 IN PVOID HeapHandle, 03819 IN ULONG Flags, 03820 IN PVOID Base, 03821 IN SIZE_T Size 03822 ) 03823 { 03824 return STATUS_SUCCESS; 03825 } 03826 03827 NTSTATUS 03828 RtlpDebugPageHeapZero( 03829 IN PVOID HeapHandle, 03830 IN ULONG Flags 03831 ) 03832 { 03833 return STATUS_SUCCESS; 03834 } 03835 03836 NTSTATUS 03837 RtlpDebugPageHeapReset( 03838 IN PVOID HeapHandle, 03839 IN ULONG Flags 03840 ) 03841 { 03842 return STATUS_SUCCESS; 03843 } 03844 03845 NTSTATUS 03846 RtlpDebugPageHeapUsage( 03847 IN PVOID HeapHandle, 03848 IN ULONG Flags, 03849 IN OUT PRTL_HEAP_USAGE Usage 03850 ) 03851 { 03852 PDPH_HEAP_ROOT HeapRoot; 03853 03854 // 03855 // Partial implementation since this information is kind of meaningless. 
03856 // 03857 03858 HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle ); 03859 if ( HeapRoot == NULL ) 03860 return STATUS_INVALID_PARAMETER; 03861 03862 if ( Usage->Length != sizeof( RTL_HEAP_USAGE )) 03863 return STATUS_INFO_LENGTH_MISMATCH; 03864 03865 memset( Usage, 0, sizeof( RTL_HEAP_USAGE )); 03866 Usage->Length = sizeof( RTL_HEAP_USAGE ); 03867 03868 RtlpDebugPageHeapEnterCritSect( HeapRoot, 0 ); 03869 UNPROTECT_HEAP_STRUCTURES( HeapRoot ); 03870 03871 Usage->BytesAllocated = HeapRoot->nBusyAllocationBytesAccessible; 03872 Usage->BytesCommitted = HeapRoot->nVirtualStorageBytes; 03873 Usage->BytesReserved = HeapRoot->nVirtualStorageBytes; 03874 Usage->BytesReservedMaximum = HeapRoot->nVirtualStorageBytes; 03875 03876 PROTECT_HEAP_STRUCTURES( HeapRoot ); 03877 RtlpDebugPageHeapLeaveCritSect( HeapRoot ); 03878 03879 return STATUS_SUCCESS; 03880 } 03881 03882 BOOLEAN 03883 RtlpDebugPageHeapIsLocked( 03884 IN PVOID HeapHandle 03885 ) 03886 { 03887 PDPH_HEAP_ROOT HeapRoot; 03888 03889 HeapRoot = RtlpDebugPageHeapPointerFromHandle( HeapHandle ); 03890 if ( HeapRoot == NULL ) 03891 return FALSE; 03892 03893 if ( RtlTryEnterCriticalSection( HeapRoot->HeapCritSect )) { 03894 RtlLeaveCriticalSection( HeapRoot->HeapCritSect ); 03895 return FALSE; 03896 } 03897 else { 03898 return TRUE; 03899 } 03900 } 03901 03905 03906 RtlpDphShouldAllocateInPageHeap ( 03907 PDPH_HEAP_ROOT HeapRoot, 03908 SIZE_T Size 03909 ) 03910 { 03911 SYSTEM_PERFORMANCE_INFORMATION PerfInfo; 03912 NTSTATUS Status; 03913 ULONG Random; 03914 ULONG Percentage; 03915 03916 // 03917 // If page heap is not enabled => normal heap. 03918 // 03919 03920 if (! 
(HeapRoot->ExtraFlags & PAGE_HEAP_ENABLE_PAGE_HEAP)) { 03921 return FALSE; 03922 } 03923 03924 // 03925 // If in size range => page heap 03926 // 03927 03928 else if ((HeapRoot->ExtraFlags & PAGE_HEAP_USE_SIZE_RANGE)) { 03929 03930 if (Size >= RtlpDphSizeRangeStart && Size <= RtlpDphSizeRangeEnd) { 03931 return TRUE; 03932 } 03933 else { 03934 return FALSE; 03935 } 03936 } 03937 03938 // 03939 // If in dll range => page heap 03940 // 03941 03942 else if ((HeapRoot->ExtraFlags & PAGE_HEAP_USE_DLL_RANGE)) { 03943 03944 PVOID StackTrace[32]; 03945 ULONG Count; 03946 ULONG Index; 03947 ULONG Hash; 03948 03949 Count = RtlCaptureStackBackTrace ( 03950 1, 03951 32, 03952 StackTrace, 03953 &Hash); 03954 03955 // 03956 // (SilviuC): should read DllRange as PVOIDs 03957 // 03958 03959 for (Index = 0; Index < Count; Index += 1) { 03960 if (PtrToUlong(StackTrace[Index]) >= RtlpDphDllRangeStart 03961 && PtrToUlong(StackTrace[Index]) <= RtlpDphDllRangeEnd) { 03962 03963 return TRUE; 03964 } 03965 } 03966 03967 return FALSE; 03968 } 03969 03970 // 03971 // If randomly decided => page heap 03972 // 03973 03974 else if ((HeapRoot->ExtraFlags & PAGE_HEAP_USE_RANDOM_DECISION)) { 03975 03976 Random = RtlRandom (& (HeapRoot->Seed)); 03977 03978 if ((Random % 100) < RtlpDphRandomProbability) { 03979 return TRUE; 03980 } 03981 else { 03982 return FALSE; 03983 } 03984 } 03985 03986 // 03987 // If call not generated from one of the target dlls => normal heap 03988 // 03989 03990 else if ((HeapRoot->ExtraFlags & PAGE_HEAP_USE_DLL_NAMES)) { 03991 03992 // We return false. The calls generated from target 03993 // dlls will never get into this function and therefore 03994 // we just return false signalling that we do not want 03995 // page heap verification for the rest of the world. 03996 // 03997 return FALSE; 03998 } 03999 04000 // 04001 // For all other cases we will allocate in the page heap. 
04002 // 04003 04004 else { 04005 return TRUE; 04006 } 04007 } 04008 04009 04010 04014 04015 VOID 04016 RtlpDphReportCorruptedBlock ( 04017 PVOID Block, 04018 ULONG Reason 04019 ) 04020 { 04021 DbgPrint ("Page heap: block @ %p is corrupted (reason %0X) \n", Block, Reason); 04022 04023 if ((Reason & DPH_ERROR_CORRUPTED_INFIX_PATTERN)) { 04024 DbgPrint ("Page heap: reason: corrupted infix pattern for freed block \n"); 04025 } 04026 if ((Reason & DPH_ERROR_CORRUPTED_START_STAMP)) { 04027 DbgPrint ("Page heap: reason: corrupted start stamp \n"); 04028 } 04029 if ((Reason & DPH_ERROR_CORRUPTED_END_STAMP)) { 04030 DbgPrint ("Page heap: reason: corrupted end stamp \n"); 04031 } 04032 if ((Reason & DPH_ERROR_CORRUPTED_HEAP_POINTER)) { 04033 DbgPrint ("Page heap: reason: corrupted heap pointer \n"); 04034 } 04035 if ((Reason & DPH_ERROR_CORRUPTED_PREFIX_PATTERN)) { 04036 DbgPrint ("Page heap: reason: corrupted prefix pattern \n"); 04037 } 04038 if ((Reason & DPH_ERROR_CORRUPTED_SUFFIX_PATTERN)) { 04039 DbgPrint ("Page heap: reason: corrupted suffix pattern \n"); 04040 } 04041 if ((Reason & DPH_ERROR_RAISED_EXCEPTION)) { 04042 DbgPrint ("Page heap: reason: raised exception while probing \n"); 04043 } 04044 04045 DbgBreakPoint (); 04046 } 04047 04048 BOOLEAN 04049 RtlpDphIsPageHeapBlock ( 04050 PDPH_HEAP_ROOT Heap, 04051 PVOID Block, 04052 PULONG Reason, 04053 BOOLEAN CheckPattern 04054 ) 04055 { 04056 PDPH_BLOCK_INFORMATION Info; 04057 BOOLEAN Corrupted = FALSE; 04058 PUCHAR Current; 04059 PUCHAR FillStart; 04060 PUCHAR FillEnd; 04061 04062 DEBUG_ASSERT (Reason != NULL); 04063 *Reason = 0; 04064 04065 try { 04066 04067 Info = (PDPH_BLOCK_INFORMATION)Block - 1; 04068 04069 // 04070 // Start checking ... 
04071 // 04072 04073 if (Info->StartStamp != DPH_PAGE_BLOCK_START_STAMP_ALLOCATED) { 04074 *Reason |= DPH_ERROR_CORRUPTED_START_STAMP; 04075 Corrupted = TRUE; 04076 } 04077 04078 if (Info->EndStamp != DPH_PAGE_BLOCK_END_STAMP_ALLOCATED) { 04079 *Reason |= DPH_ERROR_CORRUPTED_END_STAMP; 04080 Corrupted = TRUE; 04081 } 04082 04083 if (Info->Heap != Heap) { 04084 *Reason |= DPH_ERROR_CORRUPTED_HEAP_POINTER; 04085 Corrupted = TRUE; 04086 } 04087 04088 // 04089 // Check the block suffix byte pattern. 04090 // 04091 04092 if (CheckPattern) { 04093 04094 FillStart = (PUCHAR)Block + Info->RequestedSize; 04095 FillEnd = (PUCHAR)ROUNDUP2((ULONG_PTR)FillStart, PAGE_SIZE); 04096 04097 for (Current = FillStart; Current < FillEnd; Current++) { 04098 04099 if (*Current != DPH_PAGE_BLOCK_SUFFIX) { 04100 04101 *Reason |= DPH_ERROR_CORRUPTED_SUFFIX_PATTERN; 04102 Corrupted = TRUE; 04103 break; 04104 } 04105 } 04106 } 04107 } 04108 except (EXCEPTION_EXECUTE_HANDLER) { 04109 04110 *Reason |= DPH_ERROR_RAISED_EXCEPTION; 04111 Corrupted = TRUE; 04112 } 04113 04114 if (Corrupted) { 04115 return FALSE; 04116 } 04117 else { 04118 return TRUE; 04119 } 04120 } 04121 04122 BOOLEAN 04123 RtlpDphIsNormalHeapBlock ( 04124 PDPH_HEAP_ROOT Heap, 04125 PVOID Block, 04126 PULONG Reason, 04127 BOOLEAN CheckPattern 04128 ) 04129 { 04130 PDPH_BLOCK_INFORMATION Info; 04131 BOOLEAN Corrupted = FALSE; 04132 PUCHAR Current; 04133 PUCHAR FillStart; 04134 PUCHAR FillEnd; 04135 04136 DEBUG_ASSERT (Reason != NULL); 04137 *Reason = 0; 04138 04139 Info = (PDPH_BLOCK_INFORMATION)Block - 1; 04140 04141 try { 04142 04143 if (Info->Heap != Heap) { 04144 *Reason |= DPH_ERROR_CORRUPTED_HEAP_POINTER; 04145 Corrupted = TRUE; 04146 } 04147 04148 if (Info->StartStamp != DPH_NORMAL_BLOCK_START_STAMP_ALLOCATED) { 04149 *Reason |= DPH_ERROR_CORRUPTED_START_STAMP; 04150 Corrupted = TRUE; 04151 } 04152 04153 if (Info->EndStamp != DPH_NORMAL_BLOCK_END_STAMP_ALLOCATED) { 04154 *Reason |= DPH_ERROR_CORRUPTED_END_STAMP; 04155 
Corrupted = TRUE; 04156 } 04157 04158 // 04159 // Check the block suffix byte pattern. 04160 // 04161 04162 if (CheckPattern) { 04163 04164 FillStart = (PUCHAR)Block + Info->RequestedSize; 04165 FillEnd = FillStart + USER_ALIGNMENT; 04166 04167 for (Current = FillStart; Current < FillEnd; Current++) { 04168 04169 if (*Current != DPH_NORMAL_BLOCK_SUFFIX) { 04170 04171 *Reason |= DPH_ERROR_CORRUPTED_SUFFIX_PATTERN; 04172 Corrupted = TRUE; 04173 break; 04174 } 04175 } 04176 } 04177 } 04178 except (EXCEPTION_EXECUTE_HANDLER) { 04179 04180 *Reason |= DPH_ERROR_RAISED_EXCEPTION; 04181 Corrupted = TRUE; 04182 } 04183 04184 if (Corrupted) { 04185 return FALSE; 04186 } 04187 else { 04188 return TRUE; 04189 } 04190 } 04191 04192 BOOLEAN 04193 RtlpDphIsNormalFreeHeapBlock ( 04194 PVOID Block, 04195 PULONG Reason, 04196 BOOLEAN CheckPattern 04197 ) 04198 { 04199 PDPH_BLOCK_INFORMATION Info; 04200 BOOLEAN Corrupted = FALSE; 04201 PUCHAR Current; 04202 PUCHAR FillStart; 04203 PUCHAR FillEnd; 04204 04205 DEBUG_ASSERT (Reason != NULL); 04206 *Reason = 0; 04207 04208 Info = (PDPH_BLOCK_INFORMATION)Block - 1; 04209 04210 try { 04211 04212 // 04213 // If heap pointer is null we will just ignore this field. 04214 // This can happen during heap destroy operations where 04215 // the page heap got destroyed but the normal heap is still 04216 // alive. 04217 // 04218 04219 if (Info->StartStamp != DPH_NORMAL_BLOCK_START_STAMP_FREE) { 04220 *Reason |= DPH_ERROR_CORRUPTED_START_STAMP; 04221 Corrupted = TRUE; 04222 } 04223 04224 if (Info->EndStamp != DPH_NORMAL_BLOCK_END_STAMP_FREE) { 04225 *Reason |= DPH_ERROR_CORRUPTED_END_STAMP; 04226 Corrupted = TRUE; 04227 } 04228 04229 // 04230 // Check the block suffix byte pattern. 
04231 // 04232 04233 if (CheckPattern) { 04234 04235 FillStart = (PUCHAR)Block + Info->RequestedSize; 04236 FillEnd = FillStart + USER_ALIGNMENT; 04237 04238 for (Current = FillStart; Current < FillEnd; Current++) { 04239 04240 if (*Current != DPH_NORMAL_BLOCK_SUFFIX) { 04241 04242 *Reason |= DPH_ERROR_CORRUPTED_SUFFIX_PATTERN; 04243 Corrupted = TRUE; 04244 break; 04245 } 04246 } 04247 } 04248 04249 // 04250 // Check the block infix byte pattern. 04251 // 04252 04253 if (CheckPattern) { 04254 04255 FillStart = (PUCHAR)Block; 04256 FillEnd = FillStart 04257 + ((Info->RequestedSize > USER_ALIGNMENT) ? USER_ALIGNMENT : Info->RequestedSize); 04258 04259 for (Current = FillStart; Current < FillEnd; Current++) { 04260 04261 if (*Current != DPH_FREE_BLOCK_INFIX) { 04262 04263 *Reason |= DPH_ERROR_CORRUPTED_INFIX_PATTERN; 04264 Corrupted = TRUE; 04265 break; 04266 } 04267 } 04268 } 04269 } 04270 except (EXCEPTION_EXECUTE_HANDLER) { 04271 04272 *Reason |= DPH_ERROR_RAISED_EXCEPTION; 04273 Corrupted = TRUE; 04274 } 04275 04276 if (Corrupted) { 04277 return FALSE; 04278 } 04279 else { 04280 return TRUE; 04281 } 04282 } 04283 04284 BOOLEAN 04285 RtlpDphWritePageHeapBlockInformation ( 04286 PDPH_HEAP_ROOT Heap, 04287 PVOID Block, 04288 SIZE_T RequestedSize, 04289 SIZE_T ActualSize 04290 ) 04291 { 04292 PDPH_BLOCK_INFORMATION Info; 04293 PUCHAR FillStart; 04294 PUCHAR FillEnd; 04295 ULONG Hash; 04296 04297 // 04298 // Size and stamp information 04299 // 04300 04301 Info = (PDPH_BLOCK_INFORMATION)Block - 1; 04302 04303 Info->Heap = Heap; 04304 Info->RequestedSize = RequestedSize; 04305 Info->ActualSize = ActualSize; 04306 Info->StartStamp = DPH_PAGE_BLOCK_START_STAMP_ALLOCATED; 04307 Info->EndStamp = DPH_PAGE_BLOCK_END_STAMP_ALLOCATED; 04308 04309 // 04310 // Fill the block suffix pattern. 04311 // We fill up to USER_ALIGNMENT bytes. 
04312 // 04313 04314 FillStart = (PUCHAR)Block + RequestedSize; 04315 FillEnd = (PUCHAR)ROUNDUP2((ULONG_PTR)FillStart, PAGE_SIZE); 04316 04317 RtlFillMemory (FillStart, FillEnd - FillStart, DPH_PAGE_BLOCK_SUFFIX); 04318 04319 // 04320 // Capture stack trace 04321 // 04322 04323 if ((Heap->ExtraFlags & PAGE_HEAP_COLLECT_STACK_TRACES)) { 04324 Info->StackTrace = RtlpDphLogStackTrace (3); 04325 } 04326 else { 04327 Info->StackTrace = NULL; 04328 } 04329 04330 return TRUE; 04331 } 04332 04333 BOOLEAN 04334 RtlpDphWriteNormalHeapBlockInformation ( 04335 PDPH_HEAP_ROOT Heap, 04336 PVOID Block, 04337 SIZE_T RequestedSize, 04338 SIZE_T ActualSize 04339 ) 04340 { 04341 PDPH_BLOCK_INFORMATION Info; 04342 PUCHAR FillStart; 04343 PUCHAR FillEnd; 04344 ULONG Hash; 04345 ULONG Reason; 04346 04347 Info = (PDPH_BLOCK_INFORMATION)Block - 1; 04348 04349 // 04350 // Size and stamp information 04351 // 04352 04353 Info->Heap = Heap; 04354 Info->RequestedSize = RequestedSize; 04355 Info->ActualSize = ActualSize; 04356 Info->StartStamp = DPH_NORMAL_BLOCK_START_STAMP_ALLOCATED; 04357 Info->EndStamp = DPH_NORMAL_BLOCK_END_STAMP_ALLOCATED; 04358 04359 Info->FreeQueue.Blink = NULL; 04360 Info->FreeQueue.Flink = NULL; 04361 04362 // 04363 // Fill the block suffix pattern. 04364 // We fill only USER_ALIGNMENT bytes. 
04365 // 04366 04367 FillStart = (PUCHAR)Block + RequestedSize; 04368 FillEnd = FillStart + USER_ALIGNMENT; 04369 04370 RtlFillMemory (FillStart, FillEnd - FillStart, DPH_NORMAL_BLOCK_SUFFIX); 04371 04372 // 04373 // Capture stack trace 04374 // 04375 04376 if ((Heap->ExtraFlags & PAGE_HEAP_COLLECT_STACK_TRACES)) { 04377 04378 Info->StackTrace = RtlpDphLogStackTrace (4); 04379 04380 if (Info->StackTrace) { 04381 04382 RtlTraceDatabaseLock (RtlpDphTraceDatabase); 04383 ((PRTL_TRACE_BLOCK)(Info->StackTrace))->UserCount += 1; 04384 ((PRTL_TRACE_BLOCK)(Info->StackTrace))->UserSize += RequestedSize; 04385 ((PRTL_TRACE_BLOCK)(Info->StackTrace))->UserContext = Heap; 04386 RtlTraceDatabaseUnlock (RtlpDphTraceDatabase); 04387 } 04388 04389 } 04390 else { 04391 Info->StackTrace = NULL; 04392 } 04393 04394 return TRUE; 04395 } 04396 04397 04401 04402 PVOID 04403 RtlpDphNormalHeapAllocate ( 04404 PDPH_HEAP_ROOT Heap, 04405 ULONG Flags, 04406 SIZE_T Size 04407 ) 04408 { 04409 PVOID Block; 04410 PDPH_BLOCK_INFORMATION Info; 04411 ULONG Hash; 04412 SIZE_T ActualSize; 04413 SIZE_T RequestedSize; 04414 ULONG Reason; 04415 04416 BUMP_GLOBAL_COUNTER (DPH_COUNTER_NO_OF_NORMAL_ALLOCS); 04417 BUMP_SIZE_COUNTER (Size); 04418 04419 Heap->Counter[DPH_COUNTER_NO_OF_NORMAL_ALLOCS] += 1; 04420 04421 RequestedSize = Size; 04422 ActualSize = Size + sizeof(DPH_BLOCK_INFORMATION) + USER_ALIGNMENT; 04423 04424 Block = RtlAllocateHeap ( 04425 Heap->NormalHeap, 04426 Flags, 04427 ActualSize); 04428 04429 if (Block == NULL) { 04430 04431 // 04432 // (SilviuC): If we have memory pressure we might want 04433 // to trim the delayed free queues. We do not do this 04434 // right now because the threshold is kind of small. 04435 // 04436 04437 return NULL; 04438 } 04439 04440 RtlpDphWriteNormalHeapBlockInformation ( 04441 Heap, 04442 (PDPH_BLOCK_INFORMATION)Block + 1, 04443 RequestedSize, 04444 ActualSize); 04445 04446 if (! 
(Flags & HEAP_ZERO_MEMORY)) { 04447 04448 RtlFillMemory ((PDPH_BLOCK_INFORMATION)Block + 1, 04449 RequestedSize, 04450 DPH_NORMAL_BLOCK_INFIX); 04451 } 04452 04453 return (PVOID)((PDPH_BLOCK_INFORMATION)Block + 1); 04454 } 04455 04456 04457 BOOLEAN 04458 RtlpDphNormalHeapFree ( 04459 PDPH_HEAP_ROOT Heap, 04460 ULONG Flags, 04461 PVOID Block 04462 ) 04463 { 04464 PDPH_BLOCK_INFORMATION Info; 04465 BOOLEAN Success; 04466 ULONG Reason; 04467 ULONG Hash; 04468 SIZE_T TrimSize; 04469 04470 BUMP_GLOBAL_COUNTER (DPH_COUNTER_NO_OF_NORMAL_FREES); 04471 Heap->Counter[DPH_COUNTER_NO_OF_NORMAL_FREES] += 1; 04472 04473 Info = (PDPH_BLOCK_INFORMATION)Block - 1; 04474 04475 if (! RtlpDphIsNormalHeapBlock(Heap, Block, &Reason, TRUE)) { 04476 04477 RtlpDphReportCorruptedBlock (Block, Reason); 04478 04479 return FALSE; 04480 } 04481 04482 // 04483 // Save the free stack trace. 04484 // 04485 04486 if ((Heap->ExtraFlags & PAGE_HEAP_COLLECT_STACK_TRACES)) { 04487 04488 if (Info->StackTrace) { 04489 04490 RtlTraceDatabaseLock (RtlpDphTraceDatabase); 04491 ((PRTL_TRACE_BLOCK)(Info->StackTrace))->UserCount -= 1; 04492 ((PRTL_TRACE_BLOCK)(Info->StackTrace))->UserSize -= Info->RequestedSize; 04493 RtlTraceDatabaseUnlock (RtlpDphTraceDatabase); 04494 } 04495 04496 Info->StackTrace = RtlpDphLogStackTrace (3); 04497 } 04498 else { 04499 Info->StackTrace = NULL; 04500 } 04501 04502 // 04503 // Mark the block as freed. 04504 // 04505 04506 Info->StartStamp -= 1; 04507 Info->EndStamp -= 1; 04508 04509 // 04510 // Wipe out all the information in the block so that it cannot 04511 // be used while free. The pattern looks like a kernel pointer 04512 // and if we are lucky enough the buggy code might use a value 04513 // from the block as a pointer and instantly access violate. 
04514 // 04515 04516 RtlFillMemory ( 04517 Info + 1, 04518 Info->RequestedSize, 04519 DPH_FREE_BLOCK_INFIX); 04520 04521 // 04522 // It is useful during debugging sessions to not free at 04523 // all so that you can detect use after free, etc. 04524 // 04525 04526 if ((RtlpDphDebugLevel & DPH_DEBUG_NEVER_FREE)) { 04527 04528 return TRUE; 04529 } 04530 04531 // 04532 // Add block to the delayed free queue. 04533 // 04534 04535 RtlpDphAddToDelayedFreeQueue (Info); 04536 04537 // 04538 // If we are over the threshold we need to really free 04539 // some of the guys. 04540 // 04541 // (SilviuC): should make this threshold more fine tuned. 04542 // 04543 04544 Success = TRUE; 04545 04546 if (RtlpDphNeedToTrimDelayedFreeQueue(&TrimSize)) { 04547 04548 RtlpDphTrimDelayedFreeQueue (TrimSize, Flags); 04549 } 04550 04551 return Success; 04552 } 04553 04554 04555 PVOID 04556 RtlpDphNormalHeapReAllocate ( 04557 PDPH_HEAP_ROOT Heap, 04558 ULONG Flags, 04559 PVOID OldBlock, 04560 SIZE_T Size 04561 ) 04562 { 04563 PVOID Block; 04564 PDPH_BLOCK_INFORMATION Info; 04565 ULONG Hash; 04566 SIZE_T CopySize; 04567 ULONG Reason; 04568 04569 BUMP_GLOBAL_COUNTER (DPH_COUNTER_NO_OF_NORMAL_REALLOCS); 04570 BUMP_SIZE_COUNTER (Size); 04571 04572 Heap->Counter[DPH_COUNTER_NO_OF_NORMAL_REALLOCS] += 1; 04573 04574 Info = (PDPH_BLOCK_INFORMATION)OldBlock - 1; 04575 04576 if (! RtlpDphIsNormalHeapBlock(Heap, OldBlock, &Reason, TRUE)) { 04577 04578 RtlpDphReportCorruptedBlock (OldBlock, Reason); 04579 04580 return NULL; 04581 } 04582 04583 // 04584 // Note that this operation will bump the counters for 04585 // normal allocations. Decided to leave this situation 04586 // as it is. 04587 // 04588 04589 Block = RtlpDphNormalHeapAllocate (Heap, Flags, Size); 04590 04591 if (Block == NULL) { 04592 return NULL; 04593 } 04594 04595 // 04596 // Copy old block stuff into the new block and then 04597 // free old block. 
04598 // 04599 04600 if (Size < Info->RequestedSize) { 04601 CopySize = Size; 04602 } 04603 else { 04604 CopySize = Info->RequestedSize; 04605 } 04606 04607 RtlCopyMemory (Block, OldBlock, CopySize); 04608 04609 // 04610 // Free the old guy. 04611 // 04612 04613 RtlpDphNormalHeapFree (Heap, Flags, OldBlock); 04614 04615 return Block; 04616 } 04617 04618 04619 SIZE_T 04620 RtlpDphNormalHeapSize ( 04621 PDPH_HEAP_ROOT Heap, 04622 ULONG Flags, 04623 PVOID Block 04624 ) 04625 { 04626 PDPH_BLOCK_INFORMATION Info; 04627 SIZE_T Result; 04628 ULONG Reason; 04629 04630 Info = (PDPH_BLOCK_INFORMATION)Block - 1; 04631 04632 if (! RtlpDphIsNormalHeapBlock(Heap, Block, &Reason, FALSE)) { 04633 04634 // 04635 // We cannot stop here for a wrong block. 04636 // The users might use this function to validate 04637 // if a block belongs to the heap or not. However 04638 // they should use HeapValidate for that. 04639 // 04640 04641 #if DBG 04642 DbgPrint ("Page heap: warning: HeapSize called with " 04643 "invalid block @ %p (reason %0X) \n", Block, Reason); 04644 #endif 04645 04646 return (SIZE_T)-1; 04647 } 04648 04649 Result = RtlSizeHeap ( 04650 Heap->NormalHeap, 04651 Flags, 04652 Info); 04653 04654 if (Result == (SIZE_T)-1) { 04655 return Result; 04656 } 04657 else { 04658 return Result - sizeof(*Info) - USER_ALIGNMENT; 04659 } 04660 } 04661 04662 04663 BOOLEAN 04664 RtlpDphNormalHeapSetUserFlags( 04665 IN PDPH_HEAP_ROOT Heap, 04666 IN ULONG Flags, 04667 IN PVOID Address, 04668 IN ULONG UserFlagsReset, 04669 IN ULONG UserFlagsSet 04670 ) 04671 { 04672 BOOLEAN Success; 04673 ULONG Reason; 04674 04675 if (! 
RtlpDphIsNormalHeapBlock(Heap, Address, &Reason, FALSE)) { 04676 04677 RtlpDphReportCorruptedBlock (Address, Reason); 04678 04679 return FALSE; 04680 } 04681 04682 Success = RtlSetUserFlagsHeap ( 04683 Heap->NormalHeap, 04684 Flags, 04685 (PDPH_BLOCK_INFORMATION)Address - 1, 04686 UserFlagsReset, 04687 UserFlagsSet); 04688 04689 return Success; 04690 } 04691 04692 04693 BOOLEAN 04694 RtlpDphNormalHeapSetUserValue( 04695 IN PDPH_HEAP_ROOT Heap, 04696 IN ULONG Flags, 04697 IN PVOID Address, 04698 IN PVOID UserValue 04699 ) 04700 { 04701 BOOLEAN Success; 04702 ULONG Reason; 04703 04704 if (! RtlpDphIsNormalHeapBlock(Heap, Address, &Reason, FALSE)) { 04705 04706 RtlpDphReportCorruptedBlock (Address, Reason); 04707 04708 return FALSE; 04709 } 04710 04711 Success = RtlSetUserValueHeap ( 04712 Heap->NormalHeap, 04713 Flags, 04714 (PDPH_BLOCK_INFORMATION)Address - 1, 04715 UserValue); 04716 04717 return Success; 04718 } 04719 04720 04721 BOOLEAN 04722 RtlpDphNormalHeapGetUserInfo( 04723 IN PDPH_HEAP_ROOT Heap, 04724 IN ULONG Flags, 04725 IN PVOID Address, 04726 OUT PVOID* UserValue, 04727 OUT PULONG UserFlags 04728 ) 04729 { 04730 BOOLEAN Success; 04731 ULONG Reason; 04732 04733 if (! RtlpDphIsNormalHeapBlock(Heap, Address, &Reason, FALSE)) { 04734 04735 RtlpDphReportCorruptedBlock (Address, Reason); 04736 04737 return FALSE; 04738 } 04739 04740 Success = RtlGetUserInfoHeap ( 04741 Heap->NormalHeap, 04742 Flags, 04743 (PDPH_BLOCK_INFORMATION)Address - 1, 04744 UserValue, 04745 UserFlags); 04746 04747 return Success; 04748 } 04749 04750 04751 BOOLEAN 04752 RtlpDphNormalHeapValidate( 04753 IN PDPH_HEAP_ROOT Heap, 04754 IN ULONG Flags, 04755 IN PVOID Address 04756 ) 04757 { 04758 BOOLEAN Success; 04759 ULONG Reason; 04760 04761 if (Address == NULL) { 04762 04763 // 04764 // Validation for the whole heap. 
04765 // 04766 04767 Success = RtlValidateHeap ( 04768 Heap->NormalHeap, 04769 Flags, 04770 Address); 04771 } 04772 else { 04773 04774 // 04775 // Validation for a heap block. 04776 // 04777 04778 if (! RtlpDphIsNormalHeapBlock(Heap, Address, &Reason, TRUE)) { 04779 04780 // 04781 // We cannot break in this case because the function might indeed 04782 // be called with invalid block. 04783 // 04784 // (SilviuC): we will leave this as a warning and delete it only 04785 // if it becomes annoying. 04786 // 04787 04788 #if DBG 04789 DbgPrint ("Page heap: warning: validate called with " 04790 "invalid block @ %p (reason %0X) \n", Address, Reason); 04791 #endif 04792 04793 return FALSE; 04794 } 04795 04796 Success = RtlValidateHeap ( 04797 Heap->NormalHeap, 04798 Flags, 04799 (PDPH_BLOCK_INFORMATION)Address - 1); 04800 } 04801 04802 return Success; 04803 } 04804 04805 04809 04810 04811 RTL_CRITICAL_SECTION RtlpDphDelayedFreeQueueLock; 04812 04813 SIZE_T RtlpDphMemoryUsedByDelayedFreeBlocks; 04814 SIZE_T RtlpDphNumberOfDelayedFreeBlocks; 04815 04816 LIST_ENTRY RtlpDphDelayedFreeQueue; 04817 04818 VOID 04819 RtlpDphInitializeDelayedFreeQueue ( 04820 ) 04821 { 04822 RtlInitializeCriticalSection (&RtlpDphDelayedFreeQueueLock); 04823 InitializeListHead (&RtlpDphDelayedFreeQueue); 04824 04825 RtlpDphMemoryUsedByDelayedFreeBlocks = 0; 04826 RtlpDphNumberOfDelayedFreeBlocks = 0; 04827 } 04828 04829 04830 VOID 04831 RtlpDphAddToDelayedFreeQueue ( 04832 PDPH_BLOCK_INFORMATION Info 04833 ) 04834 { 04835 RtlEnterCriticalSection (&RtlpDphDelayedFreeQueueLock); 04836 04837 InsertTailList (&(RtlpDphDelayedFreeQueue), &(Info->FreeQueue)); 04838 04839 RtlpDphMemoryUsedByDelayedFreeBlocks += Info->ActualSize; 04840 RtlpDphNumberOfDelayedFreeBlocks += 1; 04841 04842 RtlLeaveCriticalSection (&RtlpDphDelayedFreeQueueLock); 04843 } 04844 04845 BOOLEAN 04846 RtlpDphNeedToTrimDelayedFreeQueue ( 04847 PSIZE_T TrimSize 04848 ) 04849 { 04850 BOOLEAN Result; 04851 04852 RtlEnterCriticalSection 
(&RtlpDphDelayedFreeQueueLock); 04853 04854 if (RtlpDphMemoryUsedByDelayedFreeBlocks > RtlpDphDelayedFreeCacheSize) { 04855 04856 *TrimSize = RtlpDphMemoryUsedByDelayedFreeBlocks - RtlpDphDelayedFreeCacheSize; 04857 04858 if (*TrimSize < PAGE_SIZE) { 04859 *TrimSize = PAGE_SIZE; 04860 } 04861 04862 Result = TRUE; 04863 } 04864 else { 04865 04866 Result = FALSE; 04867 } 04868 04869 RtlLeaveCriticalSection (&RtlpDphDelayedFreeQueueLock); 04870 return Result; 04871 } 04872 04873 VOID 04874 RtlpDphTrimDelayedFreeQueue ( 04875 SIZE_T TrimSize, 04876 ULONG Flags 04877 ) 04878 /*++ 04879 04880 Routine Description: 04881 04882 This routine trims the delayed free queue (global per process). 04883 If trim size is zero it will trim up to a global threshold 04884 (RtlpDphDelayedFreeCacheSize) otherwise uses `TrimSize'. 04885 04886 Note. This function might become a little bit of a bottleneck 04887 because it is called by every free operation. Because of this 04888 it is better to always call RtlpDphNeedToTrimDelayedFreeQueue 04889 first. 04890 04891 Arguments: 04892 04893 TrimSize: amount to trim (in bytes). If zero it trims down to 04894 a global threshold. 04895 04896 Flags: flags for free operation. 04897 04898 Return Value: 04899 04900 None. 04901 04902 Environment: 04903 04904 Called from RtlpDphNormalXxx (normal heap management) routines. 04905 04906 --*/ 04907 04908 { 04909 ULONG Reason; 04910 SIZE_T CurrentTrimmed = 0; 04911 PDPH_BLOCK_INFORMATION QueueBlock; 04912 PLIST_ENTRY ListEntry; 04913 04914 RtlEnterCriticalSection (&RtlpDphDelayedFreeQueueLock); 04915 04916 if (TrimSize == 0) { 04917 if (RtlpDphMemoryUsedByDelayedFreeBlocks > RtlpDphDelayedFreeCacheSize) { 04918 04919 TrimSize = RtlpDphMemoryUsedByDelayedFreeBlocks - RtlpDphDelayedFreeCacheSize; 04920 } 04921 } 04922 04923 while (TRUE) { 04924 04925 // 04926 // Did we achieve our trimming goal? 
04927 // 04928 04929 if (CurrentTrimmed >= TrimSize) { 04930 break; 04931 } 04932 04933 // 04934 // The list can get empty since we remove blocks from it. 04935 // 04936 04937 if (IsListEmpty(&RtlpDphDelayedFreeQueue)) { 04938 break; 04939 } 04940 04941 ListEntry = RemoveHeadList (&RtlpDphDelayedFreeQueue); 04942 QueueBlock = CONTAINING_RECORD (ListEntry, DPH_BLOCK_INFORMATION, FreeQueue); 04943 04944 if (! RtlpDphIsNormalFreeHeapBlock(QueueBlock + 1, &Reason, TRUE)) { 04945 04946 RtlpDphReportCorruptedBlock (QueueBlock + 1, Reason); 04947 } 04948 04949 RtlpDphMemoryUsedByDelayedFreeBlocks -= QueueBlock->ActualSize; 04950 RtlpDphNumberOfDelayedFreeBlocks -= 1; 04951 CurrentTrimmed += QueueBlock->ActualSize; 04952 04953 RtlFreeHeap (((PDPH_HEAP_ROOT)(QueueBlock->Heap))->NormalHeap, Flags, QueueBlock); 04954 } 04955 04956 RtlLeaveCriticalSection (&RtlpDphDelayedFreeQueueLock); 04957 } 04958 04959 04960 VOID 04961 RtlpDphFreeDelayedBlocksFromHeap ( 04962 PVOID PageHeap, 04963 PVOID NormalHeap 04964 ) 04965 { 04966 ULONG Reason; 04967 PDPH_BLOCK_INFORMATION Block; 04968 PLIST_ENTRY Current; 04969 PLIST_ENTRY Next; 04970 04971 RtlEnterCriticalSection (&RtlpDphDelayedFreeQueueLock); 04972 04973 for (Current = RtlpDphDelayedFreeQueue.Flink; 04974 Current != &RtlpDphDelayedFreeQueue; 04975 Current = Next) { 04976 04977 Next = Current->Flink; 04978 04979 Block = CONTAINING_RECORD (Current, DPH_BLOCK_INFORMATION, FreeQueue); 04980 04981 if (Block->Heap != PageHeap) { 04982 continue; 04983 } 04984 04985 // 04986 // We need to delete this block; 04987 // 04988 04989 RemoveEntryList (Current); 04990 Block = CONTAINING_RECORD (Current, DPH_BLOCK_INFORMATION, FreeQueue); 04991 04992 // 04993 // Prevent probing of this field during RtlpDphIsNormalFreeBlock. 04994 // 04995 04996 Block->Heap = 0; 04997 04998 // 04999 // Check if the block about to be freed was touched. 05000 // 05001 05002 if (! 
RtlpDphIsNormalFreeHeapBlock(Block + 1, &Reason, TRUE)) { 05003 05004 RtlpDphReportCorruptedBlock (Block + 1, Reason); 05005 } 05006 05007 RtlpDphMemoryUsedByDelayedFreeBlocks -= Block->ActualSize; 05008 RtlpDphNumberOfDelayedFreeBlocks -= 1; 05009 05010 // 05011 // (SilviuC): Not sure what flags to use here. Zero should work 05012 // but I have to investigate. 05013 // 05014 05015 RtlFreeHeap (NormalHeap, 0, Block); 05016 } 05017 05018 RtlLeaveCriticalSection (&RtlpDphDelayedFreeQueueLock); 05019 } 05020 05024 05025 PRTL_TRACE_BLOCK 05026 RtlpDphLogStackTrace ( 05027 ULONG FramesToSkip 05028 ) 05029 { 05030 PVOID Trace [DPH_MAX_STACK_LENGTH]; 05031 ULONG Hash; 05032 ULONG Count; 05033 PRTL_TRACE_BLOCK Block; 05034 BOOLEAN Result; 05035 05036 Count = RtlCaptureStackBackTrace ( 05037 1 + FramesToSkip, 05038 DPH_MAX_STACK_LENGTH, 05039 Trace, 05040 &Hash); 05041 05042 if (Count == 0 || RtlpDphTraceDatabase == NULL) { 05043 return NULL; 05044 } 05045 05046 Result = RtlTraceDatabaseAdd ( 05047 RtlpDphTraceDatabase, 05048 Count, 05049 Trace, 05050 &Block); 05051 05052 if (Result == FALSE) { 05053 return NULL; 05054 } 05055 else { 05056 return Block; 05057 } 05058 } 05059 05063 05064 RTL_CRITICAL_SECTION RtlpDphTargetDllsLock; 05065 LIST_ENTRY RtlpDphTargetDllsList; 05066 BOOLEAN RtlpDphTargetDllsInitialized; 05067 05068 typedef struct _DPH_TARGET_DLL { 05069 05070 LIST_ENTRY List; 05071 UNICODE_STRING Name; 05072 PVOID StartAddress; 05073 PVOID EndAddress; 05074 05075 } DPH_TARGET_DLL, * PDPH_TARGET_DLL; 05076 05077 VOID 05078 RtlpDphTargetDllsLogicInitialize ( 05079 ) 05080 { 05081 RtlInitializeCriticalSection (&RtlpDphTargetDllsLock); 05082 InitializeListHead (&RtlpDphTargetDllsList); 05083 RtlpDphTargetDllsInitialized = TRUE; 05084 } 05085 05086 VOID 05087 RtlpDphTargetDllsLoadCallBack ( 05088 PUNICODE_STRING Name, 05089 PVOID Address, 05090 ULONG Size 05091 ) 05092 // 05093 // This function is not called right now but it will get called 05094 // from 
\base\ntdll\ldrapi.c whenever a dll gets loaded. This 05095 // gives page heap the opportunity to update per dll data structures 05096 // that are not used right now for anything. 05097 // 05098 { 05099 PDPH_TARGET_DLL Descriptor; 05100 05101 // 05102 // Get out if we are in some weird condition. 05103 // 05104 05105 if (! RtlpDphTargetDllsInitialized) { 05106 return; 05107 } 05108 05109 if (! RtlpDphIsDllTargeted (Name->Buffer)) { 05110 return; 05111 } 05112 05113 Descriptor = RtlAllocateHeap (RtlProcessHeap(), 0, sizeof *Descriptor); 05114 05115 if (Descriptor == NULL) { 05116 return; 05117 } 05118 05119 if (! RtlCreateUnicodeString (&(Descriptor->Name), Name->Buffer)) { 05120 RtlFreeHeap (RtlProcessHeap(), 0, Descriptor); 05121 return; 05122 } 05123 05124 Descriptor->StartAddress = Address; 05125 Descriptor->EndAddress = (PUCHAR)Address + Size; 05126 05127 RtlEnterCriticalSection (&RtlpDphTargetDllsLock); 05128 InsertTailList (&(RtlpDphTargetDllsList), &(Descriptor->List)); 05129 RtlLeaveCriticalSection (&RtlpDphTargetDllsLock); 05130 05131 // 05132 // SilviuC: This message should be printed only if a target 05133 // dll has been identified. 
05134 // 05135 05136 DbgPrint("Page heap: loaded target dll %ws [%p - %p]\n", 05137 Descriptor->Name.Buffer, 05138 Descriptor->StartAddress, 05139 Descriptor->EndAddress); 05140 } 05141 05142 const WCHAR * 05143 RtlpDphIsDllTargeted ( 05144 const WCHAR * Name 05145 ) 05146 { 05147 const WCHAR * All; 05148 ULONG I, J; 05149 05150 All = RtlpDphTargetDllsUnicode.Buffer; 05151 05152 for (I = 0; All[I]; I += 1) { 05153 05154 for (J = 0; All[I+J] && Name[J]; J += 1) { 05155 if (RtlUpcaseUnicodeChar(All[I+J]) != RtlUpcaseUnicodeChar(Name[J])) { 05156 break; 05157 } 05158 } 05159 05160 if (Name[J]) { 05161 continue; 05162 } 05163 else { 05164 // we got to the end of string 05165 return &(All[I]); 05166 } 05167 } 05168 05169 return NULL; 05170 } 05171 05175 05176 PDPH_HEAP_BLOCK 05177 RtlpDphSearchBlockInList ( 05178 PDPH_HEAP_BLOCK List, 05179 PUCHAR Address 05180 ) 05181 { 05182 PDPH_HEAP_BLOCK Current; 05183 05184 for (Current = List; Current; Current = Current->pNextAlloc) { 05185 if (Current->pVirtualBlock == Address) { 05186 return Current; 05187 } 05188 } 05189 05190 return NULL; 05191 } 05192 05193 PVOID RtlpDphLastValidationStack; 05194 PVOID RtlpDphCurrentValidationStack; 05195 05196 VOID 05197 RtlpDphInternalValidatePageHeap ( 05198 PDPH_HEAP_ROOT Heap, 05199 PUCHAR ExemptAddress, 05200 SIZE_T ExemptSize 05201 ) 05202 { 05203 PDPH_HEAP_BLOCK Range; 05204 PDPH_HEAP_BLOCK Node; 05205 PUCHAR Address; 05206 BOOLEAN FoundLeak; 05207 05208 RtlpDphLastValidationStack = RtlpDphCurrentValidationStack; 05209 RtlpDphCurrentValidationStack = RtlpDphLogStackTrace (0); 05210 FoundLeak = FALSE; 05211 05212 for (Range = Heap->pVirtualStorageListHead; 05213 Range != NULL; 05214 Range = Range->pNextAlloc) { 05215 05216 Address = Range->pVirtualBlock; 05217 05218 while (Address < Range->pVirtualBlock + Range->nVirtualBlockSize) { 05219 05220 // 05221 // Ignore DPH_HEAP_ROOT structures. 
05222 // 05223 05224 if ((Address >= (PUCHAR)Heap - PAGE_SIZE) && (Address < (PUCHAR)Heap + 5 * PAGE_SIZE)) { 05225 Address += PAGE_SIZE; 05226 continue; 05227 } 05228 05229 // 05230 // Ignore exempt region (temporarily out of all structures). 05231 // 05232 05233 if ((Address >= ExemptAddress) && (Address < ExemptAddress + ExemptSize)) { 05234 Address += PAGE_SIZE; 05235 continue; 05236 } 05237 05238 Node = RtlpDphSearchBlockInList (Heap->pBusyAllocationListHead, Address); 05239 05240 if (Node) { 05241 Address += Node->nVirtualBlockSize; 05242 continue; 05243 } 05244 05245 Node = RtlpDphSearchBlockInList (Heap->pFreeAllocationListHead, Address); 05246 05247 if (Node) { 05248 Address += Node->nVirtualBlockSize; 05249 continue; 05250 } 05251 05252 Node = RtlpDphSearchBlockInList (Heap->pAvailableAllocationListHead, Address); 05253 05254 if (Node) { 05255 Address += Node->nVirtualBlockSize; 05256 continue; 05257 } 05258 05259 Node = RtlpDphSearchBlockInList (Heap->pNodePoolListHead, Address); 05260 05261 if (Node) { 05262 Address += Node->nVirtualBlockSize; 05263 continue; 05264 } 05265 05266 DbgPrint ("Block @ %p has been leaked \n", Address); 05267 FoundLeak = TRUE; 05268 05269 Address += PAGE_SIZE; 05270 } 05271 } 05272 05273 if (FoundLeak) { 05274 05275 DbgPrint ("Page heap: Last stack @ %p, Current stack @ %p \n", 05276 RtlpDphLastValidationStack, 05277 RtlpDphCurrentValidationStack); 05278 05279 DbgBreakPoint (); 05280 } 05281 } 05282 05283 05284 #endif // DEBUG_PAGE_HEAP 05285 05286 // 05287 // End of module 05288 //

Generated on Sat May 15 19:40:16 2004 for test by doxygen 1.3.7