Main Page | Class Hierarchy | Class List | File List | Class Members | File Members

pool.c

Go to the documentation of this file.
00001 /*++ 00002 00003 Copyright (c) 1989-1994 Microsoft Corporation 00004 00005 Module Name: 00006 00007 pool.c 00008 00009 Abstract: 00010 00011 This module implements the NT executive pool allocator. 00012 00013 Author: 00014 00015 Mark Lucovsky 16-feb-1989 00016 Lou Perazzoli 31-Aug-1991 (change from binary buddy) 00017 David N. Cutler (davec) 27-May-1994 00018 Landy Wang 17-Oct-1997 00019 00020 Environment: 00021 00022 Kernel mode only 00023 00024 Revision History: 00025 00026 --*/ 00027 00028 #include "exp.h" 00029 00030 #pragma hdrstop 00031 00032 #undef ExAllocatePoolWithTag 00033 #undef ExAllocatePool 00034 #undef ExAllocatePoolWithQuota 00035 #undef ExAllocatePoolWithQuotaTag 00036 #undef ExFreePoolWithTag 00037 00038 #if defined (_WIN64) 00039 #define POOL_QUOTA_ENABLED (TRUE) 00040 #else 00041 #define POOL_QUOTA_ENABLED (PoolTrackTable == NULL) 00042 #endif 00043 00044 // 00045 // These bitfield definitions are based on EX_POOL_PRIORITY in inc\ex.h. 00046 // 00047 00048 #define POOL_SPECIAL_POOL_BIT 0x8 00049 #define POOL_SPECIAL_POOL_UNDERRUN_BIT 0x1 00050 00051 // 00052 // FREE_CHECK_ERESOURCE - If enabled causes each free pool to verify 00053 // no active ERESOURCEs are in the pool block being freed. 00054 // 00055 // FREE_CHECK_KTIMER - If enabled causes each free pool to verify no 00056 // active KTIMERs are in the pool block being freed. 
00057 // 00058 00059 #if DBG 00060 00061 #define FREE_CHECK_ERESOURCE(Va, NumberOfBytes) \ 00062 ExpCheckForResource(Va, NumberOfBytes) 00063 00064 #define FREE_CHECK_KTIMER(Va, NumberOfBytes) \ 00065 KeCheckForTimer(Va, NumberOfBytes) 00066 00067 #define FREE_CHECK_WORKER(Va, NumberOfBytes) \ 00068 ExpCheckForWorker(Va, NumberOfBytes) 00069 00070 #else 00071 00072 #define FREE_CHECK_ERESOURCE(Va, NumberOfBytes) 00073 #define FREE_CHECK_KTIMER(Va, NumberOfBytes) 00074 #define FREE_CHECK_WORKER(Va, NumberOfBytes) 00075 00076 #endif 00077 00078 00079 #if defined(_ALPHA_) && !defined(_AXP64_) 00080 00081 // 00082 // On Alpha32, Entry->PoolType cannot be updated without 00083 // synchronizing with updates to Entry->PreviousSize. 00084 // Otherwise, the lack of byte granularity can cause one 00085 // update to get lost. 00086 // 00087 00088 #define _POOL_LOCK_GRANULAR_ 1 00089 00090 #define LOCK_POOL_GRANULAR(PoolDesc, LockHandle) \ 00091 LOCK_POOL(PoolDesc, LockHandle); 00092 00093 #define UNLOCK_POOL_GRANULAR(PoolDesc, LockHandle) \ 00094 UNLOCK_POOL(PoolDesc, LockHandle); 00095 00096 #else 00097 00098 #define LOCK_POOL_GRANULAR(PoolDesc, LockHandle) 00099 #define UNLOCK_POOL_GRANULAR(PoolDesc, LockHandle) 00100 00101 #endif 00102 00103 00104 // 00105 // We redefine the LIST_ENTRY macros to have each pointer biased 00106 // by one so any rogue code using these pointers will access 00107 // violate. See \nt\public\sdk\inc\ntrtl.h for the original 00108 // definition of these macros. 00109 // 00110 // This is turned off in the shipping product. 
00111 // 00112 00113 #ifndef NO_POOL_CHECKS 00114 00115 ULONG ExpPoolBugCheckLine; 00116 00117 PVOID 00118 MmSqueezeBadTags ( 00119 IN SIZE_T NumberOfBytes, 00120 IN ULONG Tag, 00121 IN POOL_TYPE PoolType, 00122 IN ULONG SpecialPoolType 00123 ); 00124 00125 #define DecodeLink(Link) ((PLIST_ENTRY)((ULONG_PTR)(Link) & ~1)) 00126 #define EncodeLink(Link) ((PLIST_ENTRY)((ULONG_PTR)(Link) | 1)) 00127 00128 #define PrivateInitializeListHead(ListHead) ( \ 00129 (ListHead)->Flink = (ListHead)->Blink = EncodeLink(ListHead)) 00130 00131 #define PrivateIsListEmpty(ListHead) \ 00132 (DecodeLink((ListHead)->Flink) == (ListHead)) 00133 00134 #define PrivateRemoveHeadList(ListHead) \ 00135 DecodeLink((ListHead)->Flink); \ 00136 {PrivateRemoveEntryList(DecodeLink((ListHead)->Flink))} 00137 00138 #define PrivateRemoveTailList(ListHead) \ 00139 DecodeLink((ListHead)->Blink); \ 00140 {PrivateRemoveEntryList(DecodeLink((ListHead)->Blink))} 00141 00142 #define PrivateRemoveEntryList(Entry) { \ 00143 PLIST_ENTRY _EX_Blink; \ 00144 PLIST_ENTRY _EX_Flink; \ 00145 _EX_Flink = DecodeLink((Entry)->Flink); \ 00146 _EX_Blink = DecodeLink((Entry)->Blink); \ 00147 _EX_Blink->Flink = EncodeLink(_EX_Flink); \ 00148 _EX_Flink->Blink = EncodeLink(_EX_Blink); \ 00149 } 00150 00151 #define PrivateInsertTailList(ListHead,Entry) { \ 00152 PLIST_ENTRY _EX_Blink; \ 00153 PLIST_ENTRY _EX_ListHead; \ 00154 _EX_ListHead = (ListHead); \ 00155 _EX_Blink = DecodeLink(_EX_ListHead->Blink); \ 00156 (Entry)->Flink = EncodeLink(_EX_ListHead); \ 00157 (Entry)->Blink = EncodeLink(_EX_Blink); \ 00158 _EX_Blink->Flink = EncodeLink(Entry); \ 00159 _EX_ListHead->Blink = EncodeLink(Entry); \ 00160 } 00161 00162 #define PrivateInsertHeadList(ListHead,Entry) { \ 00163 PLIST_ENTRY _EX_Flink; \ 00164 PLIST_ENTRY _EX_ListHead; \ 00165 _EX_ListHead = (ListHead); \ 00166 _EX_Flink = DecodeLink(_EX_ListHead->Flink); \ 00167 (Entry)->Flink = EncodeLink(_EX_Flink); \ 00168 (Entry)->Blink = EncodeLink(_EX_ListHead); \ 00169 
_EX_Flink->Blink = EncodeLink(Entry); \ 00170 _EX_ListHead->Flink = EncodeLink(Entry); \ 00171 } 00172 00173 #define CHECK_LIST(LINE,LIST,ENTRY) \ 00174 if ((DecodeLink(DecodeLink((LIST)->Flink)->Blink) != (LIST)) || \ 00175 (DecodeLink(DecodeLink((LIST)->Blink)->Flink) != (LIST))) { \ 00176 ExpPoolBugCheckLine = LINE; \ 00177 KeBugCheckEx (BAD_POOL_HEADER, \ 00178 3, \ 00179 (ULONG_PTR)LIST, \ 00180 (ULONG_PTR)DecodeLink(DecodeLink((LIST)->Flink)->Blink), \ 00181 (ULONG_PTR)DecodeLink(DecodeLink((LIST)->Blink)->Flink)); \ 00182 } 00183 00184 #define CHECK_POOL_HEADER(LINE,ENTRY) { \ 00185 PPOOL_HEADER PreviousEntry; \ 00186 PPOOL_HEADER NextEntry; \ 00187 if ((ENTRY)->PreviousSize != 0) { \ 00188 PreviousEntry = (PPOOL_HEADER)((PPOOL_BLOCK)(ENTRY) - (ENTRY)->PreviousSize); \ 00189 if ((PreviousEntry->BlockSize != (ENTRY)->PreviousSize) || \ 00190 (DECODE_POOL_INDEX(PreviousEntry) != DECODE_POOL_INDEX(ENTRY))) { \ 00191 ExpPoolBugCheckLine = LINE; \ 00192 KeBugCheckEx(BAD_POOL_HEADER, 5, (ULONG_PTR)PreviousEntry, LINE, (ULONG_PTR)ENTRY); \ 00193 } \ 00194 } \ 00195 NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)(ENTRY) + (ENTRY)->BlockSize); \ 00196 if (!PAGE_END(NextEntry)) { \ 00197 if ((NextEntry->PreviousSize != (ENTRY)->BlockSize) || \ 00198 (DECODE_POOL_INDEX(NextEntry) != DECODE_POOL_INDEX(ENTRY))) { \ 00199 ExpPoolBugCheckLine = LINE; \ 00200 KeBugCheckEx(BAD_POOL_HEADER, 5, (ULONG_PTR)NextEntry, LINE, (ULONG_PTR)ENTRY); \ 00201 } \ 00202 } \ 00203 } 00204 00205 #define ASSERT_ALLOCATE_IRQL(_PoolType, _NumberOfBytes) \ 00206 if ((_PoolType & BASE_POOL_TYPE_MASK) == PagedPool) { \ 00207 if (KeGetCurrentIrql() > APC_LEVEL) { \ 00208 KeBugCheckEx (BAD_POOL_CALLER, 8, KeGetCurrentIrql(), _PoolType, _NumberOfBytes); \ 00209 } \ 00210 } \ 00211 else { \ 00212 if (KeGetCurrentIrql() > DISPATCH_LEVEL) { \ 00213 KeBugCheckEx (BAD_POOL_CALLER, 8, KeGetCurrentIrql(), _PoolType, _NumberOfBytes); \ 00214 } \ 00215 } 00216 00217 #define ASSERT_FREE_IRQL(_PoolType, _P) \ 00218 
if ((_PoolType & BASE_POOL_TYPE_MASK) == PagedPool) { \ 00219 if (KeGetCurrentIrql() > APC_LEVEL) { \ 00220 KeBugCheckEx (BAD_POOL_CALLER, 9, KeGetCurrentIrql(), _PoolType, (ULONG_PTR)_P); \ 00221 } \ 00222 } \ 00223 else { \ 00224 if (KeGetCurrentIrql() > DISPATCH_LEVEL) { \ 00225 KeBugCheckEx (BAD_POOL_CALLER, 9, KeGetCurrentIrql(), _PoolType, (ULONG_PTR)P); \ 00226 } \ 00227 } 00228 00229 #define ASSERT_POOL_NOT_FREE(_Entry) \ 00230 if ((_Entry->PoolType & POOL_TYPE_MASK) == 0) { \ 00231 KeBugCheckEx (BAD_POOL_CALLER, 6, __LINE__, (ULONG_PTR)_Entry, _Entry->Ulong1); \ 00232 } 00233 00234 #define ASSERT_POOL_TYPE_NOT_ZERO(_Entry) \ 00235 if (_Entry->PoolType == 0) { \ 00236 KeBugCheckEx(BAD_POOL_CALLER, 1, (ULONG_PTR)_Entry, (ULONG_PTR)(*(PULONG)_Entry), 0); \ 00237 } 00238 00239 #define CHECK_LOOKASIDE_LIST(LINE,LIST,ENTRY) {NOTHING;} 00240 00241 #else 00242 00243 #define DecodeLink(Link) ((PLIST_ENTRY)((ULONG_PTR)(Link))) 00244 #define EncodeLink(Link) ((PLIST_ENTRY)((ULONG_PTR)(Link))) 00245 #define PrivateInitializeListHead InitializeListHead 00246 #define PrivateIsListEmpty IsListEmpty 00247 #define PrivateRemoveHeadList RemoveHeadList 00248 #define PrivateRemoveTailList RemoveTailList 00249 #define PrivateRemoveEntryList RemoveEntryList 00250 #define PrivateInsertTailList InsertTailList 00251 #define PrivateInsertHeadList InsertHeadList 00252 00253 #define ASSERT_ALLOCATE_IRQL(_PoolType, _P) {NOTHING;} 00254 #define ASSERT_FREE_IRQL(_PoolType, _P) {NOTHING;} 00255 #define ASSERT_POOL_NOT_FREE(_Entry) {NOTHING;} 00256 #define ASSERT_POOL_TYPE_NOT_ZERO(_Entry) {NOTHING;} 00257 00258 // 00259 // The check list macros come in two flavors - there is one in the checked 00260 // and free build that will bugcheck the system if a list is ill-formed, and 00261 // there is one for the final shipping version that has all the checked 00262 // disabled. 
00263 // 00264 // The check lookaside list macros also comes in two flavors and is used to 00265 // verify that the look aside lists are well formed. 00266 // 00267 // The check pool header macro (two flavors) verifies that the specified 00268 // pool header matches the preceeding and succeeding pool headers. 00269 // 00270 00271 #define CHECK_LIST(LINE,LIST,ENTRY) {NOTHING;} 00272 #define CHECK_POOL_HEADER(LINE,ENTRY) {NOTHING;} 00273 00274 #define CHECK_LOOKASIDE_LIST(LINE,LIST,ENTRY) {NOTHING;} 00275 00276 #define CHECK_POOL_PAGE(PAGE) \ 00277 { \ 00278 PPOOL_HEADER P = (PPOOL_HEADER)(((ULONG_PTR)(PAGE)) & ~(PAGE_SIZE-1)); \ 00279 ULONG SIZE, LSIZE; \ 00280 LOGICAL FOUND=FALSE; \ 00281 LSIZE = 0; \ 00282 SIZE = 0; \ 00283 do { \ 00284 if (P == (PPOOL_HEADER)PAGE) { \ 00285 FOUND = TRUE; \ 00286 } \ 00287 if (P->PreviousSize != LSIZE) { \ 00288 DbgPrint("POOL: Inconsistent size: ( %lx ) - %lx->%u != %u\n",\ 00289 PAGE, P, P->PreviousSize, LSIZE); \ 00290 DbgBreakPoint(); \ 00291 } \ 00292 LSIZE = P->BlockSize; \ 00293 SIZE += LSIZE; \ 00294 P = (PPOOL_HEADER)((PPOOL_BLOCK)P + LSIZE); \ 00295 } while ((SIZE < (PAGE_SIZE / POOL_SMALLEST_BLOCK)) && \ 00296 (PAGE_END(P) == FALSE)); \ 00297 if ((PAGE_END(P) == FALSE) || (FOUND == FALSE)) { \ 00298 DbgPrint("POOL: Inconsistent page: %lx\n",P); \ 00299 DbgBreakPoint(); \ 00300 } \ 00301 } 00302 00303 #endif 00304 00305 00306 00307 // 00308 // Define forward referenced function prototypes. 
00309 // 00310 00311 VOID 00312 ExpInitializePoolDescriptor( 00313 IN PPOOL_DESCRIPTOR PoolDescriptor, 00314 IN POOL_TYPE PoolType, 00315 IN ULONG PoolIndex, 00316 IN ULONG Threshold, 00317 IN PVOID PoolLock 00318 ); 00319 00320 NTSTATUS 00321 ExpSnapShotPoolPages( 00322 IN PVOID Address, 00323 IN ULONG Size, 00324 IN OUT PSYSTEM_POOL_INFORMATION PoolInformation, 00325 IN OUT PSYSTEM_POOL_ENTRY *PoolEntryInfo, 00326 IN ULONG Length, 00327 IN OUT PULONG RequiredLength 00328 ); 00329 00330 #ifdef ALLOC_PRAGMA 00331 #pragma alloc_text(INIT, InitializePool) 00332 #pragma alloc_text(PAGE, ExpInitializePoolDescriptor) 00333 #pragma alloc_text(PAGEVRFY, ExAllocatePoolSanityChecks) 00334 #pragma alloc_text(PAGEVRFY, ExFreePoolSanityChecks) 00335 #pragma alloc_text(POOLCODE, ExAllocatePoolWithTag) 00336 #pragma alloc_text(POOLCODE, ExFreePool) 00337 #pragma alloc_text(POOLCODE, ExFreePoolWithTag) 00338 #if DBG 00339 #pragma alloc_text(PAGELK, ExSnapShotPool) 00340 #pragma alloc_text(PAGELK, ExpSnapShotPoolPages) 00341 #endif // DBG 00342 #endif 00343 00344 #define MAX_TRACKER_TABLE 1025 00345 #define MAX_BIGPAGE_TABLE 4096 00346 // #define MAX_TRACKER_TABLE 5 00347 // #define MAX_BIGPAGE_TABLE 4 00348 00349 PPOOL_DESCRIPTOR ExpSessionPoolDescriptor; 00350 ULONG FirstPrint; 00351 00352 PPOOL_TRACKER_TABLE PoolTrackTable; 00353 SIZE_T PoolTrackTableSize; 00354 SIZE_T PoolTrackTableMask; 00355 00356 PPOOL_TRACKER_BIG_PAGES PoolBigPageTable; 00357 SIZE_T PoolBigPageTableSize; 00358 SIZE_T PoolBigPageTableHash; 00359 00360 ULONG PoolHitTag = 0xffffff0f; 00361 00362 VOID 00363 ExpInsertPoolTracker ( 00364 IN ULONG Key, 00365 IN SIZE_T Size, 00366 IN POOL_TYPE PoolType 00367 ); 00368 00369 VOID 00370 ExpRemovePoolTracker ( 00371 IN ULONG Key, 00372 IN ULONG Size, 00373 IN POOL_TYPE PoolType 00374 ); 00375 00376 LOGICAL 00377 ExpAddTagForBigPages ( 00378 IN PVOID Va, 00379 IN ULONG Key, 00380 IN ULONG NumberOfPages 00381 ); 00382 00383 ULONG 00384 ExpFindAndRemoveTagBigPages ( 
00385 IN PVOID Va 00386 ); 00387 00388 PVOID 00389 ExpAllocateStringRoutine( 00390 IN SIZE_T NumberOfBytes 00391 ) 00392 { 00393 return ExAllocatePoolWithTag(PagedPool,NumberOfBytes,'grtS'); 00394 } 00395 00396 BOOLEAN 00397 ExOkayToLockRoutine( 00398 IN PVOID Lock 00399 ) 00400 { 00401 UNREFERENCED_PARAMETER (Lock); 00402 00403 if (KeIsExecutingDpc()) { 00404 return FALSE; 00405 } else { 00406 return TRUE; 00407 } 00408 } 00409 00410 PRTL_ALLOCATE_STRING_ROUTINE RtlAllocateStringRoutine = ExpAllocateStringRoutine; 00411 PRTL_FREE_STRING_ROUTINE RtlFreeStringRoutine = (PRTL_FREE_STRING_ROUTINE)ExFreePool; 00412 00413 // 00414 // Define macros to pack and unpack a pool index. 00415 // 00416 00417 #define ENCODE_POOL_INDEX(POOLHEADER,INDEX) {(POOLHEADER)->PoolIndex = (UCHAR)((((POOLHEADER)->PoolIndex & 0x80) | ((UCHAR)(INDEX))));} 00418 #define DECODE_POOL_INDEX(POOLHEADER) ((ULONG)((POOLHEADER)->PoolIndex & 0x7f)) 00419 00420 #define MARK_POOL_HEADER_ALLOCATED(POOLHEADER) {(POOLHEADER)->PoolIndex |= 0x80;} 00421 #define MARK_POOL_HEADER_FREED(POOLHEADER) {(POOLHEADER)->PoolIndex &= 0x7f;} 00422 #define IS_POOL_HEADER_MARKED_ALLOCATED(POOLHEADER) ((POOLHEADER)->PoolIndex & 0x80) 00423 00424 // 00425 // Define the number of paged pools. This value may be overridden at boot 00426 // time. 00427 // 00428 00429 ULONG ExpNumberOfPagedPools = NUMBER_OF_PAGED_POOLS; 00430 00431 // 00432 // Pool descriptors for nonpaged pool and nonpaged pool must succeed are 00433 // static. The pool descriptors for paged pool are dynamically allocated 00434 // since there can be more than one paged pool. There is always one more 00435 // paged pool descriptor than there are paged pools. This descriptor is 00436 // used when a page allocation is done for a paged pool and is the first 00437 // descriptor in the paged pool descriptor array. 
00438 // 00439 00440 POOL_DESCRIPTOR NonPagedPoolDescriptor; 00441 POOL_DESCRIPTOR NonPagedPoolDescriptorMS; 00442 00443 // 00444 // The pool vector contains an array of pointers to pool descriptors. For 00445 // nonpaged pool and nonpaged pool must succeed, this is a pointer to a 00446 // single descriptor. For page pool, this is a pointer to an array of pool 00447 // descriptors. The pointer to the paged pool descriptor is duplicated so 00448 // if can be found easily by the kernel debugger. 00449 // 00450 00451 PPOOL_DESCRIPTOR PoolVector[NUMBER_OF_POOLS]; 00452 PPOOL_DESCRIPTOR ExpPagedPoolDescriptor; 00453 00454 extern KSPIN_LOCK NonPagedPoolLock; 00455 00456 #define ExpLockNonPagedPool(OldIrql) \ 00457 ExAcquireSpinLock(&NonPagedPoolLock, &OldIrql) 00458 00459 #define ExpUnlockNonPagedPool(OldIrql) \ 00460 ExReleaseSpinLock(&NonPagedPoolLock, OldIrql) 00461 00462 volatile ULONG ExpPoolIndex = 1; 00463 KSPIN_LOCK ExpTaggedPoolLock; 00464 00465 #if DBG 00466 PSZ PoolTypeNames[MaxPoolType] = { 00467 "NonPaged", 00468 "Paged", 00469 "NonPagedMustSucceed", 00470 "NotUsed", 00471 "NonPagedCacheAligned", 00472 "PagedCacheAligned", 00473 "NonPagedCacheAlignedMustS" 00474 }; 00475 00476 #endif //DBG 00477 00478 00479 // 00480 // Define paged and nonpaged pool lookaside descriptors. 00481 // 00482 00483 NPAGED_LOOKASIDE_LIST ExpSmallNPagedPoolLookasideLists[POOL_SMALL_LISTS]; 00484 00485 NPAGED_LOOKASIDE_LIST ExpSmallPagedPoolLookasideLists[POOL_SMALL_LISTS]; 00486 00487 00488 // 00489 // LOCK_POOL and LOCK_IF_PAGED_POOL are only used within this module. 
00490 // 00491 00492 #define LOCK_POOL(PoolDesc, LockHandle) { \ 00493 if ((PoolDesc->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) { \ 00494 ExpLockNonPagedPool(LockHandle); \ 00495 } else { \ 00496 ExAcquireFastMutex((PFAST_MUTEX)PoolDesc->LockAddress); \ 00497 } \ 00498 } 00499 00500 #define LOCK_IF_PAGED_POOL(_CheckType, _GlobalPool) \ 00501 if (_CheckType == PagedPool && _GlobalPool == TRUE) { \ 00502 ExAcquireFastMutex((PFAST_MUTEX)PoolVector[PagedPool]->LockAddress); \ 00503 } 00504 00505 KIRQL 00506 ExLockPool( 00507 IN POOL_TYPE PoolType 00508 ) 00509 00510 /*++ 00511 00512 Routine Description: 00513 00514 This function locks the pool specified by pool type. 00515 00516 Arguments: 00517 00518 PoolType - Specifies the pool that should be locked. 00519 00520 Return Value: 00521 00522 The previous IRQL is returned as the function value. 00523 00524 --*/ 00525 00526 { 00527 00528 KIRQL OldIrql; 00529 00530 // 00531 // If the pool type is nonpaged, then use a spinlock to lock the 00532 // pool. Otherwise, use a fast mutex to lock the pool. 00533 // 00534 00535 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) { 00536 ExAcquireSpinLock(NonPagedPoolDescriptor.LockAddress, &OldIrql); 00537 00538 } else { 00539 ExAcquireFastMutex((PFAST_MUTEX)PoolVector[PagedPool]->LockAddress); 00540 OldIrql = (KIRQL)((PFAST_MUTEX)(PoolVector[PagedPool]->LockAddress))->OldIrql; 00541 } 00542 00543 return OldIrql; 00544 } 00545 00546 00547 // 00548 // UNLOCK_POOL and UNLOCK_IF_PAGED_POOL are only used within this module. 
00549 // 00550 00551 #define UNLOCK_POOL(PoolDesc, LockHandle) { \ 00552 if ((PoolDesc->PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) { \ 00553 ExpUnlockNonPagedPool((KIRQL)LockHandle); \ 00554 } else { \ 00555 ExReleaseFastMutex((PFAST_MUTEX)PoolDesc->LockAddress); \ 00556 } \ 00557 } 00558 00559 #define UNLOCK_IF_PAGED_POOL(_CheckType, _GlobalPool) \ 00560 if (_CheckType == PagedPool && _GlobalPool == TRUE) { \ 00561 ExReleaseFastMutex((PFAST_MUTEX)PoolVector[PagedPool]->LockAddress); \ 00562 } 00563 00564 VOID 00565 ExUnlockPool( 00566 IN POOL_TYPE PoolType, 00567 IN KIRQL LockHandle 00568 ) 00569 00570 /*++ 00571 00572 Routine Description: 00573 00574 This function unlocks the pool specified by pool type. 00575 00576 00577 Arguments: 00578 00579 PoolType - Specifies the pool that should be unlocked. 00580 00581 LockHandle - Specifies the lock handle from a previous call to 00582 ExLockPool. 00583 00584 Return Value: 00585 00586 None. 00587 00588 --*/ 00589 00590 { 00591 00592 // 00593 // If the pool type is nonpaged, then use a spinlock to unlock the 00594 // pool. Otherwise, use a fast mutex to unlock the pool. 00595 // 00596 00597 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) { 00598 ExReleaseSpinLock(&NonPagedPoolLock, LockHandle); 00599 00600 } else { 00601 ExReleaseFastMutex((PFAST_MUTEX)PoolVector[PagedPool]->LockAddress); 00602 } 00603 00604 return; 00605 } 00606 00607 00608 VOID 00609 ExpInitializePoolDescriptor( 00610 IN PPOOL_DESCRIPTOR PoolDescriptor, 00611 IN POOL_TYPE PoolType, 00612 IN ULONG PoolIndex, 00613 IN ULONG Threshold, 00614 IN PVOID PoolLock 00615 ) 00616 00617 /*++ 00618 00619 Routine Description: 00620 00621 This function initializes a pool descriptor. 00622 00623 Note that this routine is called directly by the memory manager. 00624 00625 Arguments: 00626 00627 PoolDescriptor - Supplies a pointer to the pool descriptor. 00628 00629 PoolType - Supplies the type of the pool. 
00630 00631 PoolIndex - Supplies the pool descriptor index. 00632 00633 Threshold - Supplies the threshold value for the specified pool. 00634 00635 PoolLock - Supplies a point to the lock for the specified pool. 00636 00637 Return Value: 00638 00639 None. 00640 00641 --*/ 00642 00643 { 00644 00645 ULONG Index; 00646 00647 // 00648 // Initialize statistics fields, the pool type, the threshold value, 00649 // and the lock address. 00650 // 00651 00652 PoolDescriptor->PoolType = PoolType; 00653 PoolDescriptor->PoolIndex = PoolIndex; 00654 PoolDescriptor->RunningAllocs = 0; 00655 PoolDescriptor->RunningDeAllocs = 0; 00656 PoolDescriptor->TotalPages = 0; 00657 PoolDescriptor->TotalBigPages = 0; 00658 PoolDescriptor->Threshold = Threshold; 00659 PoolDescriptor->LockAddress = PoolLock; 00660 00661 // 00662 // Initialize the allocation listheads. 00663 // 00664 00665 for (Index = 0; Index < POOL_LIST_HEADS; Index += 1) { 00666 PrivateInitializeListHead(&PoolDescriptor->ListHeads[Index]); 00667 } 00668 00669 if ((PoolType == PagedPoolSession) && (ExpSessionPoolDescriptor == NULL)) { 00670 ExpSessionPoolDescriptor = (PPOOL_DESCRIPTOR) MiSessionPoolVector (); 00671 } 00672 00673 return; 00674 } 00675 00676 VOID 00677 InitializePool( 00678 IN POOL_TYPE PoolType, 00679 IN ULONG Threshold 00680 ) 00681 00682 /*++ 00683 00684 Routine Description: 00685 00686 This procedure initializes a pool descriptor for the specified pool 00687 type. Once initialized, the pool may be used for allocation and 00688 deallocation. 00689 00690 This function should be called once for each base pool type during 00691 system initialization. 00692 00693 Each pool descriptor contains an array of list heads for free 00694 blocks. Each list head holds blocks which are a multiple of 00695 the POOL_BLOCK_SIZE. 
The first element on the list [0] links
    together free entries of size POOL_BLOCK_SIZE, the second element
    [1] links together entries of POOL_BLOCK_SIZE * 2, the third
    POOL_BLOCK_SIZE * 3, etc, up to the number of blocks which fit
    into a page.

Arguments:

    PoolType - Supplies the type of pool being initialized (e.g.
               nonpaged pool, paged pool...).

    Threshold - Supplies the threshold value for the specified pool.

Return Value:

    None.

--*/

{
    PPOOL_DESCRIPTOR Descriptor;
    ULONG Index;
    PFAST_MUTEX FastMutex;
    SIZE_T Size;

    ASSERT((PoolType & MUST_SUCCEED_POOL_TYPE_MASK) == 0);

    if (PoolType == NonPagedPool) {

        //
        // Initialize nonpaged pools.  Pool tag tracking is always on in
        // checked builds and is gated by FLG_POOL_ENABLE_TAGGING in free
        // builds.
        //

#if !DBG
        if (NtGlobalFlag & FLG_POOL_ENABLE_TAGGING) {
#endif
            PoolTrackTableSize = MAX_TRACKER_TABLE;
            PoolTrackTableMask = PoolTrackTableSize - 2;

            //
            // NOTE(review): the results of these two MiAllocatePoolPages
            // calls are zeroed without a NULL check - presumably boot-time
            // allocation cannot fail here; confirm before reuse.
            //

            PoolTrackTable = MiAllocatePoolPages(NonPagedPool,
                                                 PoolTrackTableSize *
                                                     sizeof(POOL_TRACKER_TABLE),
                                                 FALSE);

            RtlZeroMemory(PoolTrackTable, PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE));

            PoolBigPageTableSize = MAX_BIGPAGE_TABLE;
            PoolBigPageTableHash = PoolBigPageTableSize - 1;
            PoolBigPageTable = MiAllocatePoolPages(NonPagedPool,
                                                   PoolBigPageTableSize *
                                                       sizeof(POOL_TRACKER_BIG_PAGES),
                                                   FALSE);

            RtlZeroMemory(PoolBigPageTable, PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES));
#if !DBG
        }
#endif

        //
        // Initialize the spinlocks for nonpaged pool.
        //

        KeInitializeSpinLock (&ExpTaggedPoolLock);
        KeInitializeSpinLock(&NonPagedPoolLock);

        //
        // Initialize the nonpaged pool descriptor.
        //

        PoolVector[NonPagedPool] = &NonPagedPoolDescriptor;
        ExpInitializePoolDescriptor(&NonPagedPoolDescriptor,
                                    NonPagedPool,
                                    0,
                                    Threshold,
                                    (PVOID)&NonPagedPoolLock);

        //
        // Initialize the nonpaged must succeed pool descriptor.
        //

        PoolVector[NonPagedPoolMustSucceed] = &NonPagedPoolDescriptorMS;
        ExpInitializePoolDescriptor(&NonPagedPoolDescriptorMS,
                                    NonPagedPoolMustSucceed,
                                    0,
                                    0,
                                    (PVOID)&NonPagedPoolLock);

    } else {

        //
        // Allocate one block holding the paged pool descriptors and their
        // fast mutexes (one of each per paged pool, plus one spare used
        // for page-level allocations).
        //

        Size = (ExpNumberOfPagedPools + 1) * (sizeof(FAST_MUTEX) + sizeof(POOL_DESCRIPTOR));
        Descriptor = (PPOOL_DESCRIPTOR)ExAllocatePoolWithTag (NonPagedPool,
                                                              Size,
                                                              'looP');
        if (Descriptor == NULL) {
            KeBugCheckEx (MUST_SUCCEED_POOL_EMPTY,
                          Size,
                          (ULONG_PTR)-1,
                          (ULONG_PTR)-1,
                          (ULONG_PTR)-1);
        }

        //
        // Retroactively charge the tracker tables themselves to the
        // 'Pool' tag, now that tracking structures exist.
        //

        if (PoolTrackTable) {
            ExpInsertPoolTracker('looP',
                                 (ULONG) ROUND_TO_PAGES(PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE)),
                                 NonPagedPool);

            ExpInsertPoolTracker('looP',
                                 (ULONG) ROUND_TO_PAGES(PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES)),
                                 NonPagedPool);
        }

        //
        // The fast mutexes follow the descriptor array in the same block.
        //

        FastMutex = (PFAST_MUTEX)(Descriptor + ExpNumberOfPagedPools + 1);
        PoolVector[PagedPool] = Descriptor;
        ExpPagedPoolDescriptor = Descriptor;

        for (Index = 0; Index < (ExpNumberOfPagedPools + 1); Index += 1) {
            ExInitializeFastMutex(FastMutex);
            ExpInitializePoolDescriptor(Descriptor,
                                        PagedPool,
                                        Index,
                                        Threshold,
                                        (PVOID)FastMutex);

            Descriptor += 1;
            FastMutex += 1;
        }
    }

    //
    // The maximum cache alignment must be less than the size of the
    // smallest pool block because the lower bits are being cleared
    // in ExFreePool to find the entry's address.
00830 // 00831 00832 #if POOL_CACHE_SUPPORTED 00833 00834 // 00835 // Compute pool cache information. 00836 // 00837 00838 PoolCacheSize = HalGetDmaAlignmentRequirement(); 00839 00840 ASSERT(PoolCacheSize >= POOL_OVERHEAD); 00841 00842 PoolCacheOverhead = PoolCacheSize + PoolCacheSize - (sizeof(POOL_HEADER) + 1); 00843 00844 PoolBuddyMax = 00845 (POOL_PAGE_SIZE - (POOL_OVERHEAD + (3*POOL_SMALLEST_BLOCK) + 2*PoolCacheSize)); 00846 00847 #endif //POOL_CACHE_SUPPORTED 00848 00849 } 00850 00851 00852 LOGICAL 00853 ExpCheckSingleFilter ( 00854 ULONG Tag, 00855 ULONG Filter 00856 ) 00857 00858 /*++ 00859 00860 Routine Description: 00861 00862 This function checks if a pool tag matches a given pattern. Pool 00863 protection is ignored in the tag. 00864 00865 ? - matches a single character 00866 * - terminates match with TRUE 00867 00868 N.B.: ability inspired by the !poolfind debugger extension. 00869 00870 Arguments: 00871 00872 Tag - a pool tag 00873 00874 Filter - a globish pattern (chars and/or ?,*) 00875 00876 Return Value: 00877 00878 TRUE if a match exists, FALSE otherwise. 00879 00880 --*/ 00881 00882 { 00883 ULONG i; 00884 PUCHAR tc; 00885 PUCHAR fc; 00886 00887 tc = (PUCHAR) &Tag; 00888 fc = (PUCHAR) &Filter; 00889 00890 for (i = 0; i < 4; i += 1, tc += 1, fc += 1) { 00891 00892 if (*fc == '*') { 00893 break; 00894 } 00895 if (*fc == '?') { 00896 continue; 00897 } 00898 if (i == 3 && ((*tc) & ~(PROTECTED_POOL >> 24)) == *fc) { 00899 continue; 00900 } 00901 if (*tc != *fc) { 00902 return FALSE; 00903 } 00904 } 00905 return TRUE; 00906 } 00907 00908 00909 PVOID 00910 ExAllocatePool( 00911 IN POOL_TYPE PoolType, 00912 IN SIZE_T NumberOfBytes 00913 ) 00914 00915 /*++ 00916 00917 Routine Description: 00918 00919 This function allocates a block of pool of the specified type and 00920 returns a pointer to the allocated block. This function is used to 00921 access both the page-aligned pools, and the list head entries (less than 00922 a page) pools. 
00923 00924 If the number of bytes specifies a size that is too large to be 00925 satisfied by the appropriate list, then the page-aligned 00926 pool allocator is used. The allocated block will be page-aligned 00927 and a page-sized multiple. 00928 00929 Otherwise, the appropriate pool list entry is used. The allocated 00930 block will be 64-bit aligned, but will not be page aligned. The 00931 pool allocator calculates the smallest number of POOL_BLOCK_SIZE 00932 that can be used to satisfy the request. If there are no blocks 00933 available of this size, then a block of the next larger block size 00934 is allocated and split. One piece is placed back into the pool, and 00935 the other piece is used to satisfy the request. If the allocator 00936 reaches the paged-sized block list, and nothing is there, the 00937 page-aligned pool allocator is called. The page is split and added 00938 to the pool... 00939 00940 Arguments: 00941 00942 PoolType - Supplies the type of pool to allocate. If the pool type 00943 is one of the "MustSucceed" pool types, then this call will 00944 always succeed and return a pointer to allocated pool. 00945 Otherwise, if the system cannot allocate the requested amount 00946 of memory a NULL is returned. 00947 00948 Valid pool types: 00949 00950 NonPagedPool 00951 PagedPool 00952 NonPagedPoolMustSucceed, 00953 NonPagedPoolCacheAligned 00954 PagedPoolCacheAligned 00955 NonPagedPoolCacheAlignedMustS 00956 00957 NumberOfBytes - Supplies the number of bytes to allocate. 00958 00959 Return Value: 00960 00961 NULL - The PoolType is not one of the "MustSucceed" pool types, and 00962 not enough pool exists to satisfy the request. 00963 00964 NON-NULL - Returns a pointer to the allocated pool. 
--*/

{
    //
    // Untagged allocation: forward to the tagged allocator using the
    // default 'None' tag (stored byte-reversed as 'enoN').
    //

    return ExAllocatePoolWithTag (PoolType,
                                  NumberOfBytes,
                                  'enoN');
}

#define _NTOSKRNL_VERIFIER_ 1 // LWFIX: disable for ship

#ifdef _NTOSKRNL_VERIFIER_

//
// Driver Verifier entry point. When the kernel verifier is active,
// ExAllocatePoolWithTag forwards allocations here (with POOL_DRIVER_MASK
// set) so the verifier can record the caller before re-entering the
// normal allocator.
//

PVOID
VeAllocatePoolWithTagPriority(
    IN POOL_TYPE PoolType,
    IN SIZE_T NumberOfBytes,
    IN ULONG Tag,
    IN EX_POOL_PRIORITY Priority,
    IN PVOID CallingAddress
    );

extern LOGICAL KernelVerifier;

#endif


PVOID
ExAllocatePoolWithTagPriority(
    IN POOL_TYPE PoolType,
    IN SIZE_T NumberOfBytes,
    IN ULONG Tag,
    IN EX_POOL_PRIORITY Priority
    )

/*++

Routine Description:

    This function allocates a block of pool of the specified type and
    returns a pointer to the allocated block. This function is used to
    access both the page-aligned pools, and the list head entries (less than
    a page) pools.

    If the number of bytes specifies a size that is too large to be
    satisfied by the appropriate list, then the page-aligned
    pool allocator is used. The allocated block will be page-aligned
    and a page-sized multiple.

    Otherwise, the appropriate pool list entry is used. The allocated
    block will be 64-bit aligned, but will not be page aligned. The
    pool allocator calculates the smallest number of POOL_BLOCK_SIZE
    that can be used to satisfy the request. If there are no blocks
    available of this size, then a block of the next larger block size
    is allocated and split. One piece is placed back into the pool, and
    the other piece is used to satisfy the request. If the allocator
    reaches the paged-sized block list, and nothing is there, the
    page-aligned pool allocator is called. The page is split and added
    to the pool...

Arguments:

    PoolType - Supplies the type of pool to allocate. If the pool type
        is one of the "MustSucceed" pool types, then this call will
        always succeed and return a pointer to allocated pool.
        Otherwise, if the system cannot allocate the requested amount
        of memory a NULL is returned.

        Valid pool types:

        NonPagedPool
        PagedPool
        NonPagedPoolMustSucceed,
        NonPagedPoolCacheAligned
        PagedPoolCacheAligned
        NonPagedPoolCacheAlignedMustS

    NumberOfBytes - Supplies the number of bytes to allocate.

    Tag - Supplies the caller's identifying tag.

    Priority - Supplies an indication as to how important it is that this
        request succeed under low available pool conditions. This
        can also be used to specify special pool.

Return Value:

    NULL - The PoolType is not one of the "MustSucceed" pool types, and
        not enough pool exists to satisfy the request.

    NON-NULL - Returns a pointer to the allocated pool.

--*/

{
    PVOID Entry;

    //
    // Special pool was requested via the priority bits: try the special
    // pool allocator first (only small requests qualify). On failure,
    // strip the special pool bits and fall through to a normal allocation.
    //

    if ((Priority & POOL_SPECIAL_POOL_BIT) && (NumberOfBytes <= POOL_BUDDY_MAX)) {
        Entry = MmAllocateSpecialPool (NumberOfBytes,
                                       Tag,
                                       PoolType & (BASE_POOL_TYPE_MASK | POOL_VERIFIER_MASK),
                                       (Priority & POOL_SPECIAL_POOL_UNDERRUN_BIT) ? 1 : 0);

        if (Entry != NULL) {
            return Entry;
        }
        Priority &= ~(POOL_SPECIAL_POOL_BIT | POOL_SPECIAL_POOL_UNDERRUN_BIT);
    }

    //
    // Pool and other resources can be allocated directly through the Mm
    // without the pool code knowing - so always call the Mm for the
    // up-to-date counters.
    //
    // For non-high-priority, non-must-succeed requests, let the memory
    // manager veto the allocation under low-resource conditions.
    //

    if ((Priority != HighPoolPriority) && ((PoolType & MUST_SUCCEED_POOL_TYPE_MASK) == 0)) {

        if (ExpSessionPoolDescriptor == NULL) {
            PoolType &= ~SESSION_POOL_MASK;
        }

        if (MmResourcesAvailable (PoolType, NumberOfBytes, Priority) == FALSE) {
            return NULL;
        }
    }

    //
    // There is a window between determining whether to proceed and actually
    // doing the allocation. In this window the pool may deplete. This is not
    // worth closing at this time.
    //

    return ExAllocatePoolWithTag (PoolType,
                                  NumberOfBytes,
                                  Tag);
}


PVOID
ExAllocatePoolWithTag(
    IN POOL_TYPE PoolType,
    IN SIZE_T NumberOfBytes,
    IN ULONG Tag
    )

/*++

Routine Description:

    This function allocates a block of pool of the specified type and
    returns a pointer to the allocated block. This function is used to
    access both the page-aligned pools and the list head entries (less
    than a page) pools.

    If the number of bytes specifies a size that is too large to be
    satisfied by the appropriate list, then the page-aligned pool
    allocator is used. The allocated block will be page-aligned and a
    page-sized multiple.

    Otherwise, the appropriate pool list entry is used. The allocated
    block will be 64-bit aligned, but will not be page aligned. The
    pool allocator calculates the smallest number of POOL_BLOCK_SIZE
    that can be used to satisfy the request. If there are no blocks
    available of this size, then a block of the next larger block size
    is allocated and split. One piece is placed back into the pool, and
    the other piece is used to satisfy the request. If the allocator
    reaches the paged-sized block list, and nothing is there, the
    page-aligned pool allocator is called. The page is split and added
    to the pool.

Arguments:

    PoolType - Supplies the type of pool to allocate. If the pool type
        is one of the "MustSucceed" pool types, then this call will
        always succeed and return a pointer to allocated pool. Otherwise,
        if the system cannot allocate the requested amount of memory a
        NULL is returned.

        Valid pool types:

        NonPagedPool
        PagedPool
        NonPagedPoolMustSucceed,
        NonPagedPoolCacheAligned
        PagedPoolCacheAligned
        NonPagedPoolCacheAlignedMustS

    NumberOfBytes - Supplies the number of bytes to allocate.

    Tag - Supplies the caller's identifying tag.

Return Value:

    NULL - The PoolType is not one of the "MustSucceed" pool types, and
        not enough pool exists to satisfy the request.

    NON-NULL - Returns a pointer to the allocated pool.

--*/

{
    PVOID Block;
    PPOOL_HEADER Entry;
    PNPAGED_LOOKASIDE_LIST LookasideList;
    PPOOL_HEADER NextEntry;
    PPOOL_HEADER SplitEntry;
    KIRQL LockHandle;
    PPOOL_DESCRIPTOR PoolDesc;
    ULONG Index;
    ULONG ListNumber;
    ULONG NeededSize;
    ULONG PoolIndex;
    POOL_TYPE CheckType;
    POOL_TYPE RequestType;
    PLIST_ENTRY ListHead;
    POOL_TYPE NewPoolType;
    LOGICAL GlobalSpace;
    ULONG IsLargeSessionAllocation;
    PKPRCB Prcb;
    ULONG NumberOfPages;
    PVOID CallingAddress;
    PVOID CallersCaller;

#if POOL_CACHE_SUPPORTED
    ULONG CacheOverhead;
#else
#define CacheOverhead POOL_OVERHEAD
#endif

    PERFINFO_EXALLOCATEPOOLWITHTAG_DECL();

#ifdef _NTOSKRNL_VERIFIER_

    if (KernelVerifier == TRUE) {
#if defined (_X86_)
        RtlGetCallersAddress(&CallingAddress, &CallersCaller);
#else
        CallingAddress = (PVOID)_ReturnAddress();
#endif

        ASSERT(NumberOfBytes != 0);
        ASSERT_ALLOCATE_IRQL(PoolType, NumberOfBytes);

        if ((PoolType & POOL_DRIVER_MASK) == 0) {

            //
            // Use the Driver Verifier pool framework. Note this will
            // result in a recursive callback to this routine.
            //

            return VeAllocatePoolWithTagPriority (PoolType | POOL_DRIVER_MASK,
                                                  NumberOfBytes,
                                                  Tag,
                                                  HighPoolPriority,
                                                  CallingAddress);
        }
        PoolType &= ~POOL_DRIVER_MASK;
    }

#else

    ASSERT(NumberOfBytes != 0);
    ASSERT_ALLOCATE_IRQL(PoolType, NumberOfBytes);

#endif

    //
    // Isolate the base pool type and select a pool from which to allocate
    // the specified block size.
    //

    CheckType = PoolType & BASE_POOL_TYPE_MASK;

    //
    // Currently only Hydra paged pool allocations come from the per session
    // pools. Nonpaged Hydra pool allocations still come from global pool.
    //

    if (PoolType & SESSION_POOL_MASK) {
        if (ExpSessionPoolDescriptor == NULL) {

            //
            // Promote this down to support common binaries.
            //

            PoolType &= ~SESSION_POOL_MASK;
            PoolDesc = PoolVector[CheckType];
            GlobalSpace = TRUE;
        }
        else {
            GlobalSpace = FALSE;
            if (CheckType == NonPagedPool) {
                PoolDesc = PoolVector[CheckType];
            }
            else {
                PoolDesc = ExpSessionPoolDescriptor;
            }
        }
    }
    else {
        PoolDesc = PoolVector[CheckType];
        GlobalSpace = TRUE;
    }

    //
    // Check to determine if the requested block can be allocated from one
    // of the pool lists or must be directly allocated from virtual memory.
    //

    if (NumberOfBytes > POOL_BUDDY_MAX) {

        //
        // The requested size is greater than the largest block maintained
        // by allocation lists.
        //

        ASSERT ((NumberOfBytes <= PAGE_SIZE) ||
                (ExpPagedPoolDescriptor == (PPOOL_DESCRIPTOR)0) ||
                ((PoolType & MUST_SUCCEED_POOL_TYPE_MASK) == 0));

        LOCK_POOL(PoolDesc, LockHandle);

        PoolDesc->RunningAllocs += 1;

        IsLargeSessionAllocation = (PoolType & SESSION_POOL_MASK);

        RequestType = (PoolType & (BASE_POOL_TYPE_MASK | SESSION_POOL_MASK | POOL_VERIFIER_MASK));

RetryWithMustSucceed:
        Entry = (PPOOL_HEADER) MiAllocatePoolPages (RequestType,
                                                    NumberOfBytes,
                                                    IsLargeSessionAllocation);

        //
        // Large session pool allocations are accounted for directly by
        // the memory manager so no need to call MiSessionPoolAllocated here.
        //

        if (Entry != NULL) {

            NumberOfPages = BYTES_TO_PAGES(NumberOfBytes);
            PoolDesc->TotalBigPages += NumberOfPages;

            UNLOCK_POOL(PoolDesc, LockHandle);

            if ((PoolBigPageTable) && (IsLargeSessionAllocation == 0)) {

                //
                // If the big-page table is full, fall back to the
                // catch-all 'BIG ' tag so the tracker totals stay right.
                //

                if (ExpAddTagForBigPages((PVOID)Entry,
                                         Tag,
                                         NumberOfPages) == FALSE) {
                    Tag = ' GIB';
                }

                ExpInsertPoolTracker (Tag,
                                      (ULONG) ROUND_TO_PAGES(NumberOfBytes),
                                      PoolType);
            }

        } else {

            //
            // First failure of a must-succeed request: retry once with
            // the must-succeed bit passed down to the page allocator.
            //

            if (PoolType & MUST_SUCCEED_POOL_TYPE_MASK) {
                RequestType |= MUST_SUCCEED_POOL_TYPE_MASK;
                goto RetryWithMustSucceed;
            }

            UNLOCK_POOL(PoolDesc, LockHandle);

            KdPrint(("EX: ExAllocatePool (%p, 0x%x ) returning NULL\n",
                     NumberOfBytes,
                     PoolType));

            if ((PoolType & POOL_RAISE_IF_ALLOCATION_FAILURE) != 0) {
                ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
            }
        }

        PERFINFO_BIGPOOLALLOC(PoolType, Tag, NumberOfBytes, Entry);

        return Entry;
    }

    //
    // The requested size is less than or equal to the size of the
    // maximum block maintained by the allocation lists.
    //

    PERFINFO_POOLALLOC(PoolType, Tag, NumberOfBytes);

    //
    // Check for a special pool tag match by actual tag.
    //

    if (MmSpecialPoolTag != 0 && NumberOfBytes != 0) {

#ifndef NO_POOL_CHECKS
        Entry = MmSqueezeBadTags (NumberOfBytes, Tag, CheckType, 2);
        if (Entry != NULL) {
            return (PVOID)Entry;
        }
#endif

        //
        // Check for a special pool tag match by tag string and size ranges.
        //

        if ((ExpCheckSingleFilter(Tag, MmSpecialPoolTag)) ||
            ((MmSpecialPoolTag >= (NumberOfBytes + POOL_OVERHEAD)) &&
             (MmSpecialPoolTag < (NumberOfBytes + POOL_OVERHEAD + POOL_SMALLEST_BLOCK)))) {

            Entry = MmAllocateSpecialPool (NumberOfBytes,
                                           Tag,
                                           PoolType & (BASE_POOL_TYPE_MASK | POOL_VERIFIER_MASK),
                                           2);
            if (Entry != NULL) {
                return (PVOID)Entry;
            }
        }
    }

    //
    // If the request is for cache aligned memory adjust the number of
    // bytes.
    //

#if POOL_CACHE_SUPPORTED

    CacheOverhead = POOL_OVERHEAD;
    if (PoolType & CACHE_ALIGNED_POOL_TYPE_MASK) {
        NumberOfBytes += PoolCacheOverhead;
        CacheOverhead = PoolCacheSize;
    }

#endif //POOL_CACHE_SUPPORTED

    //
    // Compute the Index of the listhead for blocks of the requested
    // size.
    //

    ListNumber = (ULONG)((NumberOfBytes + POOL_OVERHEAD + (POOL_SMALLEST_BLOCK - 1)) >> POOL_BLOCK_SHIFT);

    NeededSize = ListNumber;

    //
    // If the pool type is paged, then pick a starting pool number and
    // attempt to lock each paged pool in circular succession. Otherwise,
    // lock the nonpaged pool as the same lock is used for both nonpaged
    // and nonpaged must succeed.
    //
    // N.B. The paged pool is selected in a round robin fashion using a
    //      simple counter. Note that the counter is incremented using a
    //      a noninterlocked sequence, but the pool index is never allowed
    //      to get out of range.
    //

    if (CheckType == PagedPool) {

        //
        // If the requested pool block is a small block, then attempt to
        // allocate the requested pool from the per processor lookaside
        // list. If the attempt fails, then attempt to allocate from the
        // system lookaside list. If the attempt fails, then select a
        // pool to allocate from and allocate the block normally.
        //
        // Session space allocations do not currently use lookaside lists.
        //

        if ((GlobalSpace == TRUE) &&
            (NeededSize <= POOL_SMALL_LISTS) &&
            (Isx86FeaturePresent(KF_CMPXCHG8B))) {

            Prcb = KeGetCurrentPrcb();
            LookasideList = Prcb->PPPagedLookasideList[NeededSize - 1].P;
            LookasideList->L.TotalAllocates += 1;

            CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, Entry);

            Entry = (PPOOL_HEADER)
                ExInterlockedPopEntrySList (&LookasideList->L.ListHead,
                                            &LookasideList->Lock);

            if (Entry == NULL) {
                LookasideList = Prcb->PPPagedLookasideList[NeededSize - 1].L;
                LookasideList->L.TotalAllocates += 1;

                CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, Entry);

                Entry = (PPOOL_HEADER)
                    ExInterlockedPopEntrySList (&LookasideList->L.ListHead,
                                                &LookasideList->Lock);
            }

            if (Entry != NULL) {

                CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, Entry);

                //
                // The lookaside list links through the block body; back up
                // one header to reach the POOL_HEADER for this block.
                //

                Entry -= 1;
                LookasideList->L.AllocateHits += 1;
                NewPoolType = (PoolType & (BASE_POOL_TYPE_MASK | POOL_QUOTA_MASK | SESSION_POOL_MASK | POOL_VERIFIER_MASK)) + 1;

#if _POOL_LOCK_GRANULAR_
                PoolDesc = &PoolDesc[DECODE_POOL_INDEX(Entry)];
#endif

                LOCK_POOL_GRANULAR(PoolDesc, LockHandle);

                Entry->PoolType = (UCHAR)NewPoolType;
                MARK_POOL_HEADER_ALLOCATED(Entry);

                UNLOCK_POOL_GRANULAR(PoolDesc, LockHandle);

                Entry->PoolTag = Tag;

                if ((PoolTrackTable != NULL) &&
                    ((PoolType & SESSION_POOL_MASK) == 0)) {

                    ExpInsertPoolTracker (Tag,
                                          Entry->BlockSize << POOL_BLOCK_SHIFT,
                                          PoolType);
                }

                //
                // Zero out any back pointer to our internal structures
                // to stop someone from corrupting us via an
                // uninitialized pointer.
                //

                ((PULONG)((PCHAR)Entry + CacheOverhead))[0] = 0;

                PERFINFO_POOLALLOC_ADDR((PUCHAR)Entry + CacheOverhead);

                return (PUCHAR)Entry + CacheOverhead;
            }
        }

        //
        // If there is more than one paged pool, then attempt to find
        // one that can be immediately locked.
        //

        if (GlobalSpace == TRUE) {

            PVOID Lock;

            PoolIndex = 1;
            if (ExpNumberOfPagedPools != PoolIndex) {
                ExpPoolIndex += 1;
                PoolIndex = ExpPoolIndex;
                if (PoolIndex > ExpNumberOfPagedPools) {
                    PoolIndex = 1;
                    ExpPoolIndex = 1;
                }

                Index = PoolIndex;
                do {
                    Lock = PoolDesc[PoolIndex].LockAddress;
                    if (ExTryToAcquireFastMutex((PFAST_MUTEX)Lock) == TRUE) {
                        PoolDesc = &PoolDesc[PoolIndex];
                        goto PoolLocked;
                    }

                    PoolIndex += 1;
                    if (PoolIndex > ExpNumberOfPagedPools) {
                        PoolIndex = 1;
                    }

                } while (PoolIndex != Index);
            }
            PoolDesc = &PoolDesc[PoolIndex];
        }
        else {

            //
            // Only one paged pool is currently available per session.
            //

            PoolIndex = 0;
            ASSERT (PoolDesc == ExpSessionPoolDescriptor);
        }

        //
        // None of the paged pools could be conditionally locked or there
        // is only one paged pool. The first pool considered is picked as
        // the victim to wait on.
        //

        ExAcquireFastMutex((PFAST_MUTEX)PoolDesc->LockAddress);
PoolLocked:

        // NOTE(review): self-assignment below is a no-op — presumably kept
        // to quiet an "unused variable" warning in free builds; confirm.
        GlobalSpace = GlobalSpace;
#if DBG
        if (GlobalSpace == TRUE) {
            ASSERT(PoolIndex == PoolDesc->PoolIndex);
        }
        else {
            ASSERT(PoolIndex == 0);
            ASSERT(PoolDesc->PoolIndex == 0);
        }
#endif

    } else {

        //
        // If the requested pool block is a small block, then attempt to
        // allocate the requested pool from the per processor lookaside
        // list. If the attempt fails, then attempt to allocate from the
        // system lookaside list. If the attempt fails, then select a
        // pool to allocate from and allocate the block normally.
        //

        if (GlobalSpace == TRUE && NeededSize <= POOL_SMALL_LISTS) {
            Prcb = KeGetCurrentPrcb();
            LookasideList = Prcb->PPNPagedLookasideList[NeededSize - 1].P;
            LookasideList->L.TotalAllocates += 1;

            CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, 0);

            Entry = (PPOOL_HEADER)
                ExInterlockedPopEntrySList (&LookasideList->L.ListHead,
                                            &LookasideList->Lock);

            if (Entry == NULL) {
                LookasideList = Prcb->PPNPagedLookasideList[NeededSize - 1].L;
                LookasideList->L.TotalAllocates += 1;

                CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, 0);

                Entry = (PPOOL_HEADER)
                    ExInterlockedPopEntrySList (&LookasideList->L.ListHead,
                                                &LookasideList->Lock);
            }

            if (Entry != NULL) {

                CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, Entry);

                Entry -= 1;
                LookasideList->L.AllocateHits += 1;
                NewPoolType = (PoolType & (BASE_POOL_TYPE_MASK | POOL_QUOTA_MASK | SESSION_POOL_MASK | POOL_VERIFIER_MASK)) + 1;

                LOCK_POOL_GRANULAR(PoolDesc, LockHandle);

                Entry->PoolType = (UCHAR)NewPoolType;
                MARK_POOL_HEADER_ALLOCATED(Entry);

                UNLOCK_POOL_GRANULAR(PoolDesc, LockHandle);

                Entry->PoolTag = Tag;

                if (PoolTrackTable != NULL) {

                    ExpInsertPoolTracker (Tag,
                                          Entry->BlockSize << POOL_BLOCK_SHIFT,
                                          PoolType);
                }

                //
                // Zero out any back pointer to our internal structures
                // to stop someone from corrupting us via an
                // uninitialized pointer.
                //

                ((PULONG)((PCHAR)Entry + CacheOverhead))[0] = 0;

                PERFINFO_POOLALLOC_ADDR((PUCHAR)Entry + CacheOverhead);

                return (PUCHAR)Entry + CacheOverhead;
            }
        }

        PoolIndex = 0;
        ExAcquireSpinLock(&NonPagedPoolLock, &LockHandle);

        ASSERT(PoolIndex == PoolDesc->PoolIndex);
    }

    //
    // The following code has an outer loop and an inner loop.
    //
    // The outer loop is utilized to repeat a nonpaged must succeed
    // allocation if necessary.
    //
    // The inner loop is used to repeat an allocation attempt if there
    // are no entries in any of the pool lists.
    //

    RequestType = PoolType & (BASE_POOL_TYPE_MASK | SESSION_POOL_MASK);

    PoolDesc->RunningAllocs += 1;
    ListHead = &PoolDesc->ListHeads[ListNumber];

    do {

        //
        // Attempt to allocate the requested block from the current free
        // blocks.
        //

        do {

            //
            // If the list is not empty, then allocate a block from the
            // selected list.
            //

            if (PrivateIsListEmpty(ListHead) == FALSE) {

                CHECK_LIST( __LINE__, ListHead, 0 );
                Block = PrivateRemoveHeadList(ListHead);
                CHECK_LIST( __LINE__, ListHead, 0 );
                Entry = (PPOOL_HEADER)((PCHAR)Block - POOL_OVERHEAD);

                ASSERT(Entry->BlockSize >= NeededSize);

                ASSERT(DECODE_POOL_INDEX(Entry) == PoolIndex);

                ASSERT(Entry->PoolType == 0);

                if (Entry->BlockSize != NeededSize) {

                    //
                    // The selected block is larger than the allocation
                    // request. Split the block and insert the remaining
                    // fragment in the appropriate list.
                    //
                    // If the entry is at the start of a page, then take
                    // the allocation from the front of the block so as
                    // to minimize fragmentation. Otherwise, take the
                    // allocation from the end of the block which may
                    // also reduce fragmentation if the block is at the
                    // end of a page.
                    //

                    if (Entry->PreviousSize == 0) {

                        //
                        // The entry is at the start of a page.
                        //

                        SplitEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry + NeededSize);
                        SplitEntry->BlockSize = (UCHAR)(Entry->BlockSize - (UCHAR)NeededSize);
                        SplitEntry->PreviousSize = (UCHAR)NeededSize;

                        //
                        // If the allocated block is not at the end of a
                        // page, then adjust the size of the next block.
                        //

                        NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)SplitEntry + SplitEntry->BlockSize);
                        if (PAGE_END(NextEntry) == FALSE) {
                            NextEntry->PreviousSize = SplitEntry->BlockSize;
                        }

                    } else {

                        //
                        // The entry is not at the start of a page.
                        //

                        SplitEntry = Entry;
                        Entry->BlockSize -= (UCHAR)NeededSize;
                        Entry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry + Entry->BlockSize);
                        Entry->PreviousSize = SplitEntry->BlockSize;

                        //
                        // If the allocated block is not at the end of a
                        // page, then adjust the size of the next block.
                        //

                        NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry + NeededSize);
                        if (PAGE_END(NextEntry) == FALSE) {
                            NextEntry->PreviousSize = (UCHAR)NeededSize;
                        }
                    }

                    //
                    // Set the size of the allocated entry, clear the pool
                    // type of the split entry, set the index of the split
                    // entry, and insert the split entry in the appropriate
                    // free list.
                    //

                    Entry->BlockSize = (UCHAR)NeededSize;
                    ENCODE_POOL_INDEX(Entry, PoolIndex);
                    SplitEntry->PoolType = 0;
                    ENCODE_POOL_INDEX(SplitEntry, PoolIndex);
                    Index = SplitEntry->BlockSize;

                    CHECK_LIST(__LINE__, &PoolDesc->ListHeads[Index - 1], 0);
                    PrivateInsertTailList(&PoolDesc->ListHeads[Index - 1], ((PLIST_ENTRY)((PCHAR)SplitEntry + POOL_OVERHEAD)));
                    CHECK_LIST(__LINE__, &PoolDesc->ListHeads[Index - 1], 0);
                    CHECK_LIST(__LINE__, ((PLIST_ENTRY)((PCHAR)SplitEntry + POOL_OVERHEAD)), 0);
                }

                Entry->PoolType = (UCHAR)((PoolType & (BASE_POOL_TYPE_MASK | POOL_QUOTA_MASK | SESSION_POOL_MASK | POOL_VERIFIER_MASK)) + 1);

                MARK_POOL_HEADER_ALLOCATED(Entry);

                CHECK_POOL_HEADER(__LINE__, Entry);

                //
                // Notify the memory manager of session pool allocations
                // so leaked allocations can be caught on session exit.
                // This call must be made with the relevant pool locked.
                //

                if (PoolType & SESSION_POOL_MASK) {
                    MiSessionPoolAllocated(
                        (PVOID)((PCHAR)Entry + CacheOverhead),
                        (ULONG)(Entry->BlockSize << POOL_BLOCK_SHIFT),
                        PoolType);
                }

                UNLOCK_POOL(PoolDesc, LockHandle);

                Entry->PoolTag = Tag;

                if ((PoolTrackTable != NULL) &&
                    ((PoolType & SESSION_POOL_MASK) == 0)) {

                    ExpInsertPoolTracker (Tag,
                                          Entry->BlockSize << POOL_BLOCK_SHIFT,
                                          PoolType);
                }

                //
                // Zero out any back pointer to our internal structures
                // to stop someone from corrupting us via an
                // uninitialized pointer.
                //

                ((PULONGLONG)((PCHAR)Entry + CacheOverhead))[0] = 0;

                PERFINFO_POOLALLOC_ADDR((PUCHAR)Entry + CacheOverhead);
                return (PCHAR)Entry + CacheOverhead;

            }
            ListHead += 1;

        } while (ListHead != &PoolDesc->ListHeads[POOL_LIST_HEADS]);

        //
        // A block of the desired size does not exist and there are
        // no large blocks that can be split to satisfy the allocation.
        // Attempt to expand the pool by allocating another page to be
        // added to the pool.
        //
        // If the pool type is paged pool, then the paged pool page lock
        // must be held during the allocation of the pool pages.
        //

        LOCK_IF_PAGED_POOL(CheckType, GlobalSpace);

        Entry = (PPOOL_HEADER)MiAllocatePoolPages (RequestType,
                                                   PAGE_SIZE,
                                                   FALSE);

        UNLOCK_IF_PAGED_POOL(CheckType, GlobalSpace);

        if (Entry == NULL) {
            if ((PoolType & MUST_SUCCEED_POOL_TYPE_MASK) != 0) {

                //
                // Must succeed pool was requested. Reset the type,
                // the pool descriptor address, and continue the search.
                //

                CheckType = NonPagedPoolMustSucceed;
                RequestType = RequestType | MUST_SUCCEED_POOL_TYPE_MASK;
                PoolDesc = PoolVector[NonPagedPoolMustSucceed];
                ListHead = &PoolDesc->ListHeads[ListNumber];
                continue;

            } else {

                //
                // No more pool of the specified type is available.
                //

                KdPrint(("EX: ExAllocatePool (%p, 0x%x ) returning NULL\n",
                         NumberOfBytes,
                         PoolType));

                UNLOCK_POOL(PoolDesc, LockHandle);

                if ((PoolType & POOL_RAISE_IF_ALLOCATION_FAILURE) != 0) {
                    ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES);
                }

                return NULL;
            }
        }

        //
        // Insert the allocated page in the last allocation list.
        //

        PoolDesc->TotalPages += 1;
        Entry->PoolType = 0;
        ENCODE_POOL_INDEX(Entry, PoolIndex);

        PERFINFO_ADDPOOLPAGE(CheckType, PoolIndex, Entry, PoolDesc);

        //
        // N.B. A byte is used to store the block size in units of the
        //      smallest block size. Therefore, if the number of small
        //      blocks in the page is greater than 255, the block size
        //      is set to 255.
        //

        if ((PAGE_SIZE / POOL_SMALLEST_BLOCK) > 255) {
            Entry->BlockSize = 255;

        } else {
            Entry->BlockSize = (UCHAR)(PAGE_SIZE / POOL_SMALLEST_BLOCK);
        }

        Entry->PreviousSize = 0;
        ListHead = &PoolDesc->ListHeads[POOL_LIST_HEADS - 1];

        CHECK_LIST(__LINE__, ListHead, 0);
        PrivateInsertHeadList(ListHead, ((PLIST_ENTRY)((PCHAR)Entry + POOL_OVERHEAD)));
        CHECK_LIST(__LINE__, ListHead, 0);
        CHECK_LIST(__LINE__, ((PLIST_ENTRY)((PCHAR)Entry + POOL_OVERHEAD)), 0);

    } while (TRUE);
}

VOID
ExInsertPoolTag (
    ULONG Tag,
    PVOID Va,
    SIZE_T NumberOfBytes,
    POOL_TYPE PoolType
    )

/*++

Routine Description:

    This function inserts a pool tag in the tag table and increments the
    number of allocates and updates the total allocation size.

    This function also inserts the pool tag in the big page tag table.

    N.B. This function is for use by memory management ONLY.

Arguments:

    Tag - Supplies the tag used to insert an entry in the tag table.

    Va - Supplies the allocated virtual address.

    NumberOfBytes - Supplies the allocation size in bytes.

    PoolType - Supplies the pool type.

Return Value:

    None.

Environment:

    No pool locks held so pool may be freely allocated here as needed.

--*/

{
    ULONG NumberOfPages;

    // Session allocations are accounted elsewhere; this path is
    // global-pool only.
    ASSERT ((PoolType & SESSION_POOL_MASK) == 0);

    if ((PoolBigPageTable) && (NumberOfBytes >= PAGE_SIZE)) {

        NumberOfPages = BYTES_TO_PAGES(NumberOfBytes);

        //
        // If the big-page table has no room, charge the allocation to
        // the catch-all 'BIG ' tag instead of dropping it.
        //

        if (ExpAddTagForBigPages((PVOID)Va, Tag, NumberOfPages) == FALSE) {
            Tag = ' GIB';
        }
    }

    if (PoolTrackTable != NULL) {
        ExpInsertPoolTracker (Tag, NumberOfBytes, NonPagedPool);
    }
}

VOID
ExRemovePoolTag (
    ULONG Tag,
    PVOID Va,
    SIZE_T NumberOfBytes
    )

/*++

Routine Description:

    This function removes a pool tag from the tag table and increments the
    number of frees and updates the total allocation size.

    This function also removes the pool tag from the big page tag table.

    N.B. This function is for use by memory management ONLY.

Arguments:

    Tag - Supplies the tag used to remove an entry in the tag table.

    Va - Supplies the allocated virtual address.

    NumberOfBytes - Supplies the allocation size in bytes.

Return Value:

    None.

Environment:

    No pool locks held so pool may be freely allocated here as needed.

--*/

{
    if ((PoolBigPageTable) && (NumberOfBytes >= PAGE_SIZE)) {
        ExpFindAndRemoveTagBigPages (Va);
    }

    if (PoolTrackTable != NULL) {
        ExpRemovePoolTracker(Tag,
                             (ULONG)NumberOfBytes,
                             NonPagedPool);
    }
}


VOID
ExpInsertPoolTracker (
    IN ULONG Key,
    IN SIZE_T Size,
    IN POOL_TYPE PoolType
    )

/*++

Routine Description:

    This function inserts a pool tag in the tag table and increments the
    number of allocates and updates the total allocation size.

Arguments:

    Key - Supplies the key value used to locate a matching entry in the
        tag table.

    Size - Supplies the allocation size.

    PoolType - Supplies the pool type.

Return Value:

    None.

Environment:

    No pool locks held so pool may be freely allocated here as needed.

--*/

{
    USHORT Result;
    ULONG Hash;
    ULONG OriginalKey;
    ULONG OriginalHash;
    ULONG Index;
    KIRQL OldIrql;
    KIRQL LockHandle;
    ULONG BigPages;
    LOGICAL HashedIt;
    SIZE_T NewSize;
    SIZE_T SizeInBytes;
    SIZE_T NewSizeInBytes;
    SIZE_T NewSizeMask;
    PPOOL_TRACKER_TABLE OldTable;
    PPOOL_TRACKER_TABLE NewTable;

    ASSERT ((PoolType & SESSION_POOL_MASK) == 0);

    //
    // Ignore protected pool bit except for returned hash index.
    //
    // NOTE(review): Result is computed here but never consumed in this
    // routine (it is VOID) — appears vestigial; confirm before removing.
    //

    if (Key & PROTECTED_POOL) {
        Key &= ~PROTECTED_POOL;
        Result = (USHORT)(PROTECTED_POOL >> 16);
    } else {
        Result = 0;
    }

    // PoolHitTag is a debugging aid: break when the watched tag is seen.
    if (Key == PoolHitTag) {
        DbgBreakPoint();
    }

retry:

    //
    // Compute hash index and search for pool tag. Open addressing with
    // linear probing; the last slot is reserved as the overflow bucket.
    //

    ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);

    Hash = ((40543*((((((((PUCHAR)&Key)[0]<<2)^((PUCHAR)&Key)[1])<<2)^((PUCHAR)&Key)[2])<<2)^((PUCHAR)&Key)[3]))>>2) & (ULONG)PoolTrackTableMask;
    Index = Hash;

    do {
        if (PoolTrackTable[Hash].Key == Key) {
            PoolTrackTable[Hash].Key = Key;
            goto EntryFound;
        }

        if (PoolTrackTable[Hash].Key == 0 && Hash != PoolTrackTableSize - 1) {
            PoolTrackTable[Hash].Key = Key;
            goto EntryFound;
        }

        Hash = (Hash + 1) & (ULONG)PoolTrackTableMask;
    } while (Hash != Index);

    //
    // No matching entry and no free entry was found.
    // If the overflow bucket has been used then expansion of the tracker table
    // is not allowed because a subsequent free of a tag can go negative as the
    // original allocation is in overflow and a newer allocation may be
    // distinct.
    //

    NewSize = ((PoolTrackTableSize - 1) << 1) + 1;
    NewSizeInBytes = NewSize * sizeof(POOL_TRACKER_TABLE);

    SizeInBytes = PoolTrackTableSize * sizeof(POOL_TRACKER_TABLE);

    if ((NewSizeInBytes > SizeInBytes) &&
        (PoolTrackTable[PoolTrackTableSize - 1].Key == 0)) {

        ExpLockNonPagedPool(LockHandle);

        NewTable = MiAllocatePoolPages (NonPagedPool,
                                        NewSizeInBytes,
                                        FALSE);

        ExpUnlockNonPagedPool(LockHandle);

        if (NewTable != NULL) {

            OldTable = (PVOID)PoolTrackTable;

            KdPrint(("POOL:grew track table (%p, %p, %p)\n",
                     OldTable,
                     PoolTrackTableSize,
                     NewTable));

            RtlZeroMemory ((PVOID)NewTable, NewSizeInBytes);

            //
            // Rehash all the entries into the new table.
            //

            NewSizeMask = NewSize - 2;

            for (OriginalHash = 0; OriginalHash < PoolTrackTableSize; OriginalHash += 1) {
                OriginalKey = PoolTrackTable[OriginalHash].Key;

                if (OriginalKey == 0) {
                    continue;
                }

                Hash = (ULONG)((40543*((((((((PUCHAR)&OriginalKey)[0]<<2)^((PUCHAR)&OriginalKey)[1])<<2)^((PUCHAR)&OriginalKey)[2])<<2)^((PUCHAR)&OriginalKey)[3]))>>2) & (ULONG)NewSizeMask;
                Index = Hash;

                HashedIt = FALSE;
                do {
                    if (NewTable[Hash].Key == 0 && Hash != NewSize - 1) {
                        RtlCopyMemory ((PVOID)&NewTable[Hash],
                                       (PVOID)&PoolTrackTable[OriginalHash],
                                       sizeof(POOL_TRACKER_TABLE));
                        HashedIt = TRUE;
                        break;
                    }

                    Hash = (Hash + 1) & (ULONG)NewSizeMask;
                } while (Hash != Index);

                //
                // No matching entry and no free entry was found, have to bail.
                //

                if (HashedIt == FALSE) {
                    KdPrint(("POOL:rehash of track table failed (%p, %p, %p %p)\n",
                             OldTable,
                             PoolTrackTableSize,
                             NewTable,
                             OriginalKey));

                    ExpLockNonPagedPool(LockHandle);

                    MiFreePoolPages (NewTable);

                    ExpUnlockNonPagedPool(LockHandle);

                    goto overflow;
                }
            }

            //
            // Publish the new table, drop the tag lock, then free the old
            // table and fix up the 'Pool' tag accounting before retrying
            // the original insertion against the grown table.
            //

            PoolTrackTable = NewTable;
            PoolTrackTableSize = NewSize;
            PoolTrackTableMask = NewSizeMask;

            ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);

            ExpLockNonPagedPool(LockHandle);

            BigPages = MiFreePoolPages (OldTable);

            ExpUnlockNonPagedPool(LockHandle);

            ExpRemovePoolTracker ('looP',
                                  BigPages * PAGE_SIZE,
                                  NonPagedPool);

            ExpInsertPoolTracker ('looP',
                                  (ULONG) ROUND_TO_PAGES(NewSizeInBytes),
                                  NonPagedPool);

            goto retry;
        }
    }

overflow:

    //
    // Use the very last entry as a bit bucket for overflows.
    //

    Hash = (ULONG)PoolTrackTableSize - 1;

    PoolTrackTable[Hash].Key = 'lfvO';

    //
    // Update pool tracker table entry.
    //

EntryFound:

    if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
        PoolTrackTable[Hash].PagedAllocs += 1;
        PoolTrackTable[Hash].PagedBytes += Size;

    } else {
        PoolTrackTable[Hash].NonPagedAllocs += 1;
        PoolTrackTable[Hash].NonPagedBytes += Size;
    }
    ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);

    return;
}


VOID
ExpRemovePoolTracker (
    IN ULONG Key,
    IN ULONG Size,
    IN POOL_TYPE PoolType
    )

/*++

Routine Description:

    This function increments the number of frees and updates the total
    allocation size.

Arguments:

    Key - Supplies the key value used to locate a matching entry in the
        tag table.
02248 02249 Size - Supplies the allocation size. 02250 02251 PoolType - Supplies the pool type. 02252 02253 Return Value: 02254 02255 None. 02256 02257 --*/ 02258 02259 { 02260 ULONG Hash; 02261 ULONG Index; 02262 KIRQL OldIrql; 02263 02264 // 02265 // Ignore protected pool bit 02266 // 02267 02268 Key &= ~PROTECTED_POOL; 02269 if (Key == PoolHitTag) { 02270 DbgBreakPoint(); 02271 } 02272 02273 // 02274 // Compute hash index and search for pool tag. 02275 // 02276 02277 ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql); 02278 02279 Hash = ((40543*((((((((PUCHAR)&Key)[0]<<2)^((PUCHAR)&Key)[1])<<2)^((PUCHAR)&Key)[2])<<2)^((PUCHAR)&Key)[3]))>>2) & (ULONG)PoolTrackTableMask; 02280 Index = Hash; 02281 02282 do { 02283 if (PoolTrackTable[Hash].Key == Key) { 02284 goto EntryFound; 02285 } 02286 02287 if (PoolTrackTable[Hash].Key == 0 && Hash != PoolTrackTableSize - 1) { 02288 KdPrint(("POOL: Unable to find tracker %lx, table corrupted\n", Key)); 02289 ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql); 02290 return; 02291 } 02292 02293 Hash = (Hash + 1) & (ULONG)PoolTrackTableMask; 02294 } while (Hash != Index); 02295 02296 // 02297 // No matching entry and no free entry was found. 02298 // 02299 02300 Hash = (ULONG)PoolTrackTableSize - 1; 02301 02302 // 02303 // Update pool tracker table entry. 02304 // 02305 02306 EntryFound: 02307 02308 if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) { 02309 PoolTrackTable[Hash].PagedBytes -= Size; 02310 PoolTrackTable[Hash].PagedFrees += 1; 02311 02312 } else { 02313 PoolTrackTable[Hash].NonPagedBytes -= Size; 02314 PoolTrackTable[Hash].NonPagedFrees += 1; 02315 } 02316 02317 ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql); 02318 02319 return; 02320 } 02321 02322 02323 LOGICAL 02324 ExpAddTagForBigPages ( 02325 IN PVOID Va, 02326 IN ULONG Key, 02327 IN ULONG NumberOfPages 02328 ) 02329 /*++ 02330 02331 Routine Description: 02332 02333 This function inserts a pool tag in the big page tag table. 
02334 02335 Arguments: 02336 02337 Va - Supplies the allocated virtual address. 02338 02339 Key - Supplies the key value used to locate a matching entry in the 02340 tag table. 02341 02342 NumberOfPages - Supplies the number of pages that were allocated. 02343 02344 Return Value: 02345 02346 TRUE if an entry was allocated, FALSE if not. 02347 02348 Environment: 02349 02350 No pool locks held so the table may be freely expanded here as needed. 02351 02352 --*/ 02353 { 02354 ULONG Hash; 02355 ULONG BigPages; 02356 PVOID OldTable; 02357 LOGICAL Inserted; 02358 KIRQL OldIrql; 02359 KIRQL LockHandle; 02360 SIZE_T SizeInBytes; 02361 SIZE_T NewSizeInBytes; 02362 PPOOL_TRACKER_BIG_PAGES NewTable; 02363 PPOOL_TRACKER_BIG_PAGES p; 02364 02365 retry: 02366 02367 Inserted = TRUE; 02368 Hash = (ULONG)(((ULONG_PTR)Va >> PAGE_SHIFT) & PoolBigPageTableHash); 02369 ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql); 02370 while ((LONG_PTR)PoolBigPageTable[Hash].Va < 0) { 02371 Hash += 1; 02372 if (Hash >= PoolBigPageTableSize) { 02373 if (!Inserted) { 02374 02375 // 02376 // Try to expand the tracker table. 
02377 // 02378 02379 SizeInBytes = PoolBigPageTableSize * sizeof(POOL_TRACKER_BIG_PAGES); 02380 NewSizeInBytes = (SizeInBytes << 1); 02381 02382 if (NewSizeInBytes > SizeInBytes) { 02383 02384 ExpLockNonPagedPool(LockHandle); 02385 02386 NewTable = MiAllocatePoolPages (NonPagedPool, 02387 NewSizeInBytes, 02388 FALSE); 02389 02390 ExpUnlockNonPagedPool(LockHandle); 02391 02392 if (NewTable != NULL) { 02393 02394 OldTable = (PVOID)PoolBigPageTable; 02395 02396 KdPrint(("POOL:grew big table (%p, %p, %p)\n", 02397 OldTable, 02398 PoolBigPageTableSize, 02399 NewTable)); 02400 02401 RtlCopyMemory ((PVOID)NewTable, 02402 OldTable, 02403 SizeInBytes); 02404 02405 RtlZeroMemory ((PVOID)(NewTable + PoolBigPageTableSize), 02406 NewSizeInBytes - SizeInBytes); 02407 02408 PoolBigPageTable = NewTable; 02409 PoolBigPageTableSize <<= 1; 02410 PoolBigPageTableHash = PoolBigPageTableSize - 1; 02411 02412 ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql); 02413 02414 ExpLockNonPagedPool(LockHandle); 02415 02416 BigPages = MiFreePoolPages (OldTable); 02417 02418 ExpUnlockNonPagedPool(LockHandle); 02419 02420 ExpRemovePoolTracker ('looP', 02421 BigPages * PAGE_SIZE, 02422 NonPagedPool); 02423 02424 ExpInsertPoolTracker ('looP', 02425 (ULONG) ROUND_TO_PAGES(NewSizeInBytes), 02426 NonPagedPool); 02427 02428 goto retry; 02429 } 02430 } 02431 02432 if (!FirstPrint) { 02433 KdPrint(("POOL:unable to insert big page slot %lx\n",Key)); 02434 FirstPrint = TRUE; 02435 } 02436 02437 ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql); 02438 return FALSE; 02439 } 02440 02441 Hash = 0; 02442 Inserted = FALSE; 02443 } 02444 } 02445 02446 p = &PoolBigPageTable[Hash]; 02447 02448 ASSERT ((LONG_PTR)Va < 0); 02449 02450 p->Va = Va; 02451 p->Key = Key; 02452 p->NumberOfPages = NumberOfPages; 02453 02454 ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql); 02455 02456 return TRUE; 02457 } 02458 02459 02460 ULONG 02461 ExpFindAndRemoveTagBigPages ( 02462 IN PVOID Va 02463 ) 02464 02465 { 02466 ULONG Hash; 02467 LOGICAL 
Inserted; 02468 KIRQL OldIrql; 02469 ULONG ReturnKey; 02470 02471 Inserted = TRUE; 02472 Hash = (ULONG)(((ULONG_PTR)Va >> PAGE_SHIFT) & PoolBigPageTableHash); 02473 ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql); 02474 while (PoolBigPageTable[Hash].Va != Va) { 02475 Hash += 1; 02476 if (Hash >= PoolBigPageTableSize) { 02477 if (!Inserted) { 02478 if (!FirstPrint) { 02479 KdPrint(("POOL:unable to find big page slot %lx\n",Va)); 02480 FirstPrint = TRUE; 02481 } 02482 02483 ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql); 02484 return ' GIB'; 02485 } 02486 02487 Hash = 0; 02488 Inserted = FALSE; 02489 } 02490 } 02491 02492 ASSERT ((LONG_PTR)Va < 0); 02493 (ULONG_PTR)PoolBigPageTable[Hash].Va &= MAXLONG_PTR; 02494 02495 ReturnKey = PoolBigPageTable[Hash].Key; 02496 ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql); 02497 return ReturnKey; 02498 } 02499 02500 02501 ULONG 02502 ExpAllocatePoolWithQuotaHandler( 02503 IN NTSTATUS ExceptionCode, 02504 IN PVOID PoolAddress, 02505 IN LOGICAL ContinueSearch 02506 ) 02507 02508 /*++ 02509 02510 Routine Description: 02511 02512 This function is called when an exception occurs in ExFreePool 02513 while quota is being charged to a process. 02514 02515 Its function is to deallocate the pool block and continue the search 02516 for an exception handler. 02517 02518 Arguments: 02519 02520 ExceptionCode - Supplies the exception code that caused this 02521 function to be entered. 02522 02523 PoolAddress - Supplies the address of a pool block that needs to be 02524 deallocated. 02525 02526 ContinueSearch - Supplies a value that if TRUE causes the exception 02527 search to continue. This is used in allocate pool with quota 02528 calls that do not contain the pool quota mask bit set. 02529 02530 Return Value: 02531 02532 EXCEPTION_CONTINUE_SEARCH - The exception should be propagated to the 02533 caller of ExAllocatePoolWithQuota. 

--*/

{
    if ( PoolAddress ) {
        // An exception after the block was allocated can only be the
        // quota charge failing - back the allocation out.
        ASSERT(ExceptionCode == STATUS_QUOTA_EXCEEDED);
        ExFreePool(PoolAddress);

    } else {
        ASSERT(ExceptionCode == STATUS_INSUFFICIENT_RESOURCES);
    }

    return ContinueSearch ? EXCEPTION_CONTINUE_SEARCH : EXCEPTION_EXECUTE_HANDLER;
}

PVOID
ExAllocatePoolWithQuota(
    IN POOL_TYPE PoolType,
    IN SIZE_T NumberOfBytes
    )

/*++

Routine Description:

    This function allocates a block of pool of the specified type,
    returns a pointer to the allocated block, and if the binary buddy
    allocator was used to satisfy the request, charges pool quota to the
    current process.  This function is used to access both the
    page-aligned pools, and the binary buddy.

    NOTE(review): the "binary buddy" wording predates the Aug-1991
    rewrite noted in the file header; the list-based allocator below is
    what actually runs - confirm before relying on this description.

    If the number of bytes specifies a size that is too large to be
    satisfied by the appropriate binary buddy pool, then the
    page-aligned pool allocator is used.  The allocated block will be
    page-aligned and a page-sized multiple.  No quota is charged to the
    current process if this is the case.

    Otherwise, the appropriate binary buddy pool is used.  The allocated
    block will be 64-bit aligned, but will not be page aligned.  After
    the allocation completes, an attempt will be made to charge pool
    quota (of the appropriate type) to the current process object.  If
    the quota charge succeeds, then the pool block's header is adjusted
    to point to the current process.  The process object is not
    dereferenced until the pool is deallocated and the appropriate
    amount of quota is returned to the process.  Otherwise, the pool is
    deallocated, a "quota exceeded" condition is raised.

Arguments:

    PoolType - Supplies the type of pool to allocate.  If the pool type
        is one of the "MustSucceed" pool types and sufficient quota
        exists, then this call will always succeed and return a pointer
        to allocated pool.  Otherwise, if the system cannot allocate
        the requested amount of memory a STATUS_INSUFFICIENT_RESOURCES
        status is raised.

    NumberOfBytes - Supplies the number of bytes to allocate.

Return Value:

    NON-NULL - Returns a pointer to the allocated pool.

    Unspecified - If insufficient quota exists to complete the pool
        allocation, the return value is unspecified.

--*/

{
    // Thin wrapper: default tag 'None' ('enoN' stored little-endian).
    return (ExAllocatePoolWithQuotaTag (PoolType, NumberOfBytes, 'enoN'));
}


PVOID
ExAllocatePoolWithQuotaTag(
    IN POOL_TYPE PoolType,
    IN SIZE_T NumberOfBytes,
    IN ULONG Tag
    )

/*++

Routine Description:

    This function allocates a block of pool of the specified type,
    returns a pointer to the allocated block, and if the binary buddy
    allocator was used to satisfy the request, charges pool quota to the
    current process.  This function is used to access both the
    page-aligned pools, and the binary buddy.

    NOTE(review): see ExAllocatePoolWithQuota - the "binary buddy"
    wording appears stale relative to the current allocator.

    If the number of bytes specifies a size that is too large to be
    satisfied by the appropriate binary buddy pool, then the
    page-aligned pool allocator is used.  The allocated block will be
    page-aligned and a page-sized multiple.  No quota is charged to the
    current process if this is the case.

    Otherwise, the appropriate binary buddy pool is used.  The allocated
    block will be 64-bit aligned, but will not be page aligned.  After
    the allocation completes, an attempt will be made to charge pool
    quota (of the appropriate type) to the current process object.  If
    the quota charge succeeds, then the pool block's header is adjusted
    to point to the current process.  The process object is not
    dereferenced until the pool is deallocated and the appropriate
    amount of quota is returned to the process.  Otherwise, the pool is
    deallocated, a "quota exceeded" condition is raised.

Arguments:

    PoolType - Supplies the type of pool to allocate.  If the pool type
        is one of the "MustSucceed" pool types and sufficient quota
        exists, then this call will always succeed and return a pointer
        to allocated pool.  Otherwise, if the system cannot allocate
        the requested amount of memory a STATUS_INSUFFICIENT_RESOURCES
        status is raised.

    NumberOfBytes - Supplies the number of bytes to allocate.

    Tag - Supplies the four-character pool tag to record for this
        allocation.

Return Value:

    NON-NULL - Returns a pointer to the allocated pool.

    Unspecified - If insufficient quota exists to complete the pool
        allocation, the return value is unspecified.

--*/

{
    PVOID p;
    PEPROCESS Process;
    PPOOL_HEADER Entry;
    LOGICAL IgnoreQuota;
    LOGICAL RaiseOnQuotaFailure;

    IgnoreQuota = FALSE;
    RaiseOnQuotaFailure = TRUE;

    // Caller may request failure (NULL return) instead of raising when
    // quota cannot be charged.
    if ( PoolType & POOL_QUOTA_FAIL_INSTEAD_OF_RAISE ) {
        RaiseOnQuotaFailure = FALSE;
        PoolType &= ~POOL_QUOTA_FAIL_INSTEAD_OF_RAISE;
    }

    // Quota charging is skipped entirely when disabled (see the
    // POOL_QUOTA_ENABLED definition at the top of this file) or when the
    // kernel stack trace database is active on x86 non-FPO builds.
    if ((POOL_QUOTA_ENABLED == FALSE)
#if i386 && !FPO
        || (NtGlobalFlag & FLG_KERNEL_STACK_TRACE_DB)
#endif // i386 && !FPO
       ) {
        IgnoreQuota = TRUE;
    } else {
        // Encode the quota request into the pool type so the allocator
        // reserves a ProcessBilled field in the block header.
        PoolType = (POOL_TYPE)((UCHAR)PoolType + POOL_QUOTA_MASK);
    }

    p = ExAllocatePoolWithTag(PoolType, NumberOfBytes, Tag);

    //
    // Note - NULL is page aligned.
    //

    if (!PAGE_ALIGNED(p) && !IgnoreQuota) {

        // Special pool blocks carry no quota; return directly.
        if ((p >= MmSpecialPoolStart) && (p < MmSpecialPoolEnd)) {
            return p;
        }

#if POOL_CACHE_SUPPORTED

        //
        // Align entry on pool allocation boundary.
02700 // 02701 02702 if (((ULONG)p & POOL_CACHE_CHECK) == 0) { 02703 Entry = (PPOOL_HEADER)((ULONG)p - PoolCacheSize); 02704 } else { 02705 Entry = (PPOOL_HEADER)((PCH)p - POOL_OVERHEAD); 02706 } 02707 02708 #else 02709 Entry = (PPOOL_HEADER)((PCH)p - POOL_OVERHEAD); 02710 #endif //POOL_CACHE_SUPPORTED 02711 02712 Process = PsGetCurrentProcess(); 02713 02714 // 02715 // Catch exception and back out allocation if necessary 02716 // 02717 02718 try { 02719 02720 Entry->ProcessBilled = NULL; 02721 02722 if (Process != PsInitialSystemProcess) { 02723 02724 PsChargePoolQuota(Process, 02725 PoolType & BASE_POOL_TYPE_MASK, 02726 (ULONG)(Entry->BlockSize << POOL_BLOCK_SHIFT)); 02727 02728 ObReferenceObject(Process); 02729 Entry->ProcessBilled = Process; 02730 } 02731 02732 } except ( ExpAllocatePoolWithQuotaHandler(GetExceptionCode(),p,RaiseOnQuotaFailure)) { 02733 if ( RaiseOnQuotaFailure ) { 02734 KeBugCheck(GetExceptionCode()); 02735 } 02736 else { 02737 p = NULL; 02738 } 02739 } 02740 02741 } else { 02742 if ( !p && RaiseOnQuotaFailure ) { 02743 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES); 02744 } 02745 } 02746 02747 return p; 02748 } 02749 02750 VOID 02751 ExFreePool( 02752 IN PVOID P 02753 ) 02754 { 02755 ExFreePoolWithTag(P, 0); 02756 return; 02757 } 02758 02759 VOID 02760 ExFreePoolWithTag( 02761 IN PVOID P, 02762 IN ULONG TagToFree 02763 ) 02764 { 02765 02766 /*++ 02767 02768 Routine Description: 02769 02770 This function deallocates a block of pool. This function is used to 02771 deallocate to both the page aligned pools and the buddy (less than 02772 a page) pools. 02773 02774 If the address of the block being deallocated is page-aligned, then 02775 the page-aligned pool deallocator is used. 02776 02777 Otherwise, the binary buddy pool deallocator is used. Deallocation 02778 looks at the allocated block's pool header to determine the pool 02779 type and block size being deallocated. 
If the pool was allocated 02780 using ExAllocatePoolWithQuota, then after the deallocation is 02781 complete, the appropriate process's pool quota is adjusted to reflect 02782 the deallocation, and the process object is dereferenced. 02783 02784 Arguments: 02785 02786 P - Supplies the address of the block of pool being deallocated. 02787 02788 Return Value: 02789 02790 None. 02791 02792 --*/ 02793 02794 POOL_TYPE CheckType; 02795 PPOOL_HEADER Entry; 02796 ULONG Index; 02797 KIRQL LockHandle; 02798 PNPAGED_LOOKASIDE_LIST LookasideList; 02799 PPOOL_HEADER NextEntry; 02800 ULONG PoolIndex; 02801 POOL_TYPE PoolType; 02802 PPOOL_DESCRIPTOR PoolDesc; 02803 PEPROCESS ProcessBilled; 02804 LOGICAL Combined; 02805 ULONG BigPages; 02806 ULONG Tag; 02807 LOGICAL GlobalSpace; 02808 PKPRCB Prcb; 02809 PERFINFO_EXFREEPOOLWITHTAG_DECL(); 02810 02811 PERFINFO_FREEPOOL(P); 02812 02813 if ((P >= MmSpecialPoolStart) && (P < MmSpecialPoolEnd)) { 02814 MmFreeSpecialPool (P); 02815 return; 02816 } 02817 02818 ProcessBilled = NULL; 02819 02820 // 02821 // If entry is page aligned, then call free block to the page aligned 02822 // pool. Otherwise, free the block to the allocation lists. 02823 // 02824 02825 if (PAGE_ALIGNED(P)) { 02826 02827 PoolType = MmDeterminePoolType(P); 02828 02829 ASSERT_FREE_IRQL(PoolType, P); 02830 02831 CheckType = PoolType & BASE_POOL_TYPE_MASK; 02832 02833 if (PoolType == PagedPoolSession) { 02834 PoolDesc = ExpSessionPoolDescriptor; 02835 } 02836 else { 02837 PoolDesc = PoolVector[PoolType]; 02838 } 02839 02840 if ((PoolTrackTable != NULL) && (PoolType != PagedPoolSession)) { 02841 Tag = ExpFindAndRemoveTagBigPages(P); 02842 } 02843 02844 LOCK_POOL(PoolDesc, LockHandle); 02845 02846 PoolDesc->RunningDeAllocs += 1; 02847 02848 // 02849 // Large session pool allocations are accounted for directly by 02850 // the memory manager so no need to call MiSessionPoolFreed here. 
02851 // 02852 02853 BigPages = MiFreePoolPages(P); 02854 02855 if ((PoolTrackTable != NULL) && (PoolType != PagedPoolSession)) { 02856 if (Tag & PROTECTED_POOL) { 02857 Tag &= ~PROTECTED_POOL; 02858 TagToFree &= ~PROTECTED_POOL; 02859 if (Tag != TagToFree) { 02860 DbgPrint( "EX: Invalid attempt to free protected pool block %x (%c%c%c%c)\n", 02861 P, 02862 Tag, 02863 Tag >> 8, 02864 Tag >> 16, 02865 Tag >> 24 02866 ); 02867 DbgBreakPoint(); 02868 } 02869 } 02870 02871 ExpRemovePoolTracker(Tag, 02872 BigPages * PAGE_SIZE, 02873 PoolType); 02874 } 02875 02876 // 02877 // Check if an ERESOURCE is currently active in this memory block. 02878 // 02879 02880 FREE_CHECK_ERESOURCE (P, BigPages << PAGE_SHIFT); 02881 02882 // 02883 // Check if a KTIMER is currently active in this memory block 02884 // 02885 02886 FREE_CHECK_KTIMER(P, BigPages << PAGE_SHIFT); 02887 02888 // 02889 // Search worker queues for work items still queued 02890 // 02891 FREE_CHECK_WORKER(P, BigPages << PAGE_SHIFT); 02892 02893 PoolDesc->TotalBigPages -= BigPages; 02894 02895 UNLOCK_POOL(PoolDesc, LockHandle); 02896 02897 return; 02898 } 02899 02900 // 02901 // Align the entry address to a pool allocation boundary. 
02902 // 02903 02904 #if POOL_CACHE_SUPPORTED 02905 02906 if (((ULONG)P & POOL_CACHE_CHECK) == 0) { 02907 Entry = (PPOOL_HEADER)((ULONG)P - PoolCacheSize); 02908 02909 } else { 02910 Entry = (PPOOL_HEADER)((PCHAR)P - POOL_OVERHEAD); 02911 } 02912 02913 #else 02914 02915 Entry = (PPOOL_HEADER)((PCHAR)P - POOL_OVERHEAD); 02916 02917 #endif //POOL_CACHE_SUPPORTED 02918 02919 ASSERT_POOL_NOT_FREE(Entry); 02920 02921 PoolType = (Entry->PoolType & POOL_TYPE_MASK) - 1; 02922 02923 CheckType = PoolType & BASE_POOL_TYPE_MASK; 02924 02925 ASSERT_FREE_IRQL(PoolType, P); 02926 02927 if (Entry->PoolType & POOL_VERIFIER_MASK) { 02928 VerifierFreeTrackedPool (P, 02929 Entry->BlockSize << POOL_BLOCK_SHIFT, 02930 CheckType, 02931 FALSE); 02932 } 02933 02934 PoolDesc = PoolVector[PoolType]; 02935 GlobalSpace = TRUE; 02936 02937 if (Entry->PoolType & SESSION_POOL_MASK) { 02938 if (CheckType == PagedPool) { 02939 PoolDesc = ExpSessionPoolDescriptor; 02940 } 02941 GlobalSpace = FALSE; 02942 } 02943 else if (CheckType == PagedPool) { 02944 PoolDesc = &PoolDesc[DECODE_POOL_INDEX(Entry)]; 02945 } 02946 02947 LOCK_POOL(PoolDesc, LockHandle); 02948 02949 if (!IS_POOL_HEADER_MARKED_ALLOCATED(Entry)) { 02950 KeBugCheckEx (BAD_POOL_CALLER, 7, __LINE__, (ULONG_PTR)Entry, (ULONG_PTR)P); 02951 } 02952 02953 MARK_POOL_HEADER_FREED(Entry); 02954 02955 // 02956 // If this allocation was in session space, let the memory 02957 // manager know to delete it so it won't be considered in use on 02958 // session exit. Note this call must be made with the 02959 // relevant pool still locked. 02960 // 02961 02962 if (GlobalSpace == FALSE) { 02963 MiSessionPoolFreed(P, Entry->BlockSize << POOL_BLOCK_SHIFT, CheckType); 02964 } 02965 02966 UNLOCK_POOL(PoolDesc, LockHandle); 02967 02968 ASSERT_POOL_TYPE_NOT_ZERO(Entry); 02969 02970 // 02971 // Check if an ERESOURCE is currently active in this memory block. 
02972 // 02973 02974 FREE_CHECK_ERESOURCE (Entry, (ULONG)(Entry->BlockSize << POOL_BLOCK_SHIFT)); 02975 02976 // 02977 // Check if a KTIMER is currently active in this memory block. 02978 // 02979 02980 FREE_CHECK_KTIMER(Entry, (ULONG)(Entry->BlockSize << POOL_BLOCK_SHIFT)); 02981 02982 // 02983 // Look for work items still queued 02984 // 02985 02986 FREE_CHECK_WORKER(Entry, (ULONG)(Entry->BlockSize << POOL_BLOCK_SHIFT)); 02987 02988 02989 #if DBG 02990 02991 // 02992 // Check if the pool index field is defined correctly. 02993 // 02994 02995 if (CheckType == NonPagedPool) { 02996 if (DECODE_POOL_INDEX(Entry) != 0) { 02997 KeBugCheckEx(BAD_POOL_CALLER, 2, (ULONG_PTR)Entry, (ULONG_PTR)(*(PULONG)Entry), 0); 02998 } 02999 } 03000 else { 03001 if (GlobalSpace == FALSE) { 03002 // 03003 // All session space allocations have an index of 0. 03004 // 03005 ASSERT (DECODE_POOL_INDEX(Entry) == 0); 03006 } 03007 else if (DECODE_POOL_INDEX(Entry) == 0) { 03008 KeBugCheckEx(BAD_POOL_CALLER, 4, (ULONG_PTR)Entry, *(PULONG)Entry, 0); 03009 } 03010 } 03011 03012 #endif // DBG 03013 03014 // 03015 // If pool tagging is enabled, then update the pool tracking database. 03016 // Otherwise, check to determine if quota was charged when the pool 03017 // block was allocated. 
03018 // 03019 03020 #if defined (_WIN64) 03021 if (Entry->PoolType & POOL_QUOTA_MASK) { 03022 ProcessBilled = Entry->ProcessBilled; 03023 } 03024 03025 Tag = Entry->PoolTag; 03026 if (Tag & PROTECTED_POOL) { 03027 Tag &= ~PROTECTED_POOL; 03028 TagToFree &= ~PROTECTED_POOL; 03029 if (Tag != TagToFree) { 03030 DbgPrint( "EX: Invalid attempt to free protected pool block %x (%c%c%c%c)\n", 03031 P, 03032 Tag, 03033 Tag >> 8, 03034 Tag >> 16, 03035 Tag >> 24 03036 ); 03037 DbgBreakPoint(); 03038 } 03039 } 03040 if (PoolTrackTable != NULL) { 03041 if (GlobalSpace == TRUE) { 03042 ExpRemovePoolTracker(Tag, 03043 Entry->BlockSize << POOL_BLOCK_SHIFT, 03044 PoolType); 03045 03046 } 03047 } 03048 if (ProcessBilled != NULL) { 03049 PsReturnPoolQuota(ProcessBilled, 03050 PoolType & BASE_POOL_TYPE_MASK, 03051 (ULONG)Entry->BlockSize << POOL_BLOCK_SHIFT); 03052 ObDereferenceObject(ProcessBilled); 03053 } 03054 #else 03055 if (Entry->PoolType & POOL_QUOTA_MASK) { 03056 if (PoolTrackTable == NULL) { 03057 ProcessBilled = Entry->ProcessBilled; 03058 Entry->PoolTag = 'atoQ'; 03059 } 03060 } 03061 03062 if (PoolTrackTable != NULL) { 03063 Tag = Entry->PoolTag; 03064 if (Tag & PROTECTED_POOL) { 03065 Tag &= ~PROTECTED_POOL; 03066 TagToFree &= ~PROTECTED_POOL; 03067 if (Tag != TagToFree) { 03068 DbgPrint( "EX: Invalid attempt to free protected pool block %x (%c%c%c%c)\n", 03069 P, 03070 Tag, 03071 Tag >> 8, 03072 Tag >> 16, 03073 Tag >> 24 03074 ); 03075 DbgBreakPoint(); 03076 } 03077 } 03078 if (GlobalSpace == TRUE) { 03079 ExpRemovePoolTracker(Tag, 03080 Entry->BlockSize << POOL_BLOCK_SHIFT , 03081 PoolType); 03082 } 03083 } else if (ProcessBilled != NULL) { 03084 PsReturnPoolQuota(ProcessBilled, 03085 PoolType & BASE_POOL_TYPE_MASK, 03086 (ULONG)Entry->BlockSize << POOL_BLOCK_SHIFT); 03087 ObDereferenceObject(ProcessBilled); 03088 } 03089 #endif 03090 03091 // 03092 // If the pool block is a small block, then attempt to free the block 03093 // to the single entry lookaside list. 
If the free attempt fails, then 03094 // free the block by merging it back into the pool data structures. 03095 // 03096 03097 PoolIndex = DECODE_POOL_INDEX(Entry); 03098 03099 Index = Entry->BlockSize; 03100 03101 if (Index <= POOL_SMALL_LISTS && GlobalSpace == TRUE) { 03102 03103 // 03104 // Attempt to free the small block to a per processor lookaside 03105 // list. 03106 // 03107 03108 if (CheckType == PagedPool) { 03109 if (Isx86FeaturePresent(KF_CMPXCHG8B)) { 03110 Prcb = KeGetCurrentPrcb(); 03111 LookasideList = Prcb->PPPagedLookasideList[Index - 1].P; 03112 LookasideList->L.TotalFrees += 1; 03113 03114 CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, P); 03115 03116 if (ExQueryDepthSList(&LookasideList->L.ListHead) < LookasideList->L.Depth) { 03117 LookasideList->L.FreeHits += 1; 03118 Entry += 1; 03119 ExInterlockedPushEntrySList(&LookasideList->L.ListHead, 03120 (PSINGLE_LIST_ENTRY)Entry, 03121 &LookasideList->Lock); 03122 03123 CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, P); 03124 03125 return; 03126 03127 } else { 03128 LookasideList = Prcb->PPPagedLookasideList[Index - 1].L; 03129 LookasideList->L.TotalFrees += 1; 03130 03131 CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, P); 03132 03133 if (ExQueryDepthSList(&LookasideList->L.ListHead) < LookasideList->L.Depth) { 03134 LookasideList->L.FreeHits += 1; 03135 Entry += 1; 03136 ExInterlockedPushEntrySList(&LookasideList->L.ListHead, 03137 (PSINGLE_LIST_ENTRY)Entry, 03138 &LookasideList->Lock); 03139 03140 CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, P); 03141 03142 return; 03143 } 03144 } 03145 } 03146 03147 } else { 03148 03149 // 03150 // Make sure we don't put a must succeed buffer into the 03151 // regular nonpaged pool list. 
03152 // 03153 03154 if (PoolType != NonPagedPoolMustSucceed) { 03155 Prcb = KeGetCurrentPrcb(); 03156 LookasideList = Prcb->PPNPagedLookasideList[Index - 1].P; 03157 LookasideList->L.TotalFrees += 1; 03158 03159 CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, P); 03160 03161 if (ExQueryDepthSList(&LookasideList->L.ListHead) < LookasideList->L.Depth) { 03162 LookasideList->L.FreeHits += 1; 03163 Entry += 1; 03164 ExInterlockedPushEntrySList(&LookasideList->L.ListHead, 03165 (PSINGLE_LIST_ENTRY)Entry, 03166 &LookasideList->Lock); 03167 03168 CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, P); 03169 03170 return; 03171 03172 } else { 03173 LookasideList = Prcb->PPNPagedLookasideList[Index - 1].L; 03174 LookasideList->L.TotalFrees += 1; 03175 03176 CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, P); 03177 03178 if (ExQueryDepthSList(&LookasideList->L.ListHead) < LookasideList->L.Depth) { 03179 LookasideList->L.FreeHits += 1; 03180 Entry += 1; 03181 ExInterlockedPushEntrySList(&LookasideList->L.ListHead, 03182 (PSINGLE_LIST_ENTRY)Entry, 03183 &LookasideList->Lock); 03184 03185 CHECK_LOOKASIDE_LIST(__LINE__, LookasideList, P); 03186 03187 return; 03188 } 03189 } 03190 } 03191 } 03192 } 03193 03194 ASSERT(PoolIndex == PoolDesc->PoolIndex); 03195 03196 LOCK_POOL(PoolDesc, LockHandle); 03197 03198 CHECK_POOL_HEADER(__LINE__, Entry); 03199 03200 PoolDesc->RunningDeAllocs += 1; 03201 03202 // 03203 // Free the specified pool block. 03204 // 03205 // Check to see if the next entry is free. 03206 // 03207 03208 Combined = FALSE; 03209 NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry + Entry->BlockSize); 03210 if (PAGE_END(NextEntry) == FALSE) { 03211 03212 if (NextEntry->PoolType == 0) { 03213 03214 // 03215 // This block is free, combine with the released block. 
03216 // 03217 03218 Combined = TRUE; 03219 03220 CHECK_LIST(__LINE__, ((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD)), P); 03221 PrivateRemoveEntryList(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD))); 03222 CHECK_LIST(__LINE__, DecodeLink(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD))->Flink), P); 03223 CHECK_LIST(__LINE__, DecodeLink(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD))->Blink), P); 03224 03225 Entry->BlockSize += NextEntry->BlockSize; 03226 } 03227 } 03228 03229 // 03230 // Check to see if the previous entry is free. 03231 // 03232 03233 if (Entry->PreviousSize != 0) { 03234 NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry - Entry->PreviousSize); 03235 if (NextEntry->PoolType == 0) { 03236 03237 // 03238 // This block is free, combine with the released block. 03239 // 03240 03241 Combined = TRUE; 03242 03243 CHECK_LIST(__LINE__, ((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD)), P); 03244 PrivateRemoveEntryList(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD))); 03245 CHECK_LIST(__LINE__, DecodeLink(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD))->Flink), P); 03246 CHECK_LIST(__LINE__, DecodeLink(((PLIST_ENTRY)((PCHAR)NextEntry + POOL_OVERHEAD))->Blink), P); 03247 03248 NextEntry->BlockSize += Entry->BlockSize; 03249 Entry = NextEntry; 03250 } 03251 } 03252 03253 // 03254 // If the block being freed has been combined into a full page, 03255 // then return the free page to memory management. 03256 // 03257 03258 if (PAGE_ALIGNED(Entry) && 03259 (PAGE_END((PPOOL_BLOCK)Entry + Entry->BlockSize) != FALSE)) { 03260 03261 // 03262 // If the pool type is paged pool, then the paged pool page lock 03263 // must be held during the free of the pool pages. 
03264 // 03265 03266 LOCK_IF_PAGED_POOL(CheckType, GlobalSpace); 03267 03268 PERFINFO_FREEPOOLPAGE(CheckType, PoolIndex, Entry, PoolDesc); 03269 03270 MiFreePoolPages(Entry); 03271 03272 UNLOCK_IF_PAGED_POOL(CheckType, GlobalSpace); 03273 03274 PoolDesc->TotalPages -= 1; 03275 03276 } else { 03277 03278 // 03279 // Insert this element into the list. 03280 // 03281 03282 Entry->PoolType = 0; 03283 ENCODE_POOL_INDEX(Entry, PoolIndex); 03284 Index = Entry->BlockSize; 03285 03286 // 03287 // If the freed block was combined with any other block, then 03288 // adjust the size of the next block if necessary. 03289 // 03290 03291 if (Combined != FALSE) { 03292 03293 // 03294 // The size of this entry has changed, if this entry is 03295 // not the last one in the page, update the pool block 03296 // after this block to have a new previous allocation size. 03297 // 03298 03299 NextEntry = (PPOOL_HEADER)((PPOOL_BLOCK)Entry + Entry->BlockSize); 03300 if (PAGE_END(NextEntry) == FALSE) { 03301 NextEntry->PreviousSize = Entry->BlockSize; 03302 } 03303 03304 // 03305 // Reduce fragmentation and insert at the tail in hopes 03306 // neighbors for this will be freed before this is reallocated. 
03307 // 03308 03309 CHECK_LIST(__LINE__, &PoolDesc->ListHeads[Index - 1], P); 03310 PrivateInsertTailList(&PoolDesc->ListHeads[Index - 1], ((PLIST_ENTRY)((PCHAR)Entry + POOL_OVERHEAD))); 03311 CHECK_LIST(__LINE__, &PoolDesc->ListHeads[Index - 1], P); 03312 CHECK_LIST(__LINE__, ((PLIST_ENTRY)((PCHAR)Entry + POOL_OVERHEAD)), P); 03313 03314 } else { 03315 03316 CHECK_LIST(__LINE__, &PoolDesc->ListHeads[Index - 1], P); 03317 PrivateInsertHeadList(&PoolDesc->ListHeads[Index - 1], ((PLIST_ENTRY)((PCHAR)Entry + POOL_OVERHEAD))); 03318 CHECK_LIST(__LINE__, &PoolDesc->ListHeads[Index - 1], P); 03319 CHECK_LIST(__LINE__, ((PLIST_ENTRY)((PCHAR)Entry + POOL_OVERHEAD)), P); 03320 } 03321 } 03322 03323 UNLOCK_POOL(PoolDesc, LockHandle); 03324 } 03325 03326 03327 ULONG 03328 ExQueryPoolBlockSize ( 03329 IN PVOID PoolBlock, 03330 OUT PBOOLEAN QuotaCharged 03331 ) 03332 03333 /*++ 03334 03335 Routine Description: 03336 03337 This function returns the size of the pool block. 03338 03339 Arguments: 03340 03341 PoolBlock - Supplies the address of the block of pool. 03342 03343 QuotaCharged - Supplies a BOOLEAN variable to receive whether or not the 03344 pool block had quota charged. 03345 03346 NOTE: If the entry is bigger than a page, the value PAGE_SIZE is returned 03347 rather than the correct number of bytes. 03348 03349 Return Value: 03350 03351 Size of pool block. 03352 03353 --*/ 03354 03355 { 03356 PPOOL_HEADER Entry; 03357 ULONG size; 03358 03359 if ((PoolBlock >= MmSpecialPoolStart) && (PoolBlock < MmSpecialPoolEnd)) { 03360 *QuotaCharged = FALSE; 03361 return (ULONG)MmQuerySpecialPoolBlockSize (PoolBlock); 03362 } 03363 03364 if (PAGE_ALIGNED(PoolBlock)) { 03365 *QuotaCharged = FALSE; 03366 return PAGE_SIZE; 03367 } 03368 03369 #if POOL_CACHE_SUPPORTED 03370 03371 // 03372 // Align entry on pool allocation boundary. 
03373 // 03374 03375 if (((ULONG)PoolBlock & POOL_CACHE_CHECK) == 0) { 03376 Entry = (PPOOL_HEADER)((ULONG)PoolBlock - PoolCacheSize); 03377 size = (Entry->BlockSize << POOL_BLOCK_SHIFT) - PoolCacheSize; 03378 03379 } else { 03380 Entry = (PPOOL_HEADER)((PCHAR)PoolBlock - POOL_OVERHEAD); 03381 size = (Entry->BlockSize << POOL_BLOCK_SHIFT) - POOL_OVERHEAD; 03382 } 03383 03384 #else 03385 03386 Entry = (PPOOL_HEADER)((PCHAR)PoolBlock - POOL_OVERHEAD); 03387 size = (ULONG)((Entry->BlockSize << POOL_BLOCK_SHIFT) - POOL_OVERHEAD); 03388 03389 #endif //POOL_CACHE_SUPPORTED 03390 03391 #ifdef _WIN64 03392 *QuotaCharged = (BOOLEAN) (Entry->ProcessBilled != NULL); 03393 #else 03394 if ( PoolTrackTable ) { 03395 *QuotaCharged = FALSE; 03396 } 03397 else { 03398 *QuotaCharged = (BOOLEAN) (Entry->ProcessBilled != NULL); 03399 } 03400 #endif 03401 return size; 03402 } 03403 03404 VOID 03405 ExQueryPoolUsage( 03406 OUT PULONG PagedPoolPages, 03407 OUT PULONG NonPagedPoolPages, 03408 OUT PULONG PagedPoolAllocs, 03409 OUT PULONG PagedPoolFrees, 03410 OUT PULONG PagedPoolLookasideHits, 03411 OUT PULONG NonPagedPoolAllocs, 03412 OUT PULONG NonPagedPoolFrees, 03413 OUT PULONG NonPagedPoolLookasideHits 03414 ) 03415 03416 { 03417 ULONG Index; 03418 PNPAGED_LOOKASIDE_LIST Lookaside; 03419 PLIST_ENTRY NextEntry; 03420 PPOOL_DESCRIPTOR pd; 03421 03422 // 03423 // Sum all the paged pool usage. 03424 // 03425 03426 pd = PoolVector[PagedPool]; 03427 *PagedPoolPages = 0; 03428 *PagedPoolAllocs = 0; 03429 *PagedPoolFrees = 0; 03430 03431 for (Index = 0; Index < ExpNumberOfPagedPools + 1; Index += 1) { 03432 *PagedPoolPages += pd[Index].TotalPages + pd[Index].TotalBigPages; 03433 *PagedPoolAllocs += pd[Index].RunningAllocs; 03434 *PagedPoolFrees += pd[Index].RunningDeAllocs; 03435 } 03436 03437 // 03438 // Sum all the nonpaged pool usage. 
VOID
ExReturnPoolQuota(
    IN PVOID P
    )

/*++

Routine Description:

    This function returns quota charged to a subject process when the
    specified pool block was allocated.

Arguments:

    P - Supplies the address of the block of pool being deallocated.

Return Value:

    None.

--*/

{

    PPOOL_HEADER Entry;
    POOL_TYPE PoolType;
    PEPROCESS Process;
#if defined(_ALPHA_) && !defined(_AXP64_)
    // Alpha32 lacks byte-granular stores, so clearing PoolType must be
    // done under the pool lock (see _POOL_LOCK_GRANULAR_ above).
    PPOOL_DESCRIPTOR PoolDesc;
    KIRQL LockHandle;
#endif

    //
    // Do nothing for special pool. No quota was charged.
    //

    if ((P >= MmSpecialPoolStart) && (P < MmSpecialPoolEnd)) {

        return;
    }

    //
    // Align the entry address to a pool allocation boundary.
    //

#if POOL_CACHE_SUPPORTED

    if (((ULONG)P & POOL_CACHE_CHECK) == 0) {
        Entry = (PPOOL_HEADER)((ULONG)P - PoolCacheSize);

    } else {
        Entry = (PPOOL_HEADER)((PCHAR)P - POOL_OVERHEAD);
    }

#else

    Entry = (PPOOL_HEADER)((PCHAR)P - POOL_OVERHEAD);

#endif //POOL_CACHE_SUPPORTED

    //
    // If quota was charged, then return the appropriate quota to the
    // subject process. POOL_QUOTA_ENABLED additionally gates this on
    // 32-bit systems where quota info is unioned with the pool tag.
    //

    if ((Entry->PoolType & POOL_QUOTA_MASK) && POOL_QUOTA_ENABLED) {

        PoolType = (Entry->PoolType & POOL_TYPE_MASK) - 1;

#if _POOL_LOCK_GRANULAR_
        // Locate the descriptor owning this block so the granular lock
        // protecting PoolType/PreviousSize updates can be taken.
        PoolDesc = PoolVector[PoolType];
        if (PoolType == PagedPool) {
            PoolDesc = &PoolDesc[DECODE_POOL_INDEX(Entry)];
        }
#endif

        LOCK_POOL_GRANULAR(PoolDesc, LockHandle);

        // Clear the quota bit so a later free does not return quota twice.
        Entry->PoolType &= ~POOL_QUOTA_MASK;

        UNLOCK_POOL_GRANULAR(PoolDesc, LockHandle);

        Process = Entry->ProcessBilled;

#if !defined (_WIN64)
        // On 32-bit the tag field overlays the billed process pointer;
        // stamp it so debuggers see this was a quota-charged block.
        Entry->PoolTag = 'atoQ';
#endif

        if (Process != NULL) {
            // Return the quota for the full (header-inclusive) block size,
            // then drop the reference taken when quota was charged.
            PsReturnPoolQuota(Process,
                              PoolType & BASE_POOL_TYPE_MASK,
                              (ULONG)Entry->BlockSize << POOL_BLOCK_SHIFT);

            ObDereferenceObject(Process);
        }

    }

    return;
}
NTSTATUS
ExpSnapShotPoolPages(
    IN PVOID Address,
    IN ULONG Size,
    IN OUT PSYSTEM_POOL_INFORMATION PoolInformation,
    IN OUT PSYSTEM_POOL_ENTRY *PoolEntryInfo,
    IN ULONG Length,
    IN OUT PULONG RequiredLength
    )

/*++

Routine Description:

    Callback used by MmSnapShotPool to record one pool region into the
    caller's SYSTEM_POOL_INFORMATION buffer. Handles three cases:
    a big-page allocation found in PoolBigPageTable, a page validly
    split into small pool blocks (each block gets an entry), or an
    opaque region recorded as a single allocated entry.

    Only works on checked builds or free x86 builds with FPO turned off
    (see the enclosing #if and the comment in mm\allocpag.c).

Arguments:

    Address - Base of the pool region to snapshot.

    Size - Size in bytes of the region.

    PoolInformation - Accumulates the total entry count.

    PoolEntryInfo - In/out cursor into the caller's entry array;
        advanced past each entry written.

    Length - Total size of the caller's buffer.

    RequiredLength - Accumulates the size needed to hold all entries,
        even those that did not fit.

Return Value:

    STATUS_SUCCESS, STATUS_INFO_LENGTH_MISMATCH if the buffer is too
    small (counting continues), or STATUS_COMMITMENT_LIMIT if a split
    page contains a zero-sized block (corrupt chain).

--*/

{
    NTSTATUS Status;
    CLONG i;
    PPOOL_HEADER p;
    PPOOL_TRACKER_BIG_PAGES PoolBig;
    LOGICAL ValidSplitBlock;
    ULONG EntrySize;
    KIRQL OldIrql;

    //
    // Page-aligned regions may be big-page allocations - search the
    // big-page tracker table under its spinlock.
    //

    if (PAGE_ALIGNED(Address) && PoolBigPageTable) {

        ExAcquireSpinLock(&ExpTaggedPoolLock, &OldIrql);

        PoolBig = PoolBigPageTable;

        for (i = 0; i < PoolBigPageTableSize; i += 1, PoolBig += 1) {

            if (PoolBig->NumberOfPages == 0 || PoolBig->Va != Address) {
                continue;
            }

            PoolInformation->NumberOfEntries += 1;
            *RequiredLength += sizeof(SYSTEM_POOL_ENTRY);

            if (Length < *RequiredLength) {
                Status = STATUS_INFO_LENGTH_MISMATCH;
            }
            else {
                (*PoolEntryInfo)->Allocated = TRUE;
                (*PoolEntryInfo)->Size = PoolBig->NumberOfPages << PAGE_SHIFT;
                (*PoolEntryInfo)->AllocatorBackTraceIndex = 0;
                (*PoolEntryInfo)->ProcessChargedQuota = 0;
                // On free builds the tag is only reported when pool
                // tagging is enabled; checked builds always report it.
#if !DBG
                if (NtGlobalFlag & FLG_POOL_ENABLE_TAGGING)
#endif
                (*PoolEntryInfo)->TagUlong = PoolBig->Key;
                (*PoolEntryInfo) += 1;
                Status = STATUS_SUCCESS;
            }

            // First matching table slot wins - release the lock and return.
            ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
            return Status;
        }
        ExReleaseSpinLock(&ExpTaggedPoolLock, OldIrql);
    }

    p = (PPOOL_HEADER)Address;
    ValidSplitBlock = FALSE;

    if (Size == PAGE_SIZE && p->PreviousSize == 0 && p->BlockSize != 0) {
        PPOOL_HEADER PoolAddress;
        PPOOL_HEADER EndPoolAddress;

        //
        // Validate all the pool links before we regard this as a page that
        // has been split into small pool blocks.
        //

        PoolAddress = p;
        EndPoolAddress = (PPOOL_HEADER)((PCHAR) p + PAGE_SIZE);

        do {
            EntrySize = PoolAddress->BlockSize << POOL_BLOCK_SHIFT;
            PoolAddress = (PPOOL_HEADER)((PCHAR)PoolAddress + EntrySize);
            if (PoolAddress == EndPoolAddress) {
                // Chain ends exactly at the page boundary - consistent.
                ValidSplitBlock = TRUE;
                break;
            }
            if (PoolAddress > EndPoolAddress) {
                // Chain overruns the page - corrupt or not a split page.
                break;
            }
            if (PoolAddress->PreviousSize != EntrySize) {
                // Back-link disagrees with forward size - inconsistent.
                break;
            }
        } while (EntrySize != 0);
    }

    if (ValidSplitBlock == TRUE) {

        //
        // Walk the validated chain again, emitting one entry per block.
        //

        p = (PPOOL_HEADER)Address;

        do {
            EntrySize = p->BlockSize << POOL_BLOCK_SHIFT;

            if (EntrySize == 0) {
                return STATUS_COMMITMENT_LIMIT;
            }

            PoolInformation->NumberOfEntries += 1;
            *RequiredLength += sizeof(SYSTEM_POOL_ENTRY);

            if (Length < *RequiredLength) {
                Status = STATUS_INFO_LENGTH_MISMATCH;
            }
            else {
                (*PoolEntryInfo)->Size = EntrySize;
                // A nonzero PoolType marks the block as allocated.
                if (p->PoolType != 0) {
                    (*PoolEntryInfo)->Allocated = TRUE;
                    (*PoolEntryInfo)->AllocatorBackTraceIndex = 0;
                    (*PoolEntryInfo)->ProcessChargedQuota = 0;
#if !DBG
                    if (NtGlobalFlag & FLG_POOL_ENABLE_TAGGING)
#endif
                    (*PoolEntryInfo)->TagUlong = p->PoolTag;
                }
                else {
                    (*PoolEntryInfo)->Allocated = FALSE;
                    (*PoolEntryInfo)->AllocatorBackTraceIndex = 0;
                    (*PoolEntryInfo)->ProcessChargedQuota = 0;

                    // NOTE(review): this branch gates on !defined(DBG)
                    // while the allocated branch gates on !DBG - these
                    // differ when DBG is defined as 0. Confirm which
                    // form is intended; looks like an inconsistency.
#if !defined(DBG) && !defined(_WIN64)
                    if (NtGlobalFlag & FLG_POOL_ENABLE_TAGGING)
#endif
                    (*PoolEntryInfo)->TagUlong = p->PoolTag;
                }

                (*PoolEntryInfo) += 1;
                Status = STATUS_SUCCESS;
            }

            p = (PPOOL_HEADER)((PCHAR)p + EntrySize);
        }
        while (PAGE_END(p) == FALSE);

    }
    else {

        //
        // Not a recognized split page - record the whole region as one
        // allocated entry with no tag information.
        //

        PoolInformation->NumberOfEntries += 1;
        *RequiredLength += sizeof(SYSTEM_POOL_ENTRY);
        if (Length < *RequiredLength) {
            Status = STATUS_INFO_LENGTH_MISMATCH;

        } else {
            (*PoolEntryInfo)->Allocated = TRUE;
            (*PoolEntryInfo)->Size = Size;
            (*PoolEntryInfo)->AllocatorBackTraceIndex = 0;
            (*PoolEntryInfo)->ProcessChargedQuota = 0;
            (*PoolEntryInfo) += 1;
            Status = STATUS_SUCCESS;
        }
    }

    return Status;
}
STATUS_INFO_LENGTH_MISMATCH; 03727 03728 } else { 03729 (*PoolEntryInfo)->Allocated = TRUE; 03730 (*PoolEntryInfo)->Size = Size; 03731 (*PoolEntryInfo)->AllocatorBackTraceIndex = 0; 03732 (*PoolEntryInfo)->ProcessChargedQuota = 0; 03733 (*PoolEntryInfo) += 1; 03734 Status = STATUS_SUCCESS; 03735 } 03736 } 03737 03738 return Status; 03739 } 03740 03741 NTSTATUS 03742 ExSnapShotPool( 03743 IN POOL_TYPE PoolType, 03744 IN PSYSTEM_POOL_INFORMATION PoolInformation, 03745 IN ULONG Length, 03746 OUT PULONG ReturnLength OPTIONAL 03747 ) 03748 { 03749 ULONG Index; 03750 PVOID Lock; 03751 KIRQL LockHandle; 03752 PPOOL_DESCRIPTOR PoolDesc; 03753 ULONG RequiredLength; 03754 NTSTATUS Status; 03755 03756 RequiredLength = FIELD_OFFSET(SYSTEM_POOL_INFORMATION, Entries); 03757 if (Length < RequiredLength) { 03758 return STATUS_INFO_LENGTH_MISMATCH; 03759 } 03760 03761 try { 03762 03763 // 03764 // If the pool type is paged, then lock all of the paged pools. 03765 // Otherwise, lock the nonpaged pool. 03766 // 03767 03768 PoolDesc = PoolVector[PoolType]; 03769 if (PoolType == PagedPool) { 03770 Index = 0; 03771 KeRaiseIrql(APC_LEVEL, &LockHandle); \ 03772 do { 03773 Lock = PoolDesc[Index].LockAddress; 03774 ExAcquireFastMutex((PFAST_MUTEX)Lock); 03775 Index += 1; 03776 } while (Index < ExpNumberOfPagedPools); 03777 03778 } else { 03779 ExAcquireSpinLock(&NonPagedPoolLock, &LockHandle); 03780 } 03781 03782 PoolInformation->EntryOverhead = POOL_OVERHEAD; 03783 PoolInformation->NumberOfEntries = 0; 03784 03785 #if POOL_CACHE_SUPPORTED 03786 if (PoolType & CACHE_ALIGNED_POOL_TYPE_MASK) { 03787 PoolInformation->EntryOverhead = (USHORT)PoolCacheSize; 03788 } 03789 #endif //POOL_CACHE_SUPPORTED 03790 03791 Status = MmSnapShotPool(PoolType, 03792 ExpSnapShotPoolPages, 03793 PoolInformation, 03794 Length, 03795 &RequiredLength); 03796 03797 } finally { 03798 03799 // 03800 // If the pool type is paged, then unlock all of the paged pools. 03801 // Otherwise, unlock the nonpaged pool. 
03802 // 03803 03804 if (PoolType == PagedPool) { 03805 Index = 0; 03806 do { 03807 Lock = PoolDesc[Index].LockAddress; 03808 ExReleaseFastMutex((PFAST_MUTEX)Lock); 03809 Index += 1; 03810 } while (Index < ExpNumberOfPagedPools); 03811 03812 KeLowerIrql(LockHandle); 03813 03814 } else { 03815 ExReleaseSpinLock(&NonPagedPoolLock, LockHandle); 03816 } 03817 } 03818 03819 if (ARGUMENT_PRESENT(ReturnLength)) { 03820 *ReturnLength = RequiredLength; 03821 } 03822 03823 return Status; 03824 } 03825 #endif // DBG || (i386 && !FPO) 03826 03827 VOID 03828 ExAllocatePoolSanityChecks( 03829 IN POOL_TYPE PoolType, 03830 IN SIZE_T NumberOfBytes 03831 ) 03832 03833 /*++ 03834 03835 Routine Description: 03836 03837 This function performs sanity checks on the caller. 03838 03839 Return Value: 03840 03841 None. 03842 03843 Environment: 03844 03845 Only enabled as part of the driver verification package. 03846 03847 --*/ 03848 03849 { 03850 if (NumberOfBytes == 0) { 03851 KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION, 03852 0x0, 03853 KeGetCurrentIrql(), 03854 PoolType, 03855 NumberOfBytes); 03856 } 03857 03858 if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) { 03859 03860 if (KeGetCurrentIrql() > APC_LEVEL) { 03861 03862 KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION, 03863 0x1, 03864 KeGetCurrentIrql(), 03865 PoolType, 03866 NumberOfBytes); 03867 } 03868 } 03869 else { 03870 if (KeGetCurrentIrql() > DISPATCH_LEVEL) { 03871 03872 KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION, 03873 0x2, 03874 KeGetCurrentIrql(), 03875 PoolType, 03876 NumberOfBytes); 03877 } 03878 } 03879 03880 if (PoolType & MUST_SUCCEED_POOL_TYPE_MASK) { 03881 if (NumberOfBytes > PAGE_SIZE) { 03882 KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION, 03883 0x3, 03884 KeGetCurrentIrql(), 03885 PoolType, 03886 NumberOfBytes); 03887 } 03888 } 03889 } 03890 03891 VOID 03892 ExFreePoolSanityChecks( 03893 IN PVOID P 03894 ) 03895 03896 /*++ 03897 03898 Routine Description: 03899 03900 This function performs sanity 
VOID
ExFreePoolSanityChecks(
    IN PVOID P
    )

/*++

Routine Description:

    This function performs sanity checks on the caller.

Return Value:

    None.

Environment:

    Only enabled as part of the driver verification package.

--*/

{
    PPOOL_HEADER Entry;
    POOL_TYPE PoolType;
    PVOID StillQueued;

    //
    // Kernel pool addresses can never be user-mode addresses.
    //

    if (P <= (PVOID)(MM_HIGHEST_USER_ADDRESS)) {
        KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                      0x10,
                      (ULONG_PTR)P,
                      0,
                      0);
    }

    //
    // Special pool: verify no timer, resource, or work item is still
    // live inside the portion of the page being freed.
    //

    if ((P >= MmSpecialPoolStart) && (P < MmSpecialPoolEnd)) {
        StillQueued = KeCheckForTimer(P, PAGE_SIZE - BYTE_OFFSET (P));
        if (StillQueued != NULL) {
            KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                          0x15,
                          (ULONG_PTR)StillQueued,
                          (ULONG_PTR)-1,
                          (ULONG_PTR)P);
        }

        //
        // Check if an ERESOURCE is currently active in this memory block.
        //

        StillQueued = ExpCheckForResource(P, PAGE_SIZE - BYTE_OFFSET (P));
        if (StillQueued != NULL) {
            KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                          0x17,
                          (ULONG_PTR)StillQueued,
                          (ULONG_PTR)-1,
                          (ULONG_PTR)P);
        }

        ExpCheckForWorker (P, PAGE_SIZE - BYTE_OFFSET (P)); // bugchecks inside
        return;
    }

    //
    // Page-aligned frees are whole-page allocations; there is no pool
    // header to inspect, so ask Mm which pool the page belongs to.
    //

    if (PAGE_ALIGNED(P)) {
        PoolType = MmDeterminePoolType(P);

        // Enforce the IRQL ceiling for the owning pool type.
        if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
            if (KeGetCurrentIrql() > APC_LEVEL) {
                KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                              0x11,
                              KeGetCurrentIrql(),
                              PoolType,
                              (ULONG_PTR)P);
            }
        }
        else {
            if (KeGetCurrentIrql() > DISPATCH_LEVEL) {
                KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                              0x12,
                              KeGetCurrentIrql(),
                              PoolType,
                              (ULONG_PTR)P);
            }
        }

        //
        // Just check the first page.
        //

        StillQueued = KeCheckForTimer(P, PAGE_SIZE);
        if (StillQueued != NULL) {
            KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                          0x15,
                          (ULONG_PTR)StillQueued,
                          PoolType,
                          (ULONG_PTR)P);
        }

        //
        // Check if an ERESOURCE is currently active in this memory block.
        //

        StillQueued = ExpCheckForResource(P, PAGE_SIZE);

        if (StillQueued != NULL) {
            KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                          0x17,
                          (ULONG_PTR)StillQueued,
                          PoolType,
                          (ULONG_PTR)P);
        }
    }
    else {

        //
        // Small-block free: validate alignment and the pool header.
        //

#if !defined (_WIN64)
        // NOTE(review): mask 0x17 (binary 10111) is unusual for a pure
        // alignment check (0x7 would test 8-byte alignment) - confirm
        // the intended invariant before changing it.
        if (((ULONG_PTR)P & 0x17) != 0) {
            KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                          0x16,
                          __LINE__,
                          (ULONG_PTR)P,
                          0);
        }
#endif

        Entry = (PPOOL_HEADER)((PCHAR)P - POOL_OVERHEAD);

        // A zero pool type in the header means this was never a valid
        // small pool allocation.
        if ((Entry->PoolType & POOL_TYPE_MASK) == 0) {
            KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                          0x13,
                          __LINE__,
                          (ULONG_PTR)Entry,
                          Entry->Ulong1);
        }

        PoolType = (Entry->PoolType & POOL_TYPE_MASK) - 1;

        // Enforce the IRQL ceiling for the header's pool type.
        if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) {
            if (KeGetCurrentIrql() > APC_LEVEL) {
                KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                              0x11,
                              KeGetCurrentIrql(),
                              PoolType,
                              (ULONG_PTR)P);
            }
        }
        else {
            if (KeGetCurrentIrql() > DISPATCH_LEVEL) {
                KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                              0x12,
                              KeGetCurrentIrql(),
                              PoolType,
                              (ULONG_PTR)P);
            }
        }

        // Freeing a block not marked allocated is a double free or a
        // free of a bogus address.
        if (!IS_POOL_HEADER_MARKED_ALLOCATED(Entry)) {
            KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                          0x14,
                          __LINE__,
                          (ULONG_PTR)Entry,
                          0);
        }

        // Verify no timer object is still queued inside this block.
        StillQueued = KeCheckForTimer(Entry, (ULONG)(Entry->BlockSize << POOL_BLOCK_SHIFT));
        if (StillQueued != NULL) {
            KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                          0x15,
                          (ULONG_PTR)StillQueued,
                          PoolType,
                          (ULONG_PTR)P);
        }

        //
        // Check if an ERESOURCE is currently active in this memory block.
        //

        StillQueued = ExpCheckForResource(Entry, (ULONG)(Entry->BlockSize << POOL_BLOCK_SHIFT));

        if (StillQueued != NULL) {
            KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION,
                          0x17,
                          (ULONG_PTR)StillQueued,
                          PoolType,
                          (ULONG_PTR)P);
        }
    }
}

Generated on Sat May 15 19:41:24 2004 for test by doxygen 1.3.7