Main Page | Class Hierarchy | Class List | File List | Class Members | File Members

allocpag.c

Go to the documentation of this file.
00001 /*++ 00002 00003 Copyright (c) 1989 Microsoft Corporation 00004 00005 Module Name: 00006 00007 allocpag.c 00008 00009 Abstract: 00010 00011 This module contains the routines which allocate and deallocate 00012 one or more pages from paged or nonpaged pool. 00013 00014 Author: 00015 00016 Lou Perazzoli (loup) 6-Apr-1989 00017 Landy Wang (landyw) 02-June-1997 00018 00019 Revision History: 00020 00021 --*/ 00022 00023 #include "mi.h" 00024 00025 VOID 00026 MiInitializeSpecialPool ( 00027 VOID 00028 ); 00029 00030 #ifndef NO_POOL_CHECKS 00031 VOID 00032 MiInitializeSpecialPoolCriteria ( 00033 IN VOID 00034 ); 00035 00036 VOID 00037 MiSpecialPoolTimerDispatch ( 00038 IN PKDPC Dpc, 00039 IN PVOID DeferredContext, 00040 IN PVOID SystemArgument1, 00041 IN PVOID SystemArgument2 00042 ); 00043 00044 PVOID 00045 MmSqueezeBadTags ( 00046 IN SIZE_T NumberOfBytes, 00047 IN ULONG Tag, 00048 IN POOL_TYPE PoolType, 00049 IN ULONG SpecialPoolType 00050 ); 00051 #endif 00052 00053 LOGICAL 00054 MmSetSpecialPool ( 00055 IN LOGICAL Enable 00056 ); 00057 00058 PVOID 00059 MiAllocateSpecialPool ( 00060 IN SIZE_T NumberOfBytes, 00061 IN ULONG Tag, 00062 IN POOL_TYPE PoolType, 00063 IN ULONG SpecialPoolType 00064 ); 00065 00066 VOID 00067 MmFreeSpecialPool ( 00068 IN PVOID P 00069 ); 00070 00071 LOGICAL 00072 MiProtectSpecialPool ( 00073 IN PVOID VirtualAddress, 00074 IN ULONG NewProtect 00075 ); 00076 00077 VOID 00078 MiMakeSpecialPoolPagable ( 00079 IN PVOID VirtualAddress, 00080 IN PMMPTE PointerPte 00081 ); 00082 00083 #ifdef ALLOC_PRAGMA 00084 #pragma alloc_text(INIT, MiInitializeNonPagedPool) 00085 #pragma alloc_text(INIT, MiInitializeSpecialPool) 00086 00087 #pragma alloc_text(PAGELK, MiFindContiguousMemory) 00088 00089 #pragma alloc_text(PAGEHYDRA, MiCheckSessionPoolAllocations) 00090 #pragma alloc_text(PAGEHYDRA, MiSessionPoolAllocated) 00091 #pragma alloc_text(PAGEHYDRA, MiSessionPoolFreed) 00092 #pragma alloc_text(PAGEHYDRA, MiSessionPoolVector) 00093 #pragma alloc_text(PAGEHYDRA, MiInitializeSessionPool) 00094 #pragma alloc_text(PAGEHYDRA, MiFreeSessionPoolBitMaps) 00095 00096 #pragma alloc_text(PAGESPEC, MmFreeSpecialPool) 00097 #pragma alloc_text(PAGESPEC, MiAllocateSpecialPool) 00098 #pragma alloc_text(PAGESPEC, MiMakeSpecialPoolPagable) 00099 #pragma alloc_text(PAGESPEC, MiProtectSpecialPool) 00100 00101 #pragma alloc_text(POOLMI, MiAllocatePoolPages) 00102 #pragma alloc_text(POOLMI, MiFreePoolPages) 00103 00104 #if DBG || (i386 && !FPO) 00105 #pragma alloc_text(PAGELK, MmSnapShotPool) 00106 #endif // DBG || (i386 && !FPO) 00107 #endif 00108 00109 ULONG MmPagedPoolCommit; // used by the debugger 00110 00111 PFN_NUMBER MmAllocatedNonPagedPool; 00112 PFN_NUMBER MiEndOfInitialPoolFrame; 00113 00114 PVOID MmNonPagedPoolExpansionStart; 00115 00116 LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_LIST_HEADS]; 00117 00118 extern POOL_DESCRIPTOR NonPagedPoolDescriptor; 00119 00120 extern LOGICAL MmPagedPoolMaximumDesired; 00121 00122 #define MM_SMALL_ALLOCATIONS 4 00123 00124 #if DBG 00125 00126 // 00127 // Set this to a nonzero (ie: 10000) value to cause every pool allocation to 00128 // be checked and an ASSERT fires if the allocation is larger than this value. 00129 // 00130 00131 ULONG MmCheckRequestInPages = 0; 00132 00133 // 00134 // Set this to a nonzero (ie: 0x23456789) value to cause this pattern to be 00135 // written into freed nonpaged pool pages. 00136 // 00137 00138 ULONG MiFillFreedPool = 0; 00139 #endif 00140 00141 extern ULONG MmUnusedSegmentForceFree; 00142 00143 #define MI_MEMORY_MAKER(Thread) \ 00144 ((Thread->StartAddress == (PVOID)MiModifiedPageWriter) || \ 00145 (Thread->StartAddress == (PVOID)MiMappedPageWriter) || \ 00146 (Thread->StartAddress == (PVOID)MiDereferenceSegmentThread)) 00147 00148 00149 VOID 00150 MiProtectFreeNonPagedPool ( 00151 IN PVOID VirtualAddress, 00152 IN ULONG SizeInPages 00153 ) 00154 00155 /*++ 00156 00157 Routine Description: 00158 00159 This function protects freed nonpaged pool. 00160 00161 Arguments: 00162 00163 VirtualAddress - Supplies the freed pool address to protect. 00164 00165 SizeInPages - Supplies the size of the request in pages. 00166 00167 Return Value: 00168 00169 None. 00170 00171 Environment: 00172 00173 Kernel mode. 00174 00175 --*/ 00176 00177 { 00178 ULONG i; 00179 MMPTE PteContents; 00180 PMMPTE PointerPte; 00181 00182 // 00183 // Prevent anyone from touching the free non paged pool 00184 // 00185 00186 if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress) == 0) { 00187 PointerPte = MiGetPteAddress (VirtualAddress); 00188 00189 for (i = 0; i < SizeInPages; i += 1) { 00190 00191 PteContents = *PointerPte; 00192 00193 PteContents.u.Hard.Valid = 0; 00194 PteContents.u.Soft.Prototype = 1; 00195 00196 KeFlushSingleTb (VirtualAddress, 00197 TRUE, 00198 TRUE, 00199 (PHARDWARE_PTE)PointerPte, 00200 PteContents.u.Flush); 00201 VirtualAddress = (PVOID)((PCHAR)VirtualAddress + PAGE_SIZE); 00202 PointerPte += 1; 00203 } 00204 } 00205 } 00206 00207 00208 LOGICAL 00209 MiUnProtectFreeNonPagedPool ( 00210 IN PVOID VirtualAddress, 00211 IN ULONG SizeInPages 00212 ) 00213 00214 /*++ 00215 00216 Routine Description: 00217 00218 This function unprotects freed nonpaged pool. 00219 00220 Arguments: 00221 00222 VirtualAddress - Supplies the freed pool address to unprotect. 00223 00224 SizeInPages - Supplies the size of the request in pages - zero indicates 00225 to keep going until there are no more protected PTEs (ie: the 00226 caller doesn't know how many protected PTEs there are). 00227 00228 Return Value: 00229 00230 TRUE if pages were unprotected, FALSE if not. 00231 00232 Environment: 00233 00234 Kernel mode. 00235 00236 --*/ 00237 00238 { 00239 PMMPTE PointerPte; 00240 MMPTE PteContents; 00241 ULONG PagesDone; 00242 00243 PagesDone = 0; 00244 00245 // 00246 // Unprotect the previously freed pool so it can be manipulated 00247 // 00248 00249 if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress) == 0) { 00250 00251 PointerPte = MiGetPteAddress((PVOID)VirtualAddress); 00252 00253 PteContents = *PointerPte; 00254 00255 while (PteContents.u.Hard.Valid == 0 && PteContents.u.Soft.Prototype == 1) { 00256 00257 PteContents.u.Hard.Valid = 1; 00258 PteContents.u.Soft.Prototype = 0; 00259 00260 MI_WRITE_VALID_PTE (PointerPte, PteContents); 00261 00262 PagesDone += 1; 00263 00264 if (PagesDone == SizeInPages) { 00265 break; 00266 } 00267 00268 PointerPte += 1; 00269 PteContents = *PointerPte; 00270 } 00271 } 00272 00273 if (PagesDone == 0) { 00274 return FALSE; 00275 } 00276 00277 return TRUE; 00278 } 00279 00280 00281 VOID 00282 MiProtectedPoolInsertList ( 00283 IN PLIST_ENTRY ListHead, 00284 IN PLIST_ENTRY Entry, 00285 IN LOGICAL InsertHead 00286 ) 00287 00288 /*++ 00289 00290 Routine Description: 00291 00292 This function inserts the entry into the protected list. 00293 00294 Arguments: 00295 00296 ListHead - Supplies the list head to add onto. 00297 00298 Entry - Supplies the list entry to insert. 00299 00300 InsertHead - If TRUE, insert at the head otherwise at the tail. 00301 00302 Return Value: 00303 00304 None. 00305 00306 Environment: 00307 00308 Kernel mode. 00309 00310 --*/ 00311 { 00312 PVOID FreeFlink; 00313 PVOID FreeBlink; 00314 PVOID VirtualAddress; 00315 00316 // 00317 // Either the flink or the blink may be pointing 00318 // at protected nonpaged pool. Unprotect now. 00319 // 00320 00321 FreeFlink = (PVOID)0; 00322 FreeBlink = (PVOID)0; 00323 00324 if (IsListEmpty(ListHead) == 0) { 00325 00326 VirtualAddress = (PVOID)ListHead->Flink; 00327 if (MiUnProtectFreeNonPagedPool (VirtualAddress, 1) == TRUE) { 00328 FreeFlink = VirtualAddress; 00329 } 00330 } 00331 00332 if (((PVOID)Entry == ListHead->Blink) == 0) { 00333 VirtualAddress = (PVOID)ListHead->Blink; 00334 if (MiUnProtectFreeNonPagedPool (VirtualAddress, 1) == TRUE) { 00335 FreeBlink = VirtualAddress; 00336 } 00337 } 00338 00339 if (InsertHead == TRUE) { 00340 InsertHeadList (ListHead, Entry); 00341 } 00342 else { 00343 InsertTailList (ListHead, Entry); 00344 } 00345 00346 if (FreeFlink) { 00347 // 00348 // Reprotect the flink. 00349 // 00350 00351 MiProtectFreeNonPagedPool (FreeFlink, 1); 00352 } 00353 00354 if (FreeBlink) { 00355 // 00356 // Reprotect the blink. 00357 // 00358 00359 MiProtectFreeNonPagedPool (FreeBlink, 1); 00360 } 00361 } 00362 00363 00364 VOID 00365 MiProtectedPoolRemoveEntryList ( 00366 IN PLIST_ENTRY Entry 00367 ) 00368 00369 /*++ 00370 00371 Routine Description: 00372 00373 This function unlinks the list pointer from protected freed nonpaged pool. 00374 00375 Arguments: 00376 00377 Entry - Supplies the list entry to remove. 00378 00379 Return Value: 00380 00381 None. 00382 00383 Environment: 00384 00385 Kernel mode. 00386 00387 --*/ 00388 { 00389 PVOID FreeFlink; 00390 PVOID FreeBlink; 00391 PVOID VirtualAddress; 00392 00393 // 00394 // Either the flink or the blink may be pointing 00395 // at protected nonpaged pool. Unprotect now. 00396 // 00397 00398 FreeFlink = (PVOID)0; 00399 FreeBlink = (PVOID)0; 00400 00401 if (IsListEmpty(Entry) == 0) { 00402 00403 VirtualAddress = (PVOID)Entry->Flink; 00404 if (MiUnProtectFreeNonPagedPool (VirtualAddress, 1) == TRUE) { 00405 FreeFlink = VirtualAddress; 00406 } 00407 } 00408 00409 if (((PVOID)Entry == Entry->Blink) == 0) { 00410 VirtualAddress = (PVOID)Entry->Blink; 00411 if (MiUnProtectFreeNonPagedPool (VirtualAddress, 1) == TRUE) { 00412 FreeBlink = VirtualAddress; 00413 } 00414 } 00415 00416 RemoveEntryList (Entry); 00417 00418 if (FreeFlink) { 00419 // 00420 // Reprotect the flink. 00421 // 00422 00423 MiProtectFreeNonPagedPool (FreeFlink, 1); 00424 } 00425 00426 if (FreeBlink) { 00427 // 00428 // Reprotect the blink. 00429 // 00430 00431 MiProtectFreeNonPagedPool (FreeBlink, 1); 00432 } 00433 } 00434 00435 00436 POOL_TYPE 00437 MmDeterminePoolType ( 00438 IN PVOID VirtualAddress 00439 ) 00440 00441 /*++ 00442 00443 Routine Description: 00444 00445 This function determines which pool a virtual address resides within. 00446 00447 Arguments: 00448 00449 VirtualAddress - Supplies the virtual address to determine which pool 00450 it resides within. 00451 00452 Return Value: 00453 00454 Returns the POOL_TYPE (PagedPool, NonPagedPool, PagedPoolSession or 00455 NonPagedPoolSession), it never returns any information about 00456 MustSucceed pool types. 00457 00458 Environment: 00459 00460 Kernel Mode Only. 00461 00462 --*/ 00463 00464 { 00465 if ((VirtualAddress >= MmPagedPoolStart) && 00466 (VirtualAddress <= MmPagedPoolEnd)) { 00467 return PagedPool; 00468 } 00469 00470 if (MI_IS_SESSION_POOL_ADDRESS (VirtualAddress) == TRUE) { 00471 return PagedPoolSession; 00472 } 00473 00474 return NonPagedPool; 00475 } 00476 00477 00478 PVOID 00479 MiSessionPoolVector( 00480 VOID 00481 ) 00482 00483 /*++ 00484 00485 Routine Description: 00486 00487 This function returns the session pool descriptor for the current session. 00488 00489 Arguments: 00490 00491 None. 00492 00493 Return Value: 00494 00495 Pool descriptor. 00496 00497 --*/ 00498 00499 { 00500 return (PVOID)&MmSessionSpace->PagedPool; 00501 } 00502 00503 00504 VOID 00505 MiSessionPoolAllocated( 00506 IN PVOID VirtualAddress, 00507 IN SIZE_T NumberOfBytes, 00508 IN POOL_TYPE PoolType 00509 ) 00510 00511 /*++ 00512 00513 Routine Description: 00514 00515 This function charges the new pool allocation for the current session. 00516 On session exit, this charge must be zero. 00517 00518 Arguments: 00519 00520 VirtualAddress - Supplies the allocated pool address. 00521 00522 NumberOfBytes - Supplies the number of bytes allocated. 00523 00524 PoolType - Supplies the type of the above pool allocation. 00525 00526 Return Value: 00527 00528 None. 00529 00530 --*/ 00531 00532 { 00533 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) { 00534 ASSERT (MI_IS_SESSION_POOL_ADDRESS(VirtualAddress) == FALSE); 00535 MmSessionSpace->NonPagedPoolBytes += NumberOfBytes; 00536 MmSessionSpace->NonPagedPoolAllocations += 1; 00537 } 00538 else { 00539 ASSERT (MI_IS_SESSION_POOL_ADDRESS(VirtualAddress) == TRUE); 00540 MmSessionSpace->PagedPoolBytes += NumberOfBytes; 00541 MmSessionSpace->PagedPoolAllocations += 1; 00542 } 00543 } 00544 00545 00546 VOID 00547 MiSessionPoolFreed( 00548 IN PVOID VirtualAddress, 00549 IN SIZE_T NumberOfBytes, 00550 IN POOL_TYPE PoolType 00551 ) 00552 00553 /*++ 00554 00555 Routine Description: 00556 00557 This function returns the specified pool allocation for the current session. 00558 On session exit, this charge must be zero. 00559 00560 Arguments: 00561 00562 VirtualAddress - Supplies the pool address being freed. 00563 00564 NumberOfBytes - Supplies the number of bytes being freed. 00565 00566 PoolType - Supplies the type of the above pool allocation. 00567 00568 Return Value: 00569 00570 None. 00571 00572 --*/ 00573 00574 { 00575 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) { 00576 ASSERT (MI_IS_SESSION_POOL_ADDRESS(VirtualAddress) == FALSE); 00577 MmSessionSpace->NonPagedPoolBytes -= NumberOfBytes; 00578 MmSessionSpace->NonPagedPoolAllocations -= 1; 00579 } 00580 else { 00581 ASSERT (MI_IS_SESSION_POOL_ADDRESS(VirtualAddress) == TRUE); 00582 MmSessionSpace->PagedPoolBytes -= NumberOfBytes; 00583 MmSessionSpace->PagedPoolAllocations -= 1; 00584 } 00585 } 00586 00587 00588 LOGICAL 00589 MmResourcesAvailable ( 00590 IN POOL_TYPE PoolType, 00591 IN SIZE_T NumberOfBytes, 00592 IN EX_POOL_PRIORITY Priority 00593 ) 00594 00595 /*++ 00596 00597 Routine Description: 00598 00599 This function examines various resources to determine if this 00600 pool allocation should be allowed to proceed. 00601 00602 Arguments: 00603 00604 PoolType - Supplies the type of pool to retrieve information about. 00605 00606 NumberOfBytes - Supplies the number of bytes to allocate. 00607 00608 Priority - Supplies an indication as to how important it is that this 00609 request succeed under low available resource conditions. 00610 Return Value: 00611 00612 TRUE if the pool allocation should be allowed to proceed, FALSE if not. 00613 00614 --*/ 00615 00616 { 00617 KIRQL OldIrql; 00618 PFN_NUMBER NumberOfPages; 00619 SIZE_T FreePoolInBytes; 00620 PETHREAD Thread; 00621 LOGICAL SignalDereferenceThread; 00622 00623 ASSERT (Priority != HighPoolPriority); 00624 ASSERT ((PoolType & MUST_SUCCEED_POOL_TYPE_MASK) == 0); 00625 00626 NumberOfPages = BYTES_TO_PAGES (NumberOfBytes); 00627 00628 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) { 00629 FreePoolInBytes = MmMaximumNonPagedPoolInBytes - (MmAllocatedNonPagedPool << PAGE_SHIFT); 00630 } 00631 else if (PoolType & SESSION_POOL_MASK) { 00632 FreePoolInBytes = MI_SESSION_POOL_SIZE - MmSessionSpace->PagedPoolBytes; 00633 } 00634 else { 00635 FreePoolInBytes = MmSizeOfPagedPoolInBytes - (MmPagedPoolInfo.AllocatedPagedPool << PAGE_SHIFT); 00636 } 00637 00638 // 00639 // Check available VA space. 00640 // 00641 00642 if (Priority == NormalPoolPriority) { 00643 if ((SIZE_T)NumberOfBytes + 512*1024 > FreePoolInBytes) { 00644 Thread = PsGetCurrentThread (); 00645 if (!MI_MEMORY_MAKER(Thread)) { 00646 goto nopool; 00647 } 00648 } 00649 } 00650 else { 00651 if ((SIZE_T)NumberOfBytes + 2*1024*1024 > FreePoolInBytes) { 00652 Thread = PsGetCurrentThread (); 00653 if (!MI_MEMORY_MAKER(Thread)) { 00654 goto nopool; 00655 } 00656 } 00657 } 00658 00659 // 00660 // Paged allocations (session and normal) can also fail for lack of commit. 00661 // 00662 00663 if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) { 00664 if (MmTotalCommittedPages + NumberOfPages > MmTotalCommitLimitMaximum) { 00665 Thread = PsGetCurrentThread (); 00666 if (!MI_MEMORY_MAKER(Thread)) { 00667 MiIssuePageExtendRequestNoWait (NumberOfPages); 00668 goto nopool; 00669 } 00670 } 00671 } 00672 00673 return TRUE; 00674 00675 nopool: 00676 00677 // 00678 // Running low on pool - if this request is not for session pool, 00679 // force unused segment trimming when appropriate. 00680 // 00681 00682 if ((PoolType & SESSION_POOL_MASK) == 0) { 00683 00684 if (MI_UNUSED_SEGMENTS_SURPLUS()) { 00685 KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE); 00686 } 00687 else { 00688 SignalDereferenceThread = FALSE; 00689 LOCK_PFN2 (OldIrql); 00690 if (MmUnusedSegmentForceFree == 0) { 00691 if (!IsListEmpty(&MmUnusedSegmentList)) { 00692 SignalDereferenceThread = TRUE; 00693 MmUnusedSegmentForceFree = 30; 00694 } 00695 } 00696 UNLOCK_PFN2 (OldIrql); 00697 if (SignalDereferenceThread == TRUE) { 00698 KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE); 00699 } 00700 } 00701 } 00702 00703 return FALSE; 00704 } 00705 00706 00707 VOID 00708 MiFreeNonPagedPool ( 00709 IN PVOID StartingAddress, 00710 IN PFN_NUMBER NumberOfPages 00711 ) 00712 00713 /*++ 00714 00715 Routine Description: 00716 00717 This function releases virtually mapped nonpaged expansion pool. 00718 00719 Arguments: 00720 00721 StartingAddress - Supplies the starting address. 00722 00723 NumberOfPages - Supplies the number of pages to free. 00724 00725 Return Value: 00726 00727 None. 00728 00729 Environment: 00730 00731 These functions are used by the internal Mm page allocation/free routines 00732 only and should not be called directly. 00733 00734 Mutexes guarding the pool databases must be held when calling 00735 this function. 00736 00737 --*/ 00738 00739 { 00740 PFN_NUMBER i; 00741 KIRQL OldIrql; 00742 PMMPFN Pfn1; 00743 PMMPTE PointerPte; 00744 PFN_NUMBER PageFrameIndex; 00745 00746 MI_MAKING_MULTIPLE_PTES_INVALID (TRUE); 00747 00748 PointerPte = MiGetPteAddress (StartingAddress); 00749 00750 // 00751 // Return commitment. 00752 // 00753 00754 MiReturnCommitment (NumberOfPages); 00755 00756 MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_NONPAGED_POOL_EXPANSION, 00757 NumberOfPages); 00758 00759 LOCK_PFN2 (OldIrql); 00760 00761 for (i = 0; i < NumberOfPages; i += 1) { 00762 00763 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 00764 00765 // 00766 // Set the pointer to the PTE as empty so the page 00767 // is deleted when the reference count goes to zero. 00768 // 00769 00770 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 00771 ASSERT (Pfn1->u2.ShareCount == 1); 00772 Pfn1->u2.ShareCount = 0; 00773 MI_SET_PFN_DELETED (Pfn1); 00774 #if DBG 00775 Pfn1->u3.e1.PageLocation = StandbyPageList; 00776 #endif //DBG 00777 MiDecrementReferenceCount (PageFrameIndex); 00778 00779 (VOID)KeFlushSingleTb (StartingAddress, 00780 TRUE, 00781 TRUE, 00782 (PHARDWARE_PTE)PointerPte, 00783 ZeroKernelPte.u.Flush); 00784 00785 StartingAddress = (PVOID)((PCHAR)StartingAddress + PAGE_SIZE); 00786 PointerPte += 1; 00787 } 00788 00789 // 00790 // Update the count of available resident pages. 00791 // 00792 00793 MmResidentAvailablePages += NumberOfPages; 00794 MM_BUMP_COUNTER(2, NumberOfPages); 00795 00796 UNLOCK_PFN2(OldIrql); 00797 00798 PointerPte -= NumberOfPages; 00799 00800 MiReleaseSystemPtes (PointerPte, 00801 (ULONG)NumberOfPages, 00802 NonPagedPoolExpansion); 00803 } 00804 00805 00806 PVOID 00807 MiAllocatePoolPages ( 00808 IN POOL_TYPE PoolType, 00809 IN SIZE_T SizeInBytes, 00810 IN ULONG IsLargeSessionAllocation 00811 ) 00812 00813 /*++ 00814 00815 Routine Description: 00816 00817 This function allocates a set of pages from the specified pool 00818 - and returns the starting virtual address to the caller. 00819 00820 For the NonPagedPoolMustSucceed case, the caller must first 00821 attempt to get NonPagedPool and if and ONLY IF that fails, then 00822 MiAllocatePoolPages should be called again with the PoolType of 00823 NonPagedPoolMustSucceed. 00824 00825 Arguments: 00826 00827 PoolType - Supplies the type of pool from which to obtain pages. 00828 00829 SizeInBytes - Supplies the size of the request in bytes. The actual 00830 size returned is rounded up to a page boundary. 00831 00832 IsLargeSessionAllocation - Supplies nonzero if the allocation is a single 00833 large session allocation. Zero otherwise. 00834 00835 Return Value: 00836 00837 Returns a pointer to the allocated pool, or NULL if no more pool is 00838 available. 00839 00840 Environment: 00841 00842 These functions are used by the general pool allocation routines 00843 and should not be called directly. 00844 00845 Mutexes guarding the pool databases must be held when calling 00846 these functions. 00847 00848 Kernel mode, IRQL at DISPATCH_LEVEL. 00849 00850 --*/ 00851 00852 { 00853 PFN_NUMBER SizeInPages; 00854 ULONG StartPosition; 00855 ULONG EndPosition; 00856 PMMPTE StartingPte; 00857 PMMPTE PointerPte; 00858 PMMPFN Pfn1; 00859 MMPTE TempPte; 00860 PFN_NUMBER PageFrameIndex; 00861 PVOID BaseVa; 00862 KIRQL OldIrql; 00863 KIRQL SessionIrql; 00864 PFN_NUMBER i; 00865 PLIST_ENTRY Entry; 00866 PMMFREE_POOL_ENTRY FreePageInfo; 00867 PMM_SESSION_SPACE SessionSpace; 00868 PMM_PAGED_POOL_INFO PagedPoolInfo; 00869 PVOID VirtualAddress; 00870 ULONG Index; 00871 PMMPTE SessionPte; 00872 ULONG WsEntry; 00873 ULONG WsSwapEntry; 00874 ULONG PageTableCount; 00875 LOGICAL FreedPool; 00876 LOGICAL SignalDereferenceThread; 00877 PETHREAD Thread; 00878 00879 SizeInPages = BYTES_TO_PAGES (SizeInBytes); 00880 00881 #if DBG 00882 if (MmCheckRequestInPages != 0) { 00883 ASSERT (SizeInPages < MmCheckRequestInPages); 00884 } 00885 #endif 00886 00887 if (PoolType & MUST_SUCCEED_POOL_TYPE_MASK) { 00888 00889 // 00890 // Pool expansion failed, see if any Must Succeed 00891 // pool is still left. 00892 // 00893 00894 if (MmNonPagedMustSucceed == NULL) { 00895 00896 // 00897 // No more pool exists. Bug Check. 00898 // 00899 00900 KeBugCheckEx (MUST_SUCCEED_POOL_EMPTY, 00901 SizeInBytes, 00902 NonPagedPoolDescriptor.TotalPages, 00903 NonPagedPoolDescriptor.TotalBigPages, 00904 MmAvailablePages); 00905 } 00906 00907 // 00908 // Remove a page from the must succeed pool. More than one is illegal. 00909 // 00910 00911 if (SizeInBytes > PAGE_SIZE) { 00912 KeBugCheckEx (BAD_POOL_CALLER, 00913 0x98, 00914 (ULONG_PTR)SizeInBytes, 00915 (ULONG_PTR)SizeInPages, 00916 PoolType); 00917 } 00918 00919 BaseVa = MmNonPagedMustSucceed; 00920 00921 if (IsLargeSessionAllocation != 0) { 00922 00923 // 00924 // Mark this as a large session allocation in the PFN database. 00925 // 00926 00927 if (MI_IS_PHYSICAL_ADDRESS(BaseVa)) { 00928 PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (BaseVa); 00929 } else { 00930 PointerPte = MiGetPteAddress(BaseVa); 00931 ASSERT (PointerPte->u.Hard.Valid == 1); 00932 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 00933 } 00934 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 00935 00936 ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0); 00937 00938 CONSISTENCY_LOCK_PFN2 (OldIrql); 00939 00940 Pfn1->u3.e1.LargeSessionAllocation = 1; 00941 00942 CONSISTENCY_UNLOCK_PFN2 (OldIrql); 00943 00944 MiSessionPoolAllocated (BaseVa, PAGE_SIZE, NonPagedPool); 00945 } 00946 else if (PoolType & POOL_VERIFIER_MASK) { 00947 00948 if (MI_IS_PHYSICAL_ADDRESS(BaseVa)) { 00949 PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (BaseVa); 00950 } else { 00951 PointerPte = MiGetPteAddress(BaseVa); 00952 ASSERT (PointerPte->u.Hard.Valid == 1); 00953 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 00954 } 00955 00956 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 00957 00958 ASSERT (Pfn1->u3.e1.VerifierAllocation == 0); 00959 Pfn1->u3.e1.VerifierAllocation = 1; 00960 } 00961 00962 MmNonPagedMustSucceed = (PVOID)(*(PULONG_PTR)BaseVa); 00963 return BaseVa; 00964 } 00965 00966 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) { 00967 00968 Index = (ULONG)(SizeInPages - 1); 00969 00970 if (Index >= MI_MAX_FREE_LIST_HEADS) { 00971 Index = MI_MAX_FREE_LIST_HEADS - 1; 00972 } 00973 00974 // 00975 // NonPaged pool is linked together through the pages themselves. 00976 // 00977 00978 while (Index < MI_MAX_FREE_LIST_HEADS) { 00979 00980 Entry = MmNonPagedPoolFreeListHead[Index].Flink; 00981 00982 while (Entry != &MmNonPagedPoolFreeListHead[Index]) { 00983 00984 if (MmProtectFreedNonPagedPool == TRUE) { 00985 MiUnProtectFreeNonPagedPool ((PVOID)Entry, 0); 00986 } 00987 00988 // 00989 // The list is not empty, see if this one has enough space. 00990 // 00991 00992 FreePageInfo = CONTAINING_RECORD(Entry, 00993 MMFREE_POOL_ENTRY, 00994 List); 00995 00996 ASSERT (FreePageInfo->Signature == MM_FREE_POOL_SIGNATURE); 00997 if (FreePageInfo->Size >= SizeInPages) { 00998 00999 // 01000 // This entry has sufficient space, remove 01001 // the pages from the end of the allocation. 01002 // 01003 01004 FreePageInfo->Size -= SizeInPages; 01005 01006 BaseVa = (PVOID)((PCHAR)FreePageInfo + 01007 (FreePageInfo->Size << PAGE_SHIFT)); 01008 01009 if (MmProtectFreedNonPagedPool == FALSE) { 01010 RemoveEntryList (&FreePageInfo->List); 01011 } 01012 else { 01013 MiProtectedPoolRemoveEntryList (&FreePageInfo->List); 01014 } 01015 01016 if (FreePageInfo->Size != 0) { 01017 01018 // 01019 // Insert any remainder into the correct list. 01020 // 01021 01022 Index = (ULONG)(FreePageInfo->Size - 1); 01023 if (Index >= MI_MAX_FREE_LIST_HEADS) { 01024 Index = MI_MAX_FREE_LIST_HEADS - 1; 01025 } 01026 01027 if (MmProtectFreedNonPagedPool == FALSE) { 01028 InsertTailList (&MmNonPagedPoolFreeListHead[Index], 01029 &FreePageInfo->List); 01030 } 01031 else { 01032 MiProtectedPoolInsertList (&MmNonPagedPoolFreeListHead[Index], 01033 &FreePageInfo->List, 01034 FALSE); 01035 01036 MiProtectFreeNonPagedPool ((PVOID)FreePageInfo, 01037 (ULONG)FreePageInfo->Size); 01038 } 01039 } 01040 01041 // 01042 // Adjust the number of free pages remaining in the pool. 01043 // 01044 01045 MmNumberOfFreeNonPagedPool -= SizeInPages; 01046 ASSERT ((LONG)MmNumberOfFreeNonPagedPool >= 0); 01047 01048 // 01049 // Mark start and end of allocation in the PFN database. 01050 // 01051 01052 if (MI_IS_PHYSICAL_ADDRESS(BaseVa)) { 01053 01054 // 01055 // On certain architectures, virtual addresses 01056 // may be physical and hence have no corresponding PTE. 01057 // 01058 01059 PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (BaseVa); 01060 } else { 01061 PointerPte = MiGetPteAddress(BaseVa); 01062 ASSERT (PointerPte->u.Hard.Valid == 1); 01063 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 01064 } 01065 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 01066 01067 ASSERT (Pfn1->u3.e1.StartOfAllocation == 0); 01068 ASSERT (Pfn1->u3.e1.VerifierAllocation == 0); 01069 01070 CONSISTENCY_LOCK_PFN2 (OldIrql); 01071 01072 Pfn1->u3.e1.StartOfAllocation = 1; 01073 01074 if (PoolType & POOL_VERIFIER_MASK) { 01075 Pfn1->u3.e1.VerifierAllocation = 1; 01076 } 01077 01078 CONSISTENCY_UNLOCK_PFN2 (OldIrql); 01079 01080 // 01081 // Mark this as a large session allocation in the PFN database. 01082 // 01083 01084 if (IsLargeSessionAllocation != 0) { 01085 ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0); 01086 01087 CONSISTENCY_LOCK_PFN2 (OldIrql); 01088 01089 Pfn1->u3.e1.LargeSessionAllocation = 1; 01090 01091 CONSISTENCY_UNLOCK_PFN2 (OldIrql); 01092 01093 MiSessionPoolAllocated (BaseVa, 01094 SizeInPages << PAGE_SHIFT, 01095 NonPagedPool); 01096 } 01097 01098 // 01099 // Calculate the ending PTE's address. 01100 // 01101 01102 if (SizeInPages != 1) { 01103 if (MI_IS_PHYSICAL_ADDRESS(BaseVa)) { 01104 Pfn1 += SizeInPages - 1; 01105 } else { 01106 PointerPte += SizeInPages - 1; 01107 ASSERT (PointerPte->u.Hard.Valid == 1); 01108 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 01109 } 01110 } 01111 else if (MmProtectFreedNonPagedPool == FALSE) { 01112 01113 // 01114 // Map this with KSEG0 if possible. 01115 // 01116 #if defined (_X86_) 01117 if ((BaseVa > (PVOID)MM_KSEG2_BASE) && 01118 (PageFrameIndex >= MI_CONVERT_PHYSICAL_TO_PFN(MmSubsectionBase)) && 01119 (PageFrameIndex < MmSubsectionTopPage) && 01120 (MmKseg2Frame != 0)) 01121 #elif defined (_ALPHA_) 01122 if ((BaseVa > (PVOID)KSEG2_BASE) && 01123 (PageFrameIndex >= MI_CONVERT_PHYSICAL_TO_PFN(MmSubsectionBase)) && 01124 (PageFrameIndex < MmSubsectionTopPage)) 01125 #else 01126 if ((BaseVa > (PVOID)KSEG2_BASE) && 01127 (PageFrameIndex < MmSubsectionTopPage)) 01128 #endif 01129 { 01130 BaseVa = (PVOID)(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT)); 01131 } 01132 } 01133 01134 ASSERT (Pfn1->u3.e1.EndOfAllocation == 0); 01135 01136 CONSISTENCY_LOCK_PFN2 (OldIrql); 01137 01138 Pfn1->u3.e1.EndOfAllocation = 1; 01139 01140 CONSISTENCY_UNLOCK_PFN2 (OldIrql); 01141 01142 MmAllocatedNonPagedPool += SizeInPages; 01143 return BaseVa; 01144 } 01145 01146 Entry = FreePageInfo->List.Flink; 01147 01148 if (MmProtectFreedNonPagedPool == TRUE) { 01149 MiProtectFreeNonPagedPool ((PVOID)FreePageInfo, 01150 (ULONG)FreePageInfo->Size); 01151 } 01152 } 01153 Index += 1; 01154 } 01155 01156 // 01157 // No more entries on the list, expand nonpaged pool if 01158 // possible to satisfy this request. 01159 // 01160 01161 // 01162 // Check to see if there are too many unused segments laying 01163 // around. If so, set an event so they get deleted. 01164 // 01165 01166 if (MI_UNUSED_SEGMENTS_SURPLUS()) { 01167 KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE); 01168 } 01169 01170 LOCK_PFN2 (OldIrql); 01171 01172 // 01173 // Make sure we have 1 more than the number of pages 01174 // requested available. 01175 // 01176 01177 if (MmAvailablePages <= SizeInPages) { 01178 01179 UNLOCK_PFN2 (OldIrql); 01180 01181 // 01182 // There are no free physical pages to expand 01183 // nonpaged pool. 01184 // 01185 01186 return NULL; 01187 } 01188 01189 // 01190 // Try to find system PTEs to expand the pool into. 01191 // 01192 01193 StartingPte = MiReserveSystemPtes ((ULONG)SizeInPages, 01194 NonPagedPoolExpansion, 01195 0, 01196 0, 01197 FALSE); 01198 01199 if (StartingPte == NULL) { 01200 01201 // 01202 // There are no free physical PTEs to expand nonpaged pool. 01203 // If there are any cached expansion PTEs, free them now in 01204 // an attempt to get enough contiguous VA for our caller. 01205 // 01206 01207 if ((SizeInPages > 1) && (MmNumberOfFreeNonPagedPool != 0)) { 01208 01209 FreedPool = FALSE; 01210 01211 for (Index = 0; Index < MI_MAX_FREE_LIST_HEADS; Index += 1) { 01212 01213 Entry = MmNonPagedPoolFreeListHead[Index].Flink; 01214 01215 while (Entry != &MmNonPagedPoolFreeListHead[Index]) { 01216 01217 if (MmProtectFreedNonPagedPool == TRUE) { 01218 MiUnProtectFreeNonPagedPool ((PVOID)Entry, 0); 01219 } 01220 01221 // 01222 // The list is not empty, see if this one is virtually 01223 // mapped. 01224 // 01225 01226 FreePageInfo = CONTAINING_RECORD(Entry, 01227 MMFREE_POOL_ENTRY, 01228 List); 01229 01230 if ((!MI_IS_PHYSICAL_ADDRESS(FreePageInfo)) && 01231 ((PVOID)FreePageInfo >= MmNonPagedPoolExpansionStart)) { 01232 if (MmProtectFreedNonPagedPool == FALSE) { 01233 RemoveEntryList (&FreePageInfo->List); 01234 } 01235 else { 01236 MiProtectedPoolRemoveEntryList (&FreePageInfo->List); 01237 } 01238 01239 MmNumberOfFreeNonPagedPool -= FreePageInfo->Size; 01240 ASSERT ((LONG)MmNumberOfFreeNonPagedPool >= 0); 01241 01242 UNLOCK_PFN2 (OldIrql); 01243 01244 FreedPool = TRUE; 01245 01246 MiFreeNonPagedPool ((PVOID)FreePageInfo, 01247 FreePageInfo->Size); 01248 01249 LOCK_PFN2 (OldIrql); 01250 Index = 0; 01251 break; 01252 } 01253 01254 Entry = FreePageInfo->List.Flink; 01255 01256 if (MmProtectFreedNonPagedPool == TRUE) { 01257 MiProtectFreeNonPagedPool ((PVOID)FreePageInfo, 01258 (ULONG)FreePageInfo->Size); 01259 } 01260 } 01261 } 01262 01263 if (FreedPool == TRUE) { 01264 StartingPte = MiReserveSystemPtes ((ULONG)SizeInPages, 01265 NonPagedPoolExpansion, 01266 0, 01267 0, 01268 FALSE); 01269 01270 if (StartingPte != NULL) { 01271 goto gotpool; 01272 } 01273 } 01274 } 01275 01276 UNLOCK_PFN2 (OldIrql); 01277 01278 nopool: 01279 01280 // 01281 // Running low on pool - if this request is not for session pool, 01282 // force unused segment trimming when appropriate. 01283 // 01284 01285 SignalDereferenceThread = FALSE; 01286 LOCK_PFN2 (OldIrql); 01287 if (MmUnusedSegmentForceFree == 0) { 01288 if (!IsListEmpty(&MmUnusedSegmentList)) { 01289 SignalDereferenceThread = TRUE; 01290 MmUnusedSegmentForceFree = 30; 01291 } 01292 } 01293 UNLOCK_PFN2 (OldIrql); 01294 if (SignalDereferenceThread == TRUE) { 01295 KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE); 01296 } 01297 01298 return NULL; 01299 } 01300 01301 gotpool: 01302 01303 // 01304 // Update the count of available resident pages. 01305 // 01306 01307 MmResidentAvailablePages -= SizeInPages; 01308 MM_BUMP_COUNTER(0, SizeInPages); 01309 01310 // 01311 // Charge commitment as non paged pool uses physical memory. 01312 // 01313 01314 MM_TRACK_COMMIT (MM_DBG_COMMIT_NONPAGED_POOL_EXPANSION, SizeInPages); 01315 01316 MiChargeCommitmentCantExpand (SizeInPages, TRUE); 01317 01318 // 01319 // Expand the pool. 01320 // 01321 01322 PointerPte = StartingPte; 01323 TempPte = ValidKernelPte; 01324 MmAllocatedNonPagedPool += SizeInPages; 01325 i = SizeInPages; 01326 01327 do { 01328 PageFrameIndex = MiRemoveAnyPage ( 01329 MI_GET_PAGE_COLOR_FROM_PTE (PointerPte)); 01330 01331 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 01332 01333 Pfn1->u3.e2.ReferenceCount = 1; 01334 Pfn1->u2.ShareCount = 1; 01335 Pfn1->PteAddress = PointerPte; 01336 Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE; 01337 Pfn1->PteFrame = MI_GET_PAGE_FRAME_FROM_PTE (MiGetPteAddress(PointerPte)); 01338 01339 Pfn1->u3.e1.PageLocation = ActiveAndValid; 01340 Pfn1->u3.e1.LargeSessionAllocation = 0; 01341 Pfn1->u3.e1.VerifierAllocation = 0; 01342 01343 TempPte.u.Hard.PageFrameNumber = PageFrameIndex; 01344 MI_WRITE_VALID_PTE (PointerPte, TempPte); 01345 PointerPte += 1; 01346 SizeInPages -= 1; 01347 } while (SizeInPages > 0); 01348 01349 Pfn1->u3.e1.EndOfAllocation = 1; 01350 01351 Pfn1 = MI_PFN_ELEMENT (StartingPte->u.Hard.PageFrameNumber); 01352 Pfn1->u3.e1.StartOfAllocation = 1; 01353 01354 ASSERT (Pfn1->u3.e1.VerifierAllocation == 0); 01355 01356 if (PoolType & POOL_VERIFIER_MASK) { 01357 Pfn1->u3.e1.VerifierAllocation = 1; 01358 } 01359 01360 // 01361 // Mark this as a large session allocation in the PFN database. 01362 // 01363 01364 ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0); 01365 01366 if (IsLargeSessionAllocation != 0) { 01367 Pfn1->u3.e1.LargeSessionAllocation = 1; 01368 01369 MiSessionPoolAllocated(MiGetVirtualAddressMappedByPte (StartingPte), 01370 i << PAGE_SHIFT, 01371 NonPagedPool); 01372 } 01373 01374 UNLOCK_PFN2 (OldIrql); 01375 01376 BaseVa = MiGetVirtualAddressMappedByPte (StartingPte); 01377 01378 if (i == 1) { 01379 01380 // 01381 // Map this with KSEG0 if possible. 01382 // 01383 01384 #if defined (_X86_) 01385 if ((PageFrameIndex >= MI_CONVERT_PHYSICAL_TO_PFN(MmSubsectionBase)) && 01386 (PageFrameIndex < MmSubsectionTopPage) && 01387 (MmKseg2Frame != 0)) 01388 #elif defined (_ALPHA_) 01389 if ((PageFrameIndex >= MI_CONVERT_PHYSICAL_TO_PFN(MmSubsectionBase)) && 01390 (PageFrameIndex < MmSubsectionTopPage)) 01391 #else 01392 if (PageFrameIndex < MmSubsectionTopPage) 01393 #endif 01394 { 01395 BaseVa = (PVOID)(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT)); 01396 } 01397 } 01398 01399 return BaseVa; 01400 } 01401 01402 // 01403 // Paged Pool. 01404 // 01405 01406 if ((PoolType & SESSION_POOL_MASK) == 0) { 01407 SessionSpace = (PMM_SESSION_SPACE)0; 01408 PagedPoolInfo = &MmPagedPoolInfo; 01409 } 01410 else { 01411 SessionSpace = MmSessionSpace; 01412 PagedPoolInfo = &SessionSpace->PagedPoolInfo; 01413 } 01414 01415 StartPosition = RtlFindClearBitsAndSet ( 01416 PagedPoolInfo->PagedPoolAllocationMap, 01417 (ULONG)SizeInPages, 01418 PagedPoolInfo->PagedPoolHint 01419 ); 01420 01421 if ((StartPosition == 0xFFFFFFFF) && (PagedPoolInfo->PagedPoolHint != 0)) { 01422 01423 if (MI_UNUSED_SEGMENTS_SURPLUS()) { 01424 KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE); 01425 } 01426 01427 // 01428 // No free bits were found, check from the start of 01429 // the bit map. 01430 01431 StartPosition = RtlFindClearBitsAndSet ( 01432 PagedPoolInfo->PagedPoolAllocationMap, 01433 (ULONG)SizeInPages, 01434 0 01435 ); 01436 } 01437 01438 // 01439 // If start position = -1, no room in pool. Attempt to expand PagedPool. 01440 // 01441 01442 if (StartPosition == 0xFFFFFFFF) { 01443 01444 // 01445 // Attempt to expand the paged pool. 01446 // 01447 01448 StartPosition = (((ULONG)SizeInPages - 1) / PTE_PER_PAGE) + 1; 01449 01450 // 01451 // Make sure there is enough space to create the prototype PTEs. 01452 // 01453 01454 if (((StartPosition - 1) + PagedPoolInfo->NextPdeForPagedPoolExpansion) > 01455 MiGetPteAddress (PagedPoolInfo->LastPteForPagedPool)) { 01456 01457 // 01458 // Can't expand pool any more. If this request is not for session 01459 // pool, force unused segment trimming when appropriate. 01460 // 01461 01462 if (SessionSpace == NULL) { 01463 goto nopool; 01464 } 01465 01466 return NULL; 01467 } 01468 01469 if (SessionSpace) { 01470 TempPte = ValidKernelPdeLocal; 01471 PageTableCount = StartPosition; 01472 } 01473 else { 01474 TempPte = ValidKernelPde; 01475 } 01476 01477 LOCK_PFN (OldIrql); 01478 01479 // 01480 // Make sure we have 1 more than the number of pages 01481 // requested available. 01482 // 01483 01484 if (MmAvailablePages <= StartPosition) { 01485 01486 UNLOCK_PFN (OldIrql); 01487 01488 // 01489 // There are no free physical pages to expand 01490 // paged pool. 01491 // 01492 01493 return NULL; 01494 } 01495 01496 // 01497 // Update the count of available resident pages. 01498 // 01499 01500 MmResidentAvailablePages -= StartPosition; 01501 MM_BUMP_COUNTER(1, StartPosition); 01502 01503 // 01504 // Expand the pool. 01505 // 01506 01507 EndPosition = (ULONG)((PagedPoolInfo->NextPdeForPagedPoolExpansion - 01508 MiGetPteAddress(PagedPoolInfo->FirstPteForPagedPool)) * 01509 PTE_PER_PAGE); 01510 01511 RtlClearBits (PagedPoolInfo->PagedPoolAllocationMap, 01512 EndPosition, 01513 (ULONG) StartPosition * PTE_PER_PAGE); 01514 01515 PointerPte = PagedPoolInfo->NextPdeForPagedPoolExpansion; 01516 StartingPte = (PMMPTE)MiGetVirtualAddressMappedByPte(PointerPte); 01517 PagedPoolInfo->NextPdeForPagedPoolExpansion += StartPosition; 01518 01519 do { 01520 ASSERT (PointerPte->u.Hard.Valid == 0); 01521 01522 MM_TRACK_COMMIT (MM_DBG_COMMIT_PAGED_POOL_PAGETABLE, 1); 01523 01524 MiChargeCommitmentCantExpand (1, TRUE); 01525 01526 PageFrameIndex = MiRemoveAnyPage ( 01527 MI_GET_PAGE_COLOR_FROM_PTE (PointerPte)); 01528 01529 TempPte.u.Hard.PageFrameNumber = PageFrameIndex; 01530 MI_WRITE_VALID_PTE (PointerPte, TempPte); 01531 01532 // 01533 // Map valid PDE into system (or session) address space as well. 01534 // 01535 01536 VirtualAddress = MiGetVirtualAddressMappedByPte (PointerPte); 01537 01538 #if defined (_WIN64) 01539 01540 MiInitializePfn (PageFrameIndex, 01541 PointerPte, 01542 1); 01543 01544 #else 01545 01546 if (SessionSpace) { 01547 01548 Index = (ULONG)(PointerPte - MiGetPdeAddress (MmSessionBase)); 01549 ASSERT (MmSessionSpace->PageTables[Index].u.Long == 0); 01550 MmSessionSpace->PageTables[Index] = TempPte; 01551 01552 MiInitializePfnForOtherProcess (PageFrameIndex, 01553 PointerPte, 01554 MmSessionSpace->SessionPageDirectoryIndex); 01555 01556 MM_BUMP_SESS_COUNTER(MM_DBG_SESSION_PAGEDPOOL_PAGETABLE_ALLOC1, 1); 01557 } 01558 else { 01559 #if !defined (_X86PAE_) 01560 MmSystemPagePtes [((ULONG_PTR)PointerPte & 01561 ((sizeof(MMPTE) * PDE_PER_PAGE) - 1)) / sizeof(MMPTE)] = 01562 TempPte; 01563 MiInitializePfnForOtherProcess (PageFrameIndex, 01564 PointerPte, 01565 MmSystemPageDirectory); 01566 #else 01567 MmSystemPagePtes [((ULONG_PTR)PointerPte & 01568 (PD_PER_SYSTEM * (sizeof(MMPTE) * PDE_PER_PAGE) - 1)) / sizeof(MMPTE)] = 01569 TempPte; 01570 MiInitializePfnForOtherProcess (PageFrameIndex, 01571 PointerPte, 01572 MmSystemPageDirectory[(PointerPte - MiGetPdeAddress(0)) / PDE_PER_PAGE]); 01573 #endif 01574 } 01575 #endif 01576 01577 KeFillEntryTb ((PHARDWARE_PTE) PointerPte, VirtualAddress, FALSE); 01578 01579 MiFillMemoryPte (StartingPte, 01580 PAGE_SIZE, 01581 MM_KERNEL_NOACCESS_PTE); 01582 01583 PointerPte += 1; 01584 StartingPte += PAGE_SIZE / sizeof(MMPTE); 01585 StartPosition -= 1; 01586 } while (StartPosition > 0); 01587 01588 UNLOCK_PFN (OldIrql); 01589 01590 if (SessionSpace) { 01591 01592 PointerPte -= PageTableCount; 01593 01594 LOCK_SESSION_SPACE_WS (SessionIrql); 01595 01596 MmSessionSpace->NonPagablePages += PageTableCount; 01597 MmSessionSpace->CommittedPages += PageTableCount; 01598 01599 do { 01600 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 01601 01602 ASSERT (Pfn1->u1.Event == 0); 01603 Pfn1->u1.Event = (PVOID) PsGetCurrentThread (); 01604 01605 SessionPte = MiGetVirtualAddressMappedByPte (PointerPte); 01606 01607 MiAddValidPageToWorkingSet (SessionPte, 01608 PointerPte, 01609 Pfn1, 01610 0); 01611 01612 WsEntry = MiLocateWsle (SessionPte, 01613 MmSessionSpace->Vm.VmWorkingSetList, 01614 Pfn1->u1.WsIndex); 01615 01616 if (WsEntry >= MmSessionSpace->Vm.VmWorkingSetList->FirstDynamic) { 01617 01618 WsSwapEntry = MmSessionSpace->Vm.VmWorkingSetList->FirstDynamic; 01619 01620 if (WsEntry != MmSessionSpace->Vm.VmWorkingSetList->FirstDynamic) { 01621 01622 // 01623 // Swap this entry with the one at first dynamic. 01624 // 01625 01626 MiSwapWslEntries (WsEntry, WsSwapEntry, &MmSessionSpace->Vm); 01627 } 01628 01629 MmSessionSpace->Vm.VmWorkingSetList->FirstDynamic += 1; 01630 } 01631 else { 01632 WsSwapEntry = WsEntry; 01633 } 01634 01635 // 01636 // Indicate that the page is locked. 01637 // 01638 01639 MmSessionSpace->Wsle[WsSwapEntry].u1.e1.LockedInWs = 1; 01640 01641 PointerPte += 1; 01642 PageTableCount -= 1; 01643 } while (PageTableCount > 0); 01644 UNLOCK_SESSION_SPACE_WS (SessionIrql); 01645 } 01646 01647 StartPosition = RtlFindClearBitsAndSet ( 01648 PagedPoolInfo->PagedPoolAllocationMap, 01649 (ULONG)SizeInPages, 01650 EndPosition 01651 ); 01652 01653 ASSERT (StartPosition != 0xffffffff); 01654 } 01655 01656 // 01657 // This is paged pool, the start and end can't be saved 01658 // in the PFN database as the page isn't always resident 01659 // in memory. The ideal place to save the start and end 01660 // would be in the prototype PTE, but there are no free 01661 // bits. To solve this problem, a bitmap which parallels 01662 // the allocation bitmap exists which contains set bits 01663 // in the positions where an allocation ends. This 01664 // allows pages to be deallocated with only their starting 01665 // address. 01666 // 01667 // For sanity's sake, the starting address can be verified 01668 // from the 2 bitmaps as well. If the page before the starting 01669 // address is not allocated (bit is zero in allocation bitmap) 01670 // then this page is obviously a start of an allocation block. 01671 // If the page before is allocated and the other bit map does 01672 // not indicate the previous page is the end of an allocation, 01673 // then the starting address is wrong and a bug check should 01674 // be issued. 01675 // 01676 01677 if (SizeInPages == 1) { 01678 PagedPoolInfo->PagedPoolHint = StartPosition + (ULONG)SizeInPages; 01679 } 01680 01681 if (MiChargeCommitmentCantExpand (SizeInPages, FALSE) == FALSE) { 01682 Thread = PsGetCurrentThread (); 01683 if (MI_MEMORY_MAKER(Thread)) { 01684 MiChargeCommitmentCantExpand (SizeInPages, TRUE); 01685 } 01686 else { 01687 RtlClearBits (PagedPoolInfo->PagedPoolAllocationMap, 01688 StartPosition, 01689 (ULONG)SizeInPages); 01690 01691 // 01692 // Could not commit the page(s), return NULL indicating 01693 // no pool was allocated. Note that the lack of commit may be due 01694 // to unused segments and the MmSharedCommit, prototype PTEs, etc 01695 // associated with them. So force a reduction now. 01696 // 01697 01698 MiIssuePageExtendRequestNoWait (SizeInPages); 01699 01700 SignalDereferenceThread = FALSE; 01701 LOCK_PFN (OldIrql); 01702 if (MmUnusedSegmentForceFree == 0) { 01703 if (!IsListEmpty(&MmUnusedSegmentList)) { 01704 SignalDereferenceThread = TRUE; 01705 MmUnusedSegmentForceFree = 30; 01706 } 01707 } 01708 UNLOCK_PFN (OldIrql); 01709 if (SignalDereferenceThread == TRUE) { 01710 KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE); 01711 } 01712 01713 return NULL; 01714 } 01715 } 01716 01717 MM_TRACK_COMMIT (MM_DBG_COMMIT_PAGED_POOL_PAGES, SizeInPages); 01718 01719 if (SessionSpace) { 01720 LOCK_SESSION_SPACE_WS (OldIrql); 01721 SessionSpace->CommittedPages += SizeInPages; 01722 MM_BUMP_SESS_COUNTER(MM_DBG_SESSION_COMMIT_PAGEDPOOL_PAGES, SizeInPages); 01723 UNLOCK_SESSION_SPACE_WS (OldIrql); 01724 BaseVa = (PVOID)((PCHAR)SessionSpace->PagedPoolStart + 01725 (StartPosition << PAGE_SHIFT)); 01726 } 01727 else { 01728 MmPagedPoolCommit += (ULONG)SizeInPages; 01729 BaseVa = (PVOID)((PUCHAR)MmPageAlignedPoolBase[PagedPool] + 01730 (StartPosition << PAGE_SHIFT)); 01731 } 01732 01733 #if DBG 01734 PointerPte = MiGetPteAddress (BaseVa); 01735 for (i = 0; i < SizeInPages; i += 1) { 01736 if (*(ULONG *)PointerPte != MM_KERNEL_NOACCESS_PTE) { 01737 DbgPrint("MiAllocatePoolPages: PP not zero PTE (%x %x %x)\n", 01738 BaseVa, PointerPte, *PointerPte); 01739 DbgBreakPoint(); 01740 } 01741 PointerPte += 1; 01742 } 01743 #endif 01744 PointerPte = MiGetPteAddress (BaseVa); 01745 MiFillMemoryPte (PointerPte, 01746 SizeInPages * sizeof(MMPTE), 01747 MM_KERNEL_DEMAND_ZERO_PTE); 01748 01749 PagedPoolInfo->PagedPoolCommit += SizeInPages; 01750 EndPosition = StartPosition + (ULONG)SizeInPages - 1; 01751 RtlSetBits (PagedPoolInfo->EndOfPagedPoolBitmap, EndPosition, 1L); 01752 01753 // 01754 // Mark this as a large session allocation in the PFN database. 01755 // 01756 01757 if (IsLargeSessionAllocation != 0) { 01758 RtlSetBits (PagedPoolInfo->PagedPoolLargeSessionAllocationMap, 01759 StartPosition, 01760 1L); 01761 01762 MiSessionPoolAllocated (BaseVa, 01763 SizeInPages << PAGE_SHIFT, 01764 PagedPool); 01765 } 01766 else if (PoolType & POOL_VERIFIER_MASK) { 01767 RtlSetBits (VerifierLargePagedPoolMap, 01768 StartPosition, 01769 1L); 01770 } 01771 01772 PagedPoolInfo->AllocatedPagedPool += SizeInPages; 01773 01774 return BaseVa; 01775 } 01776 01777 ULONG 01778 MiFreePoolPages ( 01779 IN PVOID StartingAddress 01780 ) 01781 01782 /*++ 01783 01784 Routine Description: 01785 01786 This function returns a set of pages back to the pool from 01787 which they were obtained. Once the pages have been deallocated 01788 the region provided by the allocation becomes available for 01789 allocation to other callers, i.e. any data in the region is now 01790 trashed and cannot be referenced. 01791 01792 Arguments: 01793 01794 StartingAddress - Supplies the starting address which was returned 01795 in a previous call to MiAllocatePoolPages. 01796 01797 Return Value: 01798 01799 Returns the number of pages deallocated. 01800 01801 Environment: 01802 01803 These functions are used by the general pool allocation routines 01804 and should not be called directly. 01805 01806 Mutexes guarding the pool databases must be held when calling 01807 these functions. 01808 01809 --*/ 01810 01811 { 01812 ULONG StartPosition; 01813 ULONG Index; 01814 PFN_NUMBER i; 01815 PFN_NUMBER NumberOfPages; 01816 POOL_TYPE PoolType; 01817 PMMPTE PointerPte; 01818 PMMPFN Pfn1; 01819 PFN_NUMBER PageFrameIndex; 01820 KIRQL OldIrql; 01821 ULONG IsLargeSessionAllocation; 01822 ULONG IsLargeVerifierAllocation; 01823 PMMFREE_POOL_ENTRY Entry; 01824 PMMFREE_POOL_ENTRY NextEntry; 01825 PMM_PAGED_POOL_INFO PagedPoolInfo; 01826 PMM_SESSION_SPACE SessionSpace; 01827 LOGICAL SessionAllocation; 01828 MMPTE NoAccessPte; 01829 PFN_NUMBER PagesFreed; 01830 01831 NumberOfPages = 1; 01832 01833 // 01834 // Determine Pool type base on the virtual address of the block 01835 // to deallocate. 01836 // 01837 // This assumes NonPagedPool starts at a higher virtual address 01838 // then PagedPool. 01839 // 01840 01841 if ((StartingAddress >= MmPagedPoolStart) && 01842 (StartingAddress <= MmPagedPoolEnd)) { 01843 PoolType = PagedPool; 01844 SessionSpace = NULL; 01845 PagedPoolInfo = &MmPagedPoolInfo; 01846 StartPosition = (ULONG)(((PCHAR)StartingAddress - 01847 (PCHAR)MmPageAlignedPoolBase[PoolType]) >> PAGE_SHIFT); 01848 } 01849 else if (MI_IS_SESSION_POOL_ADDRESS (StartingAddress) == TRUE) { 01850 ASSERT (MiHydra == TRUE); 01851 PoolType = PagedPool; 01852 SessionSpace = MmSessionSpace; 01853 ASSERT (SessionSpace); 01854 PagedPoolInfo = &SessionSpace->PagedPoolInfo; 01855 StartPosition = (ULONG)(((PCHAR)StartingAddress - 01856 (PCHAR)SessionSpace->PagedPoolStart) >> PAGE_SHIFT); 01857 } 01858 else { 01859 01860 if (StartingAddress < MM_SYSTEM_RANGE_START) { 01861 KeBugCheckEx (BAD_POOL_CALLER, 01862 0x40, 01863 (ULONG_PTR)StartingAddress, 01864 (ULONG_PTR)MM_SYSTEM_RANGE_START, 01865 0); 01866 } 01867 01868 PoolType = NonPagedPool; 01869 SessionSpace = NULL; 01870 PagedPoolInfo = &MmPagedPoolInfo; 01871 StartPosition = (ULONG)(((PCHAR)StartingAddress - 01872 (PCHAR)MmPageAlignedPoolBase[PoolType]) >> PAGE_SHIFT); 01873 } 01874 01875 // 01876 // Check to ensure this page is really the start of an allocation. 01877 // 01878 01879 if (PoolType == NonPagedPool) { 01880 01881 if (StartPosition < MmMustSucceedPoolBitPosition) { 01882 01883 PULONG_PTR NextList; 01884 01885 // 01886 // This is must succeed pool, don't free it, just 01887 // add it to the front of the list. 01888 // 01889 // Note - only a single page can be released at a time. 01890 // 01891 01892 if (MI_IS_PHYSICAL_ADDRESS(StartingAddress)) { 01893 PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (StartingAddress); 01894 } else { 01895 PointerPte = MiGetPteAddress(StartingAddress); 01896 ASSERT (PointerPte->u.Hard.Valid == 1); 01897 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 01898 } 01899 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 01900 01901 if (Pfn1->u3.e1.VerifierAllocation == 1) { 01902 Pfn1->u3.e1.VerifierAllocation = 0; 01903 VerifierFreeTrackedPool (StartingAddress, 01904 PAGE_SIZE, 01905 NonPagedPool, 01906 FALSE); 01907 } 01908 01909 // 01910 // Check for this being a large session allocation. If it is, 01911 // we need to return the pool charge accordingly. 01912 // 01913 01914 if (Pfn1->u3.e1.LargeSessionAllocation) { 01915 Pfn1->u3.e1.LargeSessionAllocation = 0; 01916 MiSessionPoolFreed (StartingAddress, 01917 PAGE_SIZE, 01918 NonPagedPool); 01919 } 01920 01921 NextList = (PULONG_PTR)StartingAddress; 01922 *NextList = (ULONG_PTR)MmNonPagedMustSucceed; 01923 MmNonPagedMustSucceed = StartingAddress; 01924 return (ULONG)NumberOfPages; 01925 } 01926 01927 if (MI_IS_PHYSICAL_ADDRESS (StartingAddress)) { 01928 01929 // 01930 // On certain architectures, virtual addresses 01931 // may be physical and hence have no corresponding PTE. 01932 // 01933 01934 Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (StartingAddress)); 01935 if (StartPosition >= MmExpandedPoolBitPosition) { 01936 PointerPte = Pfn1->PteAddress; 01937 StartingAddress = MiGetVirtualAddressMappedByPte (PointerPte); 01938 } 01939 } else { 01940 PointerPte = MiGetPteAddress (StartingAddress); 01941 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 01942 } 01943 01944 if (Pfn1->u3.e1.StartOfAllocation == 0) { 01945 KeBugCheckEx (BAD_POOL_CALLER, 01946 0x41, 01947 (ULONG_PTR)StartingAddress, 01948 (ULONG_PTR)(Pfn1 - MmPfnDatabase), 01949 MmHighestPhysicalPage); 01950 } 01951 01952 CONSISTENCY_LOCK_PFN2 (OldIrql); 01953 01954 ASSERT (Pfn1->PteFrame != MI_MAGIC_AWE_PTEFRAME); 01955 01956 IsLargeVerifierAllocation = Pfn1->u3.e1.VerifierAllocation; 01957 IsLargeSessionAllocation = Pfn1->u3.e1.LargeSessionAllocation; 01958 01959 Pfn1->u3.e1.StartOfAllocation = 0; 01960 Pfn1->u3.e1.VerifierAllocation = 0; 01961 Pfn1->u3.e1.LargeSessionAllocation = 0; 01962 01963 CONSISTENCY_UNLOCK_PFN2 (OldIrql); 01964 01965 #if DBG 01966 if ((Pfn1->u3.e2.ReferenceCount > 1) && 01967 (Pfn1->u3.e1.WriteInProgress == 0)) { 01968 DbgPrint ("MM: MiFreePoolPages - deleting pool locked for I/O %lx\n", 01969 Pfn1); 01970 ASSERT (Pfn1->u3.e2.ReferenceCount == 1); 01971 } 01972 #endif //DBG 01973 01974 // 01975 // Find end of allocation and release the pages. 01976 // 01977 01978 while (Pfn1->u3.e1.EndOfAllocation == 0) { 01979 if (MI_IS_PHYSICAL_ADDRESS(StartingAddress)) { 01980 Pfn1 += 1; 01981 } else { 01982 PointerPte += 1; 01983 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 01984 } 01985 NumberOfPages += 1; 01986 #if DBG 01987 if ((Pfn1->u3.e2.ReferenceCount > 1) && 01988 (Pfn1->u3.e1.WriteInProgress == 0)) { 01989 DbgPrint ("MM:MiFreePoolPages - deleting pool locked for I/O %lx\n", 01990 Pfn1); 01991 ASSERT (Pfn1->u3.e2.ReferenceCount == 1); 01992 } 01993 #endif //DBG 01994 } 01995 01996 MmAllocatedNonPagedPool -= NumberOfPages; 01997 01998 if (IsLargeVerifierAllocation != 0) { 01999 VerifierFreeTrackedPool (StartingAddress, 02000 NumberOfPages << PAGE_SHIFT, 02001 NonPagedPool, 02002 FALSE); 02003 } 02004 02005 if (IsLargeSessionAllocation != 0) { 02006 MiSessionPoolFreed (StartingAddress, 02007 NumberOfPages << PAGE_SHIFT, 02008 NonPagedPool); 02009 } 02010 02011 CONSISTENCY_LOCK_PFN2 (OldIrql); 02012 02013 Pfn1->u3.e1.EndOfAllocation = 0; 02014 02015 CONSISTENCY_UNLOCK_PFN2 (OldIrql); 02016 02017 #if DBG 02018 if (MiFillFreedPool != 0) { 02019 RtlFillMemoryUlong (StartingAddress, 02020 PAGE_SIZE * NumberOfPages, 02021 MiFillFreedPool); 02022 } 02023 #endif //DBG 02024 02025 if (StartingAddress > MmNonPagedPoolExpansionStart) { 02026 02027 // 02028 // This page was from the expanded pool, should 02029 // it be freed? 02030 // 02031 // NOTE: all pages in the expanded pool area have PTEs 02032 // so no physical address checks need to be performed. 02033 // 02034 02035 if ((NumberOfPages > 3) || (MmNumberOfFreeNonPagedPool > 5)) { 02036 02037 // 02038 // Free these pages back to the free page list. 02039 // 02040 02041 MiFreeNonPagedPool (StartingAddress, NumberOfPages); 02042 02043 return (ULONG)NumberOfPages; 02044 } 02045 } 02046 02047 // 02048 // Add the pages to the list of free pages. 02049 // 02050 02051 MmNumberOfFreeNonPagedPool += NumberOfPages; 02052 02053 // 02054 // Check to see if the next allocation is free. 02055 // We cannot walk off the end of nonpaged initial or expansion 02056 // pages as the highest initial allocation is never freed and 02057 // the highest expansion allocation is guard-paged. 02058 // 02059 02060 i = NumberOfPages; 02061 02062 ASSERT (MiEndOfInitialPoolFrame != 0); 02063 02064 if ((PFN_NUMBER)(Pfn1 - MmPfnDatabase) == MiEndOfInitialPoolFrame) { 02065 PointerPte += 1; 02066 Pfn1 = NULL; 02067 } 02068 else if (MI_IS_PHYSICAL_ADDRESS(StartingAddress)) { 02069 Pfn1 += 1; 02070 ASSERT ((PCHAR)StartingAddress + NumberOfPages < (PCHAR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes); 02071 } else { 02072 PointerPte += 1; 02073 ASSERT ((PCHAR)StartingAddress + NumberOfPages <= (PCHAR)MmNonPagedPoolEnd); 02074 02075 // 02076 // Unprotect the previously freed pool so it can be merged. 02077 // 02078 02079 if (MmProtectFreedNonPagedPool == TRUE) { 02080 MiUnProtectFreeNonPagedPool ( 02081 (PVOID)MiGetVirtualAddressMappedByPte(PointerPte), 02082 0); 02083 } 02084 02085 if (PointerPte->u.Hard.Valid == 1) { 02086 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 02087 } else { 02088 Pfn1 = NULL; 02089 } 02090 } 02091 02092 if ((Pfn1 != NULL) && (Pfn1->u3.e1.StartOfAllocation == 0)) { 02093 02094 // 02095 // This range of pages is free. Remove this entry 02096 // from the list and add these pages to the current 02097 // range being freed. 02098 // 02099 02100 Entry = (PMMFREE_POOL_ENTRY)((PCHAR)StartingAddress 02101 + (NumberOfPages << PAGE_SHIFT)); 02102 ASSERT (Entry->Signature == MM_FREE_POOL_SIGNATURE); 02103 ASSERT (Entry->Owner == Entry); 02104 #if DBG 02105 { 02106 PMMPTE DebugPte; 02107 PMMPFN DebugPfn; 02108 02109 DebugPfn = NULL; 02110 02111 if (MI_IS_PHYSICAL_ADDRESS(StartingAddress)) { 02112 02113 // 02114 // On certain architectures, virtual addresses 02115 // may be physical and hence have no corresponding PTE. 02116 // 02117 02118 DebugPfn = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (Entry)); 02119 DebugPfn += Entry->Size; 02120 if ((PFN_NUMBER)((DebugPfn - 1) - MmPfnDatabase) != MiEndOfInitialPoolFrame) { 02121 ASSERT (DebugPfn->u3.e1.StartOfAllocation == 1); 02122 } 02123 } else { 02124 DebugPte = PointerPte + Entry->Size; 02125 if ((DebugPte-1)->u.Hard.Valid == 1) { 02126 DebugPfn = MI_PFN_ELEMENT ((DebugPte-1)->u.Hard.PageFrameNumber); 02127 if ((PFN_NUMBER)(DebugPfn - MmPfnDatabase) != MiEndOfInitialPoolFrame) { 02128 if (DebugPte->u.Hard.Valid == 1) { 02129 DebugPfn = MI_PFN_ELEMENT (DebugPte->u.Hard.PageFrameNumber); 02130 ASSERT (DebugPfn->u3.e1.StartOfAllocation == 1); 02131 } 02132 } 02133 02134 } 02135 } 02136 } 02137 #endif //DBG 02138 02139 i += Entry->Size; 02140 if (MmProtectFreedNonPagedPool == FALSE) { 02141 RemoveEntryList (&Entry->List); 02142 } 02143 else { 02144 MiProtectedPoolRemoveEntryList (&Entry->List); 02145 } 02146 } 02147 02148 // 02149 // Check to see if the previous page is the end of an allocation. 02150 // If it is not the end of an allocation, it must be free and 02151 // therefore this allocation can be tagged onto the end of 02152 // that allocation. 02153 // 02154 // We cannot walk off the beginning of expansion pool because it is 02155 // guard-paged. If the initial pool is superpaged instead, we are also 02156 // safe as the must succeed pages always have EndOfAllocation set. 02157 // 02158 02159 Entry = (PMMFREE_POOL_ENTRY)StartingAddress; 02160 02161 if (MI_IS_PHYSICAL_ADDRESS(StartingAddress)) { 02162 ASSERT (StartingAddress != MmNonPagedPoolStart); 02163 02164 Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN ( 02165 (PVOID)((PCHAR)Entry - PAGE_SIZE))); 02166 02167 } else { 02168 PointerPte -= NumberOfPages + 1; 02169 02170 // 02171 // Unprotect the previously freed pool so it can be merged. 02172 // 02173 02174 if (MmProtectFreedNonPagedPool == TRUE) { 02175 MiUnProtectFreeNonPagedPool ( 02176 (PVOID)MiGetVirtualAddressMappedByPte(PointerPte), 02177 0); 02178 } 02179 02180 if (PointerPte->u.Hard.Valid == 1) { 02181 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 02182 } else { 02183 Pfn1 = NULL; 02184 } 02185 } 02186 if (Pfn1 != NULL) { 02187 if (Pfn1->u3.e1.EndOfAllocation == 0) { 02188 02189 // 02190 // This range of pages is free, add these pages to 02191 // this entry. The owner field points to the address 02192 // of the list entry which is linked into the free pool 02193 // pages list. 02194 // 02195 02196 Entry = (PMMFREE_POOL_ENTRY)((PCHAR)StartingAddress - PAGE_SIZE); 02197 ASSERT (Entry->Signature == MM_FREE_POOL_SIGNATURE); 02198 Entry = Entry->Owner; 02199 02200 // 02201 // Unprotect the previously freed pool so we can merge it 02202 // 02203 02204 if (MmProtectFreedNonPagedPool == TRUE) { 02205 MiUnProtectFreeNonPagedPool ((PVOID)Entry, 0); 02206 } 02207 02208 // 02209 // If this entry became larger than MM_SMALL_ALLOCATIONS 02210 // pages, move it to the tail of the list. This keeps the 02211 // small allocations at the front of the list. 02212 // 02213 02214 if (Entry->Size < MI_MAX_FREE_LIST_HEADS - 1) { 02215 02216 if (MmProtectFreedNonPagedPool == FALSE) { 02217 RemoveEntryList (&Entry->List); 02218 } 02219 else { 02220 MiProtectedPoolRemoveEntryList (&Entry->List); 02221 } 02222 02223 // 02224 // Add these pages to the previous entry. 02225 // 02226 02227 Entry->Size += i; 02228 02229 Index = (ULONG)(Entry->Size - 1); 02230 02231 if (Index >= MI_MAX_FREE_LIST_HEADS) { 02232 Index = MI_MAX_FREE_LIST_HEADS - 1; 02233 } 02234 02235 if (MmProtectFreedNonPagedPool == FALSE) { 02236 InsertTailList (&MmNonPagedPoolFreeListHead[Index], 02237 &Entry->List); 02238 } 02239 else { 02240 MiProtectedPoolInsertList (&MmNonPagedPoolFreeListHead[Index], 02241 &Entry->List, 02242 Entry->Size < MM_SMALL_ALLOCATIONS ? 02243 TRUE : FALSE); 02244 } 02245 } 02246 else { 02247 02248 // 02249 // Add these pages to the previous entry. 02250 // 02251 02252 Entry->Size += i; 02253 } 02254 } 02255 } 02256 02257 if (Entry == (PMMFREE_POOL_ENTRY)StartingAddress) { 02258 02259 // 02260 // This entry was not combined with the previous, insert it 02261 // into the list. 02262 // 02263 02264 Entry->Size = i; 02265 02266 Index = (ULONG)(Entry->Size - 1); 02267 02268 if (Index >= MI_MAX_FREE_LIST_HEADS) { 02269 Index = MI_MAX_FREE_LIST_HEADS - 1; 02270 } 02271 02272 if (MmProtectFreedNonPagedPool == FALSE) { 02273 InsertTailList (&MmNonPagedPoolFreeListHead[Index], 02274 &Entry->List); 02275 } 02276 else { 02277 MiProtectedPoolInsertList (&MmNonPagedPoolFreeListHead[Index], 02278 &Entry->List, 02279 Entry->Size < MM_SMALL_ALLOCATIONS ? 02280 TRUE : FALSE); 02281 } 02282 } 02283 02284 // 02285 // Set the owner field in all these pages. 02286 // 02287 02288 NextEntry = (PMMFREE_POOL_ENTRY)StartingAddress; 02289 while (i > 0) { 02290 NextEntry->Owner = Entry; 02291 #if DBG 02292 NextEntry->Signature = MM_FREE_POOL_SIGNATURE; 02293 #endif 02294 02295 NextEntry = (PMMFREE_POOL_ENTRY)((PCHAR)NextEntry + PAGE_SIZE); 02296 i -= 1; 02297 } 02298 02299 #if DBG 02300 NextEntry = Entry; 02301 for (i = 0; i < Entry->Size; i += 1) { 02302 PMMPTE DebugPte; 02303 PMMPFN DebugPfn; 02304 if (MI_IS_PHYSICAL_ADDRESS(StartingAddress)) { 02305 02306 // 02307 // On certain architectures, virtual addresses 02308 // may be physical and hence have no corresponding PTE. 02309 // 02310 02311 DebugPfn = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (NextEntry)); 02312 } else { 02313 02314 DebugPte = MiGetPteAddress (NextEntry); 02315 DebugPfn = MI_PFN_ELEMENT (DebugPte->u.Hard.PageFrameNumber); 02316 } 02317 ASSERT (DebugPfn->u3.e1.StartOfAllocation == 0); 02318 ASSERT (DebugPfn->u3.e1.EndOfAllocation == 0); 02319 ASSERT (NextEntry->Owner == Entry); 02320 NextEntry = (PMMFREE_POOL_ENTRY)((PCHAR)NextEntry + PAGE_SIZE); 02321 } 02322 #endif 02323 02324 // 02325 // Prevent anyone from touching non paged pool after freeing it. 02326 // 02327 02328 if (MmProtectFreedNonPagedPool == TRUE) { 02329 MiProtectFreeNonPagedPool ((PVOID)Entry, (ULONG)Entry->Size); 02330 } 02331 02332 return (ULONG)NumberOfPages; 02333 02334 } else { 02335 02336 // 02337 // Paged pool. Need to verify start of allocation using 02338 // end of allocation bitmap. 02339 // 02340 02341 if (!RtlCheckBit (PagedPoolInfo->PagedPoolAllocationMap, StartPosition)) { 02342 KeBugCheckEx (BAD_POOL_CALLER, 02343 0x50, 02344 (ULONG_PTR)StartingAddress, 02345 (ULONG_PTR)StartPosition, 02346 MmSizeOfPagedPoolInBytes); 02347 } 02348 02349 #if DBG 02350 if (StartPosition > 0) { 02351 if (RtlCheckBit (PagedPoolInfo->PagedPoolAllocationMap, StartPosition - 1)) { 02352 if (!RtlCheckBit (PagedPoolInfo->EndOfPagedPoolBitmap, StartPosition - 1)) { 02353 02354 // 02355 // In the middle of an allocation... bugcheck. 02356 // 02357 02358 DbgPrint("paged pool in middle of allocation\n"); 02359 KeBugCheckEx (MEMORY_MANAGEMENT, 02360 0x41286, 02361 (ULONG_PTR)PagedPoolInfo->PagedPoolAllocationMap, 02362 (ULONG_PTR)PagedPoolInfo->EndOfPagedPoolBitmap, 02363 StartPosition); 02364 } 02365 } 02366 } 02367 #endif 02368 02369 i = StartPosition; 02370 PointerPte = PagedPoolInfo->FirstPteForPagedPool + i; 02371 02372 // 02373 // Find the last allocated page and check to see if any 02374 // of the pages being deallocated are in the paging file. 02375 // 02376 02377 while (!RtlCheckBit (PagedPoolInfo->EndOfPagedPoolBitmap, i)) { 02378 NumberOfPages += 1; 02379 i += 1; 02380 } 02381 02382 NoAccessPte.u.Long = MM_KERNEL_NOACCESS_PTE; 02383 02384 if (SessionSpace) { 02385 02386 // 02387 // This is needed purely to verify no one leaks pool. This 02388 // could be removed if we believe everyone was good. 02389 // 02390 02391 if (RtlCheckBit (PagedPoolInfo->PagedPoolLargeSessionAllocationMap, 02392 StartPosition)) { 02393 02394 RtlClearBits (PagedPoolInfo->PagedPoolLargeSessionAllocationMap, 02395 StartPosition, 02396 1L); 02397 02398 MiSessionPoolFreed (MiGetVirtualAddressMappedByPte (PointerPte), 02399 NumberOfPages << PAGE_SHIFT, 02400 PagedPool); 02401 } 02402 02403 SessionAllocation = TRUE; 02404 } 02405 else { 02406 SessionAllocation = FALSE; 02407 02408 if (VerifierLargePagedPoolMap) { 02409 02410 if (RtlCheckBit (VerifierLargePagedPoolMap, StartPosition)) { 02411 02412 RtlClearBits (VerifierLargePagedPoolMap, 02413 StartPosition, 02414 1L); 02415 02416 VerifierFreeTrackedPool (MiGetVirtualAddressMappedByPte (PointerPte), 02417 NumberOfPages << PAGE_SHIFT, 02418 PagedPool, 02419 FALSE); 02420 } 02421 } 02422 } 02423 02424 PagesFreed = MiDeleteSystemPagableVm (PointerPte, 02425 NumberOfPages, 02426 NoAccessPte, 02427 SessionAllocation, 02428 NULL); 02429 02430 ASSERT (PagesFreed == NumberOfPages); 02431 02432 if (SessionSpace) { 02433 LOCK_SESSION_SPACE_WS (OldIrql); 02434 MmSessionSpace->CommittedPages -= NumberOfPages; 02435 02436 MM_BUMP_SESS_COUNTER(MM_DBG_SESSION_COMMIT_POOL_FREED, 02437 NumberOfPages); 02438 02439 UNLOCK_SESSION_SPACE_WS (OldIrql); 02440 } 02441 else { 02442 MmPagedPoolCommit -= (ULONG)NumberOfPages; 02443 } 02444 02445 MiReturnCommitment (NumberOfPages); 02446 02447 MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_PAGED_POOL_PAGES, NumberOfPages); 02448 02449 // 02450 // Clear the end of allocation bit in the bit map. 02451 // 02452 02453 RtlClearBits (PagedPoolInfo->EndOfPagedPoolBitmap, (ULONG)i, 1L); 02454 02455 PagedPoolInfo->PagedPoolCommit -= NumberOfPages; 02456 PagedPoolInfo->AllocatedPagedPool -= NumberOfPages; 02457 02458 // 02459 // Clear the allocation bits in the bit map. 02460 // 02461 02462 RtlClearBits (PagedPoolInfo->PagedPoolAllocationMap, 02463 StartPosition, 02464 (ULONG)NumberOfPages 02465 ); 02466 02467 if (StartPosition < PagedPoolInfo->PagedPoolHint) { 02468 PagedPoolInfo->PagedPoolHint = StartPosition; 02469 } 02470 02471 return (ULONG)NumberOfPages; 02472 } 02473 } 02474 02475 VOID 02476 MiInitializeNonPagedPool ( 02477 VOID 02478 ) 02479 02480 /*++ 02481 02482 Routine Description: 02483 02484 This function initializes the NonPaged pool. 02485 02486 NonPaged Pool is linked together through the pages. 02487 02488 Arguments: 02489 02490 None. 02491 02492 Return Value: 02493 02494 None. 02495 02496 Environment: 02497 02498 Kernel mode, during initialization. 02499 02500 --*/ 02501 02502 { 02503 ULONG PagesInPool; 02504 ULONG Size; 02505 ULONG Index; 02506 PMMFREE_POOL_ENTRY FreeEntry; 02507 PMMFREE_POOL_ENTRY FirstEntry; 02508 PMMPTE PointerPte; 02509 PFN_NUMBER i; 02510 PULONG_PTR ThisPage; 02511 PULONG_PTR NextPage; 02512 PVOID EndOfInitialPool; 02513 PFN_NUMBER PageFrameIndex; 02514 02515 PAGED_CODE(); 02516 02517 // 02518 // Initialize the list heads for free pages. 02519 // 02520 02521 for (Index = 0; Index < MI_MAX_FREE_LIST_HEADS; Index += 1) { 02522 InitializeListHead (&MmNonPagedPoolFreeListHead[Index]); 02523 } 02524 02525 // 02526 // Initialize the must succeed pool (this occupies the first 02527 // pages of the pool area). 02528 // 02529 02530 // 02531 // Allocate NonPaged pool for the NonPagedPoolMustSucceed pool. 02532 // 02533 02534 MmNonPagedMustSucceed = (PCHAR)MmNonPagedPoolStart; 02535 02536 i = MmSizeOfNonPagedMustSucceed - PAGE_SIZE; 02537 02538 MmMustSucceedPoolBitPosition = BYTES_TO_PAGES(MmSizeOfNonPagedMustSucceed); 02539 02540 ThisPage = (PULONG_PTR)MmNonPagedMustSucceed; 02541 02542 while (i > 0) { 02543 NextPage = (PULONG_PTR)((PCHAR)ThisPage + PAGE_SIZE); 02544 *ThisPage = (ULONG_PTR)NextPage; 02545 ThisPage = NextPage; 02546 i -= PAGE_SIZE; 02547 } 02548 *ThisPage = 0; 02549 02550 // 02551 // Set up the remaining pages as non paged pool pages. 02552 // 02553 02554 ASSERT ((MmSizeOfNonPagedMustSucceed & (PAGE_SIZE - 1)) == 0); 02555 FreeEntry = (PMMFREE_POOL_ENTRY)((PCHAR)MmNonPagedPoolStart + 02556 MmSizeOfNonPagedMustSucceed); 02557 FirstEntry = FreeEntry; 02558 02559 PagesInPool = BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes - 02560 MmSizeOfNonPagedMustSucceed); 02561 02562 // 02563 // Set the location of expanded pool. 02564 // 02565 02566 MmExpandedPoolBitPosition = BYTES_TO_PAGES (MmSizeOfNonPagedPoolInBytes); 02567 02568 MmNumberOfFreeNonPagedPool = PagesInPool; 02569 02570 Index = (ULONG)(MmNumberOfFreeNonPagedPool - 1); 02571 if (Index >= MI_MAX_FREE_LIST_HEADS) { 02572 Index = MI_MAX_FREE_LIST_HEADS - 1; 02573 } 02574 02575 InsertHeadList (&MmNonPagedPoolFreeListHead[Index], &FreeEntry->List); 02576 02577 FreeEntry->Size = PagesInPool; 02578 #if DBG 02579 FreeEntry->Signature = MM_FREE_POOL_SIGNATURE; 02580 #endif 02581 FreeEntry->Owner = FirstEntry; 02582 02583 while (PagesInPool > 1) { 02584 FreeEntry = (PMMFREE_POOL_ENTRY)((PCHAR)FreeEntry + PAGE_SIZE); 02585 #if DBG 02586 FreeEntry->Signature = MM_FREE_POOL_SIGNATURE; 02587 #endif 02588 FreeEntry->Owner = FirstEntry; 02589 PagesInPool -= 1; 02590 } 02591 02592 // 02593 // Set the last nonpaged pool PFN so coalescing on free doesn't go 02594 // past the end of the initial pool. 02595 // 02596 02597 EndOfInitialPool = (PVOID)((ULONG_PTR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes - 1); 02598 02599 if (MI_IS_PHYSICAL_ADDRESS(EndOfInitialPool)) { 02600 PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (EndOfInitialPool); 02601 } else { 02602 PointerPte = MiGetPteAddress(EndOfInitialPool); 02603 ASSERT (PointerPte->u.Hard.Valid == 1); 02604 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 02605 } 02606 MiEndOfInitialPoolFrame = PageFrameIndex; 02607 02608 // 02609 // Set up the system PTEs for nonpaged pool expansion. 02610 // 02611 02612 PointerPte = MiGetPteAddress (MmNonPagedPoolExpansionStart); 02613 ASSERT (PointerPte->u.Hard.Valid == 0); 02614 02615 Size = BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - 02616 MmSizeOfNonPagedPoolInBytes); 02617 02618 // 02619 // Insert a guard PTE at the bottom of expanded nonpaged pool. 02620 // 02621 02622 Size -= 1; 02623 PointerPte += 1; 02624 02625 MiInitializeSystemPtes (PointerPte, 02626 Size, 02627 NonPagedPoolExpansion 02628 ); 02629 02630 // 02631 // A guard PTE is built at the top by our caller. This allows us to 02632 // freely increment virtual addresses in MiFreePoolPages and just check 02633 // for a blank PTE. 02634 // 02635 } 02636 02637 #if DBG || (i386 && !FPO) 02638 02639 // 02640 // This only works on checked builds, because the TraceLargeAllocs array is 02641 // kept in that case to keep track of page size pool allocations. Otherwise 02642 // we will call ExpSnapShotPoolPages with a page size pool allocation containing 02643 // arbitrary data and it will potentially go off in the weeds trying to 02644 // interpret it as a suballocated pool page. Ideally, there would be another 02645 // bit map that identified single page pool allocations so 02646 // ExpSnapShotPoolPages would NOT be called for those. 02647 // 02648 02649 NTSTATUS 02650 MmSnapShotPool( 02651 IN POOL_TYPE PoolType, 02652 IN PMM_SNAPSHOT_POOL_PAGE SnapShotPoolPage, 02653 IN PSYSTEM_POOL_INFORMATION PoolInformation, 02654 IN ULONG Length, 02655 IN OUT PULONG RequiredLength 02656 ) 02657 { 02658 ULONG Index; 02659 NTSTATUS Status; 02660 NTSTATUS xStatus; 02661 PCHAR p, pStart; 02662 PVOID *pp; 02663 ULONG Size; 02664 ULONG BusyFlag; 02665 ULONG CurrentPage, NumberOfPages; 02666 PSYSTEM_POOL_ENTRY PoolEntryInfo; 02667 PLIST_ENTRY Entry; 02668 PMMFREE_POOL_ENTRY FreePageInfo; 02669 ULONG StartPosition; 02670 PMMPTE PointerPte; 02671 PMMPFN Pfn1; 02672 LOGICAL NeedsReprotect; 02673 02674 Status = STATUS_SUCCESS; 02675 PoolEntryInfo = &PoolInformation->Entries[0]; 02676 02677 if (PoolType == PagedPool) { 02678 PoolInformation->TotalSize = (PCHAR)MmPagedPoolEnd - 02679 (PCHAR)MmPagedPoolStart; 02680 PoolInformation->FirstEntry = MmPagedPoolStart; 02681 p = MmPagedPoolStart; 02682 CurrentPage = 0; 02683 while (p < (PCHAR)MmPagedPoolEnd) { 02684 pStart = p; 02685 BusyFlag = RtlCheckBit (MmPagedPoolInfo.PagedPoolAllocationMap, CurrentPage); 02686 while (~(BusyFlag ^ RtlCheckBit (MmPagedPoolInfo.PagedPoolAllocationMap, CurrentPage))) { 02687 p += PAGE_SIZE; 02688 if (RtlCheckBit (MmPagedPoolInfo.EndOfPagedPoolBitmap, CurrentPage)) { 02689 CurrentPage += 1; 02690 break; 02691 } 02692 02693 CurrentPage += 1; 02694 if (p > (PCHAR)MmPagedPoolEnd) { 02695 break; 02696 } 02697 } 02698 02699 Size = (ULONG)(p - pStart); 02700 if (BusyFlag) { 02701 xStatus = (*SnapShotPoolPage)(pStart, 02702 Size, 02703 PoolInformation, 02704 &PoolEntryInfo, 02705 Length, 02706 RequiredLength 02707 ); 02708 if (xStatus != STATUS_COMMITMENT_LIMIT) { 02709 Status = xStatus; 02710 } 02711 } 02712 else { 02713 PoolInformation->NumberOfEntries += 1; 02714 *RequiredLength += sizeof (SYSTEM_POOL_ENTRY); 02715 if (Length < *RequiredLength) { 02716 Status = STATUS_INFO_LENGTH_MISMATCH; 02717 } 02718 else { 02719 PoolEntryInfo->Allocated = FALSE; 02720 PoolEntryInfo->Size = Size; 02721 PoolEntryInfo->AllocatorBackTraceIndex = 0; 02722 PoolEntryInfo->TagUlong = 0; 02723 PoolEntryInfo += 1; 02724 Status = STATUS_SUCCESS; 02725 } 02726 } 02727 } 02728 } 02729 else if (PoolType == NonPagedPool) { 02730 PoolInformation->TotalSize = MmSizeOfNonPagedPoolInBytes; 02731 PoolInformation->FirstEntry = MmNonPagedPoolStart; 02732 02733 p = MmNonPagedPoolStart; 02734 while (p < (PCHAR)MmNonPagedPoolEnd) { 02735 02736 // 02737 // NonPaged pool is linked together through the pages themselves. 02738 // 02739 02740 pp = (PVOID *)MmNonPagedMustSucceed; 02741 while (pp) { 02742 if (p == (PCHAR)pp) { 02743 PoolInformation->NumberOfEntries += 1; 02744 *RequiredLength += sizeof( SYSTEM_POOL_ENTRY ); 02745 if (Length < *RequiredLength) { 02746 Status = STATUS_INFO_LENGTH_MISMATCH; 02747 } 02748 else { 02749 PoolEntryInfo->Allocated = FALSE; 02750 PoolEntryInfo->Size = PAGE_SIZE; 02751 PoolEntryInfo->AllocatorBackTraceIndex = 0; 02752 PoolEntryInfo->TagUlong = 0; 02753 PoolEntryInfo += 1; 02754 Status = STATUS_SUCCESS; 02755 } 02756 02757 p += PAGE_SIZE; 02758 pp = (PVOID *)MmNonPagedMustSucceed; 02759 } 02760 else { 02761 pp = (PVOID *)*pp; 02762 } 02763 } 02764 02765 NeedsReprotect = FALSE; 02766 02767 for (Index = 0; Index < MI_MAX_FREE_LIST_HEADS; Index += 1) { 02768 02769 Entry = MmNonPagedPoolFreeListHead[Index].Flink; 02770 02771 while (Entry != &MmNonPagedPoolFreeListHead[Index]) { 02772 02773 if (MmProtectFreedNonPagedPool == TRUE) { 02774 MiUnProtectFreeNonPagedPool ((PVOID)Entry, 0); 02775 NeedsReprotect = TRUE; 02776 } 02777 02778 FreePageInfo = CONTAINING_RECORD( Entry, 02779 MMFREE_POOL_ENTRY, 02780 List 02781 ); 02782 02783 ASSERT (FreePageInfo->Signature == MM_FREE_POOL_SIGNATURE); 02784 02785 if (p == (PCHAR)FreePageInfo) { 02786 02787 Size = (ULONG)(FreePageInfo->Size << PAGE_SHIFT); 02788 PoolInformation->NumberOfEntries += 1; 02789 *RequiredLength += sizeof( SYSTEM_POOL_ENTRY ); 02790 if (Length < *RequiredLength) { 02791 Status = STATUS_INFO_LENGTH_MISMATCH; 02792 } 02793 else { 02794 PoolEntryInfo->Allocated = FALSE; 02795 PoolEntryInfo->Size = Size; 02796 PoolEntryInfo->AllocatorBackTraceIndex = 0; 02797 PoolEntryInfo->TagUlong = 0; 02798 PoolEntryInfo += 1; 02799 Status = STATUS_SUCCESS; 02800 } 02801 02802 p += Size; 02803 Index = MI_MAX_FREE_LIST_HEADS; 02804 break; 02805 } 02806 02807 Entry = FreePageInfo->List.Flink; 02808 02809 if (NeedsReprotect == TRUE) { 02810 MiProtectFreeNonPagedPool ((PVOID)FreePageInfo, 02811 (ULONG)FreePageInfo->Size); 02812 NeedsReprotect = FALSE; 02813 } 02814 } 02815 } 02816 02817 StartPosition = BYTES_TO_PAGES((PCHAR)p - 02818 (PCHAR)MmPageAlignedPoolBase[NonPagedPool]); 02819 if (StartPosition >= MmExpandedPoolBitPosition) { 02820 if (NeedsReprotect == TRUE) { 02821 MiProtectFreeNonPagedPool ((PVOID)FreePageInfo, 02822 (ULONG)FreePageInfo->Size); 02823 } 02824 break; 02825 } 02826 02827 if (StartPosition < MmMustSucceedPoolBitPosition) { 02828 ASSERT (NeedsReprotect == FALSE); 02829 Size = PAGE_SIZE; 02830 xStatus = (*SnapShotPoolPage) (p, 02831 Size, 02832 PoolInformation, 02833 &PoolEntryInfo, 02834 Length, 02835 RequiredLength 02836 ); 02837 if (xStatus != STATUS_COMMITMENT_LIMIT) { 02838 Status = xStatus; 02839 } 02840 } 02841 else { 02842 if (MI_IS_PHYSICAL_ADDRESS(p)) { 02843 // 02844 // On certain architectures, virtual addresses 02845 // may be physical and hence have no corresponding PTE. 02846 // 02847 PointerPte = NULL; 02848 Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (p)); 02849 } else { 02850 PointerPte = MiGetPteAddress (p); 02851 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 02852 } 02853 ASSERT (Pfn1->u3.e1.StartOfAllocation != 0); 02854 02855 // 02856 // Find end of allocation and determine size. 02857 // 02858 02859 NumberOfPages = 1; 02860 while (Pfn1->u3.e1.EndOfAllocation == 0) { 02861 NumberOfPages += 1; 02862 if (PointerPte == NULL) { 02863 Pfn1 += 1; 02864 } 02865 else { 02866 PointerPte += 1; 02867 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 02868 } 02869 } 02870 02871 Size = NumberOfPages << PAGE_SHIFT; 02872 xStatus = (*SnapShotPoolPage) (p, 02873 Size, 02874 PoolInformation, 02875 &PoolEntryInfo, 02876 Length, 02877 RequiredLength 02878 ); 02879 if (NeedsReprotect == TRUE) { 02880 MiProtectFreeNonPagedPool ((PVOID)FreePageInfo, 02881 (ULONG)FreePageInfo->Size); 02882 } 02883 02884 if (xStatus != STATUS_COMMITMENT_LIMIT) { 02885 Status = xStatus; 02886 } 02887 } 02888 02889 p += Size; 02890 } 02891 } 02892 else { 02893 Status = STATUS_NOT_IMPLEMENTED; 02894 } 02895 02896 return Status; 02897 } 02898 02899 #endif // DBG || (i386 && !FPO) 02900 02901 ULONG MmSpecialPoolTag; 02902 PVOID MmSpecialPoolStart; 02903 PVOID MmSpecialPoolEnd; 02904 02905 ULONG MmSpecialPoolRejected[5]; 02906 LOGICAL MmSpecialPoolCatchOverruns = TRUE; 02907 02908 02909 PMMPTE MiSpecialPoolFirstPte; 02910 PMMPTE MiSpecialPoolLastPte; 02911 02912 ULONG MiSpecialPagesNonPaged; 02913 ULONG MiSpecialPagesPagable; 02914 ULONG MmSpecialPagesInUse; // Used by the debugger 02915 02916 ULONG MiSpecialPagesNonPagedPeak; 02917 ULONG MiSpecialPagesPagablePeak; 02918 ULONG MiSpecialPagesInUsePeak; 02919 02920 ULONG MiSpecialPagesNonPagedMaximum; 02921 02922 ULONG MiSpecialPoolPtes; 02923 02924 LOGICAL MiSpecialPoolEnabled = TRUE; 02925 02926 SIZE_T 02927 MmQuerySpecialPoolBlockSize ( 02928 IN PVOID P 02929 ) 02930 02931 /*++ 02932 02933 Routine Description: 02934 02935 This routine returns the size of a special pool allocation. 02936 02937 Arguments: 02938 02939 VirtualAddress - Supplies the special pool virtual address to query. 02940 02941 Return Value: 02942 02943 The size in bytes of the allocation. 02944 02945 Environment: 02946 02947 Kernel mode, APC_LEVEL or below for pagable addresses, DISPATCH_LEVEL or 02948 below for nonpaged addresses. 02949 02950 --*/ 02951 02952 { 02953 PPOOL_HEADER Header; 02954 02955 ASSERT ((P >= MmSpecialPoolStart) && (P < MmSpecialPoolEnd)); 02956 02957 if (((ULONG_PTR)P & (PAGE_SIZE - 1))) { 02958 Header = PAGE_ALIGN (P); 02959 } 02960 else { 02961 Header = (PPOOL_HEADER)((PCHAR)PAGE_ALIGN (P) + PAGE_SIZE - POOL_OVERHEAD); 02962 } 02963 02964 return (SIZE_T)(Header->Ulong1 & ~(MI_SPECIAL_POOL_PAGABLE | MI_SPECIAL_POOL_VERIFIER)); 02965 } 02966 02967 VOID 02968 MiMakeSpecialPoolPagable ( 02969 IN PVOID VirtualAddress, 02970 IN PMMPTE PointerPte 02971 ); 02972 02973 VOID 02974 MiInitializeSpecialPool ( 02975 VOID 02976 ) 02977 02978 /*++ 02979 02980 Routine Description: 02981 02982 This routine initializes the special pool used to catch pool corruptors. 02983 02984 Arguments: 02985 02986 None. 02987 02988 Return Value: 02989 02990 None. 02991 02992 Environment: 02993 02994 Kernel mode, no locks held. 02995 02996 --*/ 02997 02998 { 02999 ULONG BugCheckOnFailure; 03000 KIRQL OldIrql; 03001 PMMPTE PointerPte; 03002 03003 if ((MmVerifyDriverBufferLength == (ULONG)-1) && 03004 ((MmSpecialPoolTag == 0) || (MmSpecialPoolTag == (ULONG)-1))) { 03005 return; 03006 } 03007 03008 #if PFN_CONSISTENCY 03009 MiUnMapPfnDatabase (); 03010 #endif 03011 03012 LOCK_PFN (OldIrql); 03013 03014 // 03015 // Even though we asked for some number of system PTEs to map special pool, 03016 // we may not have been given them all. Large memory systems are 03017 // autoconfigured so that a large nonpaged pool is the default. 03018 // x86 systems booted with the 3GB switch and all Alphas don't have enough 03019 // contiguous virtual address space to support this, so our request may 03020 // have been trimmed. Handle that intelligently here so we don't exhaust 03021 // the system PTE pool and fail to handle thread stacks and I/O. 03022 // 03023 03024 if (MmNumberOfSystemPtes < 0x3000) { 03025 MiSpecialPoolPtes = MmNumberOfSystemPtes / 6; 03026 } 03027 else { 03028 MiSpecialPoolPtes = MmNumberOfSystemPtes / 3; 03029 } 03030 03031 #if !defined (_WIN64) 03032 03033 // 03034 // 32-bit systems are very cramped on virtual address space. Apply 03035 // a cap here to prevent overzealousness. 03036 // 03037 03038 if (MiSpecialPoolPtes > MM_SPECIAL_POOL_PTES) { 03039 MiSpecialPoolPtes = MM_SPECIAL_POOL_PTES; 03040 } 03041 #endif 03042 03043 #ifdef _X86_ 03044 03045 // 03046 // For x86, we can actually use an additional range of special PTEs to 03047 // map memory with and so we can raise the limit from 25000 to approximately 03048 // 96000. 03049 // 03050 03051 if ((MiNumberOfExtraSystemPdes != 0) && 03052 ((MiHydra == FALSE) || (ExpMultiUserTS == FALSE)) && 03053 (MiRequestedSystemPtes != (ULONG)-1)) { 03054 03055 if (MmPagedPoolMaximumDesired == TRUE) { 03056 03057 // 03058 // The low PTEs between 0xA4000000 & 0xC0000000 must be used 03059 // for both regular system PTE usage and special pool usage. 03060 // 03061 03062 MiSpecialPoolPtes = (MiNumberOfExtraSystemPdes / 2) * PTE_PER_PAGE; 03063 } 03064 else { 03065 03066 // 03067 // The low PTEs between 0xA4000000 & 0xC0000000 can be used 03068 // exclusively for special pool. 03069 // 03070 03071 MiSpecialPoolPtes = MiNumberOfExtraSystemPdes * PTE_PER_PAGE; 03072 } 03073 } 03074 03075 #endif 03076 03077 // 03078 // A PTE disappears for double mapping the system page directory. 03079 // When guard paging for system PTEs is enabled, a few more go also. 03080 // Thus, not being able to get all the PTEs we wanted is not fatal and 03081 // we just back off a bit and retry. 03082 // 03083 03084 // 03085 // Always request an even number of PTEs so each one can be guard paged. 03086 // 03087 03088 MiSpecialPoolPtes &= ~0x1; 03089 03090 BugCheckOnFailure = FALSE; 03091 03092 do { 03093 MiSpecialPoolFirstPte = MiReserveSystemPtes (MiSpecialPoolPtes, 03094 SystemPteSpace, 03095 0, 03096 0, 03097 BugCheckOnFailure); 03098 if (MiSpecialPoolFirstPte != NULL) { 03099 break; 03100 } 03101 03102 if (MiSpecialPoolPtes == 0) { 03103 BugCheckOnFailure = TRUE; 03104 continue; 03105 } 03106 03107 MiSpecialPoolPtes -= 2; 03108 } while (1); 03109 03110 // 03111 // Build the list of PTE pairs. 03112 // 03113 03114 MiSpecialPoolLastPte = MiSpecialPoolFirstPte + MiSpecialPoolPtes; 03115 MmSpecialPoolStart = MiGetVirtualAddressMappedByPte (MiSpecialPoolFirstPte); 03116 03117 PointerPte = MiSpecialPoolFirstPte; 03118 while (PointerPte < MiSpecialPoolLastPte) { 03119 PointerPte->u.List.NextEntry = ((PointerPte + 2) - MmSystemPteBase); 03120 PointerPte += 2; 03121 } 03122 PointerPte -= 2; 03123 PointerPte->u.List.NextEntry = MM_EMPTY_PTE_LIST; 03124 MiSpecialPoolLastPte = PointerPte; 03125 MmSpecialPoolEnd = MiGetVirtualAddressMappedByPte (MiSpecialPoolLastPte + 1); 03126 03127 // 03128 // Cap nonpaged special pool based on the memory size. 03129 // 03130 03131 MiSpecialPagesNonPagedMaximum = (ULONG)(MmResidentAvailablePages >> 4); 03132 03133 if (MmNumberOfPhysicalPages > 0x3FFF) { 03134 MiSpecialPagesNonPagedMaximum = (ULONG)(MmResidentAvailablePages >> 3); 03135 } 03136 03137 UNLOCK_PFN (OldIrql); 03138 } 03139 03140 LOGICAL 03141 MmSetSpecialPool ( 03142 IN LOGICAL Enable 03143 ) 03144 03145 /*++ 03146 03147 Routine Description: 03148 03149 This routine enables/disables special pool. This allows callers to ensure 03150 that subsequent allocations do not come from special pool. It is relied 03151 upon by callers that require KSEG0 addresses. 03152 03153 Arguments: 03154 03155 Enable - Supplies TRUE to enable special pool, FALSE to disable it. 03156 03157 Return Value: 03158 03159 Current special pool state (enabled or disabled). 03160 03161 Environment: 03162 03163 Kernel mode, IRQL of DISPATCH_LEVEL or below. 03164 03165 --*/ 03166 03167 { 03168 KIRQL OldIrql; 03169 LOGICAL OldEnable; 03170 03171 LOCK_PFN2 (OldIrql); 03172 03173 OldEnable = MiSpecialPoolEnabled; 03174 03175 MiSpecialPoolEnabled = Enable; 03176 03177 UNLOCK_PFN2 (OldIrql); 03178 03179 return OldEnable; 03180 } 03181 03182 #ifndef NO_POOL_CHECKS 03183 typedef struct _MI_BAD_TAGS { 03184 USHORT Enabled; 03185 UCHAR TargetChar; 03186 UCHAR AllOthers; 03187 ULONG Dispatches; 03188 ULONG Allocations; 03189 ULONG RandomizerEnabled; 03190 } MI_BAD_TAGS, *PMI_BAD_TAGS; 03191 03192 MI_BAD_TAGS MiBadTags; 03193 KTIMER MiSpecialPoolTimer; 03194 KDPC MiSpecialPoolTimerDpc; 03195 LARGE_INTEGER MiTimerDueTime; 03196 03197 #define MI_THREE_SECONDS 3 03198 03199 03200 VOID 03201 MiSpecialPoolTimerDispatch ( 03202 IN PKDPC Dpc, 03203 IN PVOID DeferredContext, 03204 IN PVOID SystemArgument1, 03205 IN PVOID SystemArgument2 03206 ) 03207 03208 /*++ 03209 03210 Routine Description: 03211 03212 This routine is executed every 3 seconds. Just toggle the enable bit. 03213 If not many squeezed allocations have been made then just leave it 03214 continuously enabled. Switch to a different tag if it looks like this 03215 one isn't getting any hits. 03216 03217 No locks needed. 03218 03219 Arguments: 03220 03221 Dpc - Supplies a pointer to a control object of type DPC. 03222 03223 DeferredContext - Optional deferred context; not used. 03224 03225 SystemArgument1 - Optional argument 1; not used. 03226 03227 SystemArgument2 - Optional argument 2; not used. 03228 03229 Return Value: 03230 03231 None. 03232 03233 --*/ 03234 03235 { 03236 UCHAR NewChar; 03237 03238 UNREFERENCED_PARAMETER (Dpc); 03239 UNREFERENCED_PARAMETER (DeferredContext); 03240 UNREFERENCED_PARAMETER (SystemArgument1); 03241 UNREFERENCED_PARAMETER (SystemArgument2); 03242 03243 MiBadTags.Dispatches += 1; 03244 03245 if (MiBadTags.Allocations > 500) { 03246 MiBadTags.Enabled += 1; 03247 } 03248 else if ((MiBadTags.Allocations == 0) && (MiBadTags.Dispatches > 100)) { 03249 if (MiBadTags.AllOthers == 0) { 03250 NewChar = (UCHAR)(MiBadTags.TargetChar + 1); 03251 if (NewChar >= 'a' && NewChar <= 'z') { 03252 MiBadTags.TargetChar = NewChar; 03253 } 03254 else if (NewChar == 'z' + 1) { 03255 MiBadTags.TargetChar = 'a'; 03256 } 03257 else if (NewChar >= 'A' && NewChar <= 'Z') { 03258 MiBadTags.TargetChar = NewChar; 03259 } 03260 else { 03261 MiBadTags.TargetChar = 'A'; 03262 } 03263 } 03264 } 03265 } 03266 03267 extern ULONG InitializationPhase; 03268 03269 VOID 03270 MiInitializeSpecialPoolCriteria ( 03271 VOID 03272 ) 03273 { 03274 LARGE_INTEGER SystemTime; 03275 TIME_FIELDS TimeFields; 03276 03277 if (InitializationPhase == 0) { 03278 #if defined (_MI_SPECIAL_POOL_BY_DEFAULT) 03279 if (MmSpecialPoolTag == 0) { 03280 MmSpecialPoolTag = (ULONG)-2; 03281 } 03282 #endif 03283 return; 03284 } 03285 03286 if (MmSpecialPoolTag != (ULONG)-2) { 03287 return; 03288 } 03289 03290 KeQuerySystemTime (&SystemTime); 03291 03292 RtlTimeToTimeFields (&SystemTime, &TimeFields); 03293 03294 if (TimeFields.Second <= 25) { 03295 MiBadTags.TargetChar = (UCHAR)('a' + (UCHAR)TimeFields.Second); 03296 } 03297 else if (TimeFields.Second <= 51) { 03298 MiBadTags.TargetChar = (UCHAR)('A' + (UCHAR)(TimeFields.Second - 26)); 03299 } 03300 else { 03301 MiBadTags.AllOthers = 1; 03302 } 03303 03304 MiBadTags.RandomizerEnabled = 1; 03305 03306 // 03307 // Initialize a periodic timer to go off every three seconds. 03308 // 03309 03310 KeInitializeDpc (&MiSpecialPoolTimerDpc, MiSpecialPoolTimerDispatch, NULL); 03311 03312 KeInitializeTimer (&MiSpecialPoolTimer); 03313 03314 MiTimerDueTime.QuadPart = Int32x32To64 (MI_THREE_SECONDS, -10000000); 03315 03316 KeSetTimerEx (&MiSpecialPoolTimer, 03317 MiTimerDueTime, 03318 MI_THREE_SECONDS * 1000, 03319 &MiSpecialPoolTimerDpc); 03320 03321 MiBadTags.Enabled += 1; 03322 } 03323 03324 PVOID 03325 MmSqueezeBadTags ( 03326 IN SIZE_T NumberOfBytes, 03327 IN ULONG Tag, 03328 IN POOL_TYPE PoolType, 03329 IN ULONG SpecialPoolType 03330 ) 03331 03332 /*++ 03333 03334 Routine Description: 03335 03336 This routine squeezes bad tags by forcing them into special pool in a 03337 systematic fashion. 03338 03339 Arguments: 03340 03341 NumberOfBytes - Supplies the number of bytes to commit. 03342 03343 Tag - Supplies the tag of the requested allocation. 03344 03345 PoolType - Supplies the pool type of the requested allocation. 03346 03347 SpecialPoolType - Supplies the special pool type of the 03348 requested allocation. 03349 03350 - 0 indicates overruns. 03351 - 1 indicates underruns. 03352 - 2 indicates use the systemwide pool policy. 03353 03354 Return Value: 03355 03356 A non-NULL pointer if the requested allocation was fulfilled from special 03357 pool. NULL if the allocation was not made. 03358 03359 Environment: 03360 03361 Kernel mode, no locks (not even pool locks) held. 03362 03363 --*/ 03364 03365 { 03366 PUCHAR tc; 03367 03368 if ((MiBadTags.Enabled % 0x10) == 0) { 03369 return NULL; 03370 } 03371 03372 if (MiBadTags.RandomizerEnabled == 0) { 03373 return NULL; 03374 } 03375 03376 tc = (PUCHAR)&Tag; 03377 if (*tc == MiBadTags.TargetChar) { 03378 ; 03379 } 03380 else if (MiBadTags.AllOthers == 1) { 03381 if (*tc >= 'a' && *tc <= 'z') { 03382 return NULL; 03383 } 03384 if (*tc >= 'A' && *tc <= 'Z') { 03385 return NULL; 03386 } 03387 } 03388 else { 03389 return NULL; 03390 } 03391 03392 MiBadTags.Allocations += 1; 03393 03394 return MmAllocateSpecialPool(NumberOfBytes, Tag, PoolType, SpecialPoolType); 03395 } 03396 03397 VOID 03398 MiEnableRandomSpecialPool ( 03399 IN LOGICAL Enable 03400 ) 03401 { 03402 MiBadTags.RandomizerEnabled = Enable; 03403 } 03404 03405 #endif 03406 03407 PVOID 03408 MmAllocateSpecialPool ( 03409 IN SIZE_T NumberOfBytes, 03410 IN ULONG Tag, 03411 IN POOL_TYPE PoolType, 03412 IN ULONG SpecialPoolType 03413 ) 03414 03415 /*++ 03416 03417 Routine Description: 03418 03419 This routine allocates virtual memory from special pool. This allocation 03420 is made from the end of a physical page with the next PTE set to no access 03421 so that any reads or writes will cause an immediate fatal system crash. 03422 03423 This lets us catch components that corrupt pool. 03424 03425 Arguments: 03426 03427 NumberOfBytes - Supplies the number of bytes to commit. 03428 03429 Tag - Supplies the tag of the requested allocation. 03430 03431 PoolType - Supplies the pool type of the requested allocation. 03432 03433 SpecialPoolType - Supplies the special pool type of the 03434 requested allocation. 03435 03436 - 0 indicates overruns. 03437 - 1 indicates underruns. 03438 - 2 indicates use the systemwide pool policy. 03439 03440 Return Value: 03441 03442 A non-NULL pointer if the requested allocation was fulfilled from special 03443 pool. NULL if the allocation was not made. 03444 03445 Environment: 03446 03447 Kernel mode, no pool locks held. 03448 03449 Note this is a nonpagable wrapper so that machines without special pool 03450 can still support drivers allocating nonpaged pool at DISPATCH_LEVEL 03451 requesting special pool. 03452 03453 --*/ 03454 03455 { 03456 if (MiSpecialPoolPtes == 0) { 03457 03458 // 03459 // The special pool allocation code was never initialized. 03460 // 03461 03462 return NULL; 03463 } 03464 03465 return MiAllocateSpecialPool (NumberOfBytes, 03466 Tag, 03467 PoolType, 03468 SpecialPoolType); 03469 } 03470 03471 PVOID 03472 MiAllocateSpecialPool ( 03473 IN SIZE_T NumberOfBytes, 03474 IN ULONG Tag, 03475 IN POOL_TYPE PoolType, 03476 IN ULONG SpecialPoolType 03477 ) 03478 03479 /*++ 03480 03481 Routine Description: 03482 03483 This routine allocates virtual memory from special pool. This allocation 03484 is made from the end of a physical page with the next PTE set to no access 03485 so that any reads or writes will cause an immediate fatal system crash. 03486 03487 This lets us catch components that corrupt pool. 03488 03489 Arguments: 03490 03491 NumberOfBytes - Supplies the number of bytes to commit. 03492 03493 Tag - Supplies the tag of the requested allocation. 03494 03495 PoolType - Supplies the pool type of the requested allocation. 03496 03497 SpecialPoolType - Supplies the special pool type of the 03498 requested allocation. 03499 03500 - 0 indicates overruns. 03501 - 1 indicates underruns. 03502 - 2 indicates use the systemwide pool policy. 03503 03504 Return Value: 03505 03506 A non-NULL pointer if the requested allocation was fulfilled from special 03507 pool. NULL if the allocation was not made. 03508 03509 Environment: 03510 03511 Kernel mode, no locks (not even pool locks) held. 03512 03513 --*/ 03514 03515 { 03516 MMPTE TempPte; 03517 PFN_NUMBER PageFrameIndex; 03518 PMMPTE PointerPte; 03519 KIRQL OldIrql; 03520 PVOID Entry; 03521 PPOOL_HEADER Header; 03522 LARGE_INTEGER CurrentTime; 03523 LOGICAL CatchOverruns; 03524 03525 if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) { 03526 03527 if (KeGetCurrentIrql() > APC_LEVEL) { 03528 03529 KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION, 03530 KeGetCurrentIrql(), 03531 PoolType, 03532 NumberOfBytes, 03533 0x30); 03534 } 03535 } 03536 else { 03537 if (KeGetCurrentIrql() > DISPATCH_LEVEL) { 03538 03539 KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION, 03540 KeGetCurrentIrql(), 03541 PoolType, 03542 NumberOfBytes, 03543 0x30); 03544 } 03545 } 03546 03547 #if defined (_X86_) && !defined (_X86PAE_) 03548 03549 if (MiNumberOfExtraSystemPdes != 0) { 03550 03551 extern ULONG MMSECT; 03552 03553 // 03554 // Prototype PTEs cannot come from lower special pool because 03555 // their address is encoded into PTEs and the encoding only covers 03556 // a max of 1GB from the start of paged pool. Likewise fork 03557 // prototype PTEs. 03558 // 03559 03560 if (Tag == MMSECT || Tag == 'lCmM') { 03561 return NULL; 03562 } 03563 } 03564 03565 #endif 03566 03567 #if !defined (_WIN64) && !defined (_X86PAE_) 03568 03569 if (Tag == 'bSmM' || Tag == 'iCmM' || Tag == 'aCmM') { 03570 03571 // 03572 // Mm subsections cannot come from this special pool because they 03573 // get encoded into PTEs - they must come from normal nonpaged pool. 03574 // 03575 03576 return NULL; 03577 } 03578 03579 #endif 03580 03581 TempPte = ValidKernelPte; 03582 03583 LOCK_PFN2 (OldIrql); 03584 03585 if (MiSpecialPoolEnabled == FALSE) { 03586 03587 // 03588 // The special pool allocation code is currently disabled. 03589 // 03590 03591 UNLOCK_PFN2 (OldIrql); 03592 return NULL; 03593 } 03594 03595 if (MmAvailablePages < 200) { 03596 UNLOCK_PFN2 (OldIrql); 03597 MmSpecialPoolRejected[0] += 1; 03598 return NULL; 03599 } 03600 03601 // 03602 // Don't get too aggressive until a paging file gets set up. 03603 // 03604 03605 if (MmNumberOfPagingFiles == 0 && MmSpecialPagesInUse > MmAvailablePages / 2) { 03606 UNLOCK_PFN2 (OldIrql); 03607 MmSpecialPoolRejected[3] += 1; 03608 return NULL; 03609 } 03610 03611 if (MiSpecialPoolFirstPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) { 03612 UNLOCK_PFN2 (OldIrql); 03613 MmSpecialPoolRejected[2] += 1; 03614 return NULL; 03615 } 03616 03617 if (MmResidentAvailablePages < 100) { 03618 UNLOCK_PFN2 (OldIrql); 03619 MmSpecialPoolRejected[4] += 1; 03620 return NULL; 03621 } 03622 03623 // 03624 // Cap nonpaged allocations to prevent runaways. 03625 // 03626 03627 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) { 03628 03629 if (MiSpecialPagesNonPaged > MiSpecialPagesNonPagedMaximum) { 03630 UNLOCK_PFN2 (OldIrql); 03631 MmSpecialPoolRejected[1] += 1; 03632 return NULL; 03633 } 03634 03635 MmResidentAvailablePages -= 1; 03636 03637 MM_BUMP_COUNTER(31, 1); 03638 03639 MiSpecialPagesNonPaged += 1; 03640 if (MiSpecialPagesNonPaged > MiSpecialPagesNonPagedPeak) { 03641 MiSpecialPagesNonPagedPeak = MiSpecialPagesNonPaged; 03642 } 03643 } 03644 else { 03645 MiSpecialPagesPagable += 1; 03646 if (MiSpecialPagesPagable > MiSpecialPagesPagablePeak) { 03647 MiSpecialPagesPagablePeak = MiSpecialPagesPagable; 03648 } 03649 } 03650 03651 PointerPte = MiSpecialPoolFirstPte; 03652 03653 ASSERT (MiSpecialPoolFirstPte->u.List.NextEntry != MM_EMPTY_PTE_LIST); 03654 03655 MiSpecialPoolFirstPte = PointerPte->u.List.NextEntry + MmSystemPteBase; 03656 03657 PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE (PointerPte)); 03658 03659 MmSpecialPagesInUse += 1; 03660 if (MmSpecialPagesInUse > MiSpecialPagesInUsePeak) { 03661 MiSpecialPagesInUsePeak = MmSpecialPagesInUse; 03662 } 03663 03664 TempPte.u.Hard.PageFrameNumber = PageFrameIndex; 03665 03666 if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) { 03667 MI_SET_PTE_DIRTY (TempPte); 03668 } 03669 03670 MI_WRITE_VALID_PTE (PointerPte, TempPte); 03671 MiInitializePfn (PageFrameIndex, PointerPte, 1); 03672 UNLOCK_PFN2 (OldIrql); 03673 03674 // 03675 // Fill the page with a random pattern. 03676 // 03677 03678 KeQueryTickCount(&CurrentTime); 03679 03680 Entry = MiGetVirtualAddressMappedByPte (PointerPte); 03681 03682 RtlFillMemory (Entry, PAGE_SIZE, (UCHAR) CurrentTime.LowPart); 03683 03684 if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) { 03685 MiMakeSpecialPoolPagable (Entry, PointerPte); 03686 (PointerPte + 1)->u.Soft.PageFileHigh = MI_SPECIAL_POOL_PTE_PAGABLE; 03687 } 03688 else { 03689 (PointerPte + 1)->u.Soft.PageFileHigh = MI_SPECIAL_POOL_PTE_NONPAGABLE; 03690 } 03691 03692 if (SpecialPoolType == 0) { 03693 CatchOverruns = TRUE; 03694 } 03695 else if (SpecialPoolType == 1) { 03696 CatchOverruns = FALSE; 03697 } 03698 else if (MmSpecialPoolCatchOverruns == TRUE) { 03699 CatchOverruns = TRUE; 03700 } 03701 else { 03702 CatchOverruns = FALSE; 03703 } 03704 03705 if (CatchOverruns == TRUE) { 03706 Header = (PPOOL_HEADER) Entry; 03707 Entry = (PVOID)(((LONG_PTR)(((PCHAR)Entry + (PAGE_SIZE - NumberOfBytes)))) & ~((LONG_PTR)POOL_OVERHEAD - 1)); 03708 } 03709 else { 03710 Header = (PPOOL_HEADER) ((PCHAR)Entry + PAGE_SIZE - POOL_OVERHEAD); 03711 } 03712 03713 // 03714 // Zero the header and stash any information needed at release time. 03715 // 03716 03717 RtlZeroMemory (Header, POOL_OVERHEAD); 03718 03719 Header->Ulong1 = (ULONG)NumberOfBytes; 03720 03721 ASSERT (NumberOfBytes <= PAGE_SIZE - POOL_OVERHEAD && PAGE_SIZE <= 32 * 1024); 03722 03723 if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) { 03724 Header->Ulong1 |= MI_SPECIAL_POOL_PAGABLE; 03725 } 03726 03727 if (PoolType & POOL_VERIFIER_MASK) { 03728 Header->Ulong1 |= MI_SPECIAL_POOL_VERIFIER; 03729 } 03730 03731 Header->BlockSize = (UCHAR) CurrentTime.LowPart; 03732 Header->PoolTag = Tag; 03733 03734 MiChargeCommitmentCantExpand (1, TRUE); 03735 03736 ASSERT ((Header->PoolType & POOL_QUOTA_MASK) == 0); 03737 03738 return Entry; 03739 } 03740 03741 VOID 03742 MmFreeSpecialPool ( 03743 IN PVOID P 03744 ) 03745 03746 /*++ 03747 03748 Routine Description: 03749 03750 This routine frees a special pool allocation. The backing page is freed 03751 and the mapping virtual address is made no access (the next virtual 03752 address is already no access). 03753 03754 The virtual address PTE pair is then placed into an LRU queue to provide 03755 maximum no-access (protection) life to catch components that access 03756 deallocated pool. 03757 03758 Arguments: 03759 03760 VirtualAddress - Supplies the special pool virtual address to free. 03761 03762 Return Value: 03763 03764 None. 03765 03766 Environment: 03767 03768 Kernel mode, no locks (not even pool locks) held. 03769 03770 --*/ 03771 03772 { 03773 MMPTE PteContents; 03774 PMMPTE PointerPte; 03775 PMMPFN Pfn1; 03776 KIRQL OldIrql; 03777 ULONG SlopBytes; 03778 ULONG NumberOfBytesCalculated; 03779 ULONG NumberOfBytesRequested; 03780 POOL_TYPE PoolType; 03781 MMPTE NoAccessPte; 03782 PPOOL_HEADER Header; 03783 PUCHAR Slop; 03784 ULONG i; 03785 LOGICAL BufferAtPageEnd; 03786 PMI_FREED_SPECIAL_POOL AllocationBase; 03787 LARGE_INTEGER CurrentTime; 03788 PULONG_PTR StackPointer; 03789 03790 PointerPte = MiGetPteAddress (P); 03791 PteContents = *PointerPte; 03792 03793 // 03794 // Check the PTE now so we can give a more friendly bugcheck rather than 03795 // crashing below on a bad reference. 03796 // 03797 03798 if (PteContents.u.Hard.Valid == 0) { 03799 if ((PteContents.u.Soft.Protection == 0) || 03800 (PteContents.u.Soft.Protection == MM_NOACCESS)) { 03801 KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION, 03802 (ULONG_PTR)P, 03803 (ULONG_PTR)PteContents.u.Long, 03804 0, 03805 0x20); 03806 } 03807 } 03808 03809 if (((ULONG_PTR)P & (PAGE_SIZE - 1))) { 03810 Header = PAGE_ALIGN (P); 03811 BufferAtPageEnd = TRUE; 03812 } 03813 else { 03814 Header = (PPOOL_HEADER)((PCHAR)PAGE_ALIGN (P) + PAGE_SIZE - POOL_OVERHEAD); 03815 BufferAtPageEnd = FALSE; 03816 } 03817 03818 if (Header->Ulong1 & MI_SPECIAL_POOL_PAGABLE) { 03819 ASSERT ((PointerPte + 1)->u.Soft.PageFileHigh == MI_SPECIAL_POOL_PTE_PAGABLE); 03820 if (KeGetCurrentIrql() > APC_LEVEL) { 03821 KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION, 03822 KeGetCurrentIrql(), 03823 PagedPool, 03824 (ULONG_PTR)P, 03825 0x31); 03826 } 03827 PoolType = PagedPool; 03828 } 03829 else { 03830 ASSERT ((PointerPte + 1)->u.Soft.PageFileHigh == MI_SPECIAL_POOL_PTE_NONPAGABLE); 03831 if (KeGetCurrentIrql() > DISPATCH_LEVEL) { 03832 KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION, 03833 KeGetCurrentIrql(), 03834 NonPagedPool, 03835 (ULONG_PTR)P, 03836 0x31); 03837 } 03838 PoolType = NonPagedPool; 03839 } 03840 03841 NumberOfBytesRequested = (ULONG)(USHORT)(Header->Ulong1 & ~(MI_SPECIAL_POOL_PAGABLE | MI_SPECIAL_POOL_VERIFIER)); 03842 03843 // 03844 // We gave the caller pool-header aligned data, so account for 03845 // that when checking here. 03846 // 03847 03848 if (BufferAtPageEnd == TRUE) { 03849 03850 NumberOfBytesCalculated = PAGE_SIZE - BYTE_OFFSET(P); 03851 03852 if (NumberOfBytesRequested > NumberOfBytesCalculated) { 03853 03854 // 03855 // Seems like we didn't give the caller enough - this is an error. 03856 // 03857 03858 KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION, 03859 (ULONG_PTR)P, 03860 NumberOfBytesRequested, 03861 NumberOfBytesCalculated, 03862 0x21); 03863 } 03864 03865 if (NumberOfBytesRequested + POOL_OVERHEAD < NumberOfBytesCalculated) { 03866 03867 // 03868 // Seems like we gave the caller too much - also an error. 03869 // 03870 03871 KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION, 03872 (ULONG_PTR)P, 03873 NumberOfBytesRequested, 03874 NumberOfBytesCalculated, 03875 0x22); 03876 } 03877 03878 // 03879 // Check the memory before the start of the caller's allocation. 03880 // 03881 03882 Slop = (PUCHAR)(Header + 1); 03883 if (Header->Ulong1 & MI_SPECIAL_POOL_VERIFIER) { 03884 Slop += sizeof(MI_VERIFIER_POOL_HEADER); 03885 } 03886 03887 for ( ; Slop < (PUCHAR)P; Slop += 1) { 03888 03889 if (*Slop != Header->BlockSize) { 03890 03891 KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION, 03892 (ULONG_PTR)P, 03893 (ULONG_PTR)Slop, 03894 Header->Ulong1, 03895 0x23); 03896 } 03897 } 03898 } 03899 else { 03900 NumberOfBytesCalculated = 0; 03901 } 03902 03903 // 03904 // Check the memory after the end of the caller's allocation. 03905 // 03906 03907 Slop = (PUCHAR)P + NumberOfBytesRequested; 03908 03909 SlopBytes = (ULONG)((PUCHAR)(PAGE_ALIGN(P)) + PAGE_SIZE - Slop); 03910 03911 if (BufferAtPageEnd == FALSE) { 03912 SlopBytes -= POOL_OVERHEAD; 03913 if (Header->Ulong1 & MI_SPECIAL_POOL_VERIFIER) { 03914 SlopBytes -= sizeof(MI_VERIFIER_POOL_HEADER); 03915 } 03916 } 03917 03918 for (i = 0; i < SlopBytes; i += 1) { 03919 03920 if (*Slop != Header->BlockSize) { 03921 03922 // 03923 // The caller wrote slop between the free alignment we gave and the 03924 // end of the page (this is not detectable from page protection). 03925 // 03926 03927 KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION, 03928 (ULONG_PTR)P, 03929 (ULONG_PTR)Slop, 03930 Header->Ulong1, 03931 0x24); 03932 } 03933 Slop += 1; 03934 } 03935 03936 if (Header->Ulong1 & MI_SPECIAL_POOL_VERIFIER) { 03937 VerifierFreeTrackedPool (P, 03938 NumberOfBytesRequested, 03939 PoolType, 03940 TRUE); 03941 } 03942 03943 AllocationBase = (PMI_FREED_SPECIAL_POOL)(PAGE_ALIGN (P)); 03944 03945 AllocationBase->Signature = MI_FREED_SPECIAL_POOL_SIGNATURE; 03946 03947 KeQueryTickCount(&CurrentTime); 03948 AllocationBase->TickCount = CurrentTime.LowPart; 03949 03950 AllocationBase->NumberOfBytesRequested = NumberOfBytesRequested; 03951 AllocationBase->Pagable = (ULONG)PoolType; 03952 AllocationBase->VirtualAddress = P; 03953 AllocationBase->Thread = PsGetCurrentThread (); 03954 03955 #if defined (_X86_) 03956 _asm { 03957 mov StackPointer, esp 03958 } 03959 03960 AllocationBase->StackPointer = StackPointer; 03961 03962 // 03963 // For now, don't get fancy with copying more than what's in the current 03964 // stack page. To do so would require checking the thread stack limits, 03965 // DPC stack limits, etc. 03966 // 03967 03968 AllocationBase->StackBytes = PAGE_SIZE - BYTE_OFFSET(StackPointer); 03969 03970 if (AllocationBase->StackBytes != 0) { 03971 03972 if (AllocationBase->StackBytes > MI_STACK_BYTES) { 03973 AllocationBase->StackBytes = MI_STACK_BYTES; 03974 } 03975 03976 RtlCopyMemory (AllocationBase->StackData, 03977 StackPointer, 03978 AllocationBase->StackBytes); 03979 } 03980 #else 03981 AllocationBase->StackPointer = NULL; 03982 AllocationBase->StackBytes = 0; 03983 #endif 03984 03985 if (PoolType == PagedPool) { 03986 NoAccessPte.u.Long = MM_KERNEL_NOACCESS_PTE; 03987 MiDeleteSystemPagableVm (PointerPte, 03988 1, 03989 NoAccessPte, 03990 FALSE, 03991 NULL); 03992 LOCK_PFN2 (OldIrql); 03993 MiSpecialPagesPagable -= 1; 03994 } 03995 else { 03996 03997 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 03998 LOCK_PFN2 (OldIrql); 03999 MiSpecialPagesNonPaged -= 1; 04000 MI_SET_PFN_DELETED (Pfn1); 04001 MiDecrementShareCount (MI_GET_PAGE_FRAME_FROM_PTE (PointerPte)); 04002 KeFlushSingleTb (PAGE_ALIGN(P), 04003 TRUE, 04004 TRUE, 04005 (PHARDWARE_PTE)PointerPte, 04006 ZeroKernelPte.u.Flush); 04007 MmResidentAvailablePages += 1; 04008 MM_BUMP_COUNTER(37, 1); 04009 } 04010 04011 // 04012 // Clear the adjacent PTE to support MmIsSpecialPoolAddressFree(). 04013 // 04014 04015 (PointerPte + 1)->u.Long = 0; 04016 04017 ASSERT (MiSpecialPoolLastPte->u.List.NextEntry == MM_EMPTY_PTE_LIST); 04018 MiSpecialPoolLastPte->u.List.NextEntry = PointerPte - MmSystemPteBase; 04019 04020 MiSpecialPoolLastPte = PointerPte; 04021 MiSpecialPoolLastPte->u.List.NextEntry = MM_EMPTY_PTE_LIST; 04022 04023 MmSpecialPagesInUse -= 1; 04024 04025 UNLOCK_PFN2 (OldIrql); 04026 04027 MiReturnCommitment (1); 04028 04029 return; 04030 } 04031 04032 04033 VOID 04034 MiMakeSpecialPoolPagable ( 04035 IN PVOID VirtualAddress, 04036 IN PMMPTE PointerPte 04037 ) 04038 04039 /*++ 04040 04041 Routine Description: 04042 04043 Make a special pool allocation pagable. 04044 04045 Arguments: 04046 04047 VirtualAddress - Supplies the faulting address. 04048 04049 PointerPte - Supplies the PTE for the faulting address. 04050 04051 Return Value: 04052 04053 None. 04054 04055 Environment: 04056 04057 Kernel mode, no locks (not even pool locks) held. 04058 04059 --*/ 04060 04061 { 04062 PMMPFN Pfn1; 04063 MMPTE TempPte; 04064 #if defined(_ALPHA_) && !defined(_AXP64_) 04065 KIRQL OldIrql; 04066 #endif 04067 KIRQL PreviousIrql; 04068 PFN_NUMBER PageFrameIndex; 04069 #if PFN_CONSISTENCY 04070 KIRQL PfnIrql; 04071 #endif 04072 04073 LOCK_SYSTEM_WS (PreviousIrql); 04074 04075 // 04076 // As this page is now allocated, add it to the system working set to 04077 // make it pagable. 04078 // 04079 04080 TempPte = *PointerPte; 04081 04082 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (&TempPte); 04083 04084 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 04085 04086 ASSERT (Pfn1->u1.Event == 0); 04087 04088 CONSISTENCY_LOCK_PFN (PfnIrql); 04089 04090 Pfn1->u1.Event = (PVOID) PsGetCurrentThread(); 04091 04092 CONSISTENCY_UNLOCK_PFN (PfnIrql); 04093 04094 MiAddValidPageToWorkingSet (VirtualAddress, 04095 PointerPte, 04096 Pfn1, 04097 0); 04098 04099 ASSERT (KeGetCurrentIrql() == APC_LEVEL); 04100 04101 if (MmSystemCacheWs.AllowWorkingSetAdjustment == MM_GROW_WSLE_HASH) { 04102 MiGrowWsleHash (&MmSystemCacheWs); 04103 #if defined(_ALPHA_) && !defined(_AXP64_) 04104 LOCK_EXPANSION_IF_ALPHA (OldIrql); 04105 #endif 04106 MmSystemCacheWs.AllowWorkingSetAdjustment = TRUE; 04107 #if defined(_ALPHA_) && !defined(_AXP64_) 04108 UNLOCK_EXPANSION_IF_ALPHA (OldIrql); 04109 #endif 04110 } 04111 UNLOCK_SYSTEM_WS (PreviousIrql); 04112 } 04113 04114 04115 VOID 04116 MiCheckSessionPoolAllocations( 04117 VOID 04118 ) 04119 04120 /*++ 04121 04122 Routine Description: 04123 04124 Ensure that the current session has no pool allocations since it is about 04125 to exit. All session allocations must be freed prior to session exit. 04126 04127 Arguments: 04128 04129 None. 04130 04131 Return Value: 04132 04133 None. 04134 04135 Environment: 04136 04137 Kernel mode. 04138 04139 --*/ 04140 04141 { 04142 ULONG i; 04143 PMMPTE StartPde; 04144 PMMPTE EndPde; 04145 PMMPTE PointerPte; 04146 PVOID VirtualAddress; 04147 04148 PAGED_CODE(); 04149 04150 if (MmSessionSpace->NonPagedPoolBytes || MmSessionSpace->PagedPoolBytes) { 04151 04152 // 04153 // All page tables for this session's paged pool must be freed by now. 04154 // Being here means they aren't - this is fatal. Force in any valid 04155 // pages so that a debugger can show who the guilty party is. 04156 // 04157 04158 StartPde = MiGetPdeAddress (MmSessionSpace->PagedPoolStart); 04159 EndPde = MiGetPdeAddress (MmSessionSpace->PagedPoolEnd); 04160 04161 while (StartPde <= EndPde) { 04162 04163 if (StartPde->u.Long != 0 && StartPde->u.Long != MM_KERNEL_NOACCESS_PTE) { 04164 // 04165 // Hunt through the page table page for valid pages and force 04166 // them in. Note this also forces in the page table page if 04167 // it is not already. 04168 // 04169 04170 PointerPte = MiGetVirtualAddressMappedByPte (StartPde); 04171 04172 for (i = 0; i < PTE_PER_PAGE; i += 1) { 04173 if (PointerPte->u.Long != 0 && PointerPte->u.Long != MM_KERNEL_NOACCESS_PTE) { 04174 VirtualAddress = MiGetVirtualAddressMappedByPte (PointerPte); 04175 *(volatile BOOLEAN *)VirtualAddress = *(volatile BOOLEAN *)VirtualAddress; 04176 04177 #if DBG 04178 DbgPrint("MiCheckSessionPoolAllocations: Address %p still valid\n", 04179 VirtualAddress); 04180 #endif 04181 } 04182 PointerPte += 1; 04183 } 04184 04185 } 04186 04187 StartPde += 1; 04188 } 04189 04190 #if DBG 04191 DbgPrint ("MiCheckSessionPoolAllocations: This exiting session (ID %d) is leaking pool !\n", MmSessionSpace->SessionId); 04192 04193 DbgPrint ("This means win32k.sys, rdpdd.sys, atmfd.sys or a video/font driver is broken\n"); 04194 04195 DbgPrint ("%d nonpaged allocation leaks for %d bytes and %d paged allocation leaks for %d bytes\n", 04196 MmSessionSpace->NonPagedPoolAllocations, 04197 MmSessionSpace->NonPagedPoolBytes, 04198 MmSessionSpace->PagedPoolAllocations, 04199 MmSessionSpace->PagedPoolBytes); 04200 #endif 04201 04202 KeBugCheckEx (SESSION_HAS_VALID_POOL_ON_EXIT, 04203 (ULONG_PTR)MmSessionSpace->SessionId, 04204 MmSessionSpace->PagedPoolBytes, 04205 MmSessionSpace->NonPagedPoolBytes, 04206 #if defined (_WIN64) 04207 (MmSessionSpace->NonPagedPoolAllocations << 32) | 04208 (MmSessionSpace->PagedPoolAllocations) 04209 #else 04210 (MmSessionSpace->NonPagedPoolAllocations << 16) | 04211 (MmSessionSpace->PagedPoolAllocations) 04212 #endif 04213 ); 04214 } 04215 04216 ASSERT (MmSessionSpace->NonPagedPoolAllocations == 0); 04217 ASSERT (MmSessionSpace->PagedPoolAllocations == 0); 04218 } 04219 04220 04221 NTSTATUS 04222 MiInitializeSessionPool( 04223 VOID 04224 ) 04225 04226 /*++ 04227 04228 Routine Description: 04229 04230 Initialize the current session's pool structure. 04231 04232 Arguments: 04233 04234 None. 04235 04236 Return Value: 04237 04238 Status of the pool initialization. 04239 04240 Environment: 04241 04242 Kernel mode. 04243 04244 --*/ 04245 04246 { 04247 ULONG Index; 04248 MMPTE TempPte; 04249 PMMPTE PointerPde, PointerPte; 04250 PFN_NUMBER PageFrameIndex; 04251 PPOOL_DESCRIPTOR PoolDescriptor; 04252 PMM_SESSION_SPACE SessionGlobal; 04253 PMM_PAGED_POOL_INFO PagedPoolInfo; 04254 KIRQL OldIrql; 04255 PMMPFN Pfn1; 04256 MMPTE PreviousPte; 04257 #if DBG 04258 PMMPTE StartPde, EndPde; 04259 #endif 04260 04261 SessionGlobal = SESSION_GLOBAL(MmSessionSpace); 04262 04263 ExInitializeFastMutex (&SessionGlobal->PagedPoolMutex); 04264 04265 PoolDescriptor = &MmSessionSpace->PagedPool; 04266 04267 ExpInitializePoolDescriptor (PoolDescriptor, 04268 PagedPoolSession, 04269 0, 04270 0, 04271 &SessionGlobal->PagedPoolMutex); 04272 04273 MmSessionSpace->PagedPoolStart = (PVOID)MI_SESSION_POOL; 04274 04275 MmSessionSpace->PagedPoolEnd = (PVOID)((MI_SESSION_POOL + MI_SESSION_POOL_SIZE)-1); 04276 04277 PagedPoolInfo = &MmSessionSpace->PagedPoolInfo; 04278 PagedPoolInfo->PagedPoolCommit = 0; 04279 PagedPoolInfo->PagedPoolHint = 0; 04280 PagedPoolInfo->AllocatedPagedPool = 0; 04281 04282 // 04283 // Build the page table page for paged pool. 04284 // 04285 04286 PointerPde = MiGetPdeAddress (MmSessionSpace->PagedPoolStart); 04287 MmSessionSpace->PagedPoolBasePde = PointerPde; 04288 04289 PointerPte = MiGetPteAddress (MmSessionSpace->PagedPoolStart); 04290 04291 PagedPoolInfo->FirstPteForPagedPool = PointerPte; 04292 PagedPoolInfo->LastPteForPagedPool = MiGetPteAddress (MmSessionSpace->PagedPoolEnd); 04293 04294 #if DBG 04295 // 04296 // Session pool better be unused. 04297 // 04298 04299 StartPde = MiGetPdeAddress (MmSessionSpace->PagedPoolStart); 04300 EndPde = MiGetPdeAddress (MmSessionSpace->PagedPoolEnd); 04301 04302 while (StartPde <= EndPde) { 04303 ASSERT (StartPde->u.Long == 0); 04304 StartPde += 1; 04305 } 04306 #endif 04307 04308 // 04309 // Mark all PDEs as empty. 04310 // 04311 04312 MiFillMemoryPte (PointerPde, 04313 sizeof(MMPTE) * 04314 (1 + MiGetPdeAddress (MmSessionSpace->PagedPoolEnd) - PointerPde), 04315 ZeroKernelPte.u.Long); 04316 04317 if (MiChargeCommitment (1, NULL) == FALSE) { 04318 MM_BUMP_SESSION_FAILURES (MM_SESSION_FAILURE_NO_COMMIT); 04319 return STATUS_NO_MEMORY; 04320 } 04321 04322 MM_TRACK_COMMIT (MM_DBG_COMMIT_SESSION_POOL_PAGE_TABLES, 1); 04323 04324 TempPte = ValidKernelPdeLocal; 04325 04326 LOCK_PFN (OldIrql); 04327 04328 if (MmAvailablePages <= 1) { 04329 UNLOCK_PFN (OldIrql); 04330 MiReturnCommitment (1); 04331 MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_SESSION_POOL_PAGE_TABLES, 1); 04332 MM_BUMP_SESSION_FAILURES (MM_SESSION_FAILURE_NO_RESIDENT); 04333 return STATUS_NO_MEMORY; 04334 } 04335 04336 MmResidentAvailablePages -= 1; 04337 MM_BUMP_COUNTER(42, 1); 04338 MM_BUMP_SESS_COUNTER(MM_DBG_SESSION_PAGEDPOOL_PAGETABLE_ALLOC, 1); 04339 04340 MiEnsureAvailablePageOrWait (NULL, NULL); 04341 04342 // 04343 // Allocate and map in the initial page table page for session pool. 04344 // 04345 04346 PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE (PointerPde)); 04347 TempPte.u.Hard.PageFrameNumber = PageFrameIndex; 04348 MI_WRITE_VALID_PTE (PointerPde, TempPte); 04349 04350 MiInitializePfnForOtherProcess (PageFrameIndex, 04351 PointerPde, 04352 MmSessionSpace->SessionPageDirectoryIndex); 04353 04354 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 04355 04356 // 04357 // This page will be locked into working set and assigned an index when 04358 // the working set is set up on return. 04359 // 04360 04361 ASSERT (Pfn1->u1.WsIndex == 0); 04362 04363 UNLOCK_PFN (OldIrql); 04364 04365 KeFillEntryTb ((PHARDWARE_PTE) PointerPde, PointerPte, FALSE); 04366 04367 #if !defined (_WIN64) 04368 04369 Index = MiGetPdeSessionIndex (MmSessionSpace->PagedPoolStart); 04370 04371 ASSERT (MmSessionSpace->PageTables[Index].u.Long == 0); 04372 MmSessionSpace->PageTables[Index] = TempPte; 04373 04374 #endif 04375 04376 MmSessionSpace->NonPagablePages += 1; 04377 MmSessionSpace->CommittedPages += 1; 04378 04379 MiFillMemoryPte (PointerPte, PAGE_SIZE, MM_KERNEL_NOACCESS_PTE); 04380 04381 PagedPoolInfo->NextPdeForPagedPoolExpansion = PointerPde + 1; 04382 04383 // 04384 // Initialize the bitmaps. 04385 // 04386 04387 MiCreateBitMap (&PagedPoolInfo->PagedPoolAllocationMap, 04388 MI_SESSION_POOL_SIZE >> PAGE_SHIFT, 04389 NonPagedPool); 04390 04391 if (PagedPoolInfo->PagedPoolAllocationMap == NULL) { 04392 MM_BUMP_SESSION_FAILURES (MM_SESSION_FAILURE_NO_NONPAGED_POOL); 04393 goto Failure; 04394 } 04395 04396 // 04397 // We start with all pages in the virtual address space as "busy", and 04398 // clear bits to make pages available as we dynamically expand the pool. 04399 // 04400 04401 RtlSetAllBits( PagedPoolInfo->PagedPoolAllocationMap ); 04402 04403 // 04404 // Indicate first page worth of PTEs are available. 04405 // 04406 04407 RtlClearBits (PagedPoolInfo->PagedPoolAllocationMap, 0, PTE_PER_PAGE); 04408 04409 // 04410 // Create the end of allocation range bitmap. 04411 // 04412 04413 MiCreateBitMap (&PagedPoolInfo->EndOfPagedPoolBitmap, 04414 MI_SESSION_POOL_SIZE >> PAGE_SHIFT, 04415 NonPagedPool); 04416 04417 if (PagedPoolInfo->EndOfPagedPoolBitmap == NULL) { 04418 MM_BUMP_SESSION_FAILURES (MM_SESSION_FAILURE_NO_NONPAGED_POOL); 04419 goto Failure; 04420 } 04421 04422 RtlClearAllBits (PagedPoolInfo->EndOfPagedPoolBitmap); 04423 04424 // 04425 // Create the large session allocation bitmap. 04426 // 04427 04428 MiCreateBitMap (&PagedPoolInfo->PagedPoolLargeSessionAllocationMap, 04429 MI_SESSION_POOL_SIZE >> PAGE_SHIFT, 04430 NonPagedPool); 04431 04432 if (PagedPoolInfo->PagedPoolLargeSessionAllocationMap == NULL) { 04433 MM_BUMP_SESSION_FAILURES (MM_SESSION_FAILURE_NO_NONPAGED_POOL); 04434 goto Failure; 04435 } 04436 04437 RtlClearAllBits (PagedPoolInfo->PagedPoolLargeSessionAllocationMap); 04438 04439 return STATUS_SUCCESS; 04440 04441 Failure: 04442 04443 MiFreeSessionPoolBitMaps (); 04444 04445 LOCK_PFN (OldIrql); 04446 04447 ASSERT (MmSessionSpace->SessionPageDirectoryIndex == Pfn1->PteFrame); 04448 ASSERT (Pfn1->u2.ShareCount == 1); 04449 MiDecrementShareAndValidCount (Pfn1->PteFrame); 04450 MI_SET_PFN_DELETED (Pfn1); 04451 MiDecrementShareCountOnly (PageFrameIndex); 04452 04453 MI_FLUSH_SINGLE_SESSION_TB (MiGetPteAddress(PointerPde), 04454 TRUE, 04455 TRUE, 04456 (PHARDWARE_PTE)PointerPde, 04457 ZeroKernelPte.u.Flush, 04458 PreviousPte); 04459 04460 MmSessionSpace->NonPagablePages -= 1; 04461 MmSessionSpace->CommittedPages -= 1; 04462 04463 MmResidentAvailablePages += 1; 04464 MM_BUMP_COUNTER(51, 1); 04465 MM_BUMP_SESS_COUNTER(MM_DBG_SESSION_PAGEDPOOL_PAGETABLE_FREE_FAIL1, 1); 04466 04467 UNLOCK_PFN (OldIrql); 04468 04469 MiReturnCommitment (1); 04470 04471 MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_PAGED_POOL_PAGES, 1); 04472 04473 return STATUS_NO_MEMORY; 04474 } 04475 04476 04477 VOID 04478 MiFreeSessionPoolBitMaps( 04479 VOID 04480 ) 04481 04482 /*++ 04483 04484 Routine Description: 04485 04486 Free the current session's pool bitmap structures. 04487 04488 Arguments: 04489 04490 None. 04491 04492 Return Value: 04493 04494 None. 04495 04496 Environment: 04497 04498 Kernel mode. 04499 04500 --*/ 04501 04502 { 04503 PAGED_CODE(); 04504 04505 if (MmSessionSpace->PagedPoolInfo.PagedPoolAllocationMap ) { 04506 ExFreePool (MmSessionSpace->PagedPoolInfo.PagedPoolAllocationMap); 04507 MmSessionSpace->PagedPoolInfo.PagedPoolAllocationMap = NULL; 04508 } 04509 04510 if (MmSessionSpace->PagedPoolInfo.EndOfPagedPoolBitmap ) { 04511 ExFreePool (MmSessionSpace->PagedPoolInfo.EndOfPagedPoolBitmap); 04512 MmSessionSpace->PagedPoolInfo.EndOfPagedPoolBitmap = NULL; 04513 } 04514 04515 if (MmSessionSpace->PagedPoolInfo.PagedPoolLargeSessionAllocationMap) { 04516 ExFreePool (MmSessionSpace->PagedPoolInfo.PagedPoolLargeSessionAllocationMap); 04517 MmSessionSpace->PagedPoolInfo.PagedPoolLargeSessionAllocationMap = NULL; 04518 } 04519 } 04520 04521 #if DBG 04522 04523 #define MI_LOG_CONTIGUOUS 100 04524 04525 typedef struct _MI_CONTIGUOUS_ALLOCATORS { 04526 PVOID BaseAddress; 04527 SIZE_T NumberOfBytes; 04528 PVOID CallingAddress; 04529 } MI_CONTIGUOUS_ALLOCATORS, *PMI_CONTIGUOUS_ALLOCATORS; 04530 04531 ULONG MiContiguousIndex; 04532 MI_CONTIGUOUS_ALLOCATORS MiContiguousAllocators[MI_LOG_CONTIGUOUS]; 04533 04534 VOID 04535 MiInsertContiguousTag ( 04536 IN PVOID BaseAddress, 04537 IN SIZE_T NumberOfBytes, 04538 IN PVOID CallingAddress 04539 ) 04540 { 04541 KIRQL OldIrql; 04542 04543 #if !DBG 04544 if ((NtGlobalFlag & FLG_POOL_ENABLE_TAGGING) == 0) { 04545 return; 04546 } 04547 #endif 04548 04549 OldIrql = ExLockPool (NonPagedPool); 04550 04551 if (MiContiguousIndex >= MI_LOG_CONTIGUOUS) { 04552 MiContiguousIndex = 0; 04553 } 04554 04555 MiContiguousAllocators[MiContiguousIndex].BaseAddress = BaseAddress; 04556 MiContiguousAllocators[MiContiguousIndex].NumberOfBytes = NumberOfBytes; 04557 MiContiguousAllocators[MiContiguousIndex].CallingAddress = CallingAddress; 04558 04559 MiContiguousIndex += 1; 04560 04561 ExUnlockPool (NonPagedPool, OldIrql); 04562 } 04563 #else 04564 #define MiInsertContiguousTag(a, b, c) 04565 #endif 04566 04567 04568 PVOID 04569 MiFindContiguousMemory ( 04570 IN PFN_NUMBER LowestPfn, 04571 IN PFN_NUMBER HighestPfn, 04572 IN PFN_NUMBER BoundaryPfn, 04573 IN PFN_NUMBER SizeInPages, 04574 IN PVOID CallingAddress 04575 ) 04576 04577 /*++ 04578 04579 Routine Description: 04580 04581 This function searches nonpaged pool and the free, zeroed, 04582 and standby lists for contiguous pages that satisfy the 04583 request. 04584 04585 Arguments: 04586 04587 LowestPfn - Supplies the lowest acceptable physical page number. 04588 04589 HighestPfn - Supplies the highest acceptable physical page number. 04590 04591 BoundaryPfn - Supplies the page frame number multiple the allocation must 04592 not cross. 0 indicates it can cross any boundary. 04593 04594 SizeInPages - Supplies the number of pages to allocate. 04595 04596 CallingAddress - Supplies the calling address of the allocator. 04597 04598 Return Value: 04599 04600 NULL - a contiguous range could not be found to satisfy the request. 04601 04602 NON-NULL - Returns a pointer (virtual address in the nonpaged portion 04603 of the system) to the allocated physically contiguous 04604 memory. 04605 04606 Environment: 04607 04608 Kernel mode, IRQL of APC_LEVEL or below. 04609 04610 --*/ 04611 { 04612 PMMPTE PointerPte; 04613 PMMPFN Pfn1; 04614 PVOID BaseAddress; 04615 PVOID BaseAddress2; 04616 KIRQL OldIrql; 04617 KIRQL OldIrql2; 04618 PMMFREE_POOL_ENTRY FreePageInfo; 04619 PLIST_ENTRY Entry; 04620 ULONG start; 04621 ULONG Index; 04622 PFN_NUMBER count; 04623 PFN_NUMBER Page; 04624 PFN_NUMBER LastPage; 04625 PFN_NUMBER found; 04626 PFN_NUMBER BoundaryMask; 04627 MMPTE TempPte; 04628 ULONG PageColor; 04629 ULONG AllocationPosition; 04630 PVOID Va; 04631 LOGICAL AddressIsPhysical; 04632 PFN_NUMBER SpanInPages; 04633 PFN_NUMBER SpanInPages2; 04634 04635 PAGED_CODE (); 04636 04637 BaseAddress = NULL; 04638 04639 BoundaryMask = ~(BoundaryPfn - 1); 04640 04641 // 04642 // A suitable pool page was not allocated via the pool allocator. 04643 // Grab the pool lock and manually search for a page which meets 04644 // the requirements. 04645 // 04646 04647 MmLockPagableSectionByHandle (ExPageLockHandle); 04648 04649 ExAcquireFastMutex (&MmDynamicMemoryMutex); 04650 04651 OldIrql = ExLockPool (NonPagedPool); 04652 04653 // 04654 // Trace through the page allocator's pool headers for a page which 04655 // meets the requirements. 04656 // 04657 04658 // 04659 // NonPaged pool is linked together through the pages themselves. 04660 // 04661 04662 Index = (ULONG)(SizeInPages - 1); 04663 04664 if (Index >= MI_MAX_FREE_LIST_HEADS) { 04665 Index = MI_MAX_FREE_LIST_HEADS - 1; 04666 } 04667 04668 while (Index < MI_MAX_FREE_LIST_HEADS) { 04669 04670 Entry = MmNonPagedPoolFreeListHead[Index].Flink; 04671 04672 while (Entry != &MmNonPagedPoolFreeListHead[Index]) { 04673 04674 if (MmProtectFreedNonPagedPool == TRUE) { 04675 MiUnProtectFreeNonPagedPool ((PVOID)Entry, 0); 04676 } 04677 04678 // 04679 // The list is not empty, see if this one meets the physical 04680 // requirements. 04681 // 04682 04683 FreePageInfo = CONTAINING_RECORD(Entry, 04684 MMFREE_POOL_ENTRY, 04685 List); 04686 04687 ASSERT (FreePageInfo->Signature == MM_FREE_POOL_SIGNATURE); 04688 if (FreePageInfo->Size >= SizeInPages) { 04689 04690 // 04691 // This entry has sufficient space, check to see if the 04692 // pages meet the physical requirements. 04693 // 04694 04695 Va = MiCheckForContiguousMemory (PAGE_ALIGN(Entry), 04696 FreePageInfo->Size, 04697 SizeInPages, 04698 LowestPfn, 04699 HighestPfn, 04700 BoundaryPfn); 04701 04702 if (Va != NULL) { 04703 04704 // 04705 // These pages meet the requirements. The returned 04706 // address may butt up on the end, the front or be 04707 // somewhere in the middle. Split the Entry based 04708 // on which case it is. 04709 // 04710 04711 Entry = PAGE_ALIGN(Entry); 04712 if (MmProtectFreedNonPagedPool == FALSE) { 04713 RemoveEntryList (&FreePageInfo->List); 04714 } 04715 else { 04716 MiProtectedPoolRemoveEntryList (&FreePageInfo->List); 04717 } 04718 04719 // 04720 // Adjust the number of free pages remaining in the pool. 04721 // The TotalBigPages calculation appears incorrect for the 04722 // case where we're splitting a block, but it's done this 04723 // way because ExFreePool corrects it when we free the 04724 // fragment block below. Likewise for 04725 // MmAllocatedNonPagedPool and MmNumberOfFreeNonPagedPool 04726 // which is corrected by MiFreePoolPages for the fragment. 04727 // 04728 04729 NonPagedPoolDescriptor.TotalBigPages += (ULONG)FreePageInfo->Size; 04730 MmAllocatedNonPagedPool += FreePageInfo->Size; 04731 MmNumberOfFreeNonPagedPool -= FreePageInfo->Size; 04732 04733 ASSERT ((LONG)MmNumberOfFreeNonPagedPool >= 0); 04734 04735 if (Va == Entry) { 04736 04737 // 04738 // Butted against the front. 04739 // 04740 04741 AllocationPosition = 0; 04742 } 04743 else if (((PCHAR)Va + (SizeInPages << PAGE_SHIFT)) == ((PCHAR)Entry + (FreePageInfo->Size << PAGE_SHIFT))) { 04744 04745 // 04746 // Butted against the end. 04747 // 04748 04749 AllocationPosition = 2; 04750 } 04751 else { 04752 04753 // 04754 // Somewhere in the middle. 04755 // 04756 04757 AllocationPosition = 1; 04758 } 04759 04760 // 04761 // Pages are being removed from the front of 04762 // the list entry and the whole list entry 04763 // will be removed and then the remainder inserted. 04764 // 04765 04766 // 04767 // Mark start and end for the block at the top of the 04768 // list. 04769 // 04770 04771 if (MI_IS_PHYSICAL_ADDRESS(Va)) { 04772 04773 // 04774 // On certain architectures, virtual addresses 04775 // may be physical and hence have no corresponding PTE. 04776 // 04777 04778 AddressIsPhysical = TRUE; 04779 Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (Va)); 04780 } else { 04781 AddressIsPhysical = FALSE; 04782 PointerPte = MiGetPteAddress(Va); 04783 ASSERT (PointerPte->u.Hard.Valid == 1); 04784 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 04785 } 04786 04787 ASSERT (Pfn1->u3.e1.VerifierAllocation == 0); 04788 ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0); 04789 ASSERT (Pfn1->u3.e1.StartOfAllocation == 0); 04790 Pfn1->u3.e1.StartOfAllocation = 1; 04791 04792 // 04793 // Calculate the ending PFN address, note that since 04794 // these pages are contiguous, just add to the PFN. 04795 // 04796 04797 Pfn1 += SizeInPages - 1; 04798 ASSERT (Pfn1->u3.e1.VerifierAllocation == 0); 04799 ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0); 04800 ASSERT (Pfn1->u3.e1.EndOfAllocation == 0); 04801 Pfn1->u3.e1.EndOfAllocation = 1; 04802 04803 if (SizeInPages == FreePageInfo->Size) { 04804 04805 // 04806 // Unlock the pool and return. 04807 // 04808 BaseAddress = (PVOID)Va; 04809 goto Done; 04810 } 04811 04812 BaseAddress = NULL; 04813 04814 if (AllocationPosition != 2) { 04815 04816 // 04817 // The end piece needs to be freed as the removal 04818 // came from the front or the middle. 04819 // 04820 04821 BaseAddress = (PVOID)((PCHAR)Va + (SizeInPages << PAGE_SHIFT)); 04822 SpanInPages = FreePageInfo->Size - SizeInPages - 04823 (((ULONG_PTR)Va - (ULONG_PTR)Entry) >> PAGE_SHIFT); 04824 04825 // 04826 // Mark start and end of the allocation in the PFN database. 04827 // 04828 04829 if (AddressIsPhysical == TRUE) { 04830 04831 // 04832 // On certain architectures, virtual addresses 04833 // may be physical and hence have no corresponding PTE. 04834 // 04835 04836 Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (BaseAddress)); 04837 } else { 04838 PointerPte = MiGetPteAddress(BaseAddress); 04839 ASSERT (PointerPte->u.Hard.Valid == 1); 04840 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 04841 } 04842 04843 ASSERT (Pfn1->u3.e1.VerifierAllocation == 0); 04844 ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0); 04845 ASSERT (Pfn1->u3.e1.StartOfAllocation == 0); 04846 Pfn1->u3.e1.StartOfAllocation = 1; 04847 04848 // 04849 // Calculate the ending PTE's address, can't depend on 04850 // these pages being physically contiguous. 04851 // 04852 04853 if (AddressIsPhysical == TRUE) { 04854 Pfn1 += (SpanInPages - 1); 04855 } else { 04856 PointerPte += (SpanInPages - 1); 04857 ASSERT (PointerPte->u.Hard.Valid == 1); 04858 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 04859 } 04860 ASSERT (Pfn1->u3.e1.EndOfAllocation == 0); 04861 Pfn1->u3.e1.EndOfAllocation = 1; 04862 04863 ASSERT (((ULONG_PTR)BaseAddress & (PAGE_SIZE -1)) == 0); 04864 04865 SpanInPages2 = SpanInPages; 04866 } 04867 04868 BaseAddress2 = BaseAddress; 04869 BaseAddress = NULL; 04870 04871 if (AllocationPosition != 0) { 04872 04873 // 04874 // The front piece needs to be freed as the removal 04875 // came from the middle or the end. 04876 // 04877 04878 BaseAddress = (PVOID)Entry; 04879 04880 SpanInPages = ((ULONG_PTR)Va - (ULONG_PTR)Entry) >> PAGE_SHIFT; 04881 04882 // 04883 // Mark start and end of the allocation in the PFN database. 04884 // 04885 04886 if (AddressIsPhysical == TRUE) { 04887 04888 // 04889 // On certain architectures, virtual addresses 04890 // may be physical and hence have no corresponding PTE. 04891 // 04892 04893 Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (BaseAddress)); 04894 } else { 04895 PointerPte = MiGetPteAddress(BaseAddress); 04896 ASSERT (PointerPte->u.Hard.Valid == 1); 04897 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 04898 } 04899 04900 ASSERT (Pfn1->u3.e1.VerifierAllocation == 0); 04901 ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0); 04902 ASSERT (Pfn1->u3.e1.StartOfAllocation == 0); 04903 Pfn1->u3.e1.StartOfAllocation = 1; 04904 04905 // 04906 // Calculate the ending PTE's address, can't depend on 04907 // these pages being physically contiguous. 04908 // 04909 04910 if (AddressIsPhysical == TRUE) { 04911 Pfn1 += (SpanInPages - 1); 04912 } else { 04913 PointerPte += (SpanInPages - 1); 04914 ASSERT (PointerPte->u.Hard.Valid == 1); 04915 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 04916 } 04917 ASSERT (Pfn1->u3.e1.EndOfAllocation == 0); 04918 Pfn1->u3.e1.EndOfAllocation = 1; 04919 04920 ASSERT (((ULONG_PTR)BaseAddress & (PAGE_SIZE -1)) == 0); 04921 } 04922 04923 // 04924 // Unlock the pool. 04925 // 04926 04927 ExUnlockPool (NonPagedPool, OldIrql); 04928 04929 ExReleaseFastMutex (&MmDynamicMemoryMutex); 04930 04931 // 04932 // Free the split entry at BaseAddress back into the pool. 04933 // Note that we have overcharged the pool - the entire free 04934 // chunk has been billed. Here we return the piece we 04935 // didn't use and correct the momentary overbilling. 04936 // 04937 // The start and end allocation bits of this split entry 04938 // which we just set up enable ExFreePool and his callees 04939 // to correctly adjust the billing. 04940 // 04941 04942 if (BaseAddress) { 04943 ExInsertPoolTag ('tnoC', 04944 BaseAddress, 04945 SpanInPages << PAGE_SHIFT, 04946 NonPagedPool); 04947 ExFreePool (BaseAddress); 04948 } 04949 if (BaseAddress2) { 04950 ExInsertPoolTag ('tnoC', 04951 BaseAddress2, 04952 SpanInPages2 << PAGE_SHIFT, 04953 NonPagedPool); 04954 ExFreePool (BaseAddress2); 04955 } 04956 BaseAddress = Va; 04957 goto Done1; 04958 } 04959 } 04960 Entry = FreePageInfo->List.Flink; 04961 if (MmProtectFreedNonPagedPool == TRUE) { 04962 MiProtectFreeNonPagedPool ((PVOID)FreePageInfo, 04963 (ULONG)FreePageInfo->Size); 04964 } 04965 } 04966 Index += 1; 04967 } 04968 04969 // 04970 // No entry was found in free nonpaged pool that meets the requirements. 04971 // Search the PFN database for pages that meet the requirements. 04972 // 04973 04974 start = 0; 04975 do { 04976 04977 count = MmPhysicalMemoryBlock->Run[start].PageCount; 04978 Page = MmPhysicalMemoryBlock->Run[start].BasePage; 04979 04980 // 04981 // Close the gaps, then examine the range for a fit. 04982 // 04983 04984 LastPage = Page + count; 04985 04986 if (LastPage - 1 > HighestPfn) { 04987 LastPage = HighestPfn + 1; 04988 } 04989 04990 if (Page < LowestPfn) { 04991 Page = LowestPfn; 04992 } 04993 04994 if ((count != 0) && (Page + SizeInPages <= LastPage)) { 04995 04996 // 04997 // A fit may be possible in this run, check whether the pages 04998 // are on the right list. 04999 // 05000 05001 found = 0; 05002 05003 Pfn1 = MI_PFN_ELEMENT (Page); 05004 LOCK_PFN2 (OldIrql2); 05005 do { 05006 05007 if ((Pfn1->u3.e1.PageLocation == ZeroedPageList) || 05008 (Pfn1->u3.e1.PageLocation == FreePageList) || 05009 (Pfn1->u3.e1.PageLocation == StandbyPageList)) { 05010 05011 if ((Pfn1->u1.Flink != 0) && 05012 (Pfn1->u2.Blink != 0) && 05013 (Pfn1->u3.e2.ReferenceCount == 0)) { 05014 05015 // 05016 // Before starting a new run, ensure that it 05017 // can satisfy the boundary requirements (if any). 05018 // 05019 05020 if ((found == 0) && (BoundaryPfn != 0)) { 05021 if (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask) != 0) { 05022 // 05023 // This run's physical address does not meet the 05024 // requirements. 05025 // 05026 05027 goto NextPage; 05028 } 05029 } 05030 05031 found += 1; 05032 if (found == SizeInPages) { 05033 05034 // 05035 // A match has been found, remove these 05036 // pages, add them to the free pool and 05037 // return. 05038 // 05039 05040 Page = 1 + Page - found; 05041 05042 // 05043 // Try to find system PTES to expand the pool into. 05044 // 05045 05046 PointerPte = MiReserveSystemPtes ((ULONG)SizeInPages, 05047 NonPagedPoolExpansion, 05048 0, 05049 0, 05050 FALSE); 05051 05052 if (PointerPte == NULL) { 05053 UNLOCK_PFN2 (OldIrql2); 05054 goto Done; 05055 } 05056 05057 MmResidentAvailablePages -= SizeInPages; 05058 MM_BUMP_COUNTER(3, SizeInPages); 05059 MiChargeCommitmentCantExpand (SizeInPages, TRUE); 05060 MM_TRACK_COMMIT (MM_DBG_COMMIT_CONTIGUOUS_PAGES, SizeInPages); 05061 BaseAddress = MiGetVirtualAddressMappedByPte (PointerPte); 05062 PageColor = MI_GET_PAGE_COLOR_FROM_VA(BaseAddress); 05063 TempPte = ValidKernelPte; 05064 MmAllocatedNonPagedPool += SizeInPages; 05065 NonPagedPoolDescriptor.TotalBigPages += (ULONG)SizeInPages; 05066 Pfn1 = MI_PFN_ELEMENT (Page - 1); 05067 05068 do { 05069 Pfn1 += 1; 05070 if (Pfn1->u3.e1.PageLocation == StandbyPageList) { 05071 MiUnlinkPageFromList (Pfn1); 05072 MiRestoreTransitionPte (Page); 05073 } else { 05074 MiUnlinkFreeOrZeroedPage (Page); 05075 } 05076 05077 MI_CHECK_PAGE_ALIGNMENT(Page, 05078 PageColor & MM_COLOR_MASK); 05079 Pfn1->u3.e1.PageColor = PageColor & MM_COLOR_MASK; 05080 PageColor += 1; 05081 TempPte.u.Hard.PageFrameNumber = Page; 05082 MI_WRITE_VALID_PTE (PointerPte, TempPte); 05083 05084 Pfn1->u3.e2.ReferenceCount = 1; 05085 Pfn1->u2.ShareCount = 1; 05086 Pfn1->PteAddress = PointerPte; 05087 Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE; 05088 Pfn1->PteFrame = MI_GET_PAGE_FRAME_FROM_PTE (MiGetPteAddress(PointerPte)); 05089 Pfn1->u3.e1.PageLocation = ActiveAndValid; 05090 Pfn1->u3.e1.VerifierAllocation = 0; 05091 Pfn1->u3.e1.LargeSessionAllocation = 0; 05092 05093 if (found == SizeInPages) { 05094 Pfn1->u3.e1.StartOfAllocation = 1; 05095 } 05096 PointerPte += 1; 05097 Page += 1; 05098 found -= 1; 05099 } while (found); 05100 05101 Pfn1->u3.e1.EndOfAllocation = 1; 05102 UNLOCK_PFN2 (OldIrql2); 05103 goto Done; 05104 } 05105 } else { 05106 found = 0; 05107 } 05108 } else { 05109 found = 0; 05110 } 05111 NextPage: 05112 Page += 1; 05113 Pfn1 += 1; 05114 } while (Page < LastPage); 05115 UNLOCK_PFN2 (OldIrql2); 05116 } 05117 start += 1; 05118 } while (start != MmPhysicalMemoryBlock->NumberOfRuns); 05119 05120 Done: 05121 05122 ExUnlockPool (NonPagedPool, OldIrql); 05123 05124 ExReleaseFastMutex (&MmDynamicMemoryMutex); 05125 05126 Done1: 05127 05128 MmUnlockPagableImageSection (ExPageLockHandle); 05129 05130 if (BaseAddress) { 05131 05132 MiInsertContiguousTag (BaseAddress, 05133 SizeInPages << PAGE_SHIFT, 05134 CallingAddress); 05135 05136 ExInsertPoolTag ('tnoC', 05137 BaseAddress, 05138 SizeInPages << PAGE_SHIFT, 05139 NonPagedPool); 05140 } 05141 05142 return BaseAddress; 05143 } 05144 05145 LOGICAL 05146 MmIsHydraAddress ( 05147 IN PVOID VirtualAddress 05148 ) 05149 05150 /*++ 05151 05152 Routine Description: 05153 05154 This function returns TRUE if a Hydra address is specified. 05155 FALSE is returned if not. 05156 05157 Arguments: 05158 05159 VirtualAddress - Supplies the address in question. 05160 05161 Return Value: 05162 05163 See above. 05164 05165 Environment: 05166 05167 Kernel mode. Note this routine is present and nonpaged for both Hydra 05168 and non-Hydra systems. 05169 05170 --*/ 05171 05172 { 05173 return MI_IS_SESSION_ADDRESS (VirtualAddress); 05174 } 05175 05176 LOGICAL 05177 MmIsSpecialPoolAddressFree ( 05178 IN PVOID VirtualAddress 05179 ) 05180 05181 /*++ 05182 05183 Routine Description: 05184 05185 This function returns TRUE if a special pool address has been freed. 05186 FALSE is returned if it is inuse (ie: the caller overran). 05187 05188 Arguments: 05189 05190 VirtualAddress - Supplies the special pool address in question. 05191 05192 Return Value: 05193 05194 See above. 05195 05196 Environment: 05197 05198 Kernel mode. 05199 05200 --*/ 05201 05202 { 05203 PMMPTE PointerPte; 05204 05205 // 05206 // Caller must check that the address in in special pool. 05207 // 05208 05209 ASSERT (VirtualAddress >= MmSpecialPoolStart && VirtualAddress < MmSpecialPoolEnd); 05210 05211 PointerPte = MiGetPteAddress(VirtualAddress); 05212 05213 // 05214 // Take advantage of the fact that adjacent PTEs have the paged/nonpaged 05215 // bits set when in use and these bits are cleared on free. Note also 05216 // that freed pages get their PTEs chained together through PageFileHigh. 05217 // 05218 05219 if ((PointerPte->u.Soft.PageFileHigh == MI_SPECIAL_POOL_PTE_PAGABLE) || 05220 (PointerPte->u.Soft.PageFileHigh == MI_SPECIAL_POOL_PTE_NONPAGABLE)) { 05221 return FALSE; 05222 } 05223 05224 return TRUE; 05225 } 05226 05227 LOGICAL 05228 MmProtectSpecialPool ( 05229 IN PVOID VirtualAddress, 05230 IN ULONG NewProtect 05231 ) 05232 05233 /*++ 05234 05235 Routine Description: 05236 05237 This function protects a special pool allocation. 05238 05239 Arguments: 05240 05241 VirtualAddress - Supplies the special pool address to protect. 05242 05243 NewProtect - Supplies the protection to set the pages to (PAGE_XX). 05244 05245 Return Value: 05246 05247 TRUE if the protection was successfully applied, FALSE if not. 05248 05249 Environment: 05250 05251 Kernel mode, IRQL at APC_LEVEL or below for pagable pool, DISPATCH or 05252 below for nonpagable pool. 05253 05254 Note that setting an allocation to NO_ACCESS implies that an accessible 05255 protection must be applied by the caller prior to this allocation being 05256 freed. 05257 05258 Note this is a nonpagable wrapper so that machines without special pool 05259 can still support code attempting to protect special pool at 05260 DISPATCH_LEVEL. 05261 05262 --*/ 05263 05264 { 05265 if (MiSpecialPoolPtes == 0) { 05266 05267 // 05268 // The special pool allocation code was never initialized. 05269 // 05270 05271 return (ULONG)-1; 05272 } 05273 05274 return MiProtectSpecialPool (VirtualAddress, NewProtect); 05275 } 05276 05277 LOGICAL 05278 MiProtectSpecialPool ( 05279 IN PVOID VirtualAddress, 05280 IN ULONG NewProtect 05281 ) 05282 05283 /*++ 05284 05285 Routine Description: 05286 05287 This function protects a special pool allocation. 05288 05289 Arguments: 05290 05291 VirtualAddress - Supplies the special pool address to protect. 05292 05293 NewProtect - Supplies the protection to set the pages to (PAGE_XX). 05294 05295 Return Value: 05296 05297 TRUE if the protection was successfully applied, FALSE if not. 05298 05299 Environment: 05300 05301 Kernel mode, IRQL at APC_LEVEL or below for pagable pool, DISPATCH or 05302 below for nonpagable pool. 05303 05304 Note that setting an allocation to NO_ACCESS implies that an accessible 05305 protection must be applied by the caller prior to this allocation being 05306 freed. 05307 05308 --*/ 05309 05310 { 05311 KIRQL OldIrql; 05312 KIRQL OldIrql2; 05313 MMPTE PteContents; 05314 MMPTE NewPteContents; 05315 MMPTE PreviousPte; 05316 PMMPTE PointerPte; 05317 PMMPFN Pfn1; 05318 ULONG ProtectionMask; 05319 WSLE_NUMBER WsIndex; 05320 LOGICAL SystemWsLocked; 05321 05322 if ((VirtualAddress < MmSpecialPoolStart) || (VirtualAddress >= MmSpecialPoolEnd)) { 05323 return (ULONG)-1; 05324 } 05325 05326 try { 05327 ProtectionMask = MiMakeProtectionMask (NewProtect); 05328 } except (EXCEPTION_EXECUTE_HANDLER) { 05329 return (ULONG)-1; 05330 } 05331 05332 SystemWsLocked = FALSE; 05333 05334 PointerPte = MiGetPteAddress (VirtualAddress); 05335 05336 if ((PointerPte + 1)->u.Soft.PageFileHigh == MI_SPECIAL_POOL_PTE_PAGABLE) { 05337 LOCK_SYSTEM_WS (OldIrql); 05338 SystemWsLocked = TRUE; 05339 } 05340 05341 PteContents = *PointerPte; 05342 05343 if (ProtectionMask == MM_NOACCESS) { 05344 05345 if ((PointerPte + 1)->u.Soft.PageFileHigh == MI_SPECIAL_POOL_PTE_PAGABLE) { 05346 retry1: 05347 ASSERT (SystemWsLocked == TRUE); 05348 if (PteContents.u.Hard.Valid == 1) { 05349 05350 Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber); 05351 WsIndex = Pfn1->u1.WsIndex; 05352 ASSERT (WsIndex != 0); 05353 Pfn1->OriginalPte.u.Soft.Protection = ProtectionMask; 05354 MiRemovePageFromWorkingSet (PointerPte, 05355 Pfn1, 05356 &MmSystemCacheWs); 05357 } 05358 else if (PteContents.u.Soft.Transition == 1) { 05359 05360 LOCK_PFN2 (OldIrql2); 05361 05362 PteContents = *(volatile MMPTE *)PointerPte; 05363 05364 if (PteContents.u.Soft.Transition == 0) { 05365 UNLOCK_PFN2 (OldIrql2); 05366 goto retry1; 05367 } 05368 05369 Pfn1 = MI_PFN_ELEMENT (PteContents.u.Trans.PageFrameNumber); 05370 Pfn1->OriginalPte.u.Soft.Protection = ProtectionMask; 05371 PointerPte->u.Soft.Protection = ProtectionMask; 05372 UNLOCK_PFN2(OldIrql2); 05373 } 05374 else { 05375 05376 // 05377 // Must be page file space or demand zero. 05378 // 05379 05380 PointerPte->u.Soft.Protection = ProtectionMask; 05381 } 05382 ASSERT (SystemWsLocked == TRUE); 05383 UNLOCK_SYSTEM_WS (OldIrql); 05384 } 05385 else { 05386 05387 ASSERT (SystemWsLocked == FALSE); 05388 05389 // 05390 // Make it no access regardless of its previous protection state. 05391 // Note that the page frame number is preserved. 05392 // 05393 05394 PteContents.u.Hard.Valid = 0; 05395 PteContents.u.Soft.Prototype = 0; 05396 PteContents.u.Soft.Protection = MM_NOACCESS; 05397 05398 Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber); 05399 05400 LOCK_PFN2 (OldIrql2); 05401 05402 Pfn1->OriginalPte.u.Soft.Protection = ProtectionMask; 05403 05404 PreviousPte.u.Flush = KeFlushSingleTb (VirtualAddress, 05405 TRUE, 05406 TRUE, 05407 (PHARDWARE_PTE)PointerPte, 05408 PteContents.u.Flush); 05409 05410 MI_CAPTURE_DIRTY_BIT_TO_PFN (&PreviousPte, Pfn1); 05411 05412 UNLOCK_PFN2(OldIrql2); 05413 } 05414 05415 return TRUE; 05416 } 05417 05418 // 05419 // No guard pages, noncached pages or copy-on-write for special pool. 05420 // 05421 05422 if ((ProtectionMask >= MM_NOCACHE) || (ProtectionMask == MM_WRITECOPY) || (ProtectionMask == MM_EXECUTE_WRITECOPY)) { 05423 if (SystemWsLocked == TRUE) { 05424 UNLOCK_SYSTEM_WS (OldIrql); 05425 } 05426 return FALSE; 05427 } 05428 05429 // 05430 // Set accessible permissions - the page may already be protected or not. 05431 // 05432 05433 if ((PointerPte + 1)->u.Soft.PageFileHigh == MI_SPECIAL_POOL_PTE_NONPAGABLE) { 05434 05435 Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber); 05436 Pfn1->OriginalPte.u.Soft.Protection = ProtectionMask; 05437 05438 MI_MAKE_VALID_PTE (NewPteContents, 05439 PteContents.u.Hard.PageFrameNumber, 05440 ProtectionMask, 05441 PointerPte); 05442 05443 MI_WRITE_VALID_PTE_NEW_PROTECTION (PointerPte, NewPteContents); 05444 05445 ASSERT (SystemWsLocked == FALSE); 05446 return TRUE; 05447 } 05448 05449 retry2: 05450 05451 ASSERT (SystemWsLocked == TRUE); 05452 05453 if (PteContents.u.Hard.Valid == 1) { 05454 05455 Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber); 05456 ASSERT (Pfn1->u1.WsIndex != 0); 05457 05458 LOCK_PFN2 (OldIrql2); 05459 05460 Pfn1->OriginalPte.u.Soft.Protection = ProtectionMask; 05461 05462 MI_MAKE_VALID_PTE (PteContents, 05463 PteContents.u.Hard.PageFrameNumber, 05464 ProtectionMask, 05465 PointerPte); 05466 05467 PreviousPte.u.Flush = KeFlushSingleTb (VirtualAddress, 05468 TRUE, 05469 TRUE, 05470 (PHARDWARE_PTE)PointerPte, 05471 PteContents.u.Flush); 05472 05473 MI_CAPTURE_DIRTY_BIT_TO_PFN (&PreviousPte, Pfn1); 05474 05475 UNLOCK_PFN2 (OldIrql2); 05476 } 05477 else if (PteContents.u.Soft.Transition == 1) { 05478 05479 LOCK_PFN2 (OldIrql2); 05480 05481 PteContents = *(volatile MMPTE *)PointerPte; 05482 05483 if (PteContents.u.Soft.Transition == 0) { 05484 UNLOCK_PFN2 (OldIrql2); 05485 goto retry2; 05486 } 05487 05488 Pfn1 = MI_PFN_ELEMENT (PteContents.u.Trans.PageFrameNumber); 05489 Pfn1->OriginalPte.u.Soft.Protection = ProtectionMask; 05490 PointerPte->u.Soft.Protection = ProtectionMask; 05491 UNLOCK_PFN2(OldIrql2); 05492 } 05493 else { 05494 05495 // 05496 // Must be page file space or demand zero. 05497 // 05498 05499 PointerPte->u.Soft.Protection = ProtectionMask; 05500 } 05501 05502 UNLOCK_SYSTEM_WS (OldIrql); 05503 return TRUE; 05504 }

Generated on Sat May 15 19:39:14 2004 for test by doxygen 1.3.7