
init386.c File Reference

#include "mi.h"

Go to the source code of this file.

Defines

#define MM_BIOS_START   (0xA0000 >> PAGE_SHIFT)
#define MM_BIOS_END   (0xFFFFF >> PAGE_SHIFT)
#define MM_LARGE_PAGE_MINIMUM   ((127*1024*1024) >> PAGE_SHIFT)
#define MI_LOWMEM_MAGIC_BIT   (0x80000000)

Functions

VOID MiRemoveLowPages (VOID)
VOID MiInitMachineDependent (IN PLOADER_PARAMETER_BLOCK LoaderBlock)

Variables

SIZE_T MmExpandedNonPagedPoolInBytes
ULONG MmLargeSystemCache
LOGICAL MmMakeLowMemory
LOGICAL MmPagedPoolMaximumDesired


Define Documentation

#define MI_LOWMEM_MAGIC_BIT   (0x80000000)
 

Definition at line 54 of file mm/i386/init386.c.

#define MM_BIOS_END   (0xFFFFF >> PAGE_SHIFT)
 

Definition at line 37 of file mm/i386/init386.c.

Referenced by MiInitMachineDependent().

#define MM_BIOS_START   (0xA0000 >> PAGE_SHIFT)
 

Definition at line 36 of file mm/i386/init386.c.

Referenced by MiInitMachineDependent().
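
MM_BIOS_START and MM_BIOS_END bracket the legacy BIOS/adapter region (physical addresses 0xA0000 through 0xFFFFF) expressed as page frame numbers rather than byte addresses. A minimal sketch of what the shift produces, assuming the usual x86 PAGE_SHIFT of 12 (4KB pages); the main() wrapper and printout are illustrative only:

    #include <stdio.h>

    #define PAGE_SHIFT      12                        /* assumed: 4KB x86 pages */
    #define MM_BIOS_START   (0xA0000 >> PAGE_SHIFT)   /* PFN 0xA0 (160) */
    #define MM_BIOS_END     (0xFFFFF >> PAGE_SHIFT)   /* PFN 0xFF (255) */

    int main (void)
    {
        /* 96 page frames cover the VGA/BIOS hole between 640KB and 1MB. */
        printf ("BIOS range: PFN 0x%X - 0x%X (%u pages)\n",
                MM_BIOS_START, MM_BIOS_END,
                (unsigned) (MM_BIOS_END - MM_BIOS_START + 1));
        return 0;
    }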

#define MM_LARGE_PAGE_MINIMUM   ((127*1024*1024) >> PAGE_SHIFT)
 

Definition at line 39 of file mm/i386/init386.c.

Referenced by MiInitMachineDependent().
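
MM_LARGE_PAGE_MINIMUM expresses the 127MB physical-memory threshold as a page count (32512 pages with 4KB pages). MiInitMachineDependent() maps the kernel, HAL, and boot drivers with large pages only when the processor supports them and the machine holds more physical pages than this threshold; enabling the kernel verifier forces the threshold to an unreachable value so the kernel and HAL stay mapped with small pages and can be caught by the verifier. A condensed, hypothetical sketch of that gate, with the Mm/Ke globals replaced by plain variables for illustration:

    #define PAGE_SHIFT             12                                /* assumed: 4KB x86 pages */
    #define MM_LARGE_PAGE_MINIMUM  ((127*1024*1024) >> PAGE_SHIFT)   /* 32512 pages */

    /* Stand-ins for the kernel globals referenced by the real routine. */
    static unsigned long MmLargePageMinimum;
    static unsigned long MmNumberOfPhysicalPages;
    static int KernelVerifier;

    /* Hypothetical helper; FeatureBits/LargePageFeatureBit stand in for
       KeFeatureBits and KF_LARGE_PAGE in the actual code. */
    static int MiShouldMapKernelWithLargePages (unsigned long FeatureBits,
                                                unsigned long LargePageFeatureBit)
    {
        if (KernelVerifier) {
            MmLargePageMinimum = (unsigned long) -2;    /* effectively disable large pages */
        }
        else if (MmLargePageMinimum == 0) {
            MmLargePageMinimum = MM_LARGE_PAGE_MINIMUM; /* 127MB default threshold */
        }

        return (FeatureBits & LargePageFeatureBit) &&
               (MmNumberOfPhysicalPages > MmLargePageMinimum);
    }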


Function Documentation

VOID MiInitMachineDependent (IN PLOADER_PARAMETER_BLOCK LoaderBlock)
 

Definition at line 58 of file mm/i386/init386.c.

References ActiveAndValid, ASSERT, BadPageList, _MEMORY_ALLOCATION_DESCRIPTOR::BasePage, BYTES_TO_PAGES, c, DISPATCH_LEVEL, ExAllocatePoolWithTag, ExpMultiUserTS, ExVerifySuite(), FALSE, FIRST_MAPPING_PTE, _MMCOLOR_TABLES::Flink, FreePageList, HYPER_SPACE, InitializationPhase, InitializePool(), KeBugCheckEx(), KeFeatureBits, KeFlushCurrentTb(), KeFlushEntireTb(), KeFlushSingleTb(), KeGetPcr, KeInitializeSpinLock(), KeLowerIrql(), KeRaiseIrql(), KernelVerifier, KeSweepDcache(), KF_GLOBAL_PAGE, KF_LARGE_PAGE, KSEG0_BASE, KSTACK_POOL_SIZE, KSTACK_POOL_START, LAST_MAPPING_PTE, _MEMORY_ALLOCATION_DESCRIPTOR::ListEntry, LoaderBad, LoaderBBTMemory, LoaderFirmwarePermanent, LoaderFirmwareTemporary, LoaderFree, LoaderLoadedProgram, LoaderOsloaderStack, LoaderSpecialMemory, LOCK_PFN, _MMSUPPORT::MaximumWorkingSetSize, _MEMORY_ALLOCATION_DESCRIPTOR::MemoryType, MI_CONVERT_PHYSICAL_TO_PFN, MI_GET_PAGE_COLOR_FROM_PTE, MI_GET_PAGE_FRAME_FROM_PTE, MI_PFN_ELEMENT, MI_PTE_LOOKUP_NEEDED, MI_SET_PFN_DELETED, MI_WRITE_VALID_PTE, MiAddSystemPtes(), MiCreateBitMap, MiDecrementReferenceCount(), MiDetermineUserGlobalPteMask(), MiFreePoolPages(), MiGetPdeAddress, MiGetPteAddress, MiGetVirtualAddressMappedByPde, MiGetVirtualAddressMappedByPte, MiHydra, MiInitializeNonPagedPool(), MiInitializeSystemPtes(), MiInsertPageInList(), MiMaximumSystemCacheSizeExtra, _MMSUPPORT::MinimumWorkingSetSize, MiNoLowMemory, MiNumberOfExtraSystemPdes, MiRemoveAnyPage(), MiRemoveLowPages(), MiRequestedSystemPtes, MiReserveSystemPtes(), MiUnlinkFreeOrZeroedPage(), MM_BIOS_END, MM_BIOS_START, MM_BOOT_IMAGE_SIZE, MM_DEMAND_ZERO_WRITE_PTE, MM_EMPTY_LIST, MM_KSEG0_BASE, MM_KSEG2_BASE, MM_LARGE_PAGE_MINIMUM, MM_LOWEST_NONPAGED_SYSTEM_START, MM_MAX_ADDITIONAL_NONPAGED_POOL, MM_MAX_INITIAL_NONPAGED_POOL, MM_PAGES_IN_KSEG0, MM_PTE_ACCESS_MASK, MM_PTE_GLOBAL_MASK, MM_SECONDARY_COLORS_DEFAULT, MM_SECONDARY_COLORS_MAX, MM_SECONDARY_COLORS_MIN, MM_SUBSECTION_MAP, MM_SYSTEM_CACHE_END_EXTRA, MM_SYSTEM_CACHE_START_EXTRA, MM_VA_MAPPED_BY_PDE, MmAllocatedNonPagedPool, MMCOLOR_TABLES, MmDefaultMaximumNonPagedPool, MmDynamicPfn, MmExpandedNonPagedPoolInBytes, MmFirstReservedMappingPte, MmFreePagesByColor, MmHighestPhysicalPage, MmHighestPossiblePhysicalPage, MmInitializeProcessAddressSpace(), MmIsAddressValid(), MmKseg2Frame, MmLargePageMinimum, MmLargeSystemCache, MmLastReservedMappingPte, MmLowestPhysicalPage, MmMakeLowMemory, MmMaxAdditionNonPagedPoolPerMb, MmMaximumNonPagedPoolInBytes, MmMinAdditionNonPagedPoolPerMb, MmMinimumNonPagedPoolSize, MmNonPagedMustSucceed, MmNonPagedPoolEnd, MmNonPagedPoolExpansionStart, MmNonPagedPoolStart, MmNonPagedSystemStart, MmNumberOfPhysicalPages, MmNumberOfSystemPtes, MmPageAlignedPoolBase, MmPagedPoolMaximumDesired, MmPageLocationList, MMPFN, MmPfnDatabase, MmPfnLock, MmProductType, MmProtectFreedNonPagedPool, MmPteGlobal, MmSecondaryColorMask, MmSecondaryColors, MmSizeOfNonPagedMustSucceed, MmSizeOfNonPagedPoolInBytes, MmSizeOfPagedPoolInBytes, MmSpecialPoolTag, MmSubsectionBase, MmSubsectionTopPage, MmSystemProcessWorkingSetMax, MmSystemProcessWorkingSetMin, MmSystemSpaceLock, MmTotalFreeSystemPtes, MmVirtualBias, MmWorkingSetList, MmWsle, NonPagedPool, NonPagedPoolExpansion, NonPagedPoolMustSucceed, NULL, _MMPFN::OriginalPte, PAGE_ALIGN, PAGE_DIRECTORY_MASK, PAGE_SHIFT, PAGE_SIZE, _MEMORY_ALLOCATION_DESCRIPTOR::PageCount, PDE_PER_PAGE, PERFINFO_INIT_POOLRANGE, PrototypePte, PsGetCurrentProcess, PTE_PER_PAGE, PTE_SHIFT, _MMPFN::PteAddress, _MMPFN::PteFrame, RtlClearAllBits(), RtlCompareMemoryUlong(), 
StandbyPageList, SystemPteSpace, TRUE, _MMPTE::u, _MMPFN::u2, _MMPFN::u3, UNLOCK_PFN, ValidKernelPde, ValidKernelPdeLocal, ValidKernelPte, ValidKernelPteLocal, _EPROCESS::Vm, WORKING_SET_LIST, _EPROCESS::WorkingSetPage, ZeroedPageList, ZeroKernelPte, and ZeroPte.

Routine Description:

    This routine performs the necessary operations to enable virtual
    memory. This includes building the page directory page, building
    page table pages to map the code section, the data section, the
    stack section and the trap handler.

    It also initializes the PFN database and populates the free list.

Arguments:

    LoaderBlock - Supplies a pointer to the firmware setup loader block.

Return Value:

    None.

Environment:

    Kernel mode.

--*/

{
    PMMPFN BasePfn;
    PMMPFN BottomPfn;
    PMMPFN TopPfn;
    BOOLEAN PfnInKseg0;
    ULONG BasePage;
    ULONG HighPage;
    ULONG HighPageInKseg0;
    ULONG PagesLeft;
    ULONG Range;
    ULONG i, j;
    ULONG PdePageNumber;
    ULONG PdePage;
    ULONG PageFrameIndex;
    ULONG NextPhysicalPage;
    ULONG OldFreeDescriptorLowMemCount;
    ULONG OldFreeDescriptorLowMemBase;
    ULONG OldFreeDescriptorCount;
    ULONG OldFreeDescriptorBase;
    ULONG PfnAllocation;
    ULONG NumberOfPages;
    ULONG MaxPool;
    PEPROCESS CurrentProcess;
    ULONG DirBase;
    ULONG MostFreePage;
    ULONG MostFreeLowMem;
    ULONG MostFreeLowMem512;
    PLIST_ENTRY NextMd;
    PMEMORY_ALLOCATION_DESCRIPTOR FreeDescriptor;
    PMEMORY_ALLOCATION_DESCRIPTOR FreeDescriptor512;
    PMEMORY_ALLOCATION_DESCRIPTOR FreeDescriptorLowMem;
    PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor;
    PMEMORY_ALLOCATION_DESCRIPTOR UsableDescriptor;
    MMPTE TempPde;
    MMPTE TempPte;
    PMMPTE PointerPde;
    PMMPTE PointerPte;
    PMMPTE LastPte;
    PMMPTE Pde;
    PMMPTE StartPde;
    PMMPTE EndPde;
    PMMPFN Pfn1;
    PMMPFN Pfn2;
    ULONG PdeCount;
    ULONG va;
    ULONG SavedSize;
    KIRQL OldIrql;
    ULONG MapLargePages;
    PVOID NonPagedPoolStartVirtual;
    ULONG LargestFreePfnCount;
    ULONG LargestFreePfnStart;
    ULONG ExtraPtes;
    ULONG FreePfnCount;
    SIZE_T UsableDescriptorRemoved;
    LOGICAL SwitchedDescriptors;
    LOGICAL NeedLowVirtualPfn;
    ULONG NextUsablePhysicalPage;
    LOGICAL UsingHighMemory;
    PVOID LowVirtualNonPagedPoolStart;
    ULONG LowVirtualNonPagedPoolSizeInBytes;
    LOGICAL ExtraSystemCacheViews;

    ExtraSystemCacheViews = FALSE;
    SwitchedDescriptors = FALSE;
    PfnInKseg0 = FALSE;
    MostFreePage = 0;
    MostFreeLowMem = 0;
    MostFreeLowMem512 = 0;
    MapLargePages = 0;
    LargestFreePfnCount = 0;
    UsableDescriptor = NULL;
    UsableDescriptorRemoved = 0;

    if (InitializationPhase == 1) {

        //
        // If the kernel image has not been biased to allow for 3gb of user
        // space, the host processor supports large pages, and the number of
        // physical pages is greater than 127mb, then map the kernel image,
        // HAL, and boot drivers into a large page.
        //

        if ((MmVirtualBias == 0) &&
#if defined (_X86PAE_)
            (MiNeedLowVirtualPfn == FALSE) &&
#endif
            (KeFeatureBits & KF_LARGE_PAGE) &&
            (MmNumberOfPhysicalPages > MmLargePageMinimum)) {

            //
            // Map lower 512MB of physical memory as large pages starting
            // at address 0x80000000.
00179 // 00180 00181 LOCK_PFN (OldIrql); 00182 00183 PointerPde = MiGetPdeAddress (MM_KSEG0_BASE); 00184 LastPte = MiGetPdeAddress (MM_KSEG2_BASE); 00185 TempPte = ValidKernelPde; 00186 TempPte.u.Hard.PageFrameNumber = 0; 00187 TempPte.u.Hard.LargePage = 1; 00188 #if defined(_X86PAE_) 00189 if (MiUseGlobalBitInLargePdes == TRUE) { 00190 TempPte.u.Hard.Global = 1; 00191 } 00192 #endif 00193 00194 do { 00195 if (PointerPde->u.Hard.Valid == 1) { 00196 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE(PointerPde); 00197 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 00198 Pfn1->u2.ShareCount = 0; 00199 Pfn1->u3.e2.ReferenceCount = 1; 00200 Pfn1->u3.e1.PageLocation = StandbyPageList; 00201 MI_SET_PFN_DELETED (Pfn1); 00202 MiDecrementReferenceCount (PageFrameIndex); 00203 KeFlushSingleTb (MiGetVirtualAddressMappedByPte (PointerPde), 00204 TRUE, 00205 TRUE, 00206 (PHARDWARE_PTE)PointerPde, 00207 TempPte.u.Flush); 00208 KeFlushEntireTb (TRUE, TRUE); //p6 errata... 00209 00210 } else { 00211 MI_WRITE_VALID_PTE (PointerPde, TempPte); 00212 } 00213 00214 TempPte.u.Hard.PageFrameNumber += MM_VA_MAPPED_BY_PDE >> PAGE_SHIFT; 00215 PointerPde += 1; 00216 } while (PointerPde < LastPte); 00217 00218 UNLOCK_PFN (OldIrql); 00219 00220 MmKseg2Frame = (512*1024*1024) >> PAGE_SHIFT; 00221 } 00222 00223 return; 00224 } 00225 00226 ASSERT (InitializationPhase == 0); 00227 00228 // 00229 // Enabling special IRQL automatically disables mapping the kernel with 00230 // large pages so we can catch kernel and HAL code. 00231 // 00232 00233 if (KernelVerifier) { 00234 MmLargePageMinimum = (ULONG)-2; 00235 } 00236 else if (MmLargePageMinimum == 0) { 00237 MmLargePageMinimum = MM_LARGE_PAGE_MINIMUM; 00238 } 00239 00240 if (MmProtectFreedNonPagedPool == TRUE) { 00241 MmLargePageMinimum = (ULONG)-2; 00242 } 00243 00244 if (MmDynamicPfn == TRUE) { 00245 if (MmVirtualBias != 0) { 00246 MmDynamicPfn = FALSE; 00247 } 00248 MmLargePageMinimum = (ULONG)-2; 00249 } 00250 00251 // 00252 // If the host processor supports global bits, then set the global 00253 // bit in the template kernel PTE and PDE entries. 00254 // 00255 00256 if (KeFeatureBits & KF_GLOBAL_PAGE) { 00257 ValidKernelPte.u.Long |= MM_PTE_GLOBAL_MASK; 00258 #if defined(_X86PAE_) 00259 // 00260 // Note that the PAE mode of the processor does not support the 00261 // global bit in PDEs which map 4K page table pages. 00262 // 00263 MiUseGlobalBitInLargePdes = TRUE; 00264 #else 00265 ValidKernelPde.u.Long |= MM_PTE_GLOBAL_MASK; 00266 #endif 00267 MmPteGlobal.u.Long = MM_PTE_GLOBAL_MASK; 00268 } 00269 00270 TempPte = ValidKernelPte; 00271 TempPde = ValidKernelPde; 00272 00273 // 00274 // Set the directory base for the system process. 00275 // 00276 00277 PointerPte = MiGetPdeAddress (PDE_BASE); 00278 PdePageNumber = MI_GET_PAGE_FRAME_FROM_PTE(PointerPte); 00279 #if defined(_X86PAE_) 00280 00281 PrototypePte.u.Soft.PageFileHigh = MI_PTE_LOOKUP_NEEDED; 00282 00283 PsGetCurrentProcess()->PaePageDirectoryPage = PdePageNumber; 00284 _asm { 00285 mov eax, cr3 00286 mov DirBase, eax 00287 } 00288 00289 // 00290 // Note cr3 must be 32-byte aligned. 00291 // 00292 00293 ASSERT ((DirBase & 0x1f) == 0); 00294 #else 00295 DirBase = MI_GET_PAGE_FRAME_FROM_PTE(PointerPte) << PAGE_SHIFT; 00296 #endif 00297 PsGetCurrentProcess()->Pcb.DirectoryTableBase[0] = DirBase; 00298 KeSweepDcache (FALSE); 00299 00300 // 00301 // Unmap the low 2Gb of memory. 
00302 // 00303 00304 PointerPde = MiGetPdeAddress(0); 00305 LastPte = MiGetPdeAddress (KSEG0_BASE - 0x10000 - 1); 00306 while (PointerPde <= LastPte) { 00307 *PointerPde = ZeroKernelPte; 00308 PointerPde += 1; 00309 } 00310 00311 // 00312 // Get the lower bound of the free physical memory and the 00313 // number of physical pages by walking the memory descriptor lists. 00314 // 00315 00316 FreeDescriptor = NULL; 00317 FreeDescriptor512 = NULL; 00318 NextMd = LoaderBlock->MemoryDescriptorListHead.Flink; 00319 while (NextMd != &LoaderBlock->MemoryDescriptorListHead) { 00320 MemoryDescriptor = CONTAINING_RECORD(NextMd, 00321 MEMORY_ALLOCATION_DESCRIPTOR, 00322 ListEntry); 00323 00324 if ((MemoryDescriptor->MemoryType != LoaderFirmwarePermanent) && 00325 (MemoryDescriptor->MemoryType != LoaderBBTMemory) && 00326 (MemoryDescriptor->MemoryType != LoaderSpecialMemory)) { 00327 00328 // 00329 // This check results in /BURNMEMORY chunks not being counted. 00330 // 00331 00332 if (MemoryDescriptor->MemoryType != LoaderBad) { 00333 MmNumberOfPhysicalPages += MemoryDescriptor->PageCount; 00334 } 00335 00336 if (MemoryDescriptor->BasePage < MmLowestPhysicalPage) { 00337 MmLowestPhysicalPage = MemoryDescriptor->BasePage; 00338 } 00339 00340 if ((MemoryDescriptor->BasePage + MemoryDescriptor->PageCount) > 00341 MmHighestPhysicalPage) { 00342 MmHighestPhysicalPage = 00343 MemoryDescriptor->BasePage + MemoryDescriptor->PageCount - 1; 00344 } 00345 00346 // 00347 // Locate the largest free block and the largest free 00348 // block below 16mb. 00349 // 00350 00351 if ((MemoryDescriptor->MemoryType == LoaderFree) || 00352 (MemoryDescriptor->MemoryType == LoaderLoadedProgram) || 00353 (MemoryDescriptor->MemoryType == LoaderFirmwareTemporary) || 00354 (MemoryDescriptor->MemoryType == LoaderOsloaderStack)) { 00355 00356 if (MemoryDescriptor->PageCount > MostFreePage) { 00357 MostFreePage = MemoryDescriptor->PageCount; 00358 FreeDescriptor = MemoryDescriptor; 00359 } 00360 00361 if (MemoryDescriptor->BasePage < 0x20000) { 00362 00363 // 00364 // This memory descriptor is below 512mb. 00365 // 00366 00367 if ((MostFreeLowMem512 < MemoryDescriptor->PageCount) && 00368 (MostFreeLowMem512 < ((ULONG)0x20000 - MemoryDescriptor->BasePage))) { 00369 00370 MostFreeLowMem512 = (ULONG)0x20000 - MemoryDescriptor->BasePage; 00371 if (MemoryDescriptor->PageCount < MostFreeLowMem512) { 00372 MostFreeLowMem512 = MemoryDescriptor->PageCount; 00373 } 00374 00375 FreeDescriptor512 = MemoryDescriptor; 00376 } 00377 } 00378 00379 if (MemoryDescriptor->BasePage < 0x1000) { 00380 00381 // 00382 // This memory descriptor is below 16mb. 
00383 // 00384 00385 if ((MostFreeLowMem < MemoryDescriptor->PageCount) && 00386 (MostFreeLowMem < ((ULONG)0x1000 - MemoryDescriptor->BasePage))) { 00387 00388 MostFreeLowMem = (ULONG)0x1000 - MemoryDescriptor->BasePage; 00389 if (MemoryDescriptor->PageCount < MostFreeLowMem) { 00390 MostFreeLowMem = MemoryDescriptor->PageCount; 00391 } 00392 00393 FreeDescriptorLowMem = MemoryDescriptor; 00394 } 00395 } 00396 } 00397 } 00398 00399 NextMd = MemoryDescriptor->ListEntry.Flink; 00400 } 00401 00402 if (FreeDescriptorLowMem == FreeDescriptor) { 00403 FreeDescriptor = NULL; 00404 } 00405 00406 if (MmLargeSystemCache != 0) { 00407 ExtraSystemCacheViews = TRUE; 00408 } 00409 00410 NeedLowVirtualPfn = FALSE; 00411 00412 if (MmDynamicPfn == TRUE) { 00413 #if defined(_X86PAE_) 00414 if (ExVerifySuite(DataCenter) == TRUE) { 00415 MmHighestPossiblePhysicalPage = 0x1000000 - 1; 00416 } 00417 else if ((MmProductType != 0x00690057) && 00418 (ExVerifySuite(Enterprise) == TRUE)) { 00419 MmHighestPossiblePhysicalPage = 0x200000 - 1; 00420 } 00421 else { 00422 MmHighestPossiblePhysicalPage = 0x100000 - 1; 00423 } 00424 #else 00425 MmHighestPossiblePhysicalPage = 0x100000 - 1; 00426 #endif 00427 if (MmVirtualBias == 0) { 00428 NeedLowVirtualPfn = TRUE; 00429 } 00430 } 00431 else { 00432 MmHighestPossiblePhysicalPage = MmHighestPhysicalPage; 00433 } 00434 00435 if (MmHighestPossiblePhysicalPage > 0x400000 - 1) { 00436 00437 // 00438 // The PFN database is more than 112mb. Force it to come from the 00439 // 2GB->3GB virtual address range. Note the administrator cannot be 00440 // booting /3GB as when he does, the loader throws away memory 00441 // above the physical 16GB line. 00442 // 00443 00444 ASSERT (MmVirtualBias == 0); 00445 00446 // 00447 // The virtual space between 0xA4000000 and 0xC0000000 is best used 00448 // for system PTEs when this much physical memory is present. 00449 // 00450 00451 ExtraSystemCacheViews = FALSE; 00452 } 00453 00454 // 00455 // Two large descriptors have been saved. If the one below 512mb 00456 // is sufficiently large, use that one as then large pages can be 00457 // applied to map it and the PFN database can be put in it. 00458 // Only do this if the PFN database size is small enough such that it 00459 // doesn't consume so much virtual space between 2gb and 2gb+512mb - ie: 00460 // there has to be enough virtual space to map initial nonpaged pool in 00461 // there. 00462 // 00463 00464 if (MmHighestPossiblePhysicalPage > 0x100000) { 00465 00466 // 00467 // The 0x700000 (== 28GB) is chosen carefully so that the start of 00468 // expanded nonpaged pool is always above the nonpaged system start. 00469 // 00470 00471 if (MmHighestPossiblePhysicalPage < 0x700000) { 00472 00473 if ((FreeDescriptor512 != NULL) && 00474 (FreeDescriptor512 != FreeDescriptor) && 00475 (FreeDescriptor512 != FreeDescriptorLowMem)) { 00476 00477 if (MostFreeLowMem512 >= ((MmHighestPossiblePhysicalPage * sizeof (MMPFN) + MM_MAX_INITIAL_NONPAGED_POOL) / PAGE_SIZE)) { 00478 00479 FreeDescriptor = FreeDescriptor512; 00480 MostFreePage = FreeDescriptor->PageCount; 00481 } 00482 } 00483 00484 if (FreeDescriptor->BasePage >= 0x20000) { 00485 NeedLowVirtualPfn = TRUE; 00486 } 00487 } 00488 else { 00489 NeedLowVirtualPfn = TRUE; 00490 } 00491 } 00492 00493 #if defined(_X86PAE_) 00494 00495 // 00496 // Only PAE machines with at least 5GB of physical memory get to use this 00497 // and then only if they are NOT booted /3GB. 
00498 // 00499 00500 if (strstr(LoaderBlock->LoadOptions, "NOLOWMEM")) { 00501 if ((MmVirtualBias == 0) && 00502 (MmNumberOfPhysicalPages >= 5 * 1024 * 1024 / 4)) { 00503 MiNoLowMemory = TRUE; 00504 MmMakeLowMemory = TRUE; 00505 NeedLowVirtualPfn = TRUE; 00506 } 00507 } 00508 00509 MiNeedLowVirtualPfn = NeedLowVirtualPfn; 00510 #endif 00511 00512 NextPhysicalPage = FreeDescriptorLowMem->BasePage; 00513 00514 OldFreeDescriptorLowMemCount = FreeDescriptorLowMem->PageCount; 00515 OldFreeDescriptorLowMemBase = FreeDescriptorLowMem->BasePage; 00516 00517 if (FreeDescriptor != NULL) { 00518 OldFreeDescriptorCount = FreeDescriptor->PageCount; 00519 OldFreeDescriptorBase = FreeDescriptor->BasePage; 00520 } 00521 00522 NumberOfPages = FreeDescriptorLowMem->PageCount; 00523 if (MmNumberOfPhysicalPages < 1100) { 00524 KeBugCheckEx (INSTALL_MORE_MEMORY, 00525 MmNumberOfPhysicalPages, 00526 MmLowestPhysicalPage, 00527 MmHighestPhysicalPage, 00528 0); 00529 } 00530 00531 // 00532 // Build non-paged pool using the physical pages following the 00533 // data page in which to build the pool from. Non-paged pool grows 00534 // from the high range of the virtual address space and expands 00535 // downward. 00536 // 00537 // At this time non-paged pool is constructed so virtual addresses 00538 // are also physically contiguous. 00539 // 00540 00541 if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) > 00542 (7 * (MmNumberOfPhysicalPages >> 3))) { 00543 00544 // 00545 // More than 7/8 of memory is allocated to nonpagedpool, reset to 0. 00546 // 00547 00548 MmSizeOfNonPagedPoolInBytes = 0; 00549 } 00550 00551 if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize) { 00552 00553 // 00554 // Calculate the size of nonpaged pool. 00555 // Use the minimum size, then for every MB above 4mb add extra 00556 // pages. 00557 // 00558 00559 MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize; 00560 00561 MmSizeOfNonPagedPoolInBytes += 00562 ((MmNumberOfPhysicalPages - 1024)/256) * 00563 MmMinAdditionNonPagedPoolPerMb; 00564 } 00565 00566 if (MmSizeOfNonPagedPoolInBytes > MM_MAX_INITIAL_NONPAGED_POOL) { 00567 MmSizeOfNonPagedPoolInBytes = MM_MAX_INITIAL_NONPAGED_POOL; 00568 } 00569 00570 // 00571 // Align to page size boundary. 00572 // 00573 00574 MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1); 00575 00576 // 00577 // Calculate the maximum size of pool. 00578 // 00579 00580 if (MmMaximumNonPagedPoolInBytes == 0) { 00581 00582 // 00583 // Calculate the size of nonpaged pool. If 4mb or less use 00584 // the minimum size, then for every MB above 4mb add extra 00585 // pages. 00586 // 00587 00588 MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool; 00589 00590 // 00591 // Make sure enough expansion for the PFN database exists. 00592 // 00593 00594 MmMaximumNonPagedPoolInBytes += (ULONG)PAGE_ALIGN ( 00595 (MmHighestPossiblePhysicalPage + 1) * sizeof(MMPFN)); 00596 00597 // 00598 // Only use the new formula for autosizing nonpaged pool on machines 00599 // with at least 512MB. The new formula allocates 1/2 as much nonpaged 00600 // pool per MB but scales much higher - machines with ~1.2GB or more 00601 // get 256MB of nonpaged pool. Note that the old formula gave machines 00602 // with 512MB of RAM 128MB of nonpaged pool so this behavior is 00603 // preserved with the new formula as well. 
00604 // 00605 00606 if (MmNumberOfPhysicalPages >= 0x1f000) { 00607 MmMaximumNonPagedPoolInBytes += 00608 ((MmNumberOfPhysicalPages - 1024)/256) * 00609 (MmMaxAdditionNonPagedPoolPerMb / 2); 00610 00611 if (MmMaximumNonPagedPoolInBytes < MM_MAX_ADDITIONAL_NONPAGED_POOL) { 00612 MmMaximumNonPagedPoolInBytes = MM_MAX_ADDITIONAL_NONPAGED_POOL; 00613 } 00614 } 00615 else { 00616 MmMaximumNonPagedPoolInBytes += 00617 ((MmNumberOfPhysicalPages - 1024)/256) * 00618 MmMaxAdditionNonPagedPoolPerMb; 00619 } 00620 } 00621 00622 MaxPool = MmSizeOfNonPagedPoolInBytes + PAGE_SIZE * 16 + 00623 (ULONG)PAGE_ALIGN ( 00624 (MmHighestPossiblePhysicalPage + 1) * sizeof(MMPFN)); 00625 00626 if (MmMaximumNonPagedPoolInBytes < MaxPool) { 00627 MmMaximumNonPagedPoolInBytes = MaxPool; 00628 } 00629 00630 // 00631 // Systems that are booted /3GB have a 128MB nonpaged pool maximum, 00632 // 00633 // Systems that have a full 2GB system virtual address space, support 00634 // large pages, and have sufficient physical memory are allowed up to 256MB 00635 // of nonpaged pool. 00636 // 00637 00638 MmExpandedNonPagedPoolInBytes = 0; 00639 00640 if ((MmVirtualBias == 0) && 00641 (KeFeatureBits & KF_LARGE_PAGE) && 00642 (MmProtectFreedNonPagedPool == FALSE) && 00643 (MmNumberOfPhysicalPages > MmLargePageMinimum)) { 00644 00645 if (MmMaximumNonPagedPoolInBytes > MM_MAX_INITIAL_NONPAGED_POOL + MM_MAX_ADDITIONAL_NONPAGED_POOL) { 00646 MmMaximumNonPagedPoolInBytes = MM_MAX_INITIAL_NONPAGED_POOL + MM_MAX_ADDITIONAL_NONPAGED_POOL; 00647 } 00648 00649 // 00650 // Initialize the expanded amount to the highest possible value as 00651 // there may not be enough contiguous pages in the FreeDescriptorLowMem 00652 // below 512MB to get the initial pool to its maximal size. 00653 // 00654 00655 if (MmMaximumNonPagedPoolInBytes > MM_MAX_ADDITIONAL_NONPAGED_POOL) { 00656 00657 MmExpandedNonPagedPoolInBytes = MM_MAX_ADDITIONAL_NONPAGED_POOL; 00658 MmSizeOfNonPagedPoolInBytes = MmMaximumNonPagedPoolInBytes - 00659 MmExpandedNonPagedPoolInBytes; 00660 } 00661 } 00662 else { 00663 if (MmMaximumNonPagedPoolInBytes > MM_MAX_ADDITIONAL_NONPAGED_POOL) { 00664 MmMaximumNonPagedPoolInBytes = MM_MAX_ADDITIONAL_NONPAGED_POOL; 00665 } 00666 } 00667 00668 // 00669 // Get secondary color value from: 00670 // 00671 // (a) from the registry (already filled in) or 00672 // (b) from the PCR or 00673 // (c) default value. 00674 // 00675 00676 if (MmSecondaryColors == 0) { 00677 MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize; 00678 } 00679 00680 MmSecondaryColors = MmSecondaryColors >> PAGE_SHIFT; 00681 00682 if (MmSecondaryColors == 0) { 00683 MmSecondaryColors = MM_SECONDARY_COLORS_DEFAULT; 00684 00685 } else { 00686 00687 // 00688 // Make sure the value is a power of two and within limits. 00689 // 00690 00691 if (((MmSecondaryColors & (MmSecondaryColors -1)) != 0) || 00692 (MmSecondaryColors < MM_SECONDARY_COLORS_MIN) || 00693 (MmSecondaryColors > MM_SECONDARY_COLORS_MAX)) { 00694 MmSecondaryColors = MM_SECONDARY_COLORS_DEFAULT; 00695 } 00696 } 00697 00698 // 00699 // Add in the PFN database size (based on the number of pages required 00700 // from page zero to the highest page). 00701 // 00702 // Get the number of secondary colors and add the array for tracking 00703 // secondary colors to the end of the PFN database. 
00704 // 00705 00706 PfnAllocation = 1 + ((((MmHighestPossiblePhysicalPage + 1) * sizeof(MMPFN)) + 00707 (MmSecondaryColors * sizeof(MMCOLOR_TABLES)*2)) 00708 >> PAGE_SHIFT); 00709 00710 if (NeedLowVirtualPfn == FALSE) { 00711 MmMaximumNonPagedPoolInBytes += PfnAllocation << PAGE_SHIFT; 00712 } 00713 00714 if (MmExpandedNonPagedPoolInBytes) { 00715 if (NeedLowVirtualPfn == FALSE) { 00716 MmExpandedNonPagedPoolInBytes += PfnAllocation << PAGE_SHIFT; 00717 } 00718 MmNonPagedPoolStart = (PVOID)((ULONG)MmNonPagedPoolEnd 00719 - MmExpandedNonPagedPoolInBytes); 00720 } 00721 else { 00722 MmNonPagedPoolStart = (PVOID)((ULONG)MmNonPagedPoolEnd 00723 - MmMaximumNonPagedPoolInBytes); 00724 } 00725 00726 MmNonPagedPoolStart = (PVOID)PAGE_ALIGN(MmNonPagedPoolStart); 00727 00728 MmPageAlignedPoolBase[NonPagedPool] = MmNonPagedPoolStart; 00729 00730 // 00731 // Allocate additional paged pool provided it can fit and either the 00732 // user asked for it or we decide 460MB of PTE space is sufficient. 00733 // 00734 00735 if ((MmVirtualBias == 0) && 00736 ((MmSizeOfPagedPoolInBytes == (SIZE_T)-1) || 00737 ((MmSizeOfPagedPoolInBytes == 0) && 00738 (MmNumberOfPhysicalPages >= (1 * 1024 * 1024 * 1024 / PAGE_SIZE)) && 00739 ((MiHydra == FALSE) || (ExpMultiUserTS == FALSE)) && 00740 (MiRequestedSystemPtes != (ULONG)-1)))) { 00741 00742 ExtraSystemCacheViews = FALSE; 00743 MmNumberOfSystemPtes = 3000; 00744 MmPagedPoolMaximumDesired = TRUE; 00745 00746 // 00747 // Make sure we always allocate extra PTEs later as we have crimped 00748 // the initial allocation here. 00749 // 00750 00751 if ((MiHydra == FALSE) || (ExpMultiUserTS == FALSE)) { 00752 if (MmNumberOfPhysicalPages <= 0x7F00) { 00753 MiRequestedSystemPtes = (ULONG)-1; 00754 } 00755 } 00756 } 00757 00758 // 00759 // Calculate the starting PDE for the system PTE pool which is 00760 // right below the nonpaged pool. 00761 // 00762 00763 MmNonPagedSystemStart = (PVOID)(((ULONG)MmNonPagedPoolStart - 00764 ((MmNumberOfSystemPtes + 1) * PAGE_SIZE)) & 00765 (~PAGE_DIRECTORY_MASK)); 00766 00767 if (MmNonPagedSystemStart < MM_LOWEST_NONPAGED_SYSTEM_START) { 00768 MmNonPagedSystemStart = MM_LOWEST_NONPAGED_SYSTEM_START; 00769 MmNumberOfSystemPtes = (((ULONG)MmNonPagedPoolStart - 00770 (ULONG)MmNonPagedSystemStart) >> PAGE_SHIFT)-1; 00771 00772 ASSERT (MmNumberOfSystemPtes > 1000); 00773 } 00774 00775 // 00776 // Set up page table pages to map nonpaged pool and system PTEs. 00777 // If possible, use higher physical pages to preserve more low 00778 // memory for drivers. 00779 // 00780 00781 StartPde = MiGetPdeAddress (MmNonPagedSystemStart); 00782 EndPde = MiGetPdeAddress ((PVOID)((PCHAR)MmNonPagedPoolEnd - 1)); 00783 00784 UsingHighMemory = FALSE; 00785 if (NextPhysicalPage < (FreeDescriptorLowMem->PageCount + 00786 FreeDescriptorLowMem->BasePage)) { 00787 00788 ULONG PagesNeeded; 00789 00790 // 00791 // We haven't used the other descriptor yet, examine it now 00792 // to see if enough usable memory is available. 00793 // 00794 00795 PagesNeeded = (EndPde - StartPde + 1); 00796 00797 if ((FreeDescriptor) && (FreeDescriptor->PageCount >= PagesNeeded)) { 00798 00799 UsableDescriptor = FreeDescriptor; 00800 NextUsablePhysicalPage = FreeDescriptor->BasePage; 00801 00802 UsableDescriptorRemoved += PagesNeeded; 00803 00804 // 00805 // Note this must be undone if the PFN database is created in 00806 // virtual memory because the memory descriptors are scanned 00807 // and only pages in the descriptors get mapped. 
00808 // 00809 00810 UsableDescriptor->BasePage += PagesNeeded; 00811 UsableDescriptor->PageCount -= PagesNeeded; 00812 UsingHighMemory = TRUE; 00813 } 00814 } 00815 00816 while (StartPde <= EndPde) { 00817 00818 ASSERT(StartPde->u.Hard.Valid == 0); 00819 00820 // 00821 // Map in a page table page. 00822 // 00823 00824 if (UsingHighMemory == TRUE) { 00825 TempPde.u.Hard.PageFrameNumber = NextUsablePhysicalPage; 00826 NextUsablePhysicalPage += 1; 00827 } 00828 else { 00829 TempPde.u.Hard.PageFrameNumber = NextPhysicalPage; 00830 NextPhysicalPage += 1; 00831 NumberOfPages -= 1; 00832 00833 if (NumberOfPages == 0) { 00834 if (FreeDescriptor == NULL) { 00835 KeBugCheckEx (INSTALL_MORE_MEMORY, 00836 MmNumberOfPhysicalPages, 00837 MmLowestPhysicalPage, 00838 MmHighestPhysicalPage, 00839 1); 00840 } 00841 ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage + 00842 FreeDescriptor->PageCount)); 00843 NextPhysicalPage = FreeDescriptor->BasePage; 00844 NumberOfPages = FreeDescriptor->PageCount; 00845 SwitchedDescriptors = TRUE; 00846 } 00847 } 00848 00849 *StartPde = TempPde; 00850 PointerPte = MiGetVirtualAddressMappedByPte (StartPde); 00851 RtlZeroMemory (PointerPte, PAGE_SIZE); 00852 StartPde += 1; 00853 } 00854 00855 ExtraPtes = 0; 00856 00857 MiMaximumSystemCacheSizeExtra = 0; 00858 00859 if (MmVirtualBias == 0) { 00860 00861 if ((MiRequestedSystemPtes == (ULONG)-1) || 00862 ((MiHydra == TRUE) && (ExpMultiUserTS == TRUE)) || 00863 (MmSpecialPoolTag && MmNumberOfPhysicalPages > 0x7F00)) { 00864 00865 ExtraPtes = BYTES_TO_PAGES(KSTACK_POOL_SIZE) - 1; 00866 } 00867 else if (ExtraSystemCacheViews == TRUE) { 00868 00869 // 00870 // If the system is configured to favor large system caching, use 00871 // the remaining virtual address space for the system cache. This 00872 // is possible since the 3GB user space option has not been 00873 // enabled, Hydra has not been chosen and extended special pool 00874 // is not enabled. 00875 // 00876 00877 MiMaximumSystemCacheSizeExtra = 00878 (MM_SYSTEM_CACHE_END_EXTRA - MM_SYSTEM_CACHE_START_EXTRA) >> PAGE_SHIFT; 00879 } 00880 else if (MmNumberOfPhysicalPages > 0x7F00) { 00881 ExtraPtes = BYTES_TO_PAGES(KSTACK_POOL_SIZE) - 1; 00882 } 00883 00884 if (ExtraPtes) { 00885 StartPde = MiGetPdeAddress (KSTACK_POOL_START); 00886 EndPde = MiGetPdeAddress ((PVOID)((PCHAR)KSTACK_POOL_START + 00887 (ExtraPtes << PAGE_SHIFT) - 1)); 00888 00889 // 00890 // If possible, use higher physical pages to preserve more low 00891 // memory for drivers. 00892 // 00893 00894 UsingHighMemory = FALSE; 00895 if (NextPhysicalPage < (FreeDescriptorLowMem->PageCount + 00896 FreeDescriptorLowMem->BasePage)) { 00897 00898 ULONG PagesNeeded; 00899 00900 // 00901 // We haven't used the other descriptor yet, examine it now 00902 // to see if enough usable memory is available. 00903 // 00904 00905 PagesNeeded = (EndPde - StartPde + 1); 00906 00907 if ((FreeDescriptor != NULL) && (FreeDescriptor->PageCount >= PagesNeeded)) { 00908 00909 UsableDescriptor = FreeDescriptor; 00910 NextUsablePhysicalPage = FreeDescriptor->BasePage; 00911 00912 UsableDescriptorRemoved += PagesNeeded; 00913 00914 // 00915 // Note this must be undone if the PFN database is 00916 // created in virtual memory because the memory 00917 // descriptors are scanned and only pages in the 00918 // descriptors get mapped. 
00919 // 00920 00921 UsableDescriptor->BasePage += PagesNeeded; 00922 UsableDescriptor->PageCount -= PagesNeeded; 00923 UsingHighMemory = TRUE; 00924 } 00925 } 00926 00927 while (StartPde <= EndPde) { 00928 00929 ASSERT(StartPde->u.Hard.Valid == 0); 00930 00931 // 00932 // Map in a page directory page. 00933 // 00934 00935 if (UsingHighMemory == TRUE) { 00936 TempPde.u.Hard.PageFrameNumber = NextUsablePhysicalPage; 00937 NextUsablePhysicalPage += 1; 00938 } 00939 else { 00940 TempPde.u.Hard.PageFrameNumber = NextPhysicalPage; 00941 NumberOfPages -= 1; 00942 NextPhysicalPage += 1; 00943 00944 if (NumberOfPages == 0) { 00945 if (FreeDescriptor == NULL) { 00946 KeBugCheckEx (INSTALL_MORE_MEMORY, 00947 MmNumberOfPhysicalPages, 00948 MmLowestPhysicalPage, 00949 MmHighestPhysicalPage, 00950 2); 00951 } 00952 ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage + 00953 FreeDescriptor->PageCount)); 00954 NextPhysicalPage = FreeDescriptor->BasePage; 00955 NumberOfPages = FreeDescriptor->PageCount; 00956 SwitchedDescriptors = TRUE; 00957 } 00958 } 00959 00960 *StartPde = TempPde; 00961 PointerPte = MiGetVirtualAddressMappedByPte (StartPde); 00962 RtlZeroMemory (PointerPte, PAGE_SIZE); 00963 StartPde += 1; 00964 MiNumberOfExtraSystemPdes += 1; 00965 } 00966 00967 ExtraPtes = MiNumberOfExtraSystemPdes * PTE_PER_PAGE; 00968 } 00969 00970 ASSERT (NumberOfPages > 0); 00971 00972 // 00973 // If the kernel image has not been biased to allow for 3gb of user 00974 // space, the host processor supports large pages, and the number of 00975 // physical pages is greater than 127mb, then map the kernel image and 00976 // HAL into a large page. 00977 // 00978 00979 if ((KeFeatureBits & KF_LARGE_PAGE) && 00980 (NeedLowVirtualPfn == FALSE) && 00981 (MmNumberOfPhysicalPages > MmLargePageMinimum)) { 00982 00983 // 00984 // Map lower 512MB of physical memory as large pages starting 00985 // at address 0x80000000. 
00986 // 00987 00988 PointerPde = MiGetPdeAddress (MM_KSEG0_BASE); 00989 LastPte = MiGetPdeAddress (MM_KSEG2_BASE) - 1; 00990 if (MmHighestPhysicalPage < MM_PAGES_IN_KSEG0) { 00991 LastPte = MiGetPdeAddress (MM_KSEG0_BASE + 00992 (MmHighestPhysicalPage << PAGE_SHIFT)); 00993 } 00994 00995 PointerPte = MiGetPteAddress (MM_KSEG0_BASE); 00996 j = 0; 00997 00998 do { 00999 PMMPTE PPte; 01000 01001 Range = 0; 01002 if (PointerPde->u.Hard.Valid == 0) { 01003 TempPde.u.Hard.PageFrameNumber = NextPhysicalPage; 01004 NextPhysicalPage += 1; 01005 NumberOfPages -= 1; 01006 if (NumberOfPages == 0) { 01007 if (FreeDescriptor == NULL) { 01008 KeBugCheckEx (INSTALL_MORE_MEMORY, 01009 MmNumberOfPhysicalPages, 01010 MmLowestPhysicalPage, 01011 MmHighestPhysicalPage, 01012 3); 01013 } 01014 ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage + 01015 FreeDescriptor->PageCount)); 01016 NextPhysicalPage = FreeDescriptor->BasePage; 01017 NumberOfPages = FreeDescriptor->PageCount; 01018 SwitchedDescriptors = TRUE; 01019 } 01020 *PointerPde = TempPde; 01021 Range = 1; 01022 } 01023 PPte = PointerPte; 01024 for (i = 0; i < PTE_PER_PAGE; i += 1) { 01025 if (Range || (PPte->u.Hard.Valid == 0)) { 01026 *PPte = ValidKernelPte; 01027 PPte->u.Hard.PageFrameNumber = i + j; 01028 } 01029 PPte += 1; 01030 } 01031 PointerPde += 1; 01032 PointerPte += PTE_PER_PAGE; 01033 j += PTE_PER_PAGE; 01034 } while (PointerPde <= LastPte); 01035 01036 MapLargePages = 1; 01037 } 01038 else if (NeedLowVirtualPfn == TRUE) { 01039 01040 // 01041 // The PFN database is so large that it cannot be virtually 01042 // mapped in system PTEs and large pages cannot be used. 01043 // Select a low virtual address for it now and map it in. 01044 // 16mb is a good choice - just after the boot images. 01045 // 01046 01047 MmPfnDatabase = (PMMPFN)(MM_KSEG0_BASE | MM_BOOT_IMAGE_SIZE); 01048 01049 // 01050 // Ensure the maximum PFN database fits into the available virtual 01051 // address space. 01052 // 01053 01054 if ((ULONG)MmPfnDatabase + (PfnAllocation << PAGE_SHIFT) > MM_KSEG2_BASE) { 01055 MmHighestPossiblePhysicalPage = (MM_KSEG2_BASE - (ULONG)MmPfnDatabase - (MmSecondaryColors * sizeof(MMCOLOR_TABLES)*2)) / sizeof (MMPFN) - 1; 01056 01057 if (MmHighestPhysicalPage > MmHighestPossiblePhysicalPage) { 01058 MmHighestPhysicalPage = MmHighestPossiblePhysicalPage; 01059 } 01060 } 01061 01062 StartPde = MiGetPdeAddress (MmPfnDatabase); 01063 EndPde = MiGetPdeAddress ((PVOID)((PCHAR)MmPfnDatabase + (MmHighestPossiblePhysicalPage + 1) * sizeof(MMPFN) + (MmSecondaryColors * sizeof(MMCOLOR_TABLES)*2) - 1)); 01064 01065 UsingHighMemory = FALSE; 01066 01067 if ((MmMakeLowMemory == TRUE) && 01068 (NextPhysicalPage < (FreeDescriptorLowMem->PageCount + 01069 FreeDescriptorLowMem->BasePage))) { 01070 01071 ULONG PagesNeeded; 01072 01073 // 01074 // We haven't used the other descriptor yet, examine it now 01075 // to see if enough usable memory is available. 01076 // 01077 01078 PagesNeeded = (EndPde - StartPde + 1); 01079 01080 if ((FreeDescriptor != NULL) && (FreeDescriptor->PageCount >= PagesNeeded)) { 01081 UsableDescriptor = FreeDescriptor; 01082 NextUsablePhysicalPage = FreeDescriptor->BasePage; 01083 UsingHighMemory = TRUE; 01084 } 01085 } 01086 01087 while (StartPde <= EndPde) { 01088 01089 if (StartPde->u.Hard.Valid == 0) { 01090 01091 // 01092 // Map in a page directory page. 
01093 // 01094 01095 if (UsingHighMemory == TRUE) { 01096 01097 // 01098 // Note is undone later as the PFN database is being 01099 // created in virtual memory because the memory 01100 // descriptors are scanned and only pages in the 01101 // descriptors get mapped. 01102 // 01103 01104 TempPde.u.Hard.PageFrameNumber = NextUsablePhysicalPage; 01105 NextUsablePhysicalPage += 1; 01106 UsableDescriptorRemoved += 1; 01107 UsableDescriptor->BasePage += 1; 01108 UsableDescriptor->PageCount -= 1; 01109 } 01110 else { 01111 TempPde.u.Hard.PageFrameNumber = NextPhysicalPage; 01112 NumberOfPages -= 1; 01113 NextPhysicalPage += 1; 01114 01115 if (NumberOfPages == 0) { 01116 if (FreeDescriptor == NULL) { 01117 KeBugCheckEx (INSTALL_MORE_MEMORY, 01118 MmNumberOfPhysicalPages, 01119 MmLowestPhysicalPage, 01120 MmHighestPhysicalPage, 01121 4); 01122 } 01123 ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage + 01124 FreeDescriptor->PageCount)); 01125 NextPhysicalPage = FreeDescriptor->BasePage; 01126 NumberOfPages = FreeDescriptor->PageCount; 01127 SwitchedDescriptors = TRUE; 01128 } 01129 } 01130 01131 *StartPde = TempPde; 01132 PointerPte = MiGetVirtualAddressMappedByPte (StartPde); 01133 RtlZeroMemory (PointerPte, PAGE_SIZE); 01134 } 01135 StartPde += 1; 01136 } 01137 01138 // 01139 // Leave some room for nonpaged pool expansion in order to share 01140 // common initialization code and to map physically contiguous 01141 // requests if needed. 01142 // 01143 01144 if (MmSizeOfNonPagedPoolInBytes > MmMaximumNonPagedPoolInBytes * 2 / 3) { 01145 MmSizeOfNonPagedPoolInBytes = (SIZE_T)PAGE_ALIGN (MmMaximumNonPagedPoolInBytes * 2 / 3); 01146 } 01147 } 01148 } 01149 else { 01150 if ((PfnAllocation + 500) * PAGE_SIZE > MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes) { 01151 01152 // 01153 // Recarve portions of the initial and expansion nonpaged pools 01154 // so enough expansion PTEs will be available to map the PFN 01155 // database on large memory systems. 01156 // 01157 01158 if ((PfnAllocation + 500) * PAGE_SIZE < MmSizeOfNonPagedPoolInBytes) { 01159 MmSizeOfNonPagedPoolInBytes -= ((PfnAllocation + 500) * PAGE_SIZE); 01160 } 01161 } 01162 } 01163 01164 // 01165 // If a low virtual PFN database and initial nonpaged pool is desired, 01166 // construct the PDEs for the initial nonpaged pool now. 
01167 // 01168 01169 if (NeedLowVirtualPfn == TRUE) { 01170 01171 ULONG PagesNeeded; 01172 01173 LowVirtualNonPagedPoolStart = NULL; 01174 LowVirtualNonPagedPoolSizeInBytes = 0; 01175 01176 PagesNeeded = 0; 01177 StartPde = MiGetPdeAddress ((PVOID)((PCHAR)MmPfnDatabase + (MmHighestPossiblePhysicalPage + 1) * sizeof(MMPFN) + (MmSecondaryColors * sizeof(MMCOLOR_TABLES)*2) - 1)); 01178 StartPde += 1; 01179 EndPde = MiGetPdeAddress (MM_KSEG2_BASE) - 1; 01180 01181 while (StartPde <= EndPde) { 01182 if (StartPde->u.Hard.Valid == 0) { 01183 if (LowVirtualNonPagedPoolStart == NULL) { 01184 LowVirtualNonPagedPoolStart = MiGetVirtualAddressMappedByPde (StartPde); 01185 } 01186 PagesNeeded += 1; 01187 } 01188 else { 01189 if (LowVirtualNonPagedPoolStart != NULL) { 01190 LowVirtualNonPagedPoolSizeInBytes = PagesNeeded * MM_VA_MAPPED_BY_PDE; 01191 } 01192 } 01193 01194 StartPde += 1; 01195 } 01196 01197 if (LowVirtualNonPagedPoolStart == NULL) { 01198 StartPde = MiGetPdeAddress ((PVOID)((PCHAR)MmPfnDatabase + (MmHighestPossiblePhysicalPage + 1) * sizeof(MMPFN) + (MmSecondaryColors * sizeof(MMCOLOR_TABLES)*2) - 1)); 01199 KeBugCheckEx (MEMORY_MANAGEMENT, 01200 0x7000, 01201 (ULONG_PTR)StartPde, 01202 (ULONG_PTR)EndPde, 01203 PagesNeeded); 01204 } 01205 01206 if (LowVirtualNonPagedPoolSizeInBytes == 0) { 01207 LowVirtualNonPagedPoolSizeInBytes = PagesNeeded * MM_VA_MAPPED_BY_PDE; 01208 } 01209 01210 if (LowVirtualNonPagedPoolSizeInBytes < MmSizeOfNonPagedPoolInBytes) { 01211 MmSizeOfNonPagedPoolInBytes = LowVirtualNonPagedPoolSizeInBytes; 01212 } 01213 01214 UsingHighMemory = FALSE; 01215 if ((MmMakeLowMemory == TRUE) && 01216 (NextPhysicalPage < (FreeDescriptorLowMem->PageCount + 01217 FreeDescriptorLowMem->BasePage)) && 01218 (FreeDescriptor != NULL)) { 01219 01220 // 01221 // We haven't used the other descriptor yet, examine it now 01222 // to see if enough usable memory is available. 01223 // 01224 01225 if (FreeDescriptor->PageCount >= PagesNeeded) { 01226 01227 UsableDescriptor = FreeDescriptor; 01228 NextUsablePhysicalPage = FreeDescriptor->BasePage; 01229 01230 UsableDescriptorRemoved += PagesNeeded; 01231 01232 // 01233 // Note this must be undone if the PFN database is created in 01234 // virtual memory because the memory descriptors are scanned 01235 // and only pages in the descriptors get mapped. 
01236 // 01237 01238 UsableDescriptor->BasePage += PagesNeeded; 01239 UsableDescriptor->PageCount -= PagesNeeded; 01240 UsingHighMemory = TRUE; 01241 } 01242 } 01243 01244 StartPde = MiGetPdeAddress ((PVOID)((PCHAR)MmPfnDatabase + (MmHighestPossiblePhysicalPage + 1) * sizeof(MMPFN) + (MmSecondaryColors * sizeof(MMCOLOR_TABLES)*2) - 1)); 01245 StartPde += 1; 01246 01247 while (StartPde <= EndPde) { 01248 if (StartPde->u.Hard.Valid == 1) { 01249 StartPde += 1; 01250 continue; 01251 } 01252 01253 if (UsingHighMemory == TRUE) { 01254 TempPde.u.Hard.PageFrameNumber = NextUsablePhysicalPage; 01255 NextUsablePhysicalPage += 1; 01256 } 01257 else { 01258 TempPde.u.Hard.PageFrameNumber = NextPhysicalPage; 01259 NextPhysicalPage += 1; 01260 NumberOfPages -= 1; 01261 if (NumberOfPages == 0) { 01262 if (FreeDescriptor == NULL) { 01263 KeBugCheckEx (INSTALL_MORE_MEMORY, 01264 MmNumberOfPhysicalPages, 01265 MmLowestPhysicalPage, 01266 MmHighestPhysicalPage, 01267 7); 01268 } 01269 ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage + 01270 FreeDescriptor->PageCount)); 01271 NextPhysicalPage = FreeDescriptor->BasePage; 01272 NumberOfPages = FreeDescriptor->PageCount; 01273 SwitchedDescriptors = TRUE; 01274 } 01275 } 01276 *StartPde = TempPde; 01277 PointerPte = MiGetVirtualAddressMappedByPte (StartPde); 01278 RtlZeroMemory (PointerPte, PAGE_SIZE); 01279 StartPde += 1; 01280 } 01281 } 01282 01283 PointerPte = MiGetPteAddress(MmNonPagedPoolStart); 01284 NonPagedPoolStartVirtual = MmNonPagedPoolStart; 01285 01286 // 01287 // Fill in the PTEs for the initial nonpaged pool using the 01288 // largest free chunk of memory below 16mb. 01289 // 01290 01291 SavedSize = MmSizeOfNonPagedPoolInBytes; 01292 01293 if (((MmProtectFreedNonPagedPool == FALSE) && (MapLargePages)) || 01294 (NeedLowVirtualPfn == TRUE)) { 01295 01296 ULONG NonPagedVirtualStartPfn; 01297 01298 NonPagedVirtualStartPfn = 0; 01299 01300 if (MmExpandedNonPagedPoolInBytes == 0) { 01301 01302 UsingHighMemory = FALSE; 01303 if ((MmMakeLowMemory == TRUE) && 01304 (NextPhysicalPage < (FreeDescriptorLowMem->PageCount + 01305 FreeDescriptorLowMem->BasePage)) && 01306 (FreeDescriptor != NULL) && 01307 ((FreeDescriptor->BasePage < MM_PAGES_IN_KSEG0) || 01308 (NeedLowVirtualPfn == TRUE))) { 01309 01310 ULONG NumberOfUsablePages; 01311 ULONG PagesNeeded; 01312 01313 PagesNeeded = MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT; 01314 MmSizeOfNonPagedPoolInBytes = PagesNeeded << PAGE_SHIFT; 01315 01316 // 01317 // We haven't used the other descriptor yet, examine it now 01318 // to see if enough usable memory is available. 01319 // 01320 01321 NumberOfUsablePages = FreeDescriptor->PageCount; 01322 01323 if (NumberOfUsablePages > (MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT)) { 01324 NumberOfUsablePages = MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT; 01325 } 01326 01327 if (NeedLowVirtualPfn == FALSE) { 01328 if (FreeDescriptor->BasePage + NumberOfUsablePages > MM_PAGES_IN_KSEG0) { 01329 NumberOfUsablePages = MM_PAGES_IN_KSEG0 - FreeDescriptor->BasePage; 01330 } 01331 } 01332 01333 if (NumberOfUsablePages >= PagesNeeded) { 01334 01335 UsableDescriptor = FreeDescriptor; 01336 NextUsablePhysicalPage = FreeDescriptor->BasePage; 01337 01338 UsableDescriptorRemoved += PagesNeeded; 01339 01340 // 01341 // Note this must be undone if the PFN database is created 01342 // in virtual memory because the memory descriptors are 01343 // scanned and only pages in the descriptors get mapped. 
01344 // 01345 01346 UsableDescriptor->BasePage += PagesNeeded; 01347 UsableDescriptor->PageCount -= PagesNeeded; 01348 UsingHighMemory = TRUE; 01349 } 01350 } 01351 01352 if (UsingHighMemory == FALSE) { 01353 if (MmSizeOfNonPagedPoolInBytes > (NumberOfPages << (PAGE_SHIFT))) { 01354 MmSizeOfNonPagedPoolInBytes = NumberOfPages << PAGE_SHIFT; 01355 } 01356 } 01357 01358 NonPagedPoolStartVirtual = (PVOID)((PCHAR)NonPagedPoolStartVirtual + 01359 MmSizeOfNonPagedPoolInBytes); 01360 01361 // 01362 // No need to get page table pages for these as we can reference 01363 // them via large pages. If we are not using large pages (ie: 01364 // for NeedLowVirtualPfn), the page tables are already allocated 01365 // and just the mappings need to be filled in. 01366 // 01367 01368 if (UsingHighMemory == TRUE) { 01369 if (NeedLowVirtualPfn == TRUE) { 01370 MmNonPagedPoolStart = LowVirtualNonPagedPoolStart; 01371 } 01372 else { 01373 MmNonPagedPoolStart = 01374 (PVOID)(MM_KSEG0_BASE | (NextUsablePhysicalPage << PAGE_SHIFT)); 01375 } 01376 NonPagedVirtualStartPfn = NextUsablePhysicalPage; 01377 } 01378 else { 01379 if (NeedLowVirtualPfn == TRUE) { 01380 MmNonPagedPoolStart = LowVirtualNonPagedPoolStart; 01381 } 01382 else { 01383 MmNonPagedPoolStart = 01384 (PVOID)(MM_KSEG0_BASE | (NextPhysicalPage << PAGE_SHIFT)); 01385 } 01386 01387 NonPagedVirtualStartPfn = NextPhysicalPage; 01388 NextPhysicalPage += MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT; 01389 NumberOfPages -= MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT; 01390 01391 if (NumberOfPages == 0) { 01392 if (FreeDescriptor == NULL) { 01393 KeBugCheckEx (INSTALL_MORE_MEMORY, 01394 MmNumberOfPhysicalPages, 01395 MmLowestPhysicalPage, 01396 MmHighestPhysicalPage, 01397 5); 01398 } 01399 ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage + 01400 FreeDescriptor->PageCount)); 01401 NextPhysicalPage = FreeDescriptor->BasePage; 01402 NumberOfPages = FreeDescriptor->PageCount; 01403 SwitchedDescriptors = TRUE; 01404 } 01405 } 01406 01407 MmSubsectionBase = (ULONG)MmNonPagedPoolStart; 01408 MmSubsectionTopPage = NonPagedVirtualStartPfn + (MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT); 01409 01410 // 01411 // If the entire initial nonpaged pool is below 128mb, then 01412 // widen the subsection range so large pages can be used for any 01413 // nonpaged expansion pool single-page allocation below 128mb. 01414 // 01415 01416 if (MmSubsectionTopPage < (MM_SUBSECTION_MAP >> PAGE_SHIFT)) { 01417 MmSubsectionBase = MM_KSEG0_BASE; 01418 MmSubsectionTopPage = MM_SUBSECTION_MAP >> PAGE_SHIFT; 01419 } 01420 } 01421 else { 01422 01423 ULONG NumberOfUsablePages; 01424 PMEMORY_ALLOCATION_DESCRIPTOR UseFreeDescriptor; 01425 01426 NumberOfUsablePages = 0; 01427 UseFreeDescriptor = NULL; 01428 01429 if (NextPhysicalPage < (FreeDescriptorLowMem->PageCount + 01430 FreeDescriptorLowMem->BasePage)) { 01431 01432 // 01433 // We haven't used the other descriptor yet, examine it now 01434 // to see if more usable (ie: below 512mb) memory is available 01435 // in it than the lowmem descriptor for building the initial 01436 // nonpaged pool. The 512mb restriction is because we will be 01437 // using large pages to map it. 
01438 // 01439 01440 if (FreeDescriptor != NULL) { 01441 if (NeedLowVirtualPfn == TRUE) { 01442 NumberOfUsablePages = FreeDescriptor->PageCount; 01443 01444 if (NumberOfUsablePages > (MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT)) { 01445 NumberOfUsablePages = MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT; 01446 } 01447 01448 ASSERT (NumberOfUsablePages <= (MM_MAX_INITIAL_NONPAGED_POOL >> PAGE_SHIFT)); 01449 01450 } 01451 else if (FreeDescriptor->BasePage < MM_PAGES_IN_KSEG0) { 01452 01453 NumberOfUsablePages = FreeDescriptor->PageCount; 01454 01455 if (NumberOfUsablePages > (MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT)) { 01456 NumberOfUsablePages = MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT; 01457 } 01458 01459 ASSERT (NumberOfUsablePages <= (MM_MAX_INITIAL_NONPAGED_POOL >> PAGE_SHIFT)); 01460 01461 if (FreeDescriptor->BasePage + NumberOfUsablePages > MM_PAGES_IN_KSEG0) { 01462 NumberOfUsablePages = MM_PAGES_IN_KSEG0 - FreeDescriptor->BasePage; 01463 } 01464 } 01465 } 01466 01467 // 01468 // If the free descriptor memory meets our needs, switch to it 01469 // just for the nonpaged pool creation. Otherwise, just use 01470 // the low descriptor memory. 01471 // 01472 01473 if (NumberOfUsablePages > NumberOfPages) { 01474 UseFreeDescriptor = FreeDescriptor; 01475 NextUsablePhysicalPage = FreeDescriptor->BasePage; 01476 } 01477 } 01478 01479 if (UseFreeDescriptor == NULL) { 01480 NumberOfUsablePages = NumberOfPages; 01481 NextUsablePhysicalPage = NextPhysicalPage; 01482 } 01483 else { 01484 if (UsableDescriptor != NULL) { 01485 ASSERT (UsableDescriptor == UseFreeDescriptor); 01486 } 01487 UsableDescriptor = UseFreeDescriptor; 01488 } 01489 01490 if (MmSizeOfNonPagedPoolInBytes > (NumberOfUsablePages << (PAGE_SHIFT))) { 01491 MmSizeOfNonPagedPoolInBytes = NumberOfUsablePages << PAGE_SHIFT; 01492 } 01493 01494 MmMaximumNonPagedPoolInBytes = MmSizeOfNonPagedPoolInBytes + 01495 MmExpandedNonPagedPoolInBytes; 01496 01497 // 01498 // No need to get page table pages for these as we can reference 01499 // them via large pages. 01500 // 01501 01502 if (NeedLowVirtualPfn == TRUE) { 01503 MmNonPagedPoolStart = LowVirtualNonPagedPoolStart; 01504 } 01505 else { 01506 MmNonPagedPoolStart = 01507 (PVOID)(MM_KSEG0_BASE | (NextUsablePhysicalPage << PAGE_SHIFT)); 01508 } 01509 01510 NonPagedVirtualStartPfn = NextUsablePhysicalPage; 01511 01512 if (UseFreeDescriptor != NULL) { 01513 01514 UsableDescriptorRemoved += MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT; 01515 01516 // 01517 // Note this must be undone if the PFN database is created in 01518 // virtual memory because the memory descriptors are scanned 01519 // and only pages in the descriptors get mapped. 
01520 // 01521 01522 UsableDescriptor->BasePage += (MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT); 01523 UsableDescriptor->PageCount -= (MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT); 01524 01525 if (UsableDescriptor->BasePage <= (MM_SUBSECTION_MAP >> PAGE_SHIFT)) { 01526 MmSubsectionBase = MM_KSEG0_BASE; 01527 MmSubsectionTopPage = MM_SUBSECTION_MAP >> PAGE_SHIFT; 01528 } 01529 else { 01530 MmSubsectionBase = (ULONG)MmNonPagedPoolStart; 01531 MmSubsectionTopPage = UsableDescriptor->BasePage; 01532 } 01533 } 01534 else { 01535 NextPhysicalPage += MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT; 01536 NumberOfPages -= MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT; 01537 01538 MmSubsectionBase = (ULONG)MmNonPagedPoolStart; 01539 MmSubsectionTopPage = NextPhysicalPage; 01540 01541 if (NextPhysicalPage < (MM_SUBSECTION_MAP >> PAGE_SHIFT)) { 01542 MmSubsectionBase = MM_KSEG0_BASE; 01543 MmSubsectionTopPage = MM_SUBSECTION_MAP >> PAGE_SHIFT; 01544 } 01545 01546 if (NumberOfPages == 0) { 01547 if (FreeDescriptor == NULL) { 01548 KeBugCheckEx (INSTALL_MORE_MEMORY, 01549 MmNumberOfPhysicalPages, 01550 MmLowestPhysicalPage, 01551 MmHighestPhysicalPage, 01552 6); 01553 } 01554 ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage + 01555 FreeDescriptor->PageCount)); 01556 NextPhysicalPage = FreeDescriptor->BasePage; 01557 NumberOfPages = FreeDescriptor->PageCount; 01558 SwitchedDescriptors = TRUE; 01559 } 01560 } 01561 } 01562 01563 if (NeedLowVirtualPfn == TRUE) { 01564 01565 ASSERT (NonPagedVirtualStartPfn != 0); 01566 PageFrameIndex = NonPagedVirtualStartPfn; 01567 01568 PointerPte = MiGetPteAddress (MmNonPagedPoolStart); 01569 LastPte = MiGetPteAddress((ULONG)MmNonPagedPoolStart + 01570 MmSizeOfNonPagedPoolInBytes - 1); 01571 while (PointerPte <= LastPte) { 01572 ASSERT (PointerPte->u.Hard.Valid == 0); 01573 TempPte.u.Hard.PageFrameNumber = PageFrameIndex; 01574 *PointerPte = TempPte; 01575 PointerPte += 1; 01576 PageFrameIndex += 1; 01577 } 01578 } 01579 01580 if (MmExpandedNonPagedPoolInBytes == 0) { 01581 MmNonPagedPoolExpansionStart = (PVOID)((PCHAR)NonPagedPoolStartVirtual + 01582 (SavedSize - MmSizeOfNonPagedPoolInBytes)); 01583 } 01584 else { 01585 MmNonPagedPoolExpansionStart = NonPagedPoolStartVirtual; 01586 } 01587 01588 #if defined(_X86PAE_) 01589 01590 // 01591 // Always widen the subsection range for PAE so large pages can be used 01592 // for all nonpaged single-page expansion allocations below 512mb as 01593 // the widened PTE can always encode it properly. 01594 // 01595 01596 MmSubsectionBase = MM_KSEG0_BASE; 01597 MmSubsectionTopPage = (512*1024*1024) >> PAGE_SHIFT; 01598 #endif 01599 01600 } 01601 else { 01602 01603 ASSERT (MmExpandedNonPagedPoolInBytes == 0); 01604 LastPte = MiGetPteAddress((ULONG)MmNonPagedPoolStart + 01605 MmSizeOfNonPagedPoolInBytes - 1); 01606 01607 UsingHighMemory = FALSE; 01608 if ((MmMakeLowMemory == TRUE) && 01609 (NextPhysicalPage < (FreeDescriptorLowMem->PageCount + 01610 FreeDescriptorLowMem->BasePage)) && 01611 (FreeDescriptor != NULL)) { 01612 01613 ULONG PagesNeeded; 01614 01615 // 01616 // We haven't used the other descriptor yet, examine it now 01617 // to see if enough usable memory is available. 
01618 // 01619 01620 PagesNeeded = (LastPte - PointerPte + 1); 01621 01622 if (FreeDescriptor->PageCount >= PagesNeeded) { 01623 01624 UsableDescriptor = FreeDescriptor; 01625 NextUsablePhysicalPage = FreeDescriptor->BasePage; 01626 01627 UsableDescriptorRemoved += PagesNeeded; 01628 01629 // 01630 // Note this must be undone if the PFN database is created in 01631 // virtual memory because the memory descriptors are scanned 01632 // and only pages in the descriptors get mapped. 01633 // 01634 01635 UsableDescriptor->BasePage += PagesNeeded; 01636 UsableDescriptor->PageCount -= PagesNeeded; 01637 UsingHighMemory = TRUE; 01638 } 01639 } 01640 01641 while (PointerPte <= LastPte) { 01642 01643 if (UsingHighMemory == TRUE) { 01644 TempPte.u.Hard.PageFrameNumber = NextUsablePhysicalPage; 01645 NextUsablePhysicalPage += 1; 01646 } 01647 else { 01648 TempPte.u.Hard.PageFrameNumber = NextPhysicalPage; 01649 NextPhysicalPage += 1; 01650 NumberOfPages -= 1; 01651 if (NumberOfPages == 0) { 01652 if (FreeDescriptor == NULL) { 01653 KeBugCheckEx (INSTALL_MORE_MEMORY, 01654 MmNumberOfPhysicalPages, 01655 MmLowestPhysicalPage, 01656 MmHighestPhysicalPage, 01657 9); 01658 } 01659 ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage + 01660 FreeDescriptor->PageCount)); 01661 NextPhysicalPage = FreeDescriptor->BasePage; 01662 NumberOfPages = FreeDescriptor->PageCount; 01663 SwitchedDescriptors = TRUE; 01664 MmSizeOfNonPagedPoolInBytes = (PointerPte - MiGetPteAddress(MmNonPagedPoolStart)) << PAGE_SHIFT; 01665 break; 01666 } 01667 } 01668 *PointerPte = TempPte; 01669 PointerPte += 1; 01670 } 01671 01672 SavedSize = MmSizeOfNonPagedPoolInBytes; 01673 01674 MmNonPagedPoolExpansionStart = (PVOID)((PCHAR)NonPagedPoolStartVirtual + 01675 MmSizeOfNonPagedPoolInBytes); 01676 } 01677 01678 // 01679 // There must be at least one page of system PTEs before the expanded 01680 // nonpaged pool. 01681 // 01682 01683 ASSERT (MiGetPteAddress(MmNonPagedSystemStart) < MiGetPteAddress(MmNonPagedPoolExpansionStart)); 01684 01685 // 01686 // Non-paged pages now exist, build the pool structures. 01687 // 01688 01689 MmPageAlignedPoolBase[NonPagedPool] = MmNonPagedPoolStart; 01690 01691 if (MmExpandedNonPagedPoolInBytes == 0) { 01692 MmMaximumNonPagedPoolInBytes -= (SavedSize - MmSizeOfNonPagedPoolInBytes); 01693 MiInitializeNonPagedPool (); 01694 MmMaximumNonPagedPoolInBytes += (SavedSize - MmSizeOfNonPagedPoolInBytes); 01695 } 01696 else { 01697 MiInitializeNonPagedPool (); 01698 } 01699 01700 // 01701 // Before Non-paged pool can be used, the PFN database must 01702 // be built. This is due to the fact that the start and end of 01703 // allocation bits for nonpaged pool are maintained in the 01704 // PFN elements for the corresponding pages. 01705 // 01706 01707 MmSecondaryColorMask = MmSecondaryColors - 1; 01708 01709 // 01710 // The SwitchedDescriptors must be checked because if it is set, the 01711 // FreeDescriptor has already been allocated from without updating the 01712 // BasePage & PageCount (these were not changed because they are scanned 01713 // if the PFN database is virtually mapped to insert PTEs). 
01714     //
01715 
01716     if ((SwitchedDescriptors == FALSE) && (FreeDescriptor != NULL)) {
01717         BasePage = FreeDescriptor->BasePage;
01718         HighPage = FreeDescriptor->BasePage + FreeDescriptor->PageCount;
01719     }
01720     else {
01721         BasePage = NextPhysicalPage;
01722         HighPage = NextPhysicalPage + NumberOfPages;
01723     }
01724 
01725     HighPageInKseg0 = HighPage;
01726     if (HighPageInKseg0 > MM_PAGES_IN_KSEG0) {
01727         HighPageInKseg0 = MM_PAGES_IN_KSEG0;
01728     }
01729 
01730     PagesLeft = HighPage - BasePage;
01731 
01732 #if DBG
01733     if (FreeDescriptor == NULL) {
01734         ASSERT (MapLargePages == 0);
01735     }
01736 #endif
01737 
01738 #ifndef PFN_CONSISTENCY
01739     if ((MapLargePages) && (BasePage + PfnAllocation <= HighPageInKseg0)) {
01740 
01741         //
01742         // Allocate the PFN database in kseg0.
01743         //
01744         // Compute the address of the PFN by allocating the appropriate
01745         // number of pages from the end of the free descriptor.
01746         //
01747 
01748         PfnInKseg0 = TRUE;
01749         MmPfnDatabase = (PMMPFN)(MM_KSEG0_BASE | (BasePage << PAGE_SHIFT));
01750 
01751         //
01752         // Later we will walk the memory descriptors and add pages to the free
01753         // list in the PFN database.
01754         //
01755         // To do this correctly:
01756         //
01757         // The FreeDescriptor fields must be updated if we haven't already
01758         // switched descriptors so the PFN database consumption isn't
01759         // added to the freelist.
01760         //
01761         // If we have switched, then we must update NextPhysicalPage and the
01762         // FreeDescriptor need not be updated.
01763         //
01764 
01765         if (SwitchedDescriptors == TRUE) {
01766             ASSERT (NumberOfPages > PfnAllocation);
01767             NextPhysicalPage += PfnAllocation;
01768             NumberOfPages -= PfnAllocation;
01769         }
01770         else {
01771             FreeDescriptor->BasePage += PfnAllocation;
01772             FreeDescriptor->PageCount -= PfnAllocation;
01773         }
01774 
01775         RtlZeroMemory(MmPfnDatabase, PfnAllocation * PAGE_SIZE);
01776 
01777         //
01778         // The PFN database was allocated in kseg0. Since space was left for
01779         // it virtually (in the nonpaged pool expansion PTEs), remove this
01780         // now unused space if it can cause PTE encoding to exceed the 27 bits.
01781         //
01782 
01783         if (MmTotalFreeSystemPtes[NonPagedPoolExpansion] >
01784             (MM_MAX_ADDITIONAL_NONPAGED_POOL >> PAGE_SHIFT)) {
01785             //
01786             // Reserve the expanded pool PTEs so they cannot be used.
01787             //
01788 
01789             ULONG PfnDatabaseSpace;
01790 
01791             PfnDatabaseSpace = MmTotalFreeSystemPtes[NonPagedPoolExpansion] -
01792                                (MM_MAX_ADDITIONAL_NONPAGED_POOL >> PAGE_SHIFT);
01793 
01794             MiReserveSystemPtes (
01795                             PfnDatabaseSpace,
01796                             NonPagedPoolExpansion,
01797                             0,
01798                             0,
01799                             TRUE);
01800 
01801             //
01802             // Adjust the end of nonpaged pool to reflect this reservation.
01803             // This is so the entire nonpaged pool expansion space is available
01804             // not just for general purpose consumption, but also for subsection
01805             // encoding into protoptes when subsections are allocated from the
01806             // very end of the expansion range.
01807             //
01808 
01809             (PCHAR)MmNonPagedPoolEnd -= PfnDatabaseSpace * PAGE_SIZE;
01810         }
01811         else {
01812 
01813             //
01814             // Allocate one more PTE just below the PFN database. This provides
01815             // protection against the caller of the first real nonpaged
01816             // expansion allocation in case he accidentally overruns his pool
01817             // block. (We'll trap instead of corrupting the PFN database).
01818             // This also allows us to freely increment in MiFreePoolPages
01819             // without having to worry about a valid PTE just after the end of
01820             // the highest nonpaged pool allocation.
01821             //
01822 
01823             MiReserveSystemPtes (
01824                             1,
01825                             NonPagedPoolExpansion,
01826                             0,
01827                             0,
01828                             TRUE);
01829         }
01830 
01831     } else {
01832 
01833         ULONG FreeNextPhysicalPage;
01834         ULONG FreeNumberOfPages;
01835 
01836         //
01837         // The PFN database will be built from the FreeDescriptor. This is
01838         // done to avoid using pages below 16mb to contain the PFN database
01839         // as these pages may be needed by drivers. The FreeDescriptor will
01840         // always have enough pages to build the PFN database from.
01841         //
01842 
01843         FreeNextPhysicalPage = BasePage;
01844         FreeNumberOfPages = PagesLeft;
01845 
01846 #endif // PFN_CONSISTENCY
01847 
01848         //
01849         // Calculate the start of the PFN database (it starts at physical
01850         // page zero, even if the lowest physical page is not zero).
01851         //
01852 
01853         if (NeedLowVirtualPfn == TRUE) {
01854             ASSERT (MmPfnDatabase != NULL);
01855             PointerPte = MiGetPteAddress (MmPfnDatabase);
01856         }
01857         else {
01858             ASSERT (PagesLeft >= PfnAllocation);
01859 
01860             PointerPte = MiReserveSystemPtes (PfnAllocation,
01861                                               NonPagedPoolExpansion,
01862                                               0,
01863                                               0,
01864                                               TRUE);
01865 
01866             MmPfnDatabase = (PMMPFN)(MiGetVirtualAddressMappedByPte (PointerPte));
01867 
01868             //
01869             // Adjust the end of nonpaged pool to reflect the PFN database
01870             // allocation. This is so the entire nonpaged pool expansion space
01871             // is available not just for general purpose consumption, but also
01872             // for subsection encoding into protoptes when subsections are
01873             // allocated from the very beginning of the initial nonpaged pool
01874             // range.
01875             //
01876 
01877             MmNonPagedPoolEnd = (PVOID)MmPfnDatabase;
01878 
01879             //
01880             // Allocate one more PTE just below the PFN database. This provides
01881             // protection against the caller of the first real nonpaged
01882             // expansion allocation in case he accidentally overruns his pool
01883             // block. (We'll trap instead of corrupting the PFN database).
01884             // This also allows us to freely increment in MiFreePoolPages
01885             // without having to worry about a valid PTE just after the end of
01886             // the highest nonpaged pool allocation.
01887             //
01888 
01889             MiReserveSystemPtes (1,
01890                                  NonPagedPoolExpansion,
01891                                  0,
01892                                  0,
01893                                  TRUE);
01894         }
01895 
01896 #if PFN_CONSISTENCY
01897         MiPfnStartPte = PointerPte;
01898         MiPfnPtes = PfnAllocation;
01899 #endif
01900 
01901         //
01902         // Go through the memory descriptors and for each physical page make
01903         // sure the PFN database has a valid PTE to map it. This allows machines
01904         // with sparse physical memory to have a minimal PFN database.
01905         //
01906 
01907         NextMd = LoaderBlock->MemoryDescriptorListHead.Flink;
01908         while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
01909             MemoryDescriptor = CONTAINING_RECORD(NextMd,
01910                                                  MEMORY_ALLOCATION_DESCRIPTOR,
01911                                                  ListEntry);
01912 
01913             if ((MemoryDescriptor->MemoryType == LoaderFirmwarePermanent) ||
01914                 (MemoryDescriptor->MemoryType == LoaderBBTMemory) ||
01915                 (MemoryDescriptor->MemoryType == LoaderSpecialMemory)) {
01916 
01917                 //
01918                 // If the descriptor lies within the highest PFN database entry
01919                 // then create PFN pages for this range. Note the PFN entries
01920                 // must be created to support \Device\PhysicalMemory.
01921                 //
01922 
01923                 if (MemoryDescriptor->BasePage <= MmHighestPhysicalPage) {
01924                     if (MemoryDescriptor->BasePage + MemoryDescriptor->PageCount > MmHighestPhysicalPage + 1) {
01925                         MemoryDescriptor->PageCount = MmHighestPhysicalPage - MemoryDescriptor->BasePage + 1;
01926                     }
01927                 }
01928                 else {
01929                     NextMd = MemoryDescriptor->ListEntry.Flink;
01930                     continue;
01931                 }
01932 
01933             }
01934 
01935             PointerPte = MiGetPteAddress (MI_PFN_ELEMENT(
01936                                             MemoryDescriptor->BasePage));
01937 
01938             LastPte = MiGetPteAddress (((PCHAR)(MI_PFN_ELEMENT(
01939                                             MemoryDescriptor->BasePage +
01940                                             MemoryDescriptor->PageCount))) - 1);
01941 
01942             if (MemoryDescriptor == UsableDescriptor) {
01943 
01944                 //
01945                 // Temporarily add back in the memory used to create the initial
01946                 // nonpaged pool so the PFN entries for it will be mapped.
01947                 //
01948                 // This must be done carefully as memory from the descriptor
01949                 // itself could get used to map the PFNs for the descriptor !
01950                 //
01951 
01952                 PointerPte = MiGetPteAddress (MI_PFN_ELEMENT(
01953                                 MemoryDescriptor->BasePage - UsableDescriptorRemoved));
01954             }
01955 
01956             while (PointerPte <= LastPte) {
01957                 if (PointerPte->u.Hard.Valid == 0) {
01958                     TempPte.u.Hard.PageFrameNumber = FreeNextPhysicalPage;
01959                     ASSERT (FreeNumberOfPages != 0);
01960                     FreeNextPhysicalPage += 1;
01961                     FreeNumberOfPages -= 1;
01962                     *PointerPte = TempPte;
01963                     RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte),
01964                                    PAGE_SIZE);
01965                 }
01966                 PointerPte += 1;
01967             }
01968 
01969             NextMd = MemoryDescriptor->ListEntry.Flink;
01970         }
01971 
01972         //
01973         // Handle the BIOS range here as some machines have big gaps in
01974         // their physical memory maps. Big meaning > 3.5mb from page 0x37
01975         // up to page 0x350.
01976         //
01977 
01978         PointerPte = MiGetPteAddress (MI_PFN_ELEMENT(MM_BIOS_START));
01979         LastPte = MiGetPteAddress ((PCHAR)(MI_PFN_ELEMENT(MM_BIOS_END)));
01980 
01981         while (PointerPte <= LastPte) {
01982             if (PointerPte->u.Hard.Valid == 0) {
01983                 TempPte.u.Hard.PageFrameNumber = FreeNextPhysicalPage;
01984                 ASSERT (FreeNumberOfPages != 0);
01985                 FreeNextPhysicalPage += 1;
01986                 FreeNumberOfPages -= 1;
01987                 *PointerPte = TempPte;
01988                 RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte),
01989                                PAGE_SIZE);
01990             }
01991             PointerPte += 1;
01992         }
01993 
01994         //
01995         // Update the global counts - this would have been tricky to do while
01996         // removing pages from them as we looped above.
01997         //
01998 
01999         //
02000         // Later we will walk the memory descriptors and add pages to the free
02001         // list in the PFN database.
02002         //
02003         // To do this correctly:
02004         //
02005         // The FreeDescriptor fields must be updated if we haven't already
02006         // switched descriptors so the PFN database consumption isn't
02007         // added to the freelist.
02008         //
02009         // If we have switched, then we must update NextPhysicalPage and the
02010         // FreeDescriptor need not be updated.
02011         //
02012 
02013         if (SwitchedDescriptors == TRUE) {
02014             NextPhysicalPage = FreeNextPhysicalPage;
02015             NumberOfPages = FreeNumberOfPages;
02016         }
02017         else if (FreeDescriptor != NULL) {
02018             FreeDescriptor->BasePage = FreeNextPhysicalPage;
02019             FreeDescriptor->PageCount = FreeNumberOfPages;
02020         }
02021         else {
02022             FreeDescriptorLowMem->BasePage = FreeNextPhysicalPage;
02023             FreeDescriptorLowMem->PageCount = FreeNumberOfPages;
02024             NextPhysicalPage = FreeNextPhysicalPage;
02025             NumberOfPages = FreeNumberOfPages;
02026         }
02027 
02028 #ifndef PFN_CONSISTENCY
02029     }
02030 #endif // PFN_CONSISTENCY
02031 
02032     if (NeedLowVirtualPfn == FALSE) {
02033         MmAllocatedNonPagedPool += PfnAllocation;
02034     }
02035 
02036     //
02037     // Initialize support for colored pages.
02038     //
02039 
02040     MmFreePagesByColor[0] = (PMMCOLOR_TABLES)
02041                                 &MmPfnDatabase[MmHighestPossiblePhysicalPage + 1];
02042 
02043     MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
02044 
02045     //
02046     // Make sure the PTEs are mapped.
02047     //
02048 
02049     if ((MmFreePagesByColor[0] > (PMMCOLOR_TABLES)MM_KSEG2_BASE) ||
02050         (NeedLowVirtualPfn == TRUE)) {
02051 
02052         PointerPte = MiGetPteAddress (&MmFreePagesByColor[0][0]);
02053 
02054         LastPte = MiGetPteAddress (
02055                     (PVOID)((PCHAR)&MmFreePagesByColor[1][MmSecondaryColors] - 1));
02056 
02057         while (PointerPte <= LastPte) {
02058             if (PointerPte->u.Hard.Valid == 0) {
02059                 TempPte.u.Hard.PageFrameNumber = NextPhysicalPage;
02060                 NextPhysicalPage += 1;
02061                 NumberOfPages -= 1;
02062                 if (NumberOfPages == 0) {
02063                     if (FreeDescriptor == NULL) {
02064                         KeBugCheckEx (INSTALL_MORE_MEMORY,
02065                                       MmNumberOfPhysicalPages,
02066                                       MmLowestPhysicalPage,
02067                                       MmHighestPhysicalPage,
02068                                       8);
02069                     }
02070                     ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage +
02071                                                  FreeDescriptor->PageCount));
02072                     NextPhysicalPage = FreeDescriptor->BasePage;
02073                     NumberOfPages = FreeDescriptor->PageCount;
02074                     SwitchedDescriptors = TRUE;
02075                 }
02076 
02077                 *PointerPte = TempPte;
02078                 RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte),
02079                                PAGE_SIZE);
02080             }
02081 
02082             PointerPte += 1;
02083         }
02084     }
02085 
02086     for (i = 0; i < MmSecondaryColors; i += 1) {
02087         MmFreePagesByColor[ZeroedPageList][i].Flink = MM_EMPTY_LIST;
02088         MmFreePagesByColor[FreePageList][i].Flink = MM_EMPTY_LIST;
02089     }
02090 
02091     //
02092     // Add nonpaged pool to PFN database if mapped via KSEG0.
02093     //
02094 
02095     PointerPde = MiGetPdeAddress (PTE_BASE);
02096 
02097     if ((MmNonPagedPoolStart < (PVOID)MM_KSEG2_BASE) && (MapLargePages != 0)) {
02098         j = MI_CONVERT_PHYSICAL_TO_PFN (MmNonPagedPoolStart);
02099         Pfn1 = MI_PFN_ELEMENT (j);
02100         i = MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT;
02101 
02102         PERFINFO_INIT_POOLRANGE(Pfn1 - MmPfnDatabase, i);
02103 
02104         do {
02105             PointerPde = MiGetPdeAddress (MM_KSEG0_BASE + (j << PAGE_SHIFT));
02106             Pfn1->PteFrame = MI_GET_PAGE_FRAME_FROM_PTE(PointerPde);
02107             Pfn1->PteAddress = (PMMPTE)(j << PAGE_SHIFT);
02108             Pfn1->u2.ShareCount += 1;
02109             Pfn1->u3.e2.ReferenceCount = 1;
02110             Pfn1->u3.e1.PageLocation = ActiveAndValid;
02111             Pfn1->u3.e1.PageColor = 0;
02112             j += 1;
02113             Pfn1 += 1;
02114             i -= 1;
02115         } while ( i );
02116     }
02117 
02118     //
02119     // Go through the page table entries and for any page which is valid,
02120     // update the corresponding PFN database element.
02121     //
02122 
02123     Pde = MiGetPdeAddress (NULL);
02124     va = 0;
02125     PdeCount = PDE_PER_PAGE;
02126 #if defined(_X86PAE_)
02127     PdeCount *= PD_PER_SYSTEM;
02128 #endif
02129     for (i = 0; i < PdeCount; i += 1) {
02130 
02131         //
02132         // If the kernel image has been biased to allow for 3gb of user
02133         // address space, then the first 16mb of memory is double mapped
02134         // to KSEG0_BASE and to ALTERNATE_BASE. Therefore, the KSEG0_BASE
02135         // entries must be skipped.
02136         //
02137 
02138         if (MmVirtualBias != 0) {
02139             if ((Pde >= MiGetPdeAddress(KSEG0_BASE)) &&
02140                 (Pde < MiGetPdeAddress(KSEG0_BASE + 16 * 1024 * 1024))) {
02141                 Pde += 1;
02142                 va += (ULONG)PDE_PER_PAGE * (ULONG)PAGE_SIZE;
02143                 continue;
02144             }
02145         }
02146 
02147         if ((Pde->u.Hard.Valid == 1) && (Pde->u.Hard.LargePage == 0)) {
02148 
02149             PdePage = MI_GET_PAGE_FRAME_FROM_PTE(Pde);
02150             Pfn1 = MI_PFN_ELEMENT(PdePage);
02151             Pfn1->PteFrame = MI_GET_PAGE_FRAME_FROM_PTE(PointerPde);
02152             Pfn1->PteAddress = Pde;
02153             Pfn1->u2.ShareCount += 1;
02154             Pfn1->u3.e2.ReferenceCount = 1;
02155             Pfn1->u3.e1.PageLocation = ActiveAndValid;
02156             Pfn1->u3.e1.PageColor = 0;
02157 
02158             PointerPte = MiGetPteAddress (va);
02159 
02160             //
02161             // Set global bit.
02162             //
02163 
02164             Pde->u.Long |= MiDetermineUserGlobalPteMask (PointerPte) &
02165                                 ~MM_PTE_ACCESS_MASK;
02166             for (j = 0 ; j < PTE_PER_PAGE; j += 1) {
02167                 if (PointerPte->u.Hard.Valid == 1) {
02168 
02169                     PointerPte->u.Long |= MiDetermineUserGlobalPteMask (PointerPte) &
02170                                                 ~MM_PTE_ACCESS_MASK;
02171 
02172                     Pfn1->u2.ShareCount += 1;
02173 
02174                     if ((PointerPte->u.Hard.PageFrameNumber <= MmHighestPhysicalPage) &&
02175                         ((va >= MM_KSEG2_BASE) &&
02176                          ((va < (KSEG0_BASE + MmVirtualBias)) ||
02177                           (va >= (KSEG0_BASE + MmVirtualBias + 16 * 1024 * 1024)))) ||
02178                         ((NeedLowVirtualPfn == TRUE) &&
02179                          (va >= (ULONG)MmNonPagedPoolStart) &&
02180                          (va < (ULONG)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes))) {
02181 
02182                         Pfn2 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
02183 
02184                         if (MmIsAddressValid(Pfn2) &&
02185                             MmIsAddressValid((PUCHAR)(Pfn2+1)-1)) {
02186 
02187                             Pfn2->PteFrame = PdePage;
02188                             Pfn2->PteAddress = PointerPte;
02189                             Pfn2->u2.ShareCount += 1;
02190                             Pfn2->u3.e2.ReferenceCount = 1;
02191                             Pfn2->u3.e1.PageLocation = ActiveAndValid;
02192                             Pfn2->u3.e1.PageColor = 0;
02193                         }
02194                     }
02195                 }
02196 
02197                 va += PAGE_SIZE;
02198                 PointerPte += 1;
02199             }
02200 
02201         } else {
02202             va += (ULONG)PDE_PER_PAGE * (ULONG)PAGE_SIZE;
02203         }
02204 
02205         Pde += 1;
02206     }
02207 
02208     KeRaiseIrql (DISPATCH_LEVEL, &OldIrql);
02209     KeFlushCurrentTb ();
02210     KeLowerIrql (OldIrql);
02211 
02212     //
02213     // If the lowest physical page is zero and the page is still unused, mark
02214     // it as in use. This is temporary as we want to find bugs where a physical
02215     // page is specified as zero.
02216     //
02217 
02218     Pfn1 = &MmPfnDatabase[MmLowestPhysicalPage];
02219 
02220     if ((MmLowestPhysicalPage == 0) && (Pfn1->u3.e2.ReferenceCount == 0)) {
02221 
02222         ASSERT (Pfn1->u3.e2.ReferenceCount == 0);
02223 
02224         //
02225         // Make the reference count non-zero and point it into a
02226         // page directory.
02227         //
02228 
02229         Pde = MiGetPdeAddress (0xffffffff);
02230         PdePage = MI_GET_PAGE_FRAME_FROM_PTE(Pde);
02231         Pfn1->PteFrame = PdePageNumber;
02232         Pfn1->PteAddress = Pde;
02233         Pfn1->u2.ShareCount += 1;
02234         Pfn1->u3.e2.ReferenceCount = 0xfff0;
02235         Pfn1->u3.e1.PageLocation = ActiveAndValid;
02236         Pfn1->u3.e1.PageColor = 0;
02237     }
02238 
02239     // end of temporary set to physical page zero.
02240 
02241     //
02242     // Walk through the memory descriptors and add pages to the
02243     // free list in the PFN database.
02244     //
02245 
02246     if (NextPhysicalPage < (FreeDescriptorLowMem->PageCount +
02247                             FreeDescriptorLowMem->BasePage)) {
02248 
02249         //
02250         // We haven't used the other descriptor.
02251         //
02252 
02253         FreeDescriptorLowMem->PageCount -= NextPhysicalPage -
02254                                            OldFreeDescriptorLowMemBase;
02255         FreeDescriptorLowMem->BasePage = NextPhysicalPage;
02256 
02257     } else {
02258 
02259         ASSERT (FreeDescriptor != NULL);
02260         FreeDescriptorLowMem->PageCount = 0;
02261 
02262         FreeDescriptor->PageCount = OldFreeDescriptorBase + OldFreeDescriptorCount - NextPhysicalPage;
02263 
02264         FreeDescriptor->BasePage = NextPhysicalPage;
02265 
02266     }
02267 
02268     //
02269     // Since the LoaderBlock memory descriptors are generally ordered
02270     // from low physical memory address to high, walk it backwards so the
02271     // high physical pages go to the front of the freelists. The thinking
02272     // is that pages initially allocated by the system are less likely to be
02273     // freed, so don't waste memory below 16mb (or 4gb) that may be needed
02274     // by drivers later.
02275     //
02276 
02277     NextMd = LoaderBlock->MemoryDescriptorListHead.Blink;
02278 
02279     while (NextMd != &LoaderBlock->MemoryDescriptorListHead) {
02280 
02281         MemoryDescriptor = CONTAINING_RECORD(NextMd,
02282                                              MEMORY_ALLOCATION_DESCRIPTOR,
02283                                              ListEntry);
02284 
02285         i = MemoryDescriptor->PageCount;
02286         NextPhysicalPage = MemoryDescriptor->BasePage;
02287 
02288         switch (MemoryDescriptor->MemoryType) {
02289         case LoaderBad:
02290             while (i != 0) {
02291                 MiInsertPageInList (MmPageLocationList[BadPageList],
02292                                     NextPhysicalPage);
02293                 i -= 1;
02294                 NextPhysicalPage += 1;
02295             }
02296             break;
02297 
02298         case LoaderFree:
02299         case LoaderLoadedProgram:
02300         case LoaderFirmwareTemporary:
02301         case LoaderOsloaderStack:
02302 
02303             FreePfnCount = 0;
02304             Pfn1 = MI_PFN_ELEMENT (NextPhysicalPage);
02305             while (i != 0) {
02306                 if (Pfn1->u3.e2.ReferenceCount == 0) {
02307 
02308                     //
02309                     // Set the PTE address to the physical page for
02310                     // virtual address alignment checking.
02311                     //
02312 
02313                     Pfn1->PteAddress =
02314                             (PMMPTE)(NextPhysicalPage << PTE_SHIFT);
02315                     MiInsertPageInList (MmPageLocationList[FreePageList],
02316                                         NextPhysicalPage);
02317                     FreePfnCount += 1;
02318                 }
02319                 else {
02320                     if (FreePfnCount > LargestFreePfnCount) {
02321                         LargestFreePfnCount = FreePfnCount;
02322                         LargestFreePfnStart = NextPhysicalPage - FreePfnCount;
02323                         FreePfnCount = 0;
02324                     }
02325                 }
02326 
02327                 Pfn1 += 1;
02328                 i -= 1;
02329                 NextPhysicalPage += 1;
02330             }
02331 
02332             if (FreePfnCount > LargestFreePfnCount) {
02333                 LargestFreePfnCount = FreePfnCount;
02334                 LargestFreePfnStart = NextPhysicalPage - FreePfnCount;
02335             }
02336 
02337             break;
02338 
02339         case LoaderFirmwarePermanent:
02340         case LoaderSpecialMemory:
02341         case LoaderBBTMemory:
02342 
02343             //
02344             // If the descriptor lies within the highest PFN database entry
02345             // then create PFN pages for this range. Note the PFN entries
02346             // must be created to support \Device\PhysicalMemory.
02347             //
02348 
02349             if (MemoryDescriptor->BasePage <= MmHighestPhysicalPage) {
02350 
02351                 if (MemoryDescriptor->BasePage + MemoryDescriptor->PageCount > MmHighestPhysicalPage + 1) {
02352                     MemoryDescriptor->PageCount = MmHighestPhysicalPage - MemoryDescriptor->BasePage + 1;
02353                     i = MemoryDescriptor->PageCount;
02354                 }
02355             }
02356             else {
02357                 break;
02358             }
02359 
02360             //
02361             // Fall through as these pages must be marked in use as they
02362             // lie within the PFN limits and may be accessed through
02363             // \Device\PhysicalMemory.
02364             //
02365 
02366         default:
02367 
02368             PointerPte = MiGetPteAddress (KSEG0_BASE + MmVirtualBias +
02369                                           (NextPhysicalPage << PAGE_SHIFT));
02370 
02371             Pfn1 = MI_PFN_ELEMENT (NextPhysicalPage);
02372             while (i != 0) {
02373 
02374                 //
02375                 // Set page as in use.
02376                 //
02377 
02378                 PointerPde = MiGetPdeAddress (KSEG0_BASE + MmVirtualBias +
02379                                               (NextPhysicalPage << PAGE_SHIFT));
02380 
02381                 if (Pfn1->u3.e2.ReferenceCount == 0) {
02382                     Pfn1->PteFrame = MI_GET_PAGE_FRAME_FROM_PTE(PointerPde);
02383                     Pfn1->PteAddress = PointerPte;
02384                     Pfn1->u2.ShareCount += 1;
02385                     Pfn1->u3.e2.ReferenceCount = 1;
02386                     Pfn1->u3.e1.PageLocation = ActiveAndValid;
02387                     Pfn1->u3.e1.PageColor = 0;
02388                 }
02389                 Pfn1 += 1;
02390                 i -= 1;
02391                 NextPhysicalPage += 1;
02392                 PointerPte += 1;
02393             }
02394             break;
02395         }
02396 
02397         NextMd = MemoryDescriptor->ListEntry.Blink;
02398     }
02399 
02400     if (PfnInKseg0 == FALSE) {
02401 
02402         //
02403         // Indicate that the PFN database is allocated in NonPaged pool.
02404         //
02405 
02406         PointerPte = MiGetPteAddress (&MmPfnDatabase[MmLowestPhysicalPage]);
02407         Pfn1 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
02408         Pfn1->u3.e1.StartOfAllocation = 1;
02409 
02410         if (NeedLowVirtualPfn == TRUE) {
02411             LastPte = MiGetPteAddress (&MmPfnDatabase[MmHighestPossiblePhysicalPage]);
02412             while (PointerPte <= LastPte) {
02413                 Pfn1 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
02414                 Pfn1->u2.ShareCount = 1;
02415                 Pfn1->u3.e2.ReferenceCount = 1;
02416                 PointerPte += 1;
02417             }
02418         }
02419 
02420         //
02421         // Set the end of the allocation.
02422         //
02423 
02424         PointerPte = MiGetPteAddress (&MmPfnDatabase[MmHighestPossiblePhysicalPage]);
02425         Pfn1 = MI_PFN_ELEMENT(PointerPte->u.Hard.PageFrameNumber);
02426         Pfn1->u3.e1.EndOfAllocation = 1;
02427 
02428     }
02429     else {
02430 
02431         //
02432         // The PFN database is allocated in KSEG0.
02433         //
02434         // Mark all PFN entries for the PFN pages in use.
02435         //
02436 
02437         PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (MmPfnDatabase);
02438         Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
02439         do {
02440             Pfn1->PteAddress = (PMMPTE)(PageFrameIndex << PTE_SHIFT);
02441             Pfn1->u3.e1.PageColor = 0;
02442             Pfn1->u3.e2.ReferenceCount += 1;
02443             PageFrameIndex += 1;
02444             Pfn1 += 1;
02445             PfnAllocation -= 1;
02446         } while (PfnAllocation != 0);
02447 
02448         //
02449         // Scan the PFN database backward for pages that are completely zero.
02450         // These pages are unused and can be added to the free list.
02451         //
02452 
02453         BottomPfn = MI_PFN_ELEMENT(MmHighestPhysicalPage);
02454         do {
02455 
02456             //
02457             // Compute the address of the start of the page that is next
02458             // lower in memory and scan backwards until that page address
02459             // is reached or just crossed.
02460             //
02461 
02462             if (((ULONG)BottomPfn & (PAGE_SIZE - 1)) != 0) {
02463                 BasePfn = (PMMPFN)((ULONG)BottomPfn & ~(PAGE_SIZE - 1));
02464                 TopPfn = BottomPfn + 1;
02465 
02466             } else {
02467                 BasePfn = (PMMPFN)((ULONG)BottomPfn - PAGE_SIZE);
02468                 TopPfn = BottomPfn;
02469             }
02470 
02471             while (BottomPfn > BasePfn) {
02472                 BottomPfn -= 1;
02473             }
02474 
02475             //
02476             // If the entire range over which the PFN entries span is
02477             // completely zero and the PFN entry that maps the page is
02478             // not in the range, then add the page to the appropriate
02479             // free list.
02480             //
02481 
02482             Range = (ULONG)TopPfn - (ULONG)BottomPfn;
02483             if (RtlCompareMemoryUlong((PVOID)BottomPfn, Range, 0) == Range) {
02484 
02485                 //
02486                 // Set the PTE address to the physical page for virtual
02487                 // address alignment checking.
02488                 //
02489 
02490                 PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (BasePfn);
02491                 Pfn1 = MI_PFN_ELEMENT(PageFrameIndex);
02492 
02493                 ASSERT (Pfn1->u3.e2.ReferenceCount == 1);
02494                 ASSERT (Pfn1->PteAddress == (PMMPTE)(PageFrameIndex << PTE_SHIFT));
02495                 Pfn1->u3.e2.ReferenceCount = 0;
02496                 PfnAllocation += 1;
02497                 Pfn1->PteAddress = (PMMPTE)(PageFrameIndex << PTE_SHIFT);
02498                 Pfn1->u3.e1.PageColor = 0;
02499                 MiInsertPageInList(MmPageLocationList[FreePageList],
02500                                    PageFrameIndex);
02501             }
02502         } while (BottomPfn > MmPfnDatabase);
02503     }
02504 
02505     //
02506     // Indicate that the nonpaged must-succeed pool is allocated in
02507     // nonpaged pool.
02508     //
02509 
02510     PointerPte = MiGetPteAddress(MmNonPagedMustSucceed);
02511     i = MmSizeOfNonPagedMustSucceed;
02512     while ((LONG)i > 0) {
02513         Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber);
02514         Pfn1->u3.e1.StartOfAllocation = 1;
02515         Pfn1->u3.e1.EndOfAllocation = 1;
02516         i -= PAGE_SIZE;
02517         PointerPte += 1;
02518     }
02519 
02520     //
02521     // Adjust the memory descriptors to indicate that free pool has
02522     // been used for nonpaged pool creation.
02523     //
02524 
02525     FreeDescriptorLowMem->PageCount = OldFreeDescriptorLowMemCount;
02526     FreeDescriptorLowMem->BasePage = OldFreeDescriptorLowMemBase;
02527 
02528     if (FreeDescriptor != NULL) {
02529         FreeDescriptor->PageCount = OldFreeDescriptorCount;
02530         FreeDescriptor->BasePage = OldFreeDescriptorBase;
02531     }
02532 
02533     KeInitializeSpinLock (&MmSystemSpaceLock);
02534 
02535     KeInitializeSpinLock (&MmPfnLock);
02536 
02537     //
02538     // Initialize the nonpaged available PTEs for mapping I/O space
02539     // and kernel stacks.
02540     //
02541 
02542     PointerPte = MiGetPteAddress (MmNonPagedSystemStart);
02543     ASSERT (((ULONG)PointerPte & (PAGE_SIZE - 1)) == 0);
02544 
02545     MmNumberOfSystemPtes = MiGetPteAddress(NonPagedPoolStartVirtual) - PointerPte - 1;
02546 
02547     MiInitializeSystemPtes (PointerPte, MmNumberOfSystemPtes, SystemPteSpace);
02548 
02549     if (ExtraPtes != 0) {
02550 
02551         //
02552         // Add extra system PTEs to the pool.
02553         //
02554 
02555         PointerPte = MiGetPteAddress (KSTACK_POOL_START);
02556         MiAddSystemPtes (PointerPte, ExtraPtes, SystemPteSpace);
02557     }
02558 
02559     //
02560     // Add pages to nonpaged pool if we could not allocate enough physically
02561     // contiguous.
02562     //
02563 
02564     j = (SavedSize - MmSizeOfNonPagedPoolInBytes) >> PAGE_SHIFT;
02565 
02566     if ((j != 0) && (MmExpandedNonPagedPoolInBytes == 0)) {
02567 
02568         ULONG CountContiguous;
02569 
02570         CountContiguous = LargestFreePfnCount;
02571         PageFrameIndex = LargestFreePfnStart - 1;
02572 
02573         PointerPte = MiGetPteAddress (NonPagedPoolStartVirtual);
02574 
02575         while (j) {
02576             if (CountContiguous) {
02577                 PageFrameIndex += 1;
02578                 MiUnlinkFreeOrZeroedPage (PageFrameIndex);
02579                 CountContiguous -= 1;
02580             } else {
02581                 PageFrameIndex = MiRemoveAnyPage (
02582                                     MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));
02583             }
02584             Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
02585 
02586             Pfn1->u3.e2.ReferenceCount = 1;
02587             Pfn1->u2.ShareCount = 1;
02588             Pfn1->PteAddress = PointerPte;
02589             Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE;
02590             Pfn1->PteFrame = MI_GET_PAGE_FRAME_FROM_PTE(MiGetPteAddress(PointerPte));
02591             Pfn1->u3.e1.PageLocation = ActiveAndValid;
02592 
02593             TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
02594             *PointerPte = TempPte;
02595             PointerPte += 1;
02596 
02597             j -= 1;
02598         }
02599         Pfn1->u3.e1.EndOfAllocation = 1;
02600         Pfn1 = MI_PFN_ELEMENT (MiGetPteAddress(NonPagedPoolStartVirtual)->u.Hard.PageFrameNumber);
02601         Pfn1->u3.e1.StartOfAllocation = 1;
02602 
02603         Range = MmAllocatedNonPagedPool;
02604         MiFreePoolPages (NonPagedPoolStartVirtual);
02605         MmAllocatedNonPagedPool = Range;
02606     }
02607 
02608     //
02609     // Initialize the nonpaged pool.
02610     //
02611 
02612     InitializePool (NonPagedPool, 0);
02613 
02614     //
02615     // Initialize memory management structures for this process.
02616     //
02617 
02618     //
02619     // Build working set list. This requires the creation of a PDE
02620     // to map HYPER space and the page table page pointed to
02621     // by the PDE must be initialized.
02622     //
02623     // Note, we can't remove a zeroed page as hyper space does not
02624     // exist and we map non-zeroed pages into hyper space to zero.
02625     //
02626 
02627     TempPde = ValidKernelPdeLocal;
02628 
02629     PointerPde = MiGetPdeAddress(HYPER_SPACE);
02630 
02631     LOCK_PFN (OldIrql);
02632 
02633     PageFrameIndex = MiRemoveAnyPage (0);
02634     TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
02635     *PointerPde = TempPde;
02636 
02637 #if defined (_X86PAE_)
02638     PointerPde = MiGetPdeAddress((PVOID)((PCHAR)HYPER_SPACE + MM_VA_MAPPED_BY_PDE));
02639 
02640     PageFrameIndex = MiRemoveAnyPage (0);
02641     TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
02642     *PointerPde = TempPde;
02643 
02644     //
02645     // Point to the page table page we just created and zero it.
02646     //
02647 
02648     PointerPte = MiGetVirtualAddressMappedByPte (PointerPde);
02649     RtlZeroMemory (PointerPte, PAGE_SIZE);
02650 #endif
02651 
02652     KeFlushCurrentTb();
02653 
02654     UNLOCK_PFN (OldIrql);
02655 
02656     //
02657     // Point to the page table page we just created and zero it.
02658     //
02659 
02660     PointerPte = MiGetPteAddress(HYPER_SPACE);
02661     RtlZeroMemory ((PVOID)PointerPte, PAGE_SIZE);
02662 
02663     //
02664     // Hyper space now exists, set the necessary variables.
02665     //
02666 
02667     MmFirstReservedMappingPte = MiGetPteAddress (FIRST_MAPPING_PTE);
02668     MmLastReservedMappingPte = MiGetPteAddress (LAST_MAPPING_PTE);
02669 
02670     MmWorkingSetList = WORKING_SET_LIST;
02671     MmWsle = (PMMWSLE)((PUCHAR)WORKING_SET_LIST + sizeof(MMWSL));
02672 
02673     //
02674     // Initialize this process's memory management structures including
02675     // the working set list.
02676     //
02677 
02678     //
02679     // The PFN element for the page directory has already been initialized,
02680     // zero the reference count and the share count so they won't be
02681     // wrong.
02682     //
02683 
02684     Pfn1 = MI_PFN_ELEMENT (PdePageNumber);
02685 
02686     LOCK_PFN (OldIrql);
02687 
02688     Pfn1->u2.ShareCount = 0;
02689     Pfn1->u3.e2.ReferenceCount = 0;
02690 
02691 #if defined (_X86PAE_)
02692     PointerPte = MiGetPteAddress (PDE_BASE);
02693     for (i = 0; i < PD_PER_SYSTEM; i += 1) {
02694 
02695         PdePageNumber = MI_GET_PAGE_FRAME_FROM_PTE(PointerPte);
02696 
02697         Pfn1 = MI_PFN_ELEMENT (PdePageNumber);
02698         Pfn1->u2.ShareCount = 0;
02699         Pfn1->u3.e2.ReferenceCount = 0;
02700 
02701         PointerPte += 1;
02702     }
02703 #endif
02704 
02705     CurrentProcess = PsGetCurrentProcess ();
02706 
02707     //
02708     // Get a page for the working set list and zero it.
02709     //
02710 
02711     TempPte = ValidKernelPteLocal;
02712     PointerPte = MiGetPteAddress (HYPER_SPACE);
02713     PageFrameIndex = MiRemoveAnyPage (0);
02714 
02715     TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
02716     PointerPte = MiGetPteAddress (HYPER_SPACE);
02717     *PointerPte = TempPte;
02718     RtlZeroMemory ((PVOID)HYPER_SPACE, PAGE_SIZE);
02719     *PointerPte = ZeroPte;
02720 
02721     CurrentProcess->WorkingSetPage = PageFrameIndex;
02722 
02723 #if defined (_X86PAE_)
02724     MiPaeInitialize ();
02725 #endif
02726 
02727     KeFlushCurrentTb();
02728 
02729     UNLOCK_PFN (OldIrql);
02730 
02731     CurrentProcess->Vm.MaximumWorkingSetSize = MmSystemProcessWorkingSetMax;
02732     CurrentProcess->Vm.MinimumWorkingSetSize = MmSystemProcessWorkingSetMin;
02733 
02734     MmInitializeProcessAddressSpace (CurrentProcess,
02735                                      (PEPROCESS)NULL,
02736                                      (PVOID)NULL,
02737                                      (PVOID)NULL);
02738 
02739     //
02740     // Check to see if moving the secondary page structures to the end
02741     // of the PFN database is a waste of memory.
02742     //
02743     // If the PFN database ends on a page aligned boundary and the
02744     // size of the two arrays is less than a page, free the page
02745     // and reallocate it from nonpagedpool.
02746     //
02747 
02748     if (NeedLowVirtualPfn == TRUE) {
02749 
02750         ASSERT (MmFreePagesByColor[0] < (PMMCOLOR_TABLES)MM_KSEG2_BASE);
02751 
02752         PointerPde = MiGetPdeAddress(MmFreePagesByColor[0]);
02753         ASSERT (PointerPde->u.Hard.Valid == 1);
02754 
02755         PointerPte = MiGetPteAddress(MmFreePagesByColor[0]);
02756         ASSERT (PointerPte->u.Hard.Valid == 1);
02757 
02758         PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE(PointerPte);
02759         Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
02760 
02761         LOCK_PFN (OldIrql);
02762 
02763         if (Pfn1->u3.e2.ReferenceCount == 0) {
02764             Pfn1->PteFrame = MI_GET_PAGE_FRAME_FROM_PTE(PointerPde);
02765             Pfn1->PteAddress = PointerPte;
02766             Pfn1->u2.ShareCount += 1;
02767             Pfn1->u3.e2.ReferenceCount = 1;
02768             Pfn1->u3.e1.PageLocation = ActiveAndValid;
02769             Pfn1->u3.e1.PageColor = 0;
02770         }
02771         UNLOCK_PFN (OldIrql);
02772     }
02773     else if ((((ULONG)MmFreePagesByColor[0] & (PAGE_SIZE - 1)) == 0) &&
02774              ((MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES)) < PAGE_SIZE)) {
02775 
02776         PMMCOLOR_TABLES c;
02777 
02778         c = MmFreePagesByColor[0];
02779 
02780         MmFreePagesByColor[0] = ExAllocatePoolWithTag (NonPagedPoolMustSucceed,
02781                                    MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES),
02782                                    ' mM');
02783 
02784         MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors];
02785 
02786         RtlMoveMemory (MmFreePagesByColor[0],
02787                        c,
02788                        MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES));
02789 
02790         //
02791         // Free the page.
02792         //
02793 
02794         if (c > (PMMCOLOR_TABLES)MM_KSEG2_BASE) {
02795             PointerPte = MiGetPteAddress(c);
02796             PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE(PointerPte);
02797             *PointerPte = ZeroKernelPte;
02798         } else {
02799             PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (c);
02800         }
02801 
02802         LOCK_PFN (OldIrql);
02803 
02804         Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
02805         ASSERT ((Pfn1->u2.ShareCount <= 1) && (Pfn1->u3.e2.ReferenceCount <= 1));
02806         Pfn1->u2.ShareCount = 0;
02807         Pfn1->u3.e2.ReferenceCount = 1;
02808         MI_SET_PFN_DELETED (Pfn1);
02809 #if DBG
02810         Pfn1->u3.e1.PageLocation = StandbyPageList;
02811 #endif //DBG
02812         MiDecrementReferenceCount (PageFrameIndex);
02813 
02814         UNLOCK_PFN (OldIrql);
02815     }
02816 
02817     //
02818     // Handle physical pages in BIOS memory range (640k to 1mb) by
02819     // explicitly initializing them in the PFN database so that they
02820     // can be handled properly when I/O is done to these pages (or virtual
02821     // reads across processes).
02822     //
02823 
02824 
02825     Pfn1 = MI_PFN_ELEMENT (MM_BIOS_START);
02826     Pfn2 = MI_PFN_ELEMENT (MM_BIOS_END);
02827 
02828     LOCK_PFN (OldIrql);
02829 
02830     do {
02831         if ((Pfn1->u2.ShareCount == 0) &&
02832             (Pfn1->u3.e2.ReferenceCount == 0) &&
02833             (Pfn1->PteAddress == 0)) {
02834 
02835             //
02836             // Set this as in use.
02837             //
02838 
02839             Pfn1->u3.e2.ReferenceCount = 1;
02840             Pfn1->PteAddress = (PMMPTE)0x7FFFFFFF;
02841             Pfn1->u3.e1.PageLocation = ActiveAndValid;
02842             Pfn1->u3.e1.PageColor = 0;
02843         }
02844         Pfn1 += 1;
02845     } while (Pfn1 <= Pfn2);
02846 
02847     UNLOCK_PFN (OldIrql);
02848 
02849 #if defined (_X86PAE_)
02850     if (MiNoLowMemory == TRUE) {
02851 
02852         MiCreateBitMap (&MiLowMemoryBitMap, 1024 * 1024, NonPagedPool);
02853 
02854         if (MiLowMemoryBitMap != NULL) {
02855             RtlClearAllBits (MiLowMemoryBitMap);
02856             MiRemoveLowPages ();
02857             MmMakeLowMemory = TRUE;
02858         }
02859     }
02860 #endif
02861 
02862     return;
02863 }
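
The listing relies on two pieces of arithmetic that are easy to check in isolation: a physical address becomes a page frame number (PFN) by shifting right by PAGE_SHIFT (this is exactly how MM_BIOS_START and MM_BIOS_END are derived), and a PFN is reduced to a secondary page color by masking with MmSecondaryColorMask, which MiInitMachineDependent sets to MmSecondaryColors - 1. The stand-alone sketch below mirrors that arithmetic in user-mode C; the color count and the sample physical address are illustrative assumptions, not values taken from the kernel.

#include <stdio.h>

#define PAGE_SHIFT      12                          /* 4KB pages, as on x86 */
#define MM_BIOS_START   (0xA0000 >> PAGE_SHIFT)     /* first PFN of the BIOS range */
#define MM_BIOS_END     (0xFFFFF >> PAGE_SHIFT)     /* last PFN of the BIOS range  */

int main(void)
{
    /* Illustrative values: 64 secondary colors, one sample physical address. */
    unsigned long secondaryColors = 64;                  /* must be a power of two */
    unsigned long secondaryColorMask = secondaryColors - 1;
    unsigned long physicalAddress = 0xB8000;             /* legacy VGA text buffer */

    unsigned long pfn = physicalAddress >> PAGE_SHIFT;

    printf("PFN of 0x%lX is 0x%lX\n", physicalAddress, pfn);
    printf("BIOS range covers PFNs 0x%X through 0x%X\n",
           (unsigned)MM_BIOS_START, (unsigned)MM_BIOS_END);
    printf("secondary color of that PFN: %lu\n", pfn & secondaryColorMask);
    return 0;
}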
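
MiInitMachineDependent also places the two color tables directly after the PFN database: MmFreePagesByColor[0] starts at &MmPfnDatabase[MmHighestPossiblePhysicalPage + 1], MmFreePagesByColor[1] follows MmSecondaryColors entries later, and every per-color list head is then set to MM_EMPTY_LIST. The sketch below reproduces that layout with ordinary heap memory; the structure, constants, and entry type here are simplified stand-ins for illustration, not the kernel's definitions.

#include <stdio.h>
#include <stdlib.h>

#define MM_EMPTY_LIST ((unsigned long)-1)   /* sentinel: no page on this list */

typedef struct _COLOR_TABLE {
    unsigned long Flink;                    /* head of a per-color page list */
} COLOR_TABLE;

int main(void)
{
    unsigned long highestPfn = 1023;        /* illustrative: 4MB of RAM */
    unsigned long secondaryColors = 64;     /* illustrative color count  */

    /* One block: the simulated PFN database followed by both color tables. */
    size_t pfnEntries = highestPfn + 1;
    size_t totalBytes = pfnEntries * sizeof(unsigned long) +
                        2 * secondaryColors * sizeof(COLOR_TABLE);
    unsigned char *block = malloc(totalBytes);
    if (block == NULL) {
        return 1;
    }

    COLOR_TABLE *freePagesByColor[2];
    freePagesByColor[0] = (COLOR_TABLE *)(block + pfnEntries * sizeof(unsigned long));
    freePagesByColor[1] = &freePagesByColor[0][secondaryColors];

    /* Every per-color list starts out empty, as in the listing above. */
    for (unsigned long i = 0; i < secondaryColors; i += 1) {
        freePagesByColor[0][i].Flink = MM_EMPTY_LIST;
        freePagesByColor[1][i].Flink = MM_EMPTY_LIST;
    }

    printf("color tables occupy %zu bytes after the PFN entries\n",
           2 * secondaryColors * sizeof(COLOR_TABLE));
    free(block);
    return 0;
}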
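
Finally, when the PFN database is carved out of KSEG0, the code walks it backward one page at a time and frees any page of PFN entries that is entirely zero, combining RtlCompareMemoryUlong with the usual align-down trick ((ULONG)BottomPfn & ~(PAGE_SIZE - 1)). The sketch below restates that test with a user-mode stand-in for RtlCompareMemoryUlong; the helper names and the fixed page size are assumptions made for illustration, not kernel interfaces.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

/* User-mode stand-in: returns how many bytes of Source match the ULONG pattern. */
static size_t CompareMemoryUlong(const void *Source, size_t Length, uint32_t Pattern)
{
    const uint32_t *p = (const uint32_t *)Source;
    size_t matched = 0;

    while (matched + sizeof(uint32_t) <= Length && *p == Pattern) {
        matched += sizeof(uint32_t);
        p += 1;
    }
    return matched;
}

/* Align an address down to the start of its page, as the scan does for BottomPfn. */
static uintptr_t AlignDownToPage(uintptr_t Address)
{
    return Address & ~(uintptr_t)(PAGE_SIZE - 1);
}

int main(void)
{
    /* One simulated page of PFN entries: all zero means the page is unused. */
    static uint32_t pfnPage[PAGE_SIZE / sizeof(uint32_t)];

    int unused = (CompareMemoryUlong(pfnPage, PAGE_SIZE, 0) == PAGE_SIZE);
    printf("page is %s\n", unused ? "all zero (free it)" : "in use (keep it)");

    ((unsigned char *)pfnPage)[100] = 1;    /* dirty one byte and test again */
    unused = (CompareMemoryUlong(pfnPage, PAGE_SIZE, 0) == PAGE_SIZE);
    printf("page is %s\n", unused ? "all zero (free it)" : "in use (keep it)");

    printf("0x%lX aligns down to 0x%lX\n",
           (unsigned long)0x12345, (unsigned long)AlignDownToPage(0x12345));
    return 0;
}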

VOID MiRemoveLowPages (VOID)
 


Variable Documentation

SIZE_T MmExpandedNonPagedPoolInBytes
 

Definition at line 41 of file mm/i386/init386.c.

ULONG MmLargeSystemCache
 

Definition at line 43 of file mm/i386/init386.c.

LOGICAL MmMakeLowMemory
 

Definition at line 44 of file mm/i386/init386.c.

LOGICAL MmPagedPoolMaximumDesired
 

Definition at line 45 of file mm/i386/init386.c.

