Main Page | Class Hierarchy | Class List | File List | Class Members | File Members

inialpha.c File Reference

#include "mi.h"
#include <inbv.h>

Go to the source code of this file.

Defines

#define _1MB   (0x100000)
#define _16MB   (0x1000000)
#define _24MB   (0x1800000)
#define _32MB   (0x2000000)

Functions

VOID MiInitMachineDependent (IN PLOADER_PARAMETER_BLOCK LoaderBlock)

Variables

SIZE_T MmExpandedNonPagedPoolInBytes


Define Documentation

#define _16MB   (0x1000000)
 

Definition at line 33 of file inialpha.c.

Referenced by MiInitMachineDependent().

#define _1MB   (0x100000)
 

Definition at line 32 of file inialpha.c.

Referenced by MiInitMachineDependent().

#define _24MB   (0x1800000)
 

Definition at line 34 of file inialpha.c.

#define _32MB   (0x2000000)
 

Definition at line 35 of file inialpha.c.


Function Documentation

VOID MiInitMachineDependent ( IN PLOADER_PARAMETER_BLOCK  LoaderBlock  )
 

Definition at line 41 of file inialpha.c.

00047 : 00048 00049 This routine performs the necessary operations to enable virtual 00050 memory. This includes building the page directory page, building 00051 page table pages to map the code section, the data section, the' 00052 stack section and the trap handler. 00053 00054 It also initializes the PFN database and populates the free list. 00055 00056 00057 Arguments: 00058 00059 None. 00060 00061 Return Value: 00062 00063 None. 00064 00065 Environment: 00066 00067 Kernel mode. 00068 00069 --*/ 00070 00071 { 00072 PMMPFN BasePfn; 00073 PMMPFN BottomPfn; 00074 PMMPFN TopPfn; 00075 BOOLEAN PfnInKseg0; 00076 ULONG LowMemoryReserved; 00077 ULONG i, j; 00078 ULONG HighPage; 00079 ULONG PagesLeft; 00080 ULONG PageNumber; 00081 ULONG PdePageNumber; 00082 ULONG PdePage; 00083 ULONG PageFrameIndex; 00084 ULONG NextPhysicalPage; 00085 ULONG PfnAllocation; 00086 ULONG NumberOfPages; 00087 PEPROCESS CurrentProcess; 00088 PVOID SpinLockPage; 00089 ULONG MostFreePage; 00090 ULONG MostFreeLowMem; 00091 PLIST_ENTRY NextMd; 00092 ULONG MaxPool; 00093 KIRQL OldIrql; 00094 PMEMORY_ALLOCATION_DESCRIPTOR FreeDescriptor; 00095 PMEMORY_ALLOCATION_DESCRIPTOR FreeDescriptorLowMem; 00096 PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor; 00097 MMPTE TempPte; 00098 PMMPTE PointerPde; 00099 PMMPTE PointerPte; 00100 PMMPTE LastPte; 00101 PMMPTE CacheStackPage; 00102 PMMPTE Pde; 00103 PMMPTE StartPde; 00104 PMMPTE EndPde; 00105 PMMPFN Pfn1; 00106 PMMPFN Pfn2; 00107 PULONG PointerLong; 00108 CHAR Buffer[256]; 00109 PMMFREE_POOL_ENTRY Entry; 00110 PVOID NonPagedPoolStartVirtual; 00111 ULONG Range; 00112 ULONG RemovedLowPage; 00113 ULONG RemovedLowCount; 00114 00115 RemovedLowPage = 0; 00116 RemovedLowCount = 0; 00117 LowMemoryReserved = 0; 00118 MostFreePage = 0; 00119 MostFreeLowMem = 0; 00120 FreeDescriptor = NULL; 00121 FreeDescriptorLowMem = NULL; 00122 00123 PointerPte = MiGetPdeAddress (PDE_BASE); 00124 00125 PdePageNumber = PointerPte->u.Hard.PageFrameNumber; 00126 00127 
PsGetCurrentProcess()->Pcb.DirectoryTableBase[0] = PointerPte->u.Long; 00128 00129 KeSweepDcache (FALSE); 00130 00131 // 00132 // Get the lower bound of the free physical memory and the 00133 // number of physical pages by walking the memory descriptor lists. 00134 // In addition, find the memory descriptor with the most free pages 00135 // that begins at a physical address less than 16MB. The 16 MB 00136 // boundary is necessary for allocating common buffers for use by 00137 // ISA devices that cannot address more than 24 bits. 00138 // 00139 00140 NextMd = LoaderBlock->MemoryDescriptorListHead.Flink; 00141 00142 // 00143 // When restoring a hibernation image, OS Loader needs to use "a few" extra 00144 // pages of LoaderFree memory. 00145 // This is not accounted for when reserving memory for hibernation below. 00146 // Start with a safety margin to allow for this plus modest future increase. 00147 // 00148 00149 MmHiberPages = 96; 00150 00151 while (NextMd != &LoaderBlock->MemoryDescriptorListHead) { 00152 00153 MemoryDescriptor = CONTAINING_RECORD(NextMd, 00154 MEMORY_ALLOCATION_DESCRIPTOR, 00155 ListEntry); 00156 00157 HighPage = MemoryDescriptor->BasePage + MemoryDescriptor->PageCount-1; 00158 00159 // 00160 // This check results in /BURNMEMORY chunks not being counted. 00161 // 00162 00163 if (MemoryDescriptor->MemoryType != LoaderBad) { 00164 MmNumberOfPhysicalPages += MemoryDescriptor->PageCount; 00165 } 00166 00167 if (MemoryDescriptor->BasePage < MmLowestPhysicalPage) { 00168 MmLowestPhysicalPage = MemoryDescriptor->BasePage; 00169 } 00170 00171 if (HighPage > MmHighestPhysicalPage) { 00172 MmHighestPhysicalPage = HighPage; 00173 } 00174 00175 // 00176 // Locate the largest free block starting below 16 megs 00177 // and the largest free block. 
00178 // 00179 00180 if ((MemoryDescriptor->MemoryType == LoaderFree) || 00181 (MemoryDescriptor->MemoryType == LoaderLoadedProgram) || 00182 (MemoryDescriptor->MemoryType == LoaderFirmwareTemporary) || 00183 (MemoryDescriptor->MemoryType == LoaderOsloaderStack)) { 00184 00185 // 00186 // Every page that will be used as free memory that is not already 00187 // marked as LoaderFree must be counted so a hibernate can reserve 00188 // the proper amount. 00189 // 00190 00191 if (MemoryDescriptor->MemoryType != LoaderFree) { 00192 MmHiberPages += MemoryDescriptor->PageCount; 00193 } 00194 00195 if ((MemoryDescriptor->PageCount > MostFreeLowMem) && 00196 (MemoryDescriptor->BasePage < (_16MB >> PAGE_SHIFT)) && 00197 (HighPage < MM_PAGES_IN_KSEG0)) { 00198 00199 MostFreeLowMem = MemoryDescriptor->PageCount; 00200 FreeDescriptorLowMem = MemoryDescriptor; 00201 00202 } else if (MemoryDescriptor->PageCount > MostFreePage) { 00203 00204 MostFreePage = MemoryDescriptor->PageCount; 00205 FreeDescriptor = MemoryDescriptor; 00206 } 00207 } else if (MemoryDescriptor->MemoryType == LoaderOsloaderHeap) { 00208 // 00209 // We do not want to use this memory yet as it still has important 00210 // data structures in it. But we still want to account for this in 00211 // the hibernation pages 00212 // 00213 MmHiberPages += MemoryDescriptor->PageCount; 00214 } 00215 00216 NextMd = MemoryDescriptor->ListEntry.Flink; 00217 } 00218 00219 // 00220 // Perform sanity checks on the results of walking the memory 00221 // descriptors. 
00222 // 00223 00224 if (MmNumberOfPhysicalPages < 1024) { 00225 KeBugCheckEx (INSTALL_MORE_MEMORY, 00226 MmNumberOfPhysicalPages, 00227 MmLowestPhysicalPage, 00228 MmHighestPhysicalPage, 00229 0); 00230 } 00231 00232 if (FreeDescriptorLowMem == NULL){ 00233 InbvDisplayString("MmInit *** FATAL ERROR *** no free descriptors that begin below physical address 16MB\n"); 00234 KeBugCheck (MEMORY_MANAGEMENT); 00235 } 00236 00237 if (MmDynamicPfn == TRUE) { 00238 00239 // 00240 // Since a ~128mb PFN database is required to span the 32GB supported 00241 // by Alpha, require 256mb of memory to be present to support 00242 // this option. 00243 // 00244 00245 if (MmNumberOfPhysicalPages >= (256 * 1024 * 1024) / PAGE_SIZE) { 00246 MmHighestPossiblePhysicalPage = 0x400000 - 1; 00247 } 00248 else { 00249 MmDynamicPfn = FALSE; 00250 } 00251 } 00252 else { 00253 MmHighestPossiblePhysicalPage = MmHighestPhysicalPage; 00254 } 00255 00256 // 00257 // Used later to build nonpaged pool. 00258 // 00259 00260 NextPhysicalPage = FreeDescriptorLowMem->BasePage; 00261 NumberOfPages = FreeDescriptorLowMem->PageCount; 00262 00263 // 00264 // Build non-paged pool using the physical pages following the 00265 // data page in which to build the pool from. Non-paged pool grows 00266 // from the high range of the virtual address space and expands 00267 // downward. 00268 // 00269 // At this time non-paged pool is constructed so virtual addresses 00270 // are also physically contiguous. 00271 // 00272 00273 if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) > 00274 (7 * (MmNumberOfPhysicalPages >> 3))) { 00275 00276 // 00277 // More than 7/8 of memory allocated to nonpagedpool, reset to 0. 00278 // 00279 00280 MmSizeOfNonPagedPoolInBytes = 0; 00281 } 00282 00283 if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize) { 00284 00285 // 00286 // Calculate the size of nonpaged pool. Use the minimum size, 00287 // then for every MB above 8mb add extra pages. 
00288 // 00289 00290 MmSizeOfNonPagedPoolInBytes = MmMinimumNonPagedPoolSize; 00291 00292 MmSizeOfNonPagedPoolInBytes += 00293 ((MmNumberOfPhysicalPages - 1024) / 00294 (_1MB >> PAGE_SHIFT) ) * 00295 MmMinAdditionNonPagedPoolPerMb; 00296 } 00297 00298 // 00299 // Align to page size boundary. 00300 // 00301 00302 MmSizeOfNonPagedPoolInBytes &= ~(PAGE_SIZE - 1); 00303 00304 // 00305 // Limit initial nonpaged pool size to MM_MAX_INITIAL_NONPAGED_POOL 00306 // 00307 00308 if (MmSizeOfNonPagedPoolInBytes > MM_MAX_INITIAL_NONPAGED_POOL) { 00309 MmSizeOfNonPagedPoolInBytes = MM_MAX_INITIAL_NONPAGED_POOL; 00310 } 00311 00312 // 00313 // If the non-paged pool that we want to allocate will not fit in 00314 // the free memory descriptor that we have available then recompute 00315 // the size of non-paged pool to be the size of the free memory 00316 // descriptor. If the free memory descriptor cannot fit the 00317 // minimum non-paged pool size (MmMinimumNonPagedPoolSize) then we 00318 // cannot boot the operating system. 00319 // 00320 00321 if ((MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT) > NumberOfPages) { 00322 00323 // 00324 // Reserve all of low memory for nonpaged pool. 00325 // 00326 00327 MmSizeOfNonPagedPoolInBytes = NumberOfPages << PAGE_SHIFT; 00328 LowMemoryReserved = NextPhysicalPage; 00329 00330 // 00331 // Switch to backup descriptor for all other allocations. 00332 // 00333 00334 NextPhysicalPage = FreeDescriptor->BasePage; 00335 NumberOfPages = FreeDescriptor->PageCount; 00336 00337 if (MmSizeOfNonPagedPoolInBytes < MmMinimumNonPagedPoolSize) { 00338 InbvDisplayString("MmInit *** FATAL ERROR *** cannot allocate non-paged pool\n"); 00339 sprintf(Buffer, 00340 "Largest description = %d pages, require %d pages\n", 00341 NumberOfPages, 00342 MmMinimumNonPagedPoolSize >> PAGE_SHIFT); 00343 InbvDisplayString (Buffer ); 00344 KeBugCheck (MEMORY_MANAGEMENT); 00345 00346 } 00347 } 00348 00349 // 00350 // Calculate the maximum size of pool. 
00351 // 00352 00353 if (MmMaximumNonPagedPoolInBytes == 0) { 00354 00355 // 00356 // Calculate the size of nonpaged pool. 00357 // For every MB above 8mb add extra pages. 00358 // 00359 00360 MmMaximumNonPagedPoolInBytes = MmDefaultMaximumNonPagedPool; 00361 00362 // 00363 // Make sure enough expansion for the PFN database exists. 00364 // 00365 00366 MmMaximumNonPagedPoolInBytes += (ULONG)PAGE_ALIGN ( 00367 MmHighestPhysicalPage * sizeof(MMPFN)); 00368 00369 MmMaximumNonPagedPoolInBytes += 00370 ((MmNumberOfPhysicalPages - 1024) / 00371 (_1MB >> PAGE_SHIFT) ) * 00372 MmMaxAdditionNonPagedPoolPerMb; 00373 } 00374 00375 MaxPool = MmSizeOfNonPagedPoolInBytes + PAGE_SIZE * 16 + 00376 (ULONG)PAGE_ALIGN ( 00377 MmHighestPhysicalPage * sizeof(MMPFN)); 00378 00379 if (MmMaximumNonPagedPoolInBytes < MaxPool) { 00380 MmMaximumNonPagedPoolInBytes = MaxPool; 00381 } 00382 00383 // 00384 // If the system is configured for maximum system PTEs then limit maximum 00385 // nonpaged pool to 128mb so the rest of the virtual address space can 00386 // be used for the PTEs. Also push as much nonpaged pool as possible 00387 // into kseg0 to free up more PTEs. 
00388 // 00389 00390 if (MmMaximumNonPagedPoolInBytes > MM_MAX_ADDITIONAL_NONPAGED_POOL) { 00391 00392 ULONG InitialNonPagedPages; 00393 ULONG ExpansionPagesToMove; 00394 ULONG LowAvailPages; 00395 00396 if ((MiRequestedSystemPtes == (ULONG)-1) || (MiHydra == TRUE)) { 00397 MmMaximumNonPagedPoolInBytes = MM_MAX_ADDITIONAL_NONPAGED_POOL; 00398 00399 if (LowMemoryReserved == 0) { 00400 00401 InitialNonPagedPages = (MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT); 00402 00403 if (InitialNonPagedPages + 1024 < NumberOfPages) { 00404 LowAvailPages = NumberOfPages - 1024 - InitialNonPagedPages; 00405 00406 ExpansionPagesToMove = (MmMaximumNonPagedPoolInBytes >> PAGE_SHIFT) - InitialNonPagedPages; 00407 00408 if (ExpansionPagesToMove > 32) { 00409 ExpansionPagesToMove -= 32; 00410 if (LowAvailPages > ExpansionPagesToMove) { 00411 LowAvailPages = ExpansionPagesToMove; 00412 } 00413 00414 MmSizeOfNonPagedPoolInBytes += (LowAvailPages << PAGE_SHIFT); 00415 } 00416 } 00417 } 00418 00419 if (MmSizeOfNonPagedPoolInBytes == MmMaximumNonPagedPoolInBytes) { 00420 ASSERT (MmSizeOfNonPagedPoolInBytes > (32 << PAGE_SHIFT)); 00421 MmSizeOfNonPagedPoolInBytes -= (32 << PAGE_SHIFT); 00422 } 00423 } 00424 } 00425 00426 // 00427 // Limit maximum nonpaged pool to MM_MAX_ADDITIONAL_NONPAGED_POOL. 
00428 // 00429 00430 if (MmMaximumNonPagedPoolInBytes > MM_MAX_ADDITIONAL_NONPAGED_POOL) { 00431 00432 if (MmMaximumNonPagedPoolInBytes > MM_MAX_INITIAL_NONPAGED_POOL + MM_MAX_ADDITIONAL_NONPAGED_POOL) { 00433 MmMaximumNonPagedPoolInBytes = MM_MAX_INITIAL_NONPAGED_POOL + MM_MAX_ADDITIONAL_NONPAGED_POOL; 00434 } 00435 00436 if (LowMemoryReserved != 0) { 00437 if (MmMaximumNonPagedPoolInBytes > MmSizeOfNonPagedPoolInBytes + MM_MAX_ADDITIONAL_NONPAGED_POOL) { 00438 MmMaximumNonPagedPoolInBytes = MmSizeOfNonPagedPoolInBytes + MM_MAX_ADDITIONAL_NONPAGED_POOL; 00439 } 00440 MmExpandedNonPagedPoolInBytes = MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes; 00441 } 00442 else { 00443 00444 if ((MM_MAX_INITIAL_NONPAGED_POOL >> PAGE_SHIFT) >= NumberOfPages) { 00445 00446 // 00447 // Reserve all of low memory for nonpaged pool. 00448 // 00449 00450 SIZE_T Diff; 00451 00452 Diff = MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes; 00453 if (Diff > MM_MAX_ADDITIONAL_NONPAGED_POOL) { 00454 Diff = MM_MAX_ADDITIONAL_NONPAGED_POOL; 00455 } 00456 00457 MmSizeOfNonPagedPoolInBytes = NumberOfPages << PAGE_SHIFT; 00458 MmMaximumNonPagedPoolInBytes = MmSizeOfNonPagedPoolInBytes + Diff; 00459 LowMemoryReserved = NextPhysicalPage; 00460 00461 // 00462 // Switch to backup descriptor for all other allocations. 00463 // 00464 00465 NextPhysicalPage = FreeDescriptor->BasePage; 00466 NumberOfPages = FreeDescriptor->PageCount; 00467 } 00468 else { 00469 00470 MmSizeOfNonPagedPoolInBytes = MM_MAX_INITIAL_NONPAGED_POOL; 00471 00472 // 00473 // The pages must be subtracted from the low descriptor so 00474 // they are not used for anything else or put on the freelist. 00475 // But they must be added back in later when initializing PFNs 00476 // for all the descriptor ranges. 
00477 // 00478 00479 RemovedLowCount = (MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT); 00480 FreeDescriptorLowMem->PageCount -= RemovedLowCount; 00481 RemovedLowPage = FreeDescriptorLowMem->BasePage + FreeDescriptorLowMem->PageCount; 00482 00483 NumberOfPages = FreeDescriptorLowMem->PageCount; 00484 } 00485 00486 MmExpandedNonPagedPoolInBytes = MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes; 00487 00488 if (MmExpandedNonPagedPoolInBytes > MM_MAX_ADDITIONAL_NONPAGED_POOL) { 00489 MmExpandedNonPagedPoolInBytes = MM_MAX_ADDITIONAL_NONPAGED_POOL; 00490 } 00491 } 00492 } 00493 00494 if (MmExpandedNonPagedPoolInBytes) { 00495 MmNonPagedPoolStart = (PVOID)((ULONG)MmNonPagedPoolEnd 00496 - MmExpandedNonPagedPoolInBytes); 00497 } 00498 else { 00499 MmNonPagedPoolStart = (PVOID)((ULONG)MmNonPagedPoolEnd 00500 - (MmMaximumNonPagedPoolInBytes - 1)); 00501 } 00502 00503 MmNonPagedPoolStart = (PVOID)PAGE_ALIGN(MmNonPagedPoolStart); 00504 NonPagedPoolStartVirtual = MmNonPagedPoolStart; 00505 00506 // 00507 // Calculate the starting PDE for the system PTE pool which is 00508 // right below the nonpaged pool. 00509 // 00510 00511 MmNonPagedSystemStart = (PVOID)(((ULONG)MmNonPagedPoolStart - 00512 ((MmNumberOfSystemPtes + 1) * PAGE_SIZE)) & 00513 (~PAGE_DIRECTORY_MASK)); 00514 00515 if (MmNonPagedSystemStart < MM_LOWEST_NONPAGED_SYSTEM_START) { 00516 MmNonPagedSystemStart = MM_LOWEST_NONPAGED_SYSTEM_START; 00517 } 00518 00519 MmNumberOfSystemPtes = (((ULONG)MmNonPagedPoolStart - 00520 (ULONG)MmNonPagedSystemStart) >> PAGE_SHIFT)-1; 00521 ASSERT (MmNumberOfSystemPtes > 1000); 00522 00523 // 00524 // Set the global bit for all PDEs in system space. 
00525 // 00526 00527 StartPde = MiGetPdeAddress (MM_SYSTEM_SPACE_START); 00528 EndPde = MiGetPdeAddress (MM_SYSTEM_SPACE_END); 00529 00530 while (StartPde <= EndPde) { 00531 00532 if (StartPde->u.Hard.Global == 0) { 00533 TempPte = *StartPde; 00534 TempPte.u.Hard.Global = 1; 00535 *StartPde = TempPte; 00536 } 00537 00538 StartPde += 1; 00539 } 00540 00541 if (MiHydra == TRUE) { 00542 00543 // 00544 // Clear the global bit for all session space addresses. 00545 // 00546 00547 StartPde = MiGetPdeAddress (MmSessionBase); 00548 EndPde = MiGetPdeAddress (MI_SESSION_SPACE_END); 00549 00550 while (StartPde < EndPde) { 00551 00552 if (StartPde->u.Hard.Global == 1) { 00553 TempPte = *StartPde; 00554 TempPte.u.Hard.Global = 0; 00555 *StartPde = TempPte; 00556 } 00557 00558 ASSERT (StartPde->u.Long == 0); 00559 StartPde += 1; 00560 } 00561 } 00562 00563 StartPde = MiGetPdeAddress (MmNonPagedSystemStart); 00564 00565 EndPde = MiGetPdeAddress (MmNonPagedPoolEnd); 00566 00567 ASSERT ((EndPde - StartPde) < (LONG)NumberOfPages); 00568 00569 TempPte = ValidKernelPte; 00570 00571 while (StartPde <= EndPde) { 00572 if (StartPde->u.Hard.Valid == 0) { 00573 00574 // 00575 // Map in a page directory page. 00576 // 00577 00578 TempPte.u.Hard.PageFrameNumber = NextPhysicalPage; 00579 NumberOfPages -= 1; 00580 NextPhysicalPage += 1; 00581 00582 if (NumberOfPages == 0) { 00583 ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage + 00584 FreeDescriptor->PageCount)); 00585 NextPhysicalPage = FreeDescriptor->BasePage; 00586 NumberOfPages = FreeDescriptor->PageCount; 00587 } 00588 *StartPde = TempPte; 00589 } 00590 StartPde += 1; 00591 } 00592 00593 // 00594 // Zero the PTEs before non-paged pool. 00595 // 00596 00597 StartPde = MiGetPteAddress (MmNonPagedSystemStart); 00598 PointerPte = MiGetPteAddress (MmNonPagedPoolStart); 00599 00600 RtlZeroMemory (StartPde, (ULONG)PointerPte - (ULONG)StartPde); 00601 00602 // 00603 // Fill in the PTEs for non-paged pool. 
00604 // 00605 00606 PointerPte = MiGetPteAddress(MmNonPagedPoolStart); 00607 LastPte = MiGetPteAddress((ULONG)MmNonPagedPoolStart + 00608 MmSizeOfNonPagedPoolInBytes - 1); 00609 00610 if (MmExpandedNonPagedPoolInBytes == 0) { 00611 if (!LowMemoryReserved) { 00612 00613 if (NumberOfPages < (ULONG)(LastPte - PointerPte + 1)) { 00614 00615 // 00616 // Can't just switch descriptors here - the initial nonpaged 00617 // pool is always mapped via KSEG0 and is thus required to be 00618 // virtually and physically contiguous. 00619 // 00620 00621 KeBugCheckEx (INSTALL_MORE_MEMORY, 00622 MmNumberOfPhysicalPages, 00623 NumberOfPages, 00624 LastPte - PointerPte + 1, 00625 1); 00626 } 00627 00628 while (PointerPte <= LastPte) { 00629 TempPte.u.Hard.PageFrameNumber = NextPhysicalPage; 00630 NextPhysicalPage += 1; 00631 NumberOfPages -= 1; 00632 ASSERT (NumberOfPages != 0); 00633 *PointerPte = TempPte; 00634 PointerPte += 1; 00635 } 00636 00637 } else { 00638 00639 ULONG ReservedPage = FreeDescriptorLowMem->BasePage; 00640 00641 while (PointerPte <= LastPte) { 00642 TempPte.u.Hard.PageFrameNumber = ReservedPage; 00643 ReservedPage += 1; 00644 *PointerPte = TempPte; 00645 PointerPte += 1; 00646 } 00647 } 00648 LastPte = MiGetPteAddress ((ULONG)MmNonPagedPoolStart + 00649 MmMaximumNonPagedPoolInBytes - 1); 00650 } 00651 else { 00652 LastPte = MiGetPteAddress ((ULONG)MmNonPagedPoolStart + 00653 MmExpandedNonPagedPoolInBytes - 1); 00654 } 00655 00656 // 00657 // Zero the remaining PTEs for non-paged pool maximum. 00658 // 00659 00660 while (PointerPte <= LastPte) { 00661 *PointerPte = ZeroKernelPte; 00662 PointerPte += 1; 00663 } 00664 00665 // 00666 // Zero the remaining PTEs (if any). 
00667 // 00668 00669 while (((ULONG)PointerPte & (PAGE_SIZE - 1)) != 0) { 00670 *PointerPte = ZeroKernelPte; 00671 PointerPte += 1; 00672 } 00673 00674 if (MmExpandedNonPagedPoolInBytes) { 00675 00676 if (LowMemoryReserved) { 00677 MmNonPagedPoolStart = (PVOID)((LowMemoryReserved << PAGE_SHIFT) | 00678 KSEG0_BASE); 00679 } 00680 else if (RemovedLowPage) { 00681 MmNonPagedPoolStart = (PVOID)((RemovedLowPage << PAGE_SHIFT) | 00682 KSEG0_BASE); 00683 } 00684 else { 00685 ASSERT (FALSE); 00686 } 00687 } 00688 else { 00689 PointerPte = MiGetPteAddress (MmNonPagedPoolStart); 00690 MmNonPagedPoolStart = (PVOID)((PointerPte->u.Hard.PageFrameNumber << PAGE_SHIFT) | 00691 KSEG0_BASE); 00692 } 00693 00694 MmPageAlignedPoolBase[NonPagedPool] = MmNonPagedPoolStart; 00695 00696 MmSubsectionBase = (ULONG)MmNonPagedPoolStart; 00697 00698 if (MmExpandedNonPagedPoolInBytes == 0) { 00699 if (NextPhysicalPage < (MM_SUBSECTION_MAP >> PAGE_SHIFT)) { 00700 MmSubsectionBase = KSEG0_BASE; 00701 } 00702 } 00703 00704 MmSubsectionTopPage = (((MmSubsectionBase & ~KSEG0_BASE) + MM_SUBSECTION_MAP) >> PAGE_SHIFT); 00705 00706 // 00707 // Non-paged pages now exist, build the pool structures. 00708 // 00709 00710 if (MmExpandedNonPagedPoolInBytes) { 00711 MmNonPagedPoolExpansionStart = (PVOID)NonPagedPoolStartVirtual; 00712 } 00713 else { 00714 MmNonPagedPoolExpansionStart = (PVOID)((PCHAR)NonPagedPoolStartVirtual + 00715 MmSizeOfNonPagedPoolInBytes); 00716 } 00717 00718 MiInitializeNonPagedPool (); 00719 00720 // 00721 // Before Non-paged pool can be used, the PFN database must 00722 // be built. This is due to the fact that the start and end of 00723 // allocation bits for nonpaged pool are maintained in the 00724 // PFN elements for the corresponding pages. 00725 // 00726 00727 // 00728 // Calculate the number of pages required from page zero to 00729 // the highest page. 
00730 // 00731 // Get the number of secondary colors and add the array for tracking 00732 // secondary colors to the end of the PFN database. 00733 // 00734 00735 if (MmSecondaryColors == 0) { 00736 MmSecondaryColors = PCR->SecondLevelCacheSize; 00737 } 00738 00739 MmSecondaryColors = MmSecondaryColors >> PAGE_SHIFT; 00740 00741 // 00742 // Make sure value is power of two and within limits. 00743 // 00744 00745 if (((MmSecondaryColors & (MmSecondaryColors -1)) != 0) || 00746 (MmSecondaryColors < MM_SECONDARY_COLORS_MIN) || 00747 (MmSecondaryColors > MM_SECONDARY_COLORS_MAX)) { 00748 00749 MmSecondaryColors = MM_SECONDARY_COLORS_DEFAULT; 00750 } 00751 00752 MmSecondaryColorMask = MmSecondaryColors - 1; 00753 00754 PfnAllocation = 1 + ((((MmHighestPossiblePhysicalPage + 1) * sizeof(MMPFN)) + 00755 (MmSecondaryColors * sizeof(MMCOLOR_TABLES)*2)) 00756 >> PAGE_SHIFT); 00757 00758 // 00759 // If the number of pages remaining in the current descriptor is 00760 // greater than the number of pages needed for the PFN database, 00761 // and the descriptor is for memory below 1 gig, then allocate the 00762 // PFN database from the current free descriptor. 00763 // Note: FW creates a new memory descriptor for any memory above 1GB. 00764 // Thus we don't need to worry if the highest page will go beyond 1GB for 00765 // this memory descriptor. 00766 // 00767 00768 #ifndef PFN_CONSISTENCY 00769 if ((NumberOfPages >= PfnAllocation) && 00770 (NextPhysicalPage + NumberOfPages <= MM_PAGES_IN_KSEG0)) { 00771 00772 // 00773 // Allocate the PFN database in kseg0. 00774 // 00775 // Compute the address of the PFN by allocating the appropriate 00776 // number of pages from the end of the free descriptor. 
00777 // 00778 00779 PfnInKseg0 = TRUE; 00780 HighPage = NextPhysicalPage + NumberOfPages; 00781 MmPfnDatabase = (PMMPFN)(KSEG0_BASE | 00782 ((HighPage - PfnAllocation) << PAGE_SHIFT)); 00783 RtlZeroMemory(MmPfnDatabase, PfnAllocation * PAGE_SIZE); 00784 00785 // 00786 // Mark off the chunk of memory used for the PFN database. 00787 // 00788 00789 NumberOfPages -= PfnAllocation; 00790 00791 if (NextPhysicalPage >= FreeDescriptorLowMem->BasePage && 00792 NextPhysicalPage < (FreeDescriptorLowMem->BasePage + 00793 FreeDescriptorLowMem->PageCount)) { 00794 00795 // 00796 // We haven't used the other descriptor. 00797 // 00798 00799 FreeDescriptorLowMem->PageCount -= PfnAllocation; 00800 00801 } else { 00802 00803 FreeDescriptor->PageCount -= PfnAllocation; 00804 } 00805 00806 // 00807 // Allocate one PTE at the very top of the Mm virtual address space. 00808 // This provides protection against the caller of the first real 00809 // nonpaged expansion allocation in case he accidentally overruns his 00810 // pool block. (We'll trap instead of corrupting the crashdump PTEs). 00811 // This also allows us to freely increment in MiFreePoolPages 00812 // without having to worry about a valid PTE just after the end of 00813 // the highest nonpaged pool allocation. 00814 // 00815 00816 MiReserveSystemPtes (1, 00817 NonPagedPoolExpansion, 00818 0, 00819 0, 00820 TRUE); 00821 00822 } else { 00823 00824 #endif // PFN_CONSISTENCY 00825 00826 // 00827 // Calculate the start of the Pfn database (it starts at physical 00828 // page zero, even if the lowest physical page is not zero). 
00829 // 00830 00831 PfnInKseg0 = FALSE; 00832 PointerPte = MiReserveSystemPtes (PfnAllocation, 00833 NonPagedPoolExpansion, 00834 0, 00835 0, 00836 TRUE); 00837 00838 #if PFN_CONSISTENCY 00839 MiPfnStartPte = PointerPte; 00840 MiPfnPtes = PfnAllocation; 00841 #endif 00842 00843 MmPfnDatabase = (PMMPFN)(MiGetVirtualAddressMappedByPte (PointerPte)); 00844 00845 // 00846 // Allocate one more PTE just below the PFN database. This provides 00847 // protection against the caller of the first real nonpaged 00848 // expansion allocation in case he accidentally overruns his pool 00849 // block. (We'll trap instead of corrupting the PFN database). 00850 // This also allows us to freely increment in MiFreePoolPages 00851 // without having to worry about a valid PTE just after the end of 00852 // the highest nonpaged pool allocation. 00853 // 00854 00855 MiReserveSystemPtes (1, 00856 NonPagedPoolExpansion, 00857 0, 00858 0, 00859 TRUE); 00860 00861 // 00862 // Go through the memory descriptors and for each physical page 00863 // make sure the PFN database has a valid PTE to map it. This allows 00864 // machines with sparse physical memory to have a minimal PFN 00865 // database. 00866 // 00867 00868 NextMd = LoaderBlock->MemoryDescriptorListHead.Flink; 00869 00870 while (NextMd != &LoaderBlock->MemoryDescriptorListHead) { 00871 00872 MemoryDescriptor = CONTAINING_RECORD(NextMd, 00873 MEMORY_ALLOCATION_DESCRIPTOR, 00874 ListEntry); 00875 00876 PointerPte = MiGetPteAddress (MI_PFN_ELEMENT( 00877 MemoryDescriptor->BasePage)); 00878 00879 LastPte = MiGetPteAddress (((PCHAR)(MI_PFN_ELEMENT( 00880 MemoryDescriptor->BasePage + 00881 MemoryDescriptor->PageCount))) - 1); 00882 00883 // 00884 // If memory was temporarily removed to create the initial non 00885 // paged pool, account for it now so PFN entries are created for it. 
00886 // 00887 00888 if (MemoryDescriptor == FreeDescriptorLowMem) { 00889 if (RemovedLowPage) { 00890 ASSERT (MemoryDescriptor->BasePage + MemoryDescriptor->PageCount == RemovedLowPage); 00891 LastPte = MiGetPteAddress (((PCHAR)(MI_PFN_ELEMENT( 00892 MemoryDescriptor->BasePage + 00893 RemovedLowCount + 00894 MemoryDescriptor->PageCount))) - 1); 00895 } 00896 } 00897 00898 while (PointerPte <= LastPte) { 00899 00900 if (PointerPte->u.Hard.Valid == 0) { 00901 TempPte.u.Hard.PageFrameNumber = NextPhysicalPage; 00902 NextPhysicalPage += 1; 00903 NumberOfPages -= 1; 00904 if (NumberOfPages == 0) { 00905 ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage + 00906 FreeDescriptor->PageCount)); 00907 NextPhysicalPage = FreeDescriptor->BasePage; 00908 NumberOfPages = FreeDescriptor->PageCount; 00909 } 00910 *PointerPte = TempPte; 00911 RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte), 00912 PAGE_SIZE); 00913 } 00914 PointerPte += 1; 00915 } 00916 NextMd = MemoryDescriptor->ListEntry.Flink; 00917 } 00918 #ifndef PFN_CONSISTENCY 00919 } 00920 #endif // PFN_CONSISTENCY 00921 00922 // 00923 // Initialize support for colored pages. 00924 // 00925 00926 MmFreePagesByColor[0] = (PMMCOLOR_TABLES) 00927 &MmPfnDatabase[MmHighestPossiblePhysicalPage + 1]; 00928 00929 MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors]; 00930 00931 // 00932 // Make sure the PTEs are mapped. 
00933 // 00934 00935 if (!MI_IS_PHYSICAL_ADDRESS(MmFreePagesByColor[0])) { 00936 00937 PointerPte = MiGetPteAddress (&MmFreePagesByColor[0][0]); 00938 00939 LastPte = MiGetPteAddress ( 00940 (PVOID)((PCHAR)&MmFreePagesByColor[1][MmSecondaryColors]-1)); 00941 00942 while (PointerPte <= LastPte) { 00943 if (PointerPte->u.Hard.Valid == 0) { 00944 TempPte.u.Hard.PageFrameNumber = NextPhysicalPage; 00945 NextPhysicalPage += 1; 00946 NumberOfPages -= 1; 00947 if (NumberOfPages == 0) { 00948 ASSERT (NextPhysicalPage != (FreeDescriptor->BasePage + 00949 FreeDescriptor->PageCount)); 00950 NextPhysicalPage = FreeDescriptor->BasePage; 00951 NumberOfPages = FreeDescriptor->PageCount; 00952 } 00953 *PointerPte = TempPte; 00954 RtlZeroMemory (MiGetVirtualAddressMappedByPte (PointerPte), 00955 PAGE_SIZE); 00956 } 00957 PointerPte += 1; 00958 } 00959 } 00960 00961 for (i = 0; i < MmSecondaryColors; i += 1) { 00962 MmFreePagesByColor[ZeroedPageList][i].Flink = MM_EMPTY_LIST; 00963 MmFreePagesByColor[FreePageList][i].Flink = MM_EMPTY_LIST; 00964 } 00965 00966 #if MM_MAXIMUM_NUMBER_OF_COLORS > 1 00967 for (i = 0; i < MM_MAXIMUM_NUMBER_OF_COLORS; i += 1) { 00968 MmFreePagesByPrimaryColor[ZeroedPageList][i].ListName = ZeroedPageList; 00969 MmFreePagesByPrimaryColor[FreePageList][i].ListName = FreePageList; 00970 MmFreePagesByPrimaryColor[ZeroedPageList][i].Flink = MM_EMPTY_LIST; 00971 MmFreePagesByPrimaryColor[FreePageList][i].Flink = MM_EMPTY_LIST; 00972 MmFreePagesByPrimaryColor[ZeroedPageList][i].Blink = MM_EMPTY_LIST; 00973 MmFreePagesByPrimaryColor[FreePageList][i].Blink = MM_EMPTY_LIST; 00974 } 00975 #endif 00976 00977 // 00978 // Go through the page table entries and for any page which is 00979 // valid, update the corresponding PFN database element. 
00980 // 00981 00982 PointerPde = MiGetPdeAddress (PTE_BASE); 00983 00984 PdePage = PointerPde->u.Hard.PageFrameNumber; 00985 Pfn1 = MI_PFN_ELEMENT(PdePage); 00986 Pfn1->PteFrame = PdePage; 00987 Pfn1->PteAddress = PointerPde; 00988 Pfn1->u2.ShareCount += 1; 00989 Pfn1->u3.e2.ReferenceCount = 1; 00990 Pfn1->u3.e1.PageLocation = ActiveAndValid; 00991 Pfn1->u3.e1.PageColor = 00992 MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (PointerPde)); 00993 00994 // 00995 // Add the pages which were used to construct nonpaged pool to 00996 // the PFN database. 00997 // 00998 00999 Pde = MiGetPdeAddress (MmNonPagedSystemStart); 01000 01001 EndPde = MiGetPdeAddress(NON_PAGED_SYSTEM_END); 01002 01003 while (Pde <= EndPde) { 01004 if (Pde->u.Hard.Valid == 1) { 01005 PdePage = Pde->u.Hard.PageFrameNumber; 01006 Pfn1 = MI_PFN_ELEMENT(PdePage); 01007 Pfn1->PteFrame = PointerPde->u.Hard.PageFrameNumber; 01008 Pfn1->PteAddress = Pde; 01009 Pfn1->u2.ShareCount += 1; 01010 Pfn1->u3.e2.ReferenceCount = 1; 01011 Pfn1->u3.e1.PageLocation = ActiveAndValid; 01012 Pfn1->u3.e1.PageColor = 01013 MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (Pde)); 01014 01015 PointerPte = MiGetVirtualAddressMappedByPte (Pde); 01016 for (j = 0 ; j < PTE_PER_PAGE; j += 1) { 01017 if (PointerPte->u.Hard.Valid == 1) { 01018 01019 PageFrameIndex = PointerPte->u.Hard.PageFrameNumber; 01020 Pfn2 = MI_PFN_ELEMENT(PageFrameIndex); 01021 Pfn2->PteFrame = PdePage; 01022 Pfn2->u2.ShareCount += 1; 01023 Pfn2->u3.e2.ReferenceCount = 1; 01024 Pfn2->u3.e1.PageLocation = ActiveAndValid; 01025 01026 Pfn2->PteAddress = 01027 (PMMPTE)(KSEG0_BASE | (PageFrameIndex << PTE_SHIFT)); 01028 01029 Pfn2->u3.e1.PageColor = 01030 MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (Pfn2->PteAddress)); 01031 } 01032 PointerPte += 1; 01033 } 01034 } 01035 Pde += 1; 01036 } 01037 01038 // 01039 // Handle the initial nonpaged pool on expanded systems. 
01040 // 01041 01042 if (MmExpandedNonPagedPoolInBytes) { 01043 PageFrameIndex = (((ULONG_PTR)MmNonPagedPoolStart & ~KSEG0_BASE) >> PAGE_SHIFT); 01044 Pfn1 = MI_PFN_ELEMENT(PageFrameIndex); 01045 j = PageFrameIndex + (MmSizeOfNonPagedPoolInBytes >> PAGE_SHIFT); 01046 while (PageFrameIndex < j) { 01047 Pfn1->PteFrame = PdePage; 01048 Pfn1->u2.ShareCount += 1; 01049 Pfn1->u3.e2.ReferenceCount = 1; 01050 Pfn1->u3.e1.PageLocation = ActiveAndValid; 01051 01052 Pfn1->PteAddress = 01053 (PMMPTE)(KSEG0_BASE | (PageFrameIndex << PTE_SHIFT)); 01054 01055 Pfn1->u3.e1.PageColor = 01056 MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (Pfn1->PteAddress)); 01057 PageFrameIndex += 1; 01058 Pfn1 += 1; 01059 } 01060 } 01061 01062 // 01063 // If page zero is still unused, mark it as in use. This is 01064 // temporary as we want to find bugs where a physical page 01065 // is specified as zero. 01066 // 01067 01068 Pfn1 = &MmPfnDatabase[MmLowestPhysicalPage]; 01069 if (Pfn1->u3.e2.ReferenceCount == 0) { 01070 01071 // 01072 // Make the reference count non-zero and point it into a 01073 // page directory. 01074 // 01075 01076 Pde = MiGetPdeAddress (0xb0000000); 01077 PdePage = Pde->u.Hard.PageFrameNumber; 01078 Pfn1->PteFrame = PdePageNumber; 01079 Pfn1->PteAddress = Pde; 01080 Pfn1->u2.ShareCount += 1; 01081 Pfn1->u3.e2.ReferenceCount = 1; 01082 Pfn1->u3.e1.PageLocation = ActiveAndValid; 01083 Pfn1->u3.e1.PageColor = 01084 MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (Pde)); 01085 } 01086 01087 // end of temporary set to physical page zero. 01088 01089 // 01090 // Walk through the memory descriptors and add pages to the 01091 // free list in the PFN database. 
01092 // 01093 01094 NextMd = LoaderBlock->MemoryDescriptorListHead.Flink; 01095 01096 while (NextMd != &LoaderBlock->MemoryDescriptorListHead) { 01097 01098 MemoryDescriptor = CONTAINING_RECORD(NextMd, 01099 MEMORY_ALLOCATION_DESCRIPTOR, 01100 ListEntry); 01101 01102 i = MemoryDescriptor->PageCount; 01103 NextPhysicalPage = MemoryDescriptor->BasePage; 01104 01105 switch (MemoryDescriptor->MemoryType) { 01106 case LoaderBad: 01107 while (i != 0) { 01108 MiInsertPageInList (MmPageLocationList[BadPageList], 01109 NextPhysicalPage); 01110 i -= 1; 01111 NextPhysicalPage += 1; 01112 } 01113 break; 01114 01115 case LoaderFree: 01116 case LoaderLoadedProgram: 01117 case LoaderFirmwareTemporary: 01118 case LoaderOsloaderStack: 01119 01120 Pfn1 = MI_PFN_ELEMENT (NextPhysicalPage); 01121 while (i != 0) { 01122 if (Pfn1->u3.e2.ReferenceCount == 0) { 01123 01124 // 01125 // Set the PTE address to the physical page for 01126 // virtual address alignment checking. 01127 // 01128 01129 Pfn1->PteAddress = 01130 (PMMPTE)(NextPhysicalPage << PTE_SHIFT); 01131 01132 Pfn1->u3.e1.PageColor = 01133 MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (Pfn1->PteAddress)); 01134 MiInsertPageInList (MmPageLocationList[FreePageList], 01135 NextPhysicalPage); 01136 } 01137 Pfn1 += 1; 01138 i -= 1; 01139 NextPhysicalPage += 1; 01140 } 01141 break; 01142 01143 default: 01144 01145 PointerPte = MiGetPteAddress (KSEG0_BASE | 01146 (NextPhysicalPage << PAGE_SHIFT)); 01147 Pfn1 = MI_PFN_ELEMENT (NextPhysicalPage); 01148 while (i != 0) { 01149 01150 // 01151 // Set page as in use. 
01152 // 01153 01154 Pfn1->PteFrame = PdePageNumber; 01155 Pfn1->PteAddress = PointerPte; 01156 Pfn1->u2.ShareCount += 1; 01157 Pfn1->u3.e2.ReferenceCount = 1; 01158 Pfn1->u3.e1.PageLocation = ActiveAndValid; 01159 Pfn1->u3.e1.PageColor = 01160 MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (PointerPte)); 01161 01162 Pfn1 += 1; 01163 i -= 1; 01164 NextPhysicalPage += 1; 01165 PointerPte += 1; 01166 } 01167 01168 break; 01169 } 01170 01171 NextMd = MemoryDescriptor->ListEntry.Flink; 01172 } 01173 01174 // 01175 // Indicate that the PFN database is allocated in NonPaged pool. 01176 // 01177 if (PfnInKseg0 == FALSE) { 01178 01179 // 01180 // The PFN database is allocated in virtual memory 01181 // 01182 // Set the start and end of allocation. 01183 // 01184 01185 Pfn1 = MI_PFN_ELEMENT(MiGetPteAddress(&MmPfnDatabase[MmLowestPhysicalPage])->u.Hard.PageFrameNumber); 01186 Pfn1->u3.e1.StartOfAllocation = 1; 01187 Pfn1 = MI_PFN_ELEMENT(MiGetPteAddress(&MmPfnDatabase[MmHighestPossiblePhysicalPage])->u.Hard.PageFrameNumber); 01188 Pfn1->u3.e1.EndOfAllocation = 1; 01189 01190 } else { 01191 01192 // 01193 // The PFN database is allocated in KSEG0. 01194 // 01195 // Mark all PFN entries for the PFN pages in use. 01196 // 01197 01198 PageNumber = ((ULONG)MmPfnDatabase - KSEG0_BASE) >> PAGE_SHIFT; 01199 Pfn1 = MI_PFN_ELEMENT(PageNumber); 01200 do { 01201 Pfn1->PteAddress = (PMMPTE)(PageNumber << PTE_SHIFT); 01202 Pfn1->u3.e1.PageColor = 01203 MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (Pfn1->PteAddress)); 01204 Pfn1 += 1; 01205 PfnAllocation -= 1; 01206 } while (PfnAllocation != 0); 01207 01208 // 01209 // Scan the PFN database backward for pages that are completely zero. 
01210 // These pages are unused and can be added to the free list 01211 // 01212 01213 if (MmDynamicPfn == FALSE) { 01214 BottomPfn = MI_PFN_ELEMENT(MmHighestPhysicalPage); 01215 do { 01216 01217 // 01218 // Compute the address of the start of the page that is next 01219 // lower in memory and scan backwards until that page address 01220 // is reached or just crossed. 01221 // 01222 01223 if (((ULONG)BottomPfn & (PAGE_SIZE - 1)) != 0) { 01224 BasePfn = (PMMPFN)((ULONG)BottomPfn & ~(PAGE_SIZE - 1)); 01225 TopPfn = BottomPfn + 1; 01226 01227 } else { 01228 BasePfn = (PMMPFN)((ULONG)BottomPfn - PAGE_SIZE); 01229 TopPfn = BottomPfn; 01230 } 01231 01232 while (BottomPfn > BasePfn) { 01233 BottomPfn -= 1; 01234 } 01235 01236 // 01237 // If the entire range over which the PFN entries span is 01238 // completely zero and the PFN entry that maps the page is 01239 // not in the range, then add the page to the appropriate 01240 // free list. 01241 // 01242 01243 Range = (ULONG)TopPfn - (ULONG)BottomPfn; 01244 if (RtlCompareMemoryUlong((PVOID)BottomPfn, Range, 0) == Range) { 01245 01246 // 01247 // Set the PTE address to the physical page for 01248 // virtual address alignment checking. 01249 // 01250 01251 PageNumber = ((ULONG)BasePfn - KSEG0_BASE) >> PAGE_SHIFT; 01252 Pfn1 = MI_PFN_ELEMENT(PageNumber); 01253 01254 ASSERT(Pfn1->u3.e2.ReferenceCount == 0); 01255 01256 PfnAllocation += 1; 01257 01258 Pfn1->PteAddress = (PMMPTE)(PageNumber << PTE_SHIFT); 01259 Pfn1->u3.e1.PageColor = 01260 MI_GET_COLOR_FROM_SECONDARY(GET_PAGE_COLOR_FROM_PTE (Pfn1->PteAddress)); 01261 01262 MiInsertPageInList(MmPageLocationList[FreePageList], 01263 PageNumber); 01264 } 01265 01266 } while (BottomPfn > MmPfnDatabase); 01267 } 01268 } 01269 01270 // 01271 // Indicate that nonpaged pool must succeed is allocated in 01272 // nonpaged pool. 
01273 // 01274 01275 i = MmSizeOfNonPagedMustSucceed; 01276 Pfn1 = MI_PFN_ELEMENT(MI_CONVERT_PHYSICAL_TO_PFN (MmNonPagedMustSucceed)); 01277 01278 while ((LONG)i > 0) { 01279 Pfn1->u3.e1.StartOfAllocation = 1; 01280 Pfn1->u3.e1.EndOfAllocation = 1; 01281 i -= PAGE_SIZE; 01282 Pfn1 += 1; 01283 } 01284 01285 KeInitializeSpinLock (&MmSystemSpaceLock); 01286 KeInitializeSpinLock (&MmPfnLock); 01287 01288 // 01289 // Initialize the nonpaged available PTEs for mapping I/O space 01290 // and kernel stacks. 01291 // 01292 01293 PointerPte = MiGetPteAddress (MmNonPagedSystemStart); 01294 01295 // 01296 // Since the initial nonpaged pool must always reside in KSEG0 (many changes 01297 // would be needed in this routine otherwise), reallocate the PTEs for it 01298 // to the pagable system PTE pool now. 01299 // 01300 01301 MmNumberOfSystemPtes = MiGetPteAddress(MmNonPagedPoolExpansionStart) - PointerPte - 1; 01302 01303 MiInitializeSystemPtes (PointerPte, MmNumberOfSystemPtes, SystemPteSpace); 01304 01305 // 01306 // Initialize the nonpaged pool. 01307 // 01308 01309 InitializePool (NonPagedPool, 0); 01310 01311 // 01312 // Initialize memory management structures for this process. 01313 // 01314 01315 // 01316 // Build working set list. System initialization has created 01317 // a PTE for hyperspace. 01318 // 01319 // Note, we can't remove a zeroed page as hyper space does not 01320 // exist and we map non-zeroed pages into hyper space to zero. 01321 // 01322 01323 PointerPte = MiGetPdeAddress(HYPER_SPACE); 01324 01325 ASSERT (PointerPte->u.Hard.Valid == 1); 01326 PointerPte->u.Hard.Global = 0; 01327 PointerPte->u.Hard.Write = 1; 01328 PageFrameIndex = PointerPte->u.Hard.PageFrameNumber; 01329 01330 // 01331 // Point to the page table page we just created and zero it. 01332 // 01333 01334 PointerPte = MiGetPteAddress(HYPER_SPACE); 01335 RtlZeroMemory ((PVOID)PointerPte, PAGE_SIZE); 01336 01337 // 01338 // Hyper space now exists, set the necessary variables. 
01339 // 01340 01341 MmFirstReservedMappingPte = MiGetPteAddress (FIRST_MAPPING_PTE); 01342 MmLastReservedMappingPte = MiGetPteAddress (LAST_MAPPING_PTE); 01343 01344 MmWorkingSetList = WORKING_SET_LIST; 01345 MmWsle = (PMMWSLE)((PUCHAR)WORKING_SET_LIST + sizeof(MMWSL)); 01346 01347 // 01348 // Initialize this process's memory management structures including 01349 // the working set list. 01350 // 01351 01352 // 01353 // The PFN element for the page directory has already been initialized, 01354 // zero the reference count and the share count so they won't be 01355 // wrong. 01356 // 01357 01358 Pfn1 = MI_PFN_ELEMENT (PdePageNumber); 01359 01360 LOCK_PFN (OldIrql); 01361 01362 Pfn1->u2.ShareCount = 0; 01363 Pfn1->u3.e2.ReferenceCount = 0; 01364 01365 // 01366 // The PFN element for the PDE which maps hyperspace has already 01367 // been initialized, zero the reference count and the share count 01368 // so they won't be wrong. 01369 // 01370 01371 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 01372 Pfn1->u2.ShareCount = 0; 01373 Pfn1->u3.e2.ReferenceCount = 0; 01374 01375 CurrentProcess = PsGetCurrentProcess (); 01376 01377 // 01378 // Get a page for the working set list and map it into the Page 01379 // directory at the page after hyperspace. 01380 // 01381 01382 PointerPte = MiGetPteAddress (HYPER_SPACE); 01383 PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE(PointerPte)); 01384 01385 CurrentProcess->WorkingSetPage = PageFrameIndex; 01386 01387 TempPte.u.Hard.PageFrameNumber = PageFrameIndex; 01388 PointerPde = MiGetPdeAddress (HYPER_SPACE) + 1; 01389 01390 // 01391 // Assert that the double mapped pages have the same alignment. 
01392 // 01393 01394 ASSERT ((PointerPte->u.Long & (0xF << PTE_SHIFT)) == 01395 (PointerPde->u.Long & (0xF << PTE_SHIFT))); 01396 01397 *PointerPde = TempPte; 01398 PointerPde->u.Hard.Global = 0; 01399 01400 PointerPte = MiGetVirtualAddressMappedByPte (PointerPde); 01401 01402 KeFillEntryTb ((PHARDWARE_PTE)PointerPde, 01403 PointerPte, 01404 TRUE); 01405 01406 RtlZeroMemory ((PVOID)PointerPte, PAGE_SIZE); 01407 01408 TempPte = *PointerPde; 01409 TempPte.u.Hard.Valid = 0; 01410 TempPte.u.Hard.Global = 0; 01411 01412 KeFlushSingleTb (PointerPte, 01413 TRUE, 01414 FALSE, 01415 (PHARDWARE_PTE)PointerPde, 01416 TempPte.u.Hard); 01417 01418 UNLOCK_PFN (OldIrql); 01419 01420 // 01421 // Initialize hyperspace for this process. 01422 // 01423 01424 PointerPte = MmFirstReservedMappingPte; 01425 PointerPte->u.Hard.PageFrameNumber = NUMBER_OF_MAPPING_PTES; 01426 01427 CurrentProcess->Vm.MaximumWorkingSetSize = MmSystemProcessWorkingSetMax; 01428 CurrentProcess->Vm.MinimumWorkingSetSize = MmSystemProcessWorkingSetMin; 01429 01430 MmInitializeProcessAddressSpace (CurrentProcess, 01431 (PEPROCESS)NULL, 01432 (PVOID)NULL, 01433 (PVOID)NULL); 01434 01435 *PointerPde = ZeroKernelPte; 01436 01437 // 01438 // Check to see if moving the secondary page structures to the end 01439 // of the PFN database is a waste of memory. And if so, copy it 01440 // to paged pool. 01441 // 01442 // If the PFN database ends on a page aligned boundary and the 01443 // size of the two arrays is less than a page, free the page 01444 // and allocate nonpagedpool for this. 
01445 // 01446 01447 if ((((ULONG)MmFreePagesByColor[0] & (PAGE_SIZE - 1)) == 0) && 01448 ((MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES)) < PAGE_SIZE)) { 01449 01450 PMMCOLOR_TABLES c; 01451 01452 c = MmFreePagesByColor[0]; 01453 01454 MmFreePagesByColor[0] = ExAllocatePoolWithTag (NonPagedPoolMustSucceed, 01455 MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES), 01456 ' mM'); 01457 01458 MmFreePagesByColor[1] = &MmFreePagesByColor[0][MmSecondaryColors]; 01459 01460 RtlMoveMemory (MmFreePagesByColor[0], 01461 c, 01462 MmSecondaryColors * 2 * sizeof(MMCOLOR_TABLES)); 01463 01464 // 01465 // Free the page. 01466 // 01467 01468 if (!MI_IS_PHYSICAL_ADDRESS(c)) { 01469 PointerPte = MiGetPteAddress(c); 01470 PageFrameIndex = PointerPte->u.Hard.PageFrameNumber; 01471 *PointerPte = ZeroKernelPte; 01472 } else { 01473 PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (c); 01474 } 01475 01476 LOCK_PFN (OldIrql); 01477 01478 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 01479 ASSERT ((Pfn1->u3.e2.ReferenceCount <= 1) && (Pfn1->u2.ShareCount <= 1)); 01480 Pfn1->u2.ShareCount = 0; 01481 Pfn1->u3.e2.ReferenceCount = 0; 01482 MI_SET_PFN_DELETED (Pfn1); 01483 #if DBG 01484 Pfn1->u3.e1.PageLocation = StandbyPageList; 01485 #endif //DBG 01486 MiInsertPageInList (MmPageLocationList[FreePageList], PageFrameIndex); 01487 UNLOCK_PFN (OldIrql); 01488 } 01489 01490 return; 01491 } }


Variable Documentation

SIZE_T MmExpandedNonPagedPoolInBytes
 

Definition at line 37 of file inialpha.c.

Referenced by MiInitMachineDependent().


Generated on Sat May 15 19:44:10 2004 for test by doxygen 1.3.7