
physical.c File Reference

#include "mi.h"

Go to the source code of this file.

Defines

#define COPY_STACK_SIZE   256
#define BITS_IN_ULONG   ((sizeof (ULONG)) * 8)
#define LOWEST_USABLE_PHYSICAL_ADDRESS   (16 * 1024 * 1024)
#define LOWEST_USABLE_PHYSICAL_PAGE   (LOWEST_USABLE_PHYSICAL_ADDRESS >> PAGE_SHIFT)
#define LOWEST_BITMAP_PHYSICAL_PAGE   0
#define MI_FRAME_TO_BITMAP_INDEX(x)   ((ULONG)(x))
#define MI_BITMAP_INDEX_TO_FRAME(x)   ((ULONG)(x))

Functions

VOID MiFlushUserPhysicalPteList (IN PMMPTE_FLUSH_LIST PteFlushList)
NTSTATUS NtMapUserPhysicalPages (IN PVOID VirtualAddress, IN ULONG_PTR NumberOfPages, IN PULONG_PTR UserPfnArray OPTIONAL)
NTSTATUS NtMapUserPhysicalPagesScatter (IN PVOID *VirtualAddresses, IN ULONG_PTR NumberOfPages, IN PULONG_PTR UserPfnArray OPTIONAL)
NTSTATUS NtAllocateUserPhysicalPages (IN HANDLE ProcessHandle, IN OUT PULONG_PTR NumberOfPages, OUT PULONG_PTR UserPfnArray)
NTSTATUS NtFreeUserPhysicalPages (IN HANDLE ProcessHandle, IN OUT PULONG_PTR NumberOfPages, IN PULONG_PTR UserPfnArray)
VOID MiRemoveUserPhysicalPagesVad (IN PMMVAD_SHORT Vad)
VOID MiUpdateVadPhysicalPages (IN ULONG_PTR TotalFreedPages)
VOID MiCleanPhysicalProcessPages (IN PEPROCESS Process)

Variables

ULONG_PTR MmVadPhysicalPages


Define Documentation

#define BITS_IN_ULONG   ((sizeof (ULONG)) * 8)
 

Definition at line 67 of file physical.c.

#define COPY_STACK_SIZE   256
 

Definition at line 65 of file physical.c.

Referenced by MiCleanPhysicalProcessPages(), MiDoPoolCopy(), NtFreeUserPhysicalPages(), NtMapUserPhysicalPages(), and NtMapUserPhysicalPagesScatter().

#define LOWEST_BITMAP_PHYSICAL_PAGE   0
 

Definition at line 72 of file physical.c.

#define LOWEST_USABLE_PHYSICAL_ADDRESS   (16 * 1024 * 1024)
 

Definition at line 69 of file physical.c.

Referenced by NtAllocateUserPhysicalPages().

#define LOWEST_USABLE_PHYSICAL_PAGE   (LOWEST_USABLE_PHYSICAL_ADDRESS >> PAGE_SHIFT)
 

Definition at line 70 of file physical.c.

Referenced by MiCleanPhysicalProcessPages(), MiRemoveUserPhysicalPagesVad(), NtAllocateUserPhysicalPages(), NtFreeUserPhysicalPages(), NtMapUserPhysicalPages(), and NtMapUserPhysicalPagesScatter().

#define MI_BITMAP_INDEX_TO_FRAME(x)   ((ULONG)(x))
 

Definition at line 74 of file physical.c.

Referenced by MiCleanPhysicalProcessPages().

#define MI_FRAME_TO_BITMAP_INDEX(x)   ((ULONG)(x))
 

Definition at line 73 of file physical.c.

Referenced by NtAllocateUserPhysicalPages(), NtFreeUserPhysicalPages(), NtMapUserPhysicalPages(), and NtMapUserPhysicalPagesScatter().


Function Documentation

VOID MiCleanPhysicalProcessPages (IN PEPROCESS Process)
 

Definition at line 2313 of file physical.c.

References APC_LEVEL, ASSERT, BitMap, COPY_STACK_SIZE, ExFreePool(), LOWEST_USABLE_PHYSICAL_PAGE, MI_BITMAP_INDEX_TO_FRAME, MI_PFN_ELEMENT, MI_PFN_IS_AWE, MI_SET_PFN_DELETED, MiUpdateVadPhysicalPages(), MmFreePagesFromMdl(), MmHighestPossiblePhysicalPage, MmInitializeMdl, NonPagedPool, NULL, PAGE_SHIFT, PsReturnPoolQuota(), _MMPFN::PteAddress, RtlFindSetBits(), TRUE, and _MMPFN::u2.

Referenced by MmCleanProcessAddressSpace().

02319 : 02320 02321 This routine frees the VadPhysicalBitMap, any remaining physical pages (as 02322 they may not have been currently mapped into any Vads) and returns the 02323 bitmap quota. 02324 02325 Arguments: 02326 02327 Process - Supplies the process to clean. 02328 02329 Return Value: 02330 02331 None. 02332 02333 Environment: 02334 02335 Kernel mode, APC level, working set mutex held. Called only on process 02336 exit, so the AWE lock is not needed here. 02337 02338 --*/ 02339 02340 { 02341 PMMPFN Pfn1; 02342 ULONG BitMapSize; 02343 ULONG BitMapIndex; 02344 ULONG BitMapHint; 02345 PRTL_BITMAP BitMap; 02346 PPFN_NUMBER MdlPage; 02347 PFN_NUMBER MdlHack[(sizeof(MDL) / sizeof(PFN_NUMBER)) + COPY_STACK_SIZE]; 02348 ULONG_PTR MdlPages; 02349 ULONG_PTR NumberOfPages; 02350 ULONG_PTR TotalFreedPages; 02351 PMDL MemoryDescriptorList; 02352 PFN_NUMBER PageFrameIndex; 02353 #if DBG 02354 ULONG_PTR ActualPages = 0; 02355 ULONG_PTR ExpectedPages = 0; 02356 #endif 02357 02358 ASSERT (KeGetCurrentIrql() == APC_LEVEL); 02359 02360 #if DBG 02361 ExpectedPages = Process->VadPhysicalPages; 02362 #else 02363 if (Process->VadPhysicalPages == 0) { 02364 return; 02365 } 02366 #endif 02367 02368 TotalFreedPages = 0; 02369 BitMap = Process->VadPhysicalPagesBitMap; 02370 02371 if (BitMap != NULL) { 02372 02373 MdlPages = COPY_STACK_SIZE; 02374 MemoryDescriptorList = (PMDL)&MdlHack[0]; 02375 02376 MdlPage = (PPFN_NUMBER)(MemoryDescriptorList + 1); 02377 NumberOfPages = 0; 02378 02379 BitMapHint = 0; 02380 02381 while (TRUE) { 02382 02383 BitMapIndex = RtlFindSetBits (BitMap, 1, BitMapHint); 02384 02385 if (BitMapIndex < BitMapHint) { 02386 break; 02387 } 02388 02389 if (BitMapIndex == 0xFFFFFFFF) { 02390 break; 02391 } 02392 02393 PageFrameIndex = MI_BITMAP_INDEX_TO_FRAME(BitMapIndex); 02394 02395 #if defined (_WIN64) 02396 02397 // 02398 // This may become a problem for 64-bit systems with > 32tb 02399 // of physical memory as the 3rd parameter to RtlFindSetBits is 02400 // a ULONG. 02401 // 02402 02403 ASSERT (PageFrameIndex < 0x100000000); 02404 #endif 02405 02406 // 02407 // The bitmap search wraps, so handle it here. 02408 // Note PFN 0 is illegal. 02409 // 02410 02411 ASSERT (PageFrameIndex != 0); 02412 ASSERT (PageFrameIndex >= LOWEST_USABLE_PHYSICAL_PAGE); 02413 02414 ASSERT (ExpectedPages != 0); 02415 Pfn1 = MI_PFN_ELEMENT(PageFrameIndex); 02416 ASSERT (Pfn1->u2.ShareCount == 1); 02417 ASSERT (Pfn1->PteAddress == (PMMPTE)0); 02418 02419 ASSERT (MI_PFN_IS_AWE (Pfn1)); 02420 02421 MI_SET_PFN_DELETED(Pfn1); 02422 02423 *MdlPage = PageFrameIndex; 02424 MdlPage += 1; 02425 NumberOfPages += 1; 02426 #if DBG 02427 ActualPages += 1; 02428 #endif 02429 02430 if (NumberOfPages == COPY_STACK_SIZE) { 02431 02432 // 02433 // Free the pages in the full MDL. 02434 // 02435 02436 MmInitializeMdl (MemoryDescriptorList, 02437 0, 02438 NumberOfPages << PAGE_SHIFT); 02439 02440 MmFreePagesFromMdl (MemoryDescriptorList); 02441 02442 MdlPage = (PPFN_NUMBER)(MemoryDescriptorList + 1); 02443 Process->VadPhysicalPages -= NumberOfPages; 02444 TotalFreedPages += NumberOfPages; 02445 NumberOfPages = 0; 02446 } 02447 02448 BitMapHint = BitMapIndex + 1; 02449 if (BitMapHint >= BitMap->SizeOfBitMap) { 02450 break; 02451 } 02452 } 02453 02454 // 02455 // Free any straggling MDL pages here. 
02456 // 02457 02458 if (NumberOfPages != 0) { 02459 MmInitializeMdl (MemoryDescriptorList, 02460 0, 02461 NumberOfPages << PAGE_SHIFT); 02462 02463 MmFreePagesFromMdl (MemoryDescriptorList); 02464 Process->VadPhysicalPages -= NumberOfPages; 02465 TotalFreedPages += NumberOfPages; 02466 } 02467 02468 ASSERT (ExpectedPages == ActualPages); 02469 02470 BitMapSize = sizeof(RTL_BITMAP) + (ULONG)((((MmHighestPossiblePhysicalPage + 1) + 31) / 32) * 4); 02471 02472 Process->VadPhysicalPagesBitMap = NULL; 02473 ExFreePool (BitMap); 02474 PsReturnPoolQuota (Process, NonPagedPool, BitMapSize); 02475 } 02476 02477 ASSERT (ExpectedPages == ActualPages); 02478 ASSERT (Process->VadPhysicalPages == 0); 02479 02480 if (TotalFreedPages != 0) { 02481 MiUpdateVadPhysicalPages (TotalFreedPages); 02482 } 02483 02484 return; 02485 }
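
The stack-resident MDL idiom above is easy to miss in the flattened listing. The sketch below restates that pattern in isolation (kernel-mode C, names taken from the listing; it summarizes the technique rather than replacing the routine): an array of PFN_NUMBERs large enough for an MDL header plus COPY_STACK_SIZE frames is carved from the stack, so batches of frames can be freed without any pool allocation.

    PFN_NUMBER MdlHack[(sizeof(MDL) / sizeof(PFN_NUMBER)) + COPY_STACK_SIZE];
    PMDL MemoryDescriptorList;
    PPFN_NUMBER MdlPage;
    ULONG_PTR NumberOfPages;

    MemoryDescriptorList = (PMDL)&MdlHack[0];

    //
    // The frame array lives immediately after the MDL header.
    //

    MdlPage = (PPFN_NUMBER)(MemoryDescriptorList + 1);
    NumberOfPages = 0;

    //
    // For each frame to release: *MdlPage++ = PageFrameIndex; NumberOfPages += 1;
    // Whenever NumberOfPages reaches COPY_STACK_SIZE (and once more for any
    // stragglers at the end of the scan), describe the batch and free it:
    //

    MmInitializeMdl (MemoryDescriptorList, 0, NumberOfPages << PAGE_SHIFT);
    MmFreePagesFromMdl (MemoryDescriptorList);

    //
    // Reset for the next batch.
    //

    MdlPage = (PPFN_NUMBER)(MemoryDescriptorList + 1);
    NumberOfPages = 0;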

VOID MiFlushUserPhysicalPteList (IN PMMPTE_FLUSH_LIST PteFlushList)
 

Definition at line 2488 of file physical.c.

References FALSE, KeFlushEntireTb(), KeFlushMultipleTb(), KeFlushSingleTb(), MM_MAXIMUM_FLUSH_COUNT, NULL, TRUE, _MMPTE::u, and ZeroPte.

Referenced by MiRemoveUserPhysicalPagesVad(), NtAllocateUserPhysicalPages(), NtFreeUserPhysicalPages(), NtMapUserPhysicalPages(), and NtMapUserPhysicalPagesScatter().

/*++

Routine Description:

    This routine flushes all the PTEs in the PTE flush list.
    If the list has overflowed, the entire TB is flushed.

    N.B. The intent was for this routine to NEVER write the PTEs and have
    the caller do this instead.  There is no such export from Ke, so
    the flush of a single TB just reuses the PTE.

Arguments:

    PteFlushList - Supplies an optional pointer to the list to be flushed.

Return Value:

    None.

Environment:

    Kernel mode, PFN lock NOT held.

--*/

{
    ULONG count;

    count = PteFlushList->Count;

    if (count == 0) {
        return;
    }

    if (count != 1) {
        if (count < MM_MAXIMUM_FLUSH_COUNT) {
            KeFlushMultipleTb (count,
                               &PteFlushList->FlushVa[0],
                               TRUE,
                               FALSE,
                               NULL,
                               ZeroPte.u.Flush);
        }
        else {

            //
            // Array has overflowed, flush the entire TB.
            //

            KeFlushEntireTb (TRUE, FALSE);
        }
    }
    else {

        //
        // This always writes the (same) value into the PTE.
        //

        KeFlushSingleTb (PteFlushList->FlushVa[0],
                         TRUE,
                         FALSE,
                         (PHARDWARE_PTE)PteFlushList->FlushPte[0],
                         *(PHARDWARE_PTE)PteFlushList->FlushPte[0]);
    }

    PteFlushList->Count = 0;
    return;
}
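
Every caller of this routine on this page follows the same accumulate-then-flush pattern; a condensed sketch of that pattern, lifted from the callers shown elsewhere on this page, is:

    MMPTE_FLUSH_LIST PteFlushList;

    PteFlushList.Count = 0;

    //
    // For each PTE being invalidated: record it (unless the list is already
    // full) and then write the invalid PTE.
    //

    if (PteFlushList.Count != MM_MAXIMUM_FLUSH_COUNT) {
        PteFlushList.FlushVa[PteFlushList.Count] =
            MiGetVirtualAddressMappedByPte (PointerPte);
        PteFlushList.FlushPte[PteFlushList.Count] = PointerPte;
        PteFlushList.Count += 1;
    }

    MI_WRITE_INVALID_PTE (PointerPte, ZeroPte);

    //
    // After the loop, one call flushes all the accumulated entries (or the
    // entire TB if the list overflowed).
    //

    MiFlushUserPhysicalPteList (&PteFlushList);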

VOID MiRemoveUserPhysicalPagesVad (IN PMMVAD_SHORT Vad)
 

Definition at line 2110 of file physical.c.

References APC_LEVEL, ASSERT, _MMPTE_FLUSH_LIST::Count, DbgPrint, _MMPTE_FLUSH_LIST::FlushPte, _MMPTE_FLUSH_LIST::FlushVa, LOCK_AWE, LOCK_PFN2, LOWEST_USABLE_PHYSICAL_PAGE, MI_GET_PAGE_FRAME_FROM_PTE, MI_PFN_ELEMENT, MI_PFN_IS_AWE, MI_VPN_TO_VA, MI_VPN_TO_VA_ENDING, MI_WRITE_INVALID_PTE, MiFlushUserPhysicalPteList(), MiGetPteAddress, MiGetVirtualAddressMappedByPte, MM_MAXIMUM_FLUSH_COUNT, NULL, _EPROCESS::PhysicalVadList, PsGetCurrentProcess, _MMPTE::u, UNLOCK_AWE, UNLOCK_PFN2, _MI_PHYSICAL_VIEW::Vad, _EPROCESS::VadPhysicalPages, _EPROCESS::VadPhysicalPagesBitMap, and ZeroPte.

Referenced by MmCleanProcessAddressSpace(), and NtFreeVirtualMemory().

02116 : 02117 02118 This function removes the user-physical-pages mapped region from the 02119 current process's address space. This mapped region is private memory. 02120 02121 The physical pages of this Vad are unmapped here, but not freed. 02122 02123 Pagetable pages are freed and their use/commitment counts/quotas are 02124 managed by our caller. 02125 02126 Arguments: 02127 02128 Vad - Supplies the VAD which manages the address space. 02129 02130 Return Value: 02131 02132 None. 02133 02134 Environment: 02135 02136 APC level, working set mutex and address creation mutex held. 02137 02138 --*/ 02139 02140 { 02141 PMMPFN Pfn1; 02142 PEPROCESS Process; 02143 PFN_NUMBER PageFrameIndex; 02144 MMPTE_FLUSH_LIST PteFlushList; 02145 PMMPTE PointerPte; 02146 MMPTE PteContents; 02147 PMMPTE EndingPte; 02148 #if DBG 02149 KIRQL OldIrql; 02150 KIRQL OldIrql2; 02151 ULONG_PTR ActualPages; 02152 ULONG_PTR ExpectedPages; 02153 PLIST_ENTRY NextEntry; 02154 PMI_PHYSICAL_VIEW PhysicalView; 02155 #endif 02156 02157 ASSERT (KeGetCurrentIrql() == APC_LEVEL); 02158 02159 ASSERT (Vad->u.VadFlags.UserPhysicalPages == 1); 02160 02161 Process = PsGetCurrentProcess(); 02162 02163 // 02164 // If the physical pages count is zero, nothing needs to be done. 02165 // On checked systems, verify the list anyway. 02166 // 02167 02168 #if DBG 02169 ActualPages = 0; 02170 ExpectedPages = Process->VadPhysicalPages; 02171 #else 02172 if (Process->VadPhysicalPages == 0) { 02173 return; 02174 } 02175 #endif 02176 02177 // 02178 // The caller must have removed this Vad from the physical view list, 02179 // otherwise another thread could immediately remap pages back into the Vad. 02180 // 02181 // This allows us to proceed without acquiring the AWE or PFN locks - 02182 // everything can be done under the WS lock which is already held. 02183 // 02184 02185 #if DBG 02186 LOCK_AWE (Process, OldIrql); 02187 02188 LOCK_PFN2 (OldIrql2); 02189 02190 NextEntry = Process->PhysicalVadList.Flink; 02191 while (NextEntry != &Process->PhysicalVadList) { 02192 02193 PhysicalView = CONTAINING_RECORD(NextEntry, 02194 MI_PHYSICAL_VIEW, 02195 ListEntry); 02196 02197 if (PhysicalView->Vad == (PMMVAD)Vad) { 02198 DbgPrint ("MiRemoveUserPhysicalPagesVad : Vad %p still in list!\n", 02199 Vad); 02200 DbgBreakPoint (); 02201 } 02202 02203 NextEntry = NextEntry->Flink; 02204 } 02205 02206 UNLOCK_PFN2 (OldIrql2); 02207 UNLOCK_AWE (Process, OldIrql); 02208 #endif 02209 02210 // 02211 // If the physical pages bitmap doesn't exist, nothing needs to be done. 02212 // 02213 02214 if (Process->VadPhysicalPagesBitMap == NULL) { 02215 ASSERT (ExpectedPages == 0); 02216 return; 02217 } 02218 02219 PointerPte = MiGetPteAddress (MI_VPN_TO_VA (Vad->StartingVpn)); 02220 EndingPte = MiGetPteAddress (MI_VPN_TO_VA_ENDING (Vad->EndingVpn)); 02221 02222 PteFlushList.Count = 0; 02223 02224 while (PointerPte <= EndingPte) { 02225 PteContents = *PointerPte; 02226 if (PteContents.u.Hard.Valid == 0) { 02227 PointerPte += 1; 02228 continue; 02229 } 02230 02231 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 02232 02233 ASSERT (PageFrameIndex >= LOWEST_USABLE_PHYSICAL_PAGE); 02234 ASSERT (ExpectedPages != 0); 02235 02236 Pfn1 = MI_PFN_ELEMENT(PageFrameIndex); 02237 02238 ASSERT (MI_PFN_IS_AWE (Pfn1)); 02239 ASSERT (Pfn1->u2.ShareCount == 2); 02240 ASSERT (Pfn1->PteAddress == PointerPte); 02241 02242 // 02243 // The frame is currently mapped in this Vad so the PTE must 02244 // be cleared and the TB entry flushed. 
02245 // 02246 02247 Pfn1->u2.ShareCount -= 1; 02248 Pfn1->PteAddress = (PMMPTE)0; 02249 02250 if (PteFlushList.Count != MM_MAXIMUM_FLUSH_COUNT) { 02251 PteFlushList.FlushVa[PteFlushList.Count] = 02252 MiGetVirtualAddressMappedByPte (PointerPte); 02253 PteFlushList.FlushPte[PteFlushList.Count] = PointerPte; 02254 PteFlushList.Count += 1; 02255 } 02256 02257 MI_WRITE_INVALID_PTE (PointerPte, ZeroPte); 02258 02259 PointerPte += 1; 02260 #if DBG 02261 ActualPages += 1; 02262 #endif 02263 ASSERT (ActualPages <= ExpectedPages); 02264 } 02265 02266 // 02267 // Flush the TB entries for these pages. Note ZeroPte is only used 02268 // when the FlushPte[0] field is nonzero or if only a single PTE is 02269 // being flushed. 02270 // 02271 02272 MiFlushUserPhysicalPteList (&PteFlushList); 02273 02274 return; 02275 }

VOID MiUpdateVadPhysicalPages (IN ULONG_PTR TotalFreedPages)
 

Definition at line 2278 of file physical.c.

References LOCK_PFN, MmVadPhysicalPages, and UNLOCK_PFN.

Referenced by MiCleanPhysicalProcessPages().

/*++

Routine Description:

    Nonpaged helper routine to update the VadPhysicalPages count.

Arguments:

    TotalFreedPages - Supplies the number of pages just freed.

Return Value:

    None.

Environment:

    Kernel mode, APC level or below.

--*/

{
    KIRQL OldIrql;

    LOCK_PFN (OldIrql);
    MmVadPhysicalPages -= TotalFreedPages;
    UNLOCK_PFN (OldIrql);

    return;
}

NTSTATUS NtAllocateUserPhysicalPages (IN HANDLE ProcessHandle, IN OUT PULONG_PTR NumberOfPages, OUT PULONG_PTR UserPfnArray)
 

Definition at line 970 of file physical.c.

References _EPROCESS::AddressSpaceDeleted, ASSERT, BitMap, _MDL::ByteCount, _MMPTE_FLUSH_LIST::Count, ExAllocatePoolWithTag, EXCEPTION_EXECUTE_HANDLER, ExFreePool(), ExSystemExceptionFilter(), FALSE, _MMPTE_FLUSH_LIST::FlushPte, _MMPTE_FLUSH_LIST::FlushVa, KeAttachProcess(), KeDetachProcess(), KernelMode, KPROCESSOR_MODE, L, LOCK_AWE, LOCK_PFN, LOCK_WS, LOWEST_USABLE_PHYSICAL_ADDRESS, LOWEST_USABLE_PHYSICAL_PAGE, MI_FRAME_TO_BITMAP_INDEX, MI_PFN_ELEMENT, MI_PFN_IS_AWE, MI_SET_PFN_DELETED, MI_WRITE_INVALID_PTE, MiFlushUserPhysicalPteList(), MiGetVirtualAddressMappedByPte, MM_MAXIMUM_FLUSH_COUNT, MmAllocatePagesForMdl(), MmFreePagesFromMdl(), MmHighestPossiblePhysicalPage, MmVadPhysicalPages, _MDL::Next, NonPagedPool, NT_SUCCESS, NTSTATUS(), NULL, ObDereferenceObject, ObReferenceObjectByHandle(), PAGE_SHIFT, PAGE_SIZE, PASSIVE_LEVEL, _EPROCESS::Pcb, ProbeForWrite(), ProbeForWritePointer, PsChargePoolQuota(), PsGetCurrentProcess, PsProcessType, PsReturnPoolQuota(), RtlClearAllBits(), RtlClearBits(), RtlFindSetBits(), RtlInitializeBitMap(), RtlSetBits(), SeLockMemoryPrivilege, SeSinglePrivilegeCheck(), Status, TRUE, _MMPTE::u, UNLOCK_AWE, UNLOCK_PFN, UNLOCK_WS, _EPROCESS::VadPhysicalPages, _EPROCESS::VadPhysicalPagesBitMap, and ZeroPte.

00978 : 00979 00980 This function allocates nonpaged physical pages for the specified 00981 subject process. 00982 00983 No WSLEs are maintained for this range. 00984 00985 The caller must check the NumberOfPages returned to determine how many 00986 pages were actually allocated (this number may be less than the requested 00987 amount). 00988 00989 On success, the user array is filled with the allocated physical page 00990 frame numbers (only up to the returned NumberOfPages is filled in). 00991 00992 No PTEs are filled here - this gives the application the flexibility 00993 to order the address space with no metadata structure imposed by the Mm. 00994 Applications do this via NtMapUserPhysicalPages - ie: 00995 00996 - Each physical page allocated is set in the process's bitmap. 00997 This provides remap, free and unmap a way to validate and rundown 00998 these frames. 00999 01000 Unmaps may result in a walk of the entire bitmap, but that's ok as 01001 unmaps should be less frequent. The win is it saves us from 01002 using up system virtual address space to manage these frames. 01003 01004 - Note that the same physical frame may NOT be mapped at two different 01005 virtual addresses in the process. This makes frees and unmaps 01006 substantially faster as no checks for aliasing need be performed. 01007 01008 Arguments: 01009 01010 ProcessHandle - Supplies an open handle to a process object. 01011 01012 NumberOfPages - Supplies a pointer to a variable that supplies the 01013 desired size in pages of the allocation. This is filled 01014 with the actual number of pages allocated. 01015 01016 UserPfnArray - Supplies a pointer to user memory to store the allocated 01017 frame numbers into. 01018 01019 Return Value: 01020 01021 Various NTSTATUS codes. 01022 01023 --*/ 01024 01025 { 01026 ULONG i; 01027 KIRQL OldIrql; 01028 KIRQL OldIrqlPfn; 01029 PEPROCESS Process; 01030 KPROCESSOR_MODE PreviousMode; 01031 NTSTATUS Status; 01032 LOGICAL Attached; 01033 LOGICAL WsHeld; 01034 ULONG_PTR CapturedNumberOfPages; 01035 ULONG_PTR AllocatedPages; 01036 ULONG_PTR MdlRequestInPages; 01037 ULONG_PTR TotalAllocatedPages; 01038 PMDL MemoryDescriptorList; 01039 PMDL MemoryDescriptorList2; 01040 PMDL MemoryDescriptorHead; 01041 PPFN_NUMBER MdlPage; 01042 PRTL_BITMAP BitMap; 01043 ULONG BitMapSize; 01044 ULONG BitMapIndex; 01045 PMMPFN Pfn1; 01046 PHYSICAL_ADDRESS LowAddress; 01047 PHYSICAL_ADDRESS MdlLowAddress; 01048 PHYSICAL_ADDRESS HighAddress; 01049 PHYSICAL_ADDRESS SkipBytes; 01050 PMMPTE PointerPte; 01051 MMPTE OldPteContents; 01052 MMPTE_FLUSH_LIST PteFlushList; 01053 ULONG SizeOfBitMap; 01054 01055 ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL); 01056 01057 Attached = FALSE; 01058 WsHeld = FALSE; 01059 01060 // 01061 // Check the allocation type field. 01062 // 01063 01064 PreviousMode = KeGetPreviousMode(); 01065 01066 // 01067 // Establish an exception handler, probe the specified addresses 01068 // for write access and capture the initial values. 01069 // 01070 01071 try { 01072 01073 // 01074 // Capture the number of pages. 
01075 // 01076 01077 if (PreviousMode != KernelMode) { 01078 01079 ProbeForWritePointer (NumberOfPages); 01080 01081 CapturedNumberOfPages = *NumberOfPages; 01082 01083 if (CapturedNumberOfPages == 0) { 01084 return STATUS_SUCCESS; 01085 } 01086 01087 if (CapturedNumberOfPages > (MAXULONG_PTR / sizeof(ULONG_PTR))) { 01088 return STATUS_INVALID_PARAMETER_2; 01089 } 01090 01091 ProbeForWrite (UserPfnArray, 01092 (ULONG)(CapturedNumberOfPages * sizeof (ULONG_PTR)), 01093 sizeof(PULONG_PTR)); 01094 01095 } 01096 else { 01097 CapturedNumberOfPages = *NumberOfPages; 01098 } 01099 01100 } except (ExSystemExceptionFilter()) { 01101 01102 // 01103 // If an exception occurs during the probe or capture 01104 // of the initial values, then handle the exception and 01105 // return the exception code as the status value. 01106 // 01107 01108 return GetExceptionCode(); 01109 } 01110 01111 // 01112 // Reference the specified process handle for VM_OPERATION access. 01113 // 01114 01115 if (ProcessHandle == NtCurrentProcess()) { 01116 Process = PsGetCurrentProcess(); 01117 } 01118 else { 01119 Status = ObReferenceObjectByHandle ( ProcessHandle, 01120 PROCESS_VM_OPERATION, 01121 PsProcessType, 01122 PreviousMode, 01123 (PVOID *)&Process, 01124 NULL ); 01125 01126 if (!NT_SUCCESS(Status)) { 01127 return Status; 01128 } 01129 } 01130 01131 if (!SeSinglePrivilegeCheck (SeLockMemoryPrivilege, PreviousMode)) { 01132 if (ProcessHandle != NtCurrentProcess()) { 01133 ObDereferenceObject (Process); 01134 } 01135 return STATUS_PRIVILEGE_NOT_HELD; 01136 } 01137 01138 // 01139 // If the specified process is not the current process, attach 01140 // to the specified process. 01141 // 01142 01143 if (PsGetCurrentProcess() != Process) { 01144 KeAttachProcess (&Process->Pcb); 01145 Attached = TRUE; 01146 } 01147 01148 BitMapSize = 0; 01149 01150 // 01151 // Get the working set mutex to synchronize. This also blocks APCs so 01152 // an APC which takes a page fault does not corrupt various structures. 01153 // 01154 01155 LOCK_WS (Process); 01156 01157 WsHeld = TRUE; 01158 01159 // 01160 // Make sure the address space was not deleted, If so, return an error. 01161 // 01162 01163 if (Process->AddressSpaceDeleted != 0) { 01164 Status = STATUS_PROCESS_IS_TERMINATING; 01165 goto ErrorReturn; 01166 } 01167 01168 // 01169 // Create the physical pages bitmap if it does not already exist. 01170 // LockMemory privilege is required. 01171 // 01172 01173 BitMap = Process->VadPhysicalPagesBitMap; 01174 01175 if (BitMap == NULL) { 01176 01177 BitMapSize = sizeof(RTL_BITMAP) + (ULONG)((((MmHighestPossiblePhysicalPage + 1) + 31) / 32) * 4); 01178 01179 BitMap = ExAllocatePoolWithTag (NonPagedPool, BitMapSize, 'LdaV'); 01180 01181 if (BitMap == NULL) { 01182 Status = STATUS_INSUFFICIENT_RESOURCES; 01183 goto ErrorReturn; 01184 } 01185 01186 RtlInitializeBitMap (BitMap, 01187 (PULONG)(BitMap + 1), 01188 (ULONG)(MmHighestPossiblePhysicalPage + 1)); 01189 01190 RtlClearAllBits (BitMap); 01191 01192 try { 01193 01194 // 01195 // Charge quota for the nonpaged pool for the bitmap. This is 01196 // done here rather than by using ExAllocatePoolWithQuota 01197 // so the process object is not referenced by the quota charge. 
01198 // 01199 01200 PsChargePoolQuota (Process, NonPagedPool, BitMapSize); 01201 01202 } except (EXCEPTION_EXECUTE_HANDLER) { 01203 Status = GetExceptionCode(); 01204 ExFreePool (BitMap); 01205 goto ErrorReturn; 01206 } 01207 01208 SizeOfBitMap = BitMap->SizeOfBitMap; 01209 } 01210 else { 01211 01212 // 01213 // It's ok to snap this without a lock. 01214 // 01215 01216 SizeOfBitMap = Process->VadPhysicalPagesBitMap->SizeOfBitMap; 01217 } 01218 01219 AllocatedPages = 0; 01220 TotalAllocatedPages = 0; 01221 MemoryDescriptorHead = NULL; 01222 01223 SkipBytes.QuadPart = 0; 01224 01225 // 01226 // Allocate from the top of memory going down to preserve low pages 01227 // for 32/24-bit device drivers. Just under 4gb is the maximum allocation 01228 // per MDL so the ByteCount field does not overflow. 01229 // 01230 01231 HighAddress.QuadPart = ((ULONGLONG)(SizeOfBitMap - 1)) << PAGE_SHIFT; 01232 01233 if (HighAddress.QuadPart > (ULONGLONG)0x100000000) { 01234 LowAddress.QuadPart = (ULONGLONG)0x100000000; 01235 } 01236 else { 01237 LowAddress.QuadPart = LOWEST_USABLE_PHYSICAL_ADDRESS; 01238 if (LowAddress.QuadPart >= HighAddress.QuadPart) { 01239 if (BitMapSize) { 01240 ExFreePool (BitMap); 01241 PsReturnPoolQuota (Process, NonPagedPool, BitMapSize); 01242 } 01243 Status = STATUS_INSUFFICIENT_RESOURCES; 01244 goto ErrorReturn; 01245 } 01246 } 01247 01248 MdlLowAddress = LowAddress; 01249 01250 do { 01251 01252 MdlRequestInPages = CapturedNumberOfPages - TotalAllocatedPages; 01253 01254 if (MdlRequestInPages > (ULONG_PTR)((MAXULONG - PAGE_SIZE) >> PAGE_SHIFT)) { 01255 MdlRequestInPages = (ULONG_PTR)((MAXULONG - PAGE_SIZE) >> PAGE_SHIFT); 01256 } 01257 01258 // 01259 // Note this allocation returns zeroed pages. 01260 // 01261 01262 MemoryDescriptorList = MmAllocatePagesForMdl (MdlLowAddress, 01263 HighAddress, 01264 SkipBytes, 01265 MdlRequestInPages << PAGE_SHIFT); 01266 01267 if (MemoryDescriptorList != NULL) { 01268 MemoryDescriptorList->Next = MemoryDescriptorHead; 01269 MemoryDescriptorHead = MemoryDescriptorList; 01270 01271 MdlPage = (PPFN_NUMBER)(MemoryDescriptorList + 1); 01272 01273 AllocatedPages = MemoryDescriptorList->ByteCount >> PAGE_SHIFT; 01274 TotalAllocatedPages += AllocatedPages; 01275 01276 LOCK_PFN (OldIrqlPfn); 01277 MmVadPhysicalPages += AllocatedPages; 01278 UNLOCK_PFN (OldIrqlPfn); 01279 01280 // 01281 // The per-process WS lock guards updates to 01282 // Process->VadPhysicalPages. 01283 // 01284 01285 Process->VadPhysicalPages += AllocatedPages; 01286 01287 #if defined(_ALPHA_) && !defined(_AXP64_) 01288 if (BitMapSize == 0) { 01289 LOCK_AWE (Process, OldIrql); 01290 } 01291 #endif 01292 01293 // 01294 // Update the allocation bitmap for each allocated frame. 01295 // Note the PFN lock is not needed to modify the PteAddress below. 01296 // In fact, even the AWE lock is not needed (except on Alpha32 due 01297 // to word tearing in an already existing bitmap) as these pages 01298 // are brand new. 01299 // 01300 01301 for (i = 0; i < AllocatedPages; i += 1) { 01302 01303 ASSERT (*MdlPage >= LOWEST_USABLE_PHYSICAL_PAGE); 01304 01305 BitMapIndex = MI_FRAME_TO_BITMAP_INDEX(*MdlPage); 01306 01307 ASSERT (BitMapIndex < BitMap->SizeOfBitMap); 01308 ASSERT (RtlCheckBit (BitMap, BitMapIndex) == 0); 01309 01310 #if defined (_WIN64) 01311 // 01312 // This may become a problem for 64-bit systems with > 32tb 01313 // of physical memory as the 2nd parameter to RtlSetBits is 01314 // a ULONG. 
01315 // 01316 01317 ASSERT (*MdlPage < 0x100000000); 01318 #endif 01319 01320 Pfn1 = MI_PFN_ELEMENT (*MdlPage); 01321 ASSERT (MI_PFN_IS_AWE (Pfn1)); 01322 Pfn1->PteAddress = (PMMPTE)0; 01323 ASSERT (Pfn1->u2.ShareCount == 1); 01324 01325 RtlSetBits (BitMap, BitMapIndex, 1L); 01326 01327 MdlPage += 1; 01328 } 01329 01330 #if defined(_ALPHA_) && !defined(_AXP64_) 01331 if (BitMapSize == 0) { 01332 UNLOCK_AWE (Process, OldIrql); 01333 } 01334 #endif 01335 01336 ASSERT (TotalAllocatedPages <= CapturedNumberOfPages); 01337 01338 if (TotalAllocatedPages == CapturedNumberOfPages) { 01339 break; 01340 } 01341 01342 // 01343 // Try the same memory range again - there might be more pages 01344 // left in it that can be claimed as a truncated MDL had to be 01345 // used for the last request. 01346 // 01347 01348 continue; 01349 } 01350 01351 if (LowAddress.QuadPart == LOWEST_USABLE_PHYSICAL_ADDRESS) { 01352 01353 // 01354 // No (more) pages available. If this becomes a common situation, 01355 // all the working sets could be flushed here. 01356 // 01357 01358 if (TotalAllocatedPages == 0) { 01359 if (BitMapSize) { 01360 ExFreePool (BitMap); 01361 PsReturnPoolQuota (Process, NonPagedPool, BitMapSize); 01362 } 01363 Status = STATUS_INSUFFICIENT_RESOURCES; 01364 goto ErrorReturn; 01365 } 01366 01367 // 01368 // Make do with what we've gotten so far. 01369 // 01370 01371 break; 01372 } 01373 01374 ASSERT (HighAddress.QuadPart > (ULONGLONG)0x100000000); 01375 01376 HighAddress.QuadPart = (ULONGLONG)0x100000000 - 1; 01377 LowAddress.QuadPart = LOWEST_USABLE_PHYSICAL_ADDRESS; 01378 01379 MdlLowAddress = LowAddress; 01380 01381 } while (TRUE); 01382 01383 ASSERT (TotalAllocatedPages != 0); 01384 01385 if (BitMapSize != 0) { 01386 01387 // 01388 // If this API resulted in the creation of the bitmap, then set it 01389 // in the process structure now. No need for locking around this. 01390 // 01391 01392 Process->VadPhysicalPagesBitMap = BitMap; 01393 } 01394 01395 UNLOCK_WS (Process); 01396 WsHeld = FALSE; 01397 01398 if (Attached == TRUE) { 01399 KeDetachProcess(); 01400 Attached = FALSE; 01401 } 01402 01403 // 01404 // Establish an exception handler and carefully write out the 01405 // number of pages and the frame numbers. 01406 // 01407 01408 try { 01409 01410 ASSERT (TotalAllocatedPages <= CapturedNumberOfPages); 01411 01412 *NumberOfPages = TotalAllocatedPages; 01413 01414 MemoryDescriptorList = MemoryDescriptorHead; 01415 01416 while (MemoryDescriptorList != NULL) { 01417 01418 MdlPage = (PPFN_NUMBER)(MemoryDescriptorList + 1); 01419 AllocatedPages = MemoryDescriptorList->ByteCount >> PAGE_SHIFT; 01420 01421 for (i = 0; i < AllocatedPages; i += 1) { 01422 *UserPfnArray = *(PULONG_PTR)MdlPage; 01423 ASSERT (MI_PFN_ELEMENT(*MdlPage)->u2.ShareCount == 1); 01424 UserPfnArray += 1; 01425 MdlPage += 1; 01426 } 01427 MemoryDescriptorList = MemoryDescriptorList->Next; 01428 } 01429 01430 Status = STATUS_SUCCESS; 01431 01432 } except (ExSystemExceptionFilter()) { 01433 01434 // 01435 // If anything went wrong communicating the pages back to the user 01436 // then the entire system service is rolled back. 
01437 // 01438 01439 Status = GetExceptionCode(); 01440 01441 MemoryDescriptorList = MemoryDescriptorHead; 01442 01443 PteFlushList.Count = 0; 01444 01445 if (PsGetCurrentProcess() != Process) { 01446 KeAttachProcess (&Process->Pcb); 01447 Attached = TRUE; 01448 } 01449 01450 LOCK_WS (Process); 01451 WsHeld = TRUE; 01452 01453 if (Process->AddressSpaceDeleted != 0) { 01454 Status = STATUS_PROCESS_IS_TERMINATING; 01455 goto ErrorReturn; 01456 } 01457 01458 // 01459 // AWE lock protection is needed here to prevent the malicious app 01460 // that is mapping these pages between our allocation and our free 01461 // below. 01462 // 01463 01464 LOCK_AWE (Process, OldIrql); 01465 01466 while (MemoryDescriptorList != NULL) { 01467 01468 AllocatedPages = MemoryDescriptorList->ByteCount >> PAGE_SHIFT; 01469 MdlPage = (PPFN_NUMBER)(MemoryDescriptorList + 1); 01470 01471 for (i = 0; i < AllocatedPages; i += 1) { 01472 01473 BitMapIndex = MI_FRAME_TO_BITMAP_INDEX(*MdlPage); 01474 01475 ASSERT (BitMapIndex < BitMap->SizeOfBitMap); 01476 ASSERT (RtlCheckBit (BitMap, BitMapIndex) == 1); 01477 01478 #if defined (_WIN64) 01479 // 01480 // This may become a problem for 64-bit systems with > 32tb 01481 // of physical memory as the 2nd parameter to RtlSetBits is 01482 // a ULONG. 01483 // 01484 01485 ASSERT (*MdlPage < 0x100000000); 01486 #endif 01487 RtlClearBits (BitMap, BitMapIndex, 1L); 01488 01489 // 01490 // Note the PFN lock is not needed for the operations below. 01491 // 01492 01493 Pfn1 = MI_PFN_ELEMENT (*MdlPage); 01494 ASSERT (MI_PFN_IS_AWE (Pfn1)); 01495 01496 // 01497 // The frame cannot be currently mapped in any Vad unless a 01498 // malicious app is trying random pages in an attempt to 01499 // corrupt the system. Prevent this behavior by checking 01500 // the sharecount and handling it properly here. 01501 // 01502 01503 if (Pfn1->u2.ShareCount != 1) { 01504 01505 ASSERT (Pfn1->u2.ShareCount == 2); 01506 01507 Pfn1->u2.ShareCount -= 1; 01508 01509 PointerPte = Pfn1->PteAddress; 01510 Pfn1->PteAddress = (PMMPTE)0; 01511 01512 OldPteContents = *PointerPte; 01513 01514 ASSERT (OldPteContents.u.Hard.Valid == 1); 01515 01516 if (PteFlushList.Count != MM_MAXIMUM_FLUSH_COUNT) { 01517 PteFlushList.FlushVa[PteFlushList.Count] = 01518 MiGetVirtualAddressMappedByPte (PointerPte); 01519 PteFlushList.FlushPte[PteFlushList.Count] = PointerPte; 01520 PteFlushList.Count += 1; 01521 } 01522 01523 MI_WRITE_INVALID_PTE (PointerPte, ZeroPte); 01524 } 01525 01526 ASSERT (Pfn1->u2.ShareCount == 1); 01527 01528 MI_SET_PFN_DELETED(Pfn1); 01529 01530 MdlPage += 1; 01531 } 01532 01533 Process->VadPhysicalPages -= AllocatedPages; 01534 LOCK_PFN (OldIrqlPfn); 01535 MmVadPhysicalPages -= AllocatedPages; 01536 UNLOCK_PFN (OldIrqlPfn); 01537 MemoryDescriptorList = MemoryDescriptorList->Next; 01538 } 01539 01540 // 01541 // Flush the TB entries for any pages which a malicious user may 01542 // have mapped. Note ZeroPte is only used when the FlushPte[0] 01543 // field is nonzero or if only a single PTE is being flushed. 01544 // 01545 01546 MiFlushUserPhysicalPteList (&PteFlushList); 01547 01548 // 01549 // Carefully check to see if the bitmap can be freed. 01550 // Remember the working set mutex was dropped, so other threads may 01551 // have created or added to the bitmap. If there is no one else using 01552 // the bitmap and it's empty, free the bitmap and return its quota. 01553 // Keep in mind that this thread may not have even been the creator. 
01554 // 01555 01556 if (Process->VadPhysicalPages == 0) { 01557 01558 BitMap = Process->VadPhysicalPagesBitMap; 01559 01560 ASSERT (BitMap != NULL); 01561 ASSERT (RtlFindSetBits (BitMap, 1, 0) == 0xFFFFFFFF); 01562 01563 BitMapSize = sizeof(RTL_BITMAP) + (ULONG)((((MmHighestPossiblePhysicalPage + 1) + 31) / 32) * 4); 01564 Process->VadPhysicalPagesBitMap = NULL; 01565 ExFreePool (BitMap); 01566 PsReturnPoolQuota (Process, NonPagedPool, BitMapSize); 01567 } 01568 else { 01569 ASSERT (Process->VadPhysicalPagesBitMap != NULL); 01570 } 01571 01572 UNLOCK_AWE (Process, OldIrql); 01573 01574 UNLOCK_WS (Process); 01575 01576 WsHeld = FALSE; 01577 01578 // 01579 // Now that we're back at APC level or below, free the pages. 01580 // 01581 01582 MemoryDescriptorList = MemoryDescriptorHead; 01583 while (MemoryDescriptorList != NULL) { 01584 MmFreePagesFromMdl (MemoryDescriptorList); 01585 MemoryDescriptorList = MemoryDescriptorList->Next; 01586 } 01587 01588 // 01589 // Fall through... 01590 // 01591 } 01592 01593 // 01594 // Free the space consumed by the MDLs now that the page frame numbers 01595 // have been saved in the bitmap and copied to the user. 01596 // 01597 01598 MemoryDescriptorList = MemoryDescriptorHead; 01599 while (MemoryDescriptorList != NULL) { 01600 MemoryDescriptorList2 = MemoryDescriptorList->Next; 01601 ExFreePool (MemoryDescriptorList); 01602 MemoryDescriptorList = MemoryDescriptorList2; 01603 } 01604 01605 ErrorReturn: 01606 01607 if (WsHeld == TRUE) { 01608 UNLOCK_WS (Process); 01609 } 01610 01611 if (Attached == TRUE) { 01612 KeDetachProcess(); 01613 } 01614 01615 if (ProcessHandle != NtCurrentProcess()) { 01616 ObDereferenceObject (Process); 01617 } 01618 01619 return Status; 01620 }
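
From user mode this service is normally reached through the Win32 AWE wrappers rather than invoked directly. Below is a minimal sketch of the allocation side; it assumes the documented AllocateUserPhysicalPages and VirtualAlloc(MEM_RESERVE | MEM_PHYSICAL) wrappers and that SeLockMemoryPrivilege is already held and enabled for the caller (the same privilege checked by the service above). Error handling is trimmed.

    #include <windows.h>
    #include <stdio.h>

    int main (void)
    {
        SYSTEM_INFO SystemInfo;
        ULONG_PTR RequestedPages;
        ULONG_PTR GrantedPages;
        PULONG_PTR PfnArray;
        PVOID Window;

        GetSystemInfo (&SystemInfo);

        RequestedPages = 64;
        GrantedPages = RequestedPages;

        PfnArray = (PULONG_PTR) HeapAlloc (GetProcessHeap (),
                                           0,
                                           RequestedPages * sizeof (ULONG_PTR));

        //
        // The wrapper ends up in NtAllocateUserPhysicalPages.  Note the page
        // count is IN OUT - fewer pages than requested may be granted, so the
        // returned count must be checked.
        //

        if (!AllocateUserPhysicalPages (GetCurrentProcess (),
                                        &GrantedPages,
                                        PfnArray)) {
            printf ("AllocateUserPhysicalPages failed %lu\n", GetLastError ());
            return 1;
        }

        printf ("granted %Iu of %Iu pages\n", GrantedPages, RequestedPages);

        //
        // Reserve a virtual window the frames can later be mapped into via
        // NtMapUserPhysicalPages (see the mapping example further down).
        //

        Window = VirtualAlloc (NULL,
                               GrantedPages * SystemInfo.dwPageSize,
                               MEM_RESERVE | MEM_PHYSICAL,
                               PAGE_READWRITE);

        return (Window != NULL) ? 0 : 1;
    }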

NTSTATUS NtFreeUserPhysicalPages (IN HANDLE ProcessHandle, IN OUT PULONG_PTR NumberOfPages, IN PULONG_PTR UserPfnArray)
 

Definition at line 1624 of file physical.c.

References _EPROCESS::AddressSpaceDeleted, ASSERT, BitMap, _MDL::ByteCount, COPY_STACK_SIZE, _MMPTE_FLUSH_LIST::Count, EXCEPTION_EXECUTE_HANDLER, ExFreePool(), ExSystemExceptionFilter(), FALSE, _MMPTE_FLUSH_LIST::FlushPte, _MMPTE_FLUSH_LIST::FlushVa, KeAttachProcess(), KeDetachProcess(), KernelMode, KPROCESSOR_MODE, L, LOCK_AWE, LOCK_PFN2, LOCK_WS, LOWEST_USABLE_PHYSICAL_PAGE, MI_FRAME_TO_BITMAP_INDEX, MI_PFN_ELEMENT, MI_PFN_IS_AWE, MI_SET_PFN_DELETED, MI_WRITE_INVALID_PTE, MiFlushUserPhysicalPteList(), MiGetVirtualAddressMappedByPte, MM_MAXIMUM_FLUSH_COUNT, MmCreateMdl(), MmFreePagesFromMdl(), MmInitializeMdl, MmVadPhysicalPages, NT_SUCCESS, NTSTATUS(), NULL, ObDereferenceObject, ObReferenceObjectByHandle(), PAGE_SHIFT, PASSIVE_LEVEL, _EPROCESS::Pcb, ProbeForRead, ProbeForWritePointer, PsGetCurrentProcess, PsProcessType, RtlClearBits(), Status, TRUE, _MMPTE::u, UNLOCK_AWE, UNLOCK_PFN2, UNLOCK_WS, _EPROCESS::VadPhysicalPages, _EPROCESS::VadPhysicalPagesBitMap, and ZeroPte.

01632 : 01633 01634 This function frees the nonpaged physical pages for the specified 01635 subject process. Any PTEs referencing these pages are also invalidated. 01636 01637 Note there is no need to walk the entire VAD tree to clear the PTEs that 01638 match each page as each physical page can only be mapped at a single 01639 virtual address (alias addresses within the VAD are not allowed). 01640 01641 Arguments: 01642 01643 ProcessHandle - Supplies an open handle to a process object. 01644 01645 NumberOfPages - Supplies the size in pages of the allocation to delete. 01646 Returns the actual number of pages deleted. 01647 01648 UserPfnArray - Supplies a pointer to memory to retrieve the page frame 01649 numbers from. 01650 01651 Return Value: 01652 01653 Various NTSTATUS codes. 01654 01655 --*/ 01656 01657 { 01658 ULONG i; 01659 KIRQL OldIrql; 01660 KIRQL OldIrqlPfn; 01661 ULONG_PTR CapturedNumberOfPages; 01662 PMDL MemoryDescriptorList; 01663 PPFN_NUMBER MdlPage; 01664 PFN_NUMBER PagesInMdl; 01665 PFN_NUMBER PageFrameIndex; 01666 PRTL_BITMAP BitMap; 01667 ULONG BitMapIndex; 01668 ULONG_PTR PagesProcessed; 01669 PFN_NUMBER MdlHack[(sizeof(MDL) / sizeof(PFN_NUMBER)) + COPY_STACK_SIZE]; 01670 ULONG_PTR MdlPages; 01671 ULONG_PTR NumberOfBytes; 01672 PEPROCESS Process; 01673 KPROCESSOR_MODE PreviousMode; 01674 NTSTATUS Status; 01675 LOGICAL Attached; 01676 PMMPFN Pfn1; 01677 LOGICAL WsHeld; 01678 LOGICAL AweLockHeld; 01679 LOGICAL OnePassComplete; 01680 LOGICAL ProcessReferenced; 01681 MMPTE_FLUSH_LIST PteFlushList; 01682 PMMPTE PointerPte; 01683 MMPTE OldPteContents; 01684 01685 ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL); 01686 01687 // 01688 // Establish an exception handler, probe the specified addresses 01689 // for read access and capture the page frame numbers. 01690 // 01691 01692 PreviousMode = KeGetPreviousMode(); 01693 01694 if (PreviousMode != KernelMode) { 01695 01696 try { 01697 01698 ProbeForWritePointer (NumberOfPages); 01699 01700 CapturedNumberOfPages = *NumberOfPages; 01701 01702 // 01703 // Initialize the NumberOfPages freed to zero so the user can be 01704 // reasonably informed about errors that occur midway through 01705 // the transaction. 01706 // 01707 01708 *NumberOfPages = 0; 01709 01710 } except (ExSystemExceptionFilter()) { 01711 01712 // 01713 // If an exception occurs during the probe or capture 01714 // of the initial values, then handle the exception and 01715 // return the exception code as the status value. 01716 // 01717 01718 return GetExceptionCode(); 01719 } 01720 } 01721 else { 01722 CapturedNumberOfPages = *NumberOfPages; 01723 } 01724 01725 if (CapturedNumberOfPages == 0) { 01726 return STATUS_INVALID_PARAMETER_2; 01727 } 01728 01729 OnePassComplete = FALSE; 01730 PagesProcessed = 0; 01731 01732 MemoryDescriptorList = (PMDL)0; 01733 01734 if (CapturedNumberOfPages > COPY_STACK_SIZE) { 01735 01736 // 01737 // Ensure the number of pages can fit into an MDL's ByteCount. 
01738 // 01739 01740 if (CapturedNumberOfPages > ((ULONG)MAXULONG >> PAGE_SHIFT)) { 01741 MdlPages = (ULONG_PTR)((ULONG)MAXULONG >> PAGE_SHIFT); 01742 } 01743 else { 01744 MdlPages = CapturedNumberOfPages; 01745 } 01746 01747 while (MdlPages > COPY_STACK_SIZE) { 01748 MemoryDescriptorList = MmCreateMdl (NULL, 01749 0, 01750 MdlPages << PAGE_SHIFT); 01751 01752 if (MemoryDescriptorList != NULL) { 01753 break; 01754 } 01755 01756 MdlPages >>= 1; 01757 } 01758 } 01759 01760 if (MemoryDescriptorList == NULL) { 01761 MdlPages = COPY_STACK_SIZE; 01762 MemoryDescriptorList = (PMDL)&MdlHack[0]; 01763 } 01764 01765 WsHeld = FALSE; 01766 AweLockHeld = FALSE; 01767 ProcessReferenced = FALSE; 01768 01769 repeat: 01770 01771 if (CapturedNumberOfPages < MdlPages) { 01772 MdlPages = CapturedNumberOfPages; 01773 } 01774 01775 MmInitializeMdl (MemoryDescriptorList, 0, MdlPages << PAGE_SHIFT); 01776 01777 MdlPage = (PPFN_NUMBER)(MemoryDescriptorList + 1); 01778 01779 NumberOfBytes = MdlPages * sizeof(ULONG_PTR); 01780 01781 Attached = FALSE; 01782 01783 // 01784 // Establish an exception handler, probe the specified addresses 01785 // for read access and capture the page frame numbers. 01786 // 01787 01788 if (PreviousMode != KernelMode) { 01789 01790 try { 01791 01792 // 01793 // Update the user's count so if anything goes wrong, the user can 01794 // be reasonably informed about how far into the transaction it 01795 // occurred. 01796 // 01797 01798 *NumberOfPages = PagesProcessed; 01799 01800 ProbeForRead (UserPfnArray, 01801 NumberOfBytes, 01802 sizeof(PULONG_PTR)); 01803 01804 RtlCopyMemory ((PVOID)MdlPage, 01805 UserPfnArray, 01806 NumberOfBytes); 01807 01808 } except (ExSystemExceptionFilter()) { 01809 01810 // 01811 // If an exception occurs during the probe or capture 01812 // of the initial values, then handle the exception and 01813 // return the exception code as the status value. 01814 // 01815 01816 Status = GetExceptionCode(); 01817 goto ErrorReturn; 01818 } 01819 } 01820 else { 01821 RtlCopyMemory ((PVOID)MdlPage, 01822 UserPfnArray, 01823 NumberOfBytes); 01824 } 01825 01826 if (OnePassComplete == FALSE) { 01827 01828 // 01829 // Reference the specified process handle for VM_OPERATION access. 01830 // 01831 01832 if (ProcessHandle == NtCurrentProcess()) { 01833 Process = PsGetCurrentProcess(); 01834 } 01835 else { 01836 Status = ObReferenceObjectByHandle ( ProcessHandle, 01837 PROCESS_VM_OPERATION, 01838 PsProcessType, 01839 PreviousMode, 01840 (PVOID *)&Process, 01841 NULL ); 01842 01843 if (!NT_SUCCESS(Status)) { 01844 goto ErrorReturn; 01845 } 01846 ProcessReferenced = TRUE; 01847 } 01848 } 01849 01850 // 01851 // If the specified process is not the current process, attach 01852 // to the specified process. 01853 // 01854 01855 if (PsGetCurrentProcess() != Process) { 01856 KeAttachProcess (&Process->Pcb); 01857 Attached = TRUE; 01858 } 01859 01860 // 01861 // Get the address creation mutex to block multiple threads from 01862 // creating or deleting address space at the same time and 01863 // get the working set mutex so virtual address descriptors can 01864 // be inserted and walked. Block APCs so an APC which takes a page 01865 // fault does not corrupt various structures. 01866 // 01867 01868 LOCK_WS (Process); 01869 WsHeld = TRUE; 01870 01871 // 01872 // Make sure the address space was not deleted, if so, return an error. 
01873 // 01874 01875 if (Process->AddressSpaceDeleted != 0) { 01876 Status = STATUS_PROCESS_IS_TERMINATING; 01877 goto ErrorReturn; 01878 } 01879 01880 LOCK_AWE (Process, OldIrql); 01881 AweLockHeld = TRUE; 01882 01883 // 01884 // The physical pages bitmap must exist. 01885 // 01886 01887 BitMap = Process->VadPhysicalPagesBitMap; 01888 01889 if (BitMap == NULL) { 01890 Status = STATUS_INVALID_PARAMETER_2; 01891 goto ErrorReturn; 01892 } 01893 01894 PteFlushList.Count = 0; 01895 01896 Status = STATUS_SUCCESS; 01897 01898 for (i = 0; i < MdlPages; i += 1, MdlPage += 1) { 01899 01900 PageFrameIndex = *MdlPage; 01901 BitMapIndex = MI_FRAME_TO_BITMAP_INDEX(PageFrameIndex); 01902 01903 #if defined (_WIN64) 01904 // 01905 // Ensure the frame is a 32-bit number. 01906 // 01907 01908 if (BitMapIndex != PageFrameIndex) { 01909 Status = STATUS_CONFLICTING_ADDRESSES; 01910 break; 01911 } 01912 #endif 01913 01914 // 01915 // Frames past the end of the bitmap are not allowed. 01916 // 01917 01918 if (BitMapIndex >= BitMap->SizeOfBitMap) { 01919 Status = STATUS_CONFLICTING_ADDRESSES; 01920 break; 01921 } 01922 01923 // 01924 // Frames not in the bitmap are not allowed. 01925 // 01926 01927 if (RtlCheckBit (BitMap, BitMapIndex) == 0) { 01928 Status = STATUS_CONFLICTING_ADDRESSES; 01929 break; 01930 } 01931 01932 ASSERT (PageFrameIndex >= LOWEST_USABLE_PHYSICAL_PAGE); 01933 01934 PagesProcessed += 1; 01935 01936 #if defined (_WIN64) 01937 // 01938 // This may become a problem for 64-bit systems with > 32tb 01939 // of physical memory as the 2nd parameter to RtlClearBits is 01940 // a ULONG. 01941 // 01942 01943 ASSERT (PageFrameIndex < 0x100000000); 01944 #endif 01945 01946 RtlClearBits (BitMap, BitMapIndex, 1L); 01947 01948 Pfn1 = MI_PFN_ELEMENT(PageFrameIndex); 01949 01950 ASSERT (MI_PFN_IS_AWE (Pfn1)); 01951 01952 #if DBG 01953 if (Pfn1->u2.ShareCount == 1) { 01954 ASSERT (Pfn1->PteAddress == (PMMPTE)0); 01955 } 01956 else if (Pfn1->u2.ShareCount == 2) { 01957 ASSERT (Pfn1->PteAddress != (PMMPTE)0); 01958 } 01959 else { 01960 ASSERT (FALSE); 01961 } 01962 #endif 01963 01964 // 01965 // If the frame is currently mapped in the Vad then the PTE must 01966 // be cleared and the TB entry flushed. 01967 // 01968 01969 if (Pfn1->u2.ShareCount != 1) { 01970 01971 Pfn1->u2.ShareCount -= 1; 01972 01973 PointerPte = Pfn1->PteAddress; 01974 Pfn1->PteAddress = (PMMPTE)0; 01975 01976 OldPteContents = *PointerPte; 01977 01978 ASSERT (OldPteContents.u.Hard.Valid == 1); 01979 01980 if (PteFlushList.Count != MM_MAXIMUM_FLUSH_COUNT) { 01981 PteFlushList.FlushVa[PteFlushList.Count] = 01982 MiGetVirtualAddressMappedByPte (PointerPte); 01983 PteFlushList.FlushPte[PteFlushList.Count] = PointerPte; 01984 PteFlushList.Count += 1; 01985 } 01986 01987 MI_WRITE_INVALID_PTE (PointerPte, ZeroPte); 01988 } 01989 01990 MI_SET_PFN_DELETED(Pfn1); 01991 } 01992 01993 // 01994 // Flush the TB entries for these pages. Note ZeroPte is only used 01995 // when the FlushPte[0] field is nonzero or if only a single PTE is 01996 // being flushed. 01997 // 01998 01999 MiFlushUserPhysicalPteList (&PteFlushList); 02000 02001 // 02002 // Free the actual pages (this may be a partially filled MDL). 02003 // 02004 02005 PagesInMdl = MdlPage - (PPFN_NUMBER)(MemoryDescriptorList + 1); 02006 02007 // 02008 // Set the ByteCount to the actual number of validated pages - the caller 02009 // may have lied and we have to sync up here to account for any bogus 02010 // frames. 
02011 // 02012 02013 MemoryDescriptorList->ByteCount = (ULONG)(PagesInMdl << PAGE_SHIFT); 02014 02015 if (PagesInMdl != 0) { 02016 Process->VadPhysicalPages -= PagesInMdl; 02017 UNLOCK_AWE (Process, OldIrql); 02018 AweLockHeld = FALSE; 02019 02020 LOCK_PFN2 (OldIrqlPfn); 02021 MmVadPhysicalPages -= PagesInMdl; 02022 UNLOCK_PFN2 (OldIrqlPfn); 02023 02024 MmFreePagesFromMdl (MemoryDescriptorList); 02025 } 02026 else { 02027 if (AweLockHeld == TRUE) { 02028 UNLOCK_AWE (Process, OldIrql); 02029 AweLockHeld = FALSE; 02030 } 02031 } 02032 02033 CapturedNumberOfPages -= PagesInMdl; 02034 02035 if ((Status == STATUS_SUCCESS) && (CapturedNumberOfPages != 0)) { 02036 02037 UNLOCK_WS (Process); 02038 WsHeld = FALSE; 02039 02040 if (Attached == TRUE) { 02041 KeDetachProcess(); 02042 Attached = FALSE; 02043 } 02044 02045 OnePassComplete = TRUE; 02046 ASSERT (MdlPages == PagesInMdl); 02047 UserPfnArray += MdlPages; 02048 02049 // 02050 // Do it all again until all the pages are freed or an error occurs. 02051 // 02052 02053 goto repeat; 02054 } 02055 02056 // 02057 // Fall through. 02058 // 02059 02060 ErrorReturn: 02061 02062 if (AweLockHeld == TRUE) { 02063 UNLOCK_AWE (Process, OldIrql); 02064 } 02065 02066 if (WsHeld == TRUE) { 02067 UNLOCK_WS (Process); 02068 } 02069 02070 // 02071 // Free any pool acquired for holding MDLs. 02072 // 02073 02074 if (MemoryDescriptorList != (PMDL)&MdlHack[0]) { 02075 ExFreePool (MemoryDescriptorList); 02076 } 02077 02078 if (Attached == TRUE) { 02079 KeDetachProcess(); 02080 } 02081 02082 // 02083 // Establish an exception handler and carefully write out the 02084 // number of pages actually processed. 02085 // 02086 02087 try { 02088 02089 *NumberOfPages = PagesProcessed; 02090 02091 } except (EXCEPTION_EXECUTE_HANDLER) { 02092 02093 // 02094 // Return success at this point even if the results 02095 // cannot be written. 02096 // 02097 02098 NOTHING; 02099 } 02100 02101 if (ProcessReferenced == TRUE) { 02102 ObDereferenceObject (Process); 02103 } 02104 02105 return Status; 02106 }
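
The user-mode counterpart is FreeUserPhysicalPages. A short sketch follows, reusing the hypothetical GrantedPages/PfnArray names from the allocation example earlier on this page; the count is IN OUT here as well, so on a partial failure it reports how far the transaction got, mirroring the PagesProcessed handling in the listing.

    ULONG_PTR PagesToFree = GrantedPages;

    //
    // The wrapper ends up in NtFreeUserPhysicalPages.  Frames still mapped
    // into an AWE window are unmapped by the service before being freed, so
    // no explicit unmap is required first.
    //

    if (!FreeUserPhysicalPages (GetCurrentProcess (), &PagesToFree, PfnArray)) {

        //
        // PagesToFree now holds the number of frames processed before the
        // error was hit.
        //

        printf ("FreeUserPhysicalPages failed %lu after %Iu pages\n",
                GetLastError (),
                PagesToFree);
    }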

NTSTATUS NtMapUserPhysicalPages (IN PVOID VirtualAddress, IN ULONG_PTR NumberOfPages, IN PULONG_PTR UserPfnArray OPTIONAL)
 

Definition at line 85 of file physical.c.

References ASSERT, BitMap, COPY_STACK_SIZE, _MMPTE_FLUSH_LIST::Count, _MI_PHYSICAL_VIEW::EndVa, ExAllocatePoolWithTag, EXCEPTION_EXECUTE_HANDLER, ExFreePool(), _MMPTE_FLUSH_LIST::FlushPte, _MMPTE_FLUSH_LIST::FlushVa, LOCK_AWE, LOWEST_USABLE_PHYSICAL_PAGE, MI_FRAME_TO_BITMAP_INDEX, MI_MAKE_VALID_PTE, MI_PFN_ELEMENT, MI_PFN_IS_AWE, MI_SET_PTE_DIRTY, MI_WRITE_INVALID_PTE, MI_WRITE_VALID_PTE, MiFlushUserPhysicalPteList(), MiGetPteAddress, MM_MAXIMUM_FLUSH_COUNT, MM_READWRITE, NonPagedPool, NTSTATUS(), NULL, PAGE_ALIGN, PAGE_SHIFT, PAGE_SIZE, PASSIVE_LEVEL, _EPROCESS::PhysicalVadList, ProbeForRead, PsGetCurrentProcess, _MMPFN::PteAddress, _MI_PHYSICAL_VIEW::StartVa, Status, _MMPTE::u, _MMVAD::u, _MMPFN::u2, UNLOCK_AWE, _MI_PHYSICAL_VIEW::Vad, _EPROCESS::VadPhysicalPagesBitMap, VOID(), and ZeroPte.

00093 : 00094 00095 This function maps the specified nonpaged physical pages into the specified 00096 user address range. 00097 00098 Note no WSLEs are maintained for this range as it is all nonpaged. 00099 00100 Arguments: 00101 00102 VirtualAddress - Supplies a user virtual address within a UserPhysicalPages 00103 Vad. 00104 00105 NumberOfPages - Supplies the number of pages to map. 00106 00107 UserPfnArray - Supplies a pointer to the page frame numbers to map in. 00108 If this is zero, then the virtual addresses are set to 00109 NO_ACCESS. 00110 00111 Return Value: 00112 00113 Various NTSTATUS codes. 00114 00115 --*/ 00116 00117 { 00118 PMMVAD FoundVad; 00119 KIRQL OldIrql; 00120 ULONG_PTR i; 00121 PEPROCESS Process; 00122 PMMPTE PointerPte; 00123 PVOID EndAddress; 00124 PFN_NUMBER PageFrameIndex; 00125 PMMPFN Pfn1; 00126 NTSTATUS Status; 00127 MMPTE_FLUSH_LIST PteFlushList; 00128 PVOID PoolArea; 00129 PPFN_NUMBER FrameList; 00130 ULONG BitMapIndex; 00131 ULONG_PTR StackArray[COPY_STACK_SIZE]; 00132 MMPTE OldPteContents; 00133 MMPTE NewPteContents; 00134 ULONG_PTR NumberOfBytes; 00135 PRTL_BITMAP BitMap; 00136 PLIST_ENTRY NextEntry; 00137 PMI_PHYSICAL_VIEW PhysicalView; 00138 00139 ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL); 00140 00141 if (NumberOfPages > (MAXULONG_PTR / PAGE_SIZE)) { 00142 return STATUS_INVALID_PARAMETER_2; 00143 } 00144 00145 VirtualAddress = PAGE_ALIGN(VirtualAddress); 00146 EndAddress = (PVOID)((PCHAR)VirtualAddress + (NumberOfPages << PAGE_SHIFT) -1); 00147 00148 if (EndAddress <= VirtualAddress) { 00149 return STATUS_INVALID_PARAMETER_2; 00150 } 00151 00152 // 00153 // Carefully probe and capture all user parameters. 00154 // 00155 00156 PoolArea = (PVOID)&StackArray[0]; 00157 00158 if (ARGUMENT_PRESENT(UserPfnArray)) { 00159 00160 NumberOfBytes = NumberOfPages * sizeof(ULONG_PTR); 00161 00162 if (NumberOfPages > COPY_STACK_SIZE) { 00163 PoolArea = ExAllocatePoolWithTag (NonPagedPool, 00164 NumberOfBytes, 00165 'wRmM'); 00166 00167 if (PoolArea == NULL) { 00168 return STATUS_INSUFFICIENT_RESOURCES; 00169 } 00170 } 00171 00172 // 00173 // Capture the specified page frame numbers. 00174 // 00175 00176 try { 00177 ProbeForRead (UserPfnArray, 00178 NumberOfBytes, 00179 sizeof(ULONG_PTR)); 00180 00181 RtlCopyMemory (PoolArea, UserPfnArray, NumberOfBytes); 00182 00183 } except(EXCEPTION_EXECUTE_HANDLER) { 00184 if (PoolArea != (PVOID)&StackArray[0]) { 00185 ExFreePool (PoolArea); 00186 } 00187 return GetExceptionCode(); 00188 } 00189 } 00190 00191 PointerPte = MiGetPteAddress (VirtualAddress); 00192 00193 Process = PsGetCurrentProcess(); 00194 00195 // 00196 // The AWE lock protects insertion/removal of Vads into each process' 00197 // PhysicalVadList. It also protects creation/deletion and adds/removes 00198 // of the VadPhysicalPagesBitMap. Finally, it protects the PFN 00199 // modifications for pages in the bitmap. 00200 // 00201 00202 LOCK_AWE (Process, OldIrql); 00203 00204 // 00205 // The physical pages bitmap must exist. 00206 // 00207 00208 BitMap = Process->VadPhysicalPagesBitMap; 00209 00210 if (BitMap == NULL) { 00211 Status = STATUS_INVALID_PARAMETER_1; 00212 goto ErrorReturn; 00213 } 00214 00215 // 00216 // Note that the PFN lock is not needed to traverse this list (even though 00217 // MmProbeAndLockPages uses it), because all modifications are made while 00218 // also holding the AWE lock. 00219 // 00220 // The PhysicalVadList should typically have just one entry - the view 00221 // we're looking for, so this traverse should be quick. 
00222 // 00223 00224 FoundVad = NULL; 00225 NextEntry = Process->PhysicalVadList.Flink; 00226 while (NextEntry != &Process->PhysicalVadList) { 00227 00228 PhysicalView = CONTAINING_RECORD(NextEntry, 00229 MI_PHYSICAL_VIEW, 00230 ListEntry); 00231 00232 if (PhysicalView->Vad->u.VadFlags.UserPhysicalPages == 1) { 00233 00234 if ((VirtualAddress >= (PVOID)PhysicalView->StartVa) && 00235 (EndAddress <= (PVOID)PhysicalView->EndVa)) { 00236 00237 FoundVad = PhysicalView->Vad; 00238 break; 00239 } 00240 } 00241 00242 NextEntry = NextEntry->Flink; 00243 continue; 00244 } 00245 00246 if (FoundVad == (PMMVAD)NULL) { 00247 00248 // 00249 // No virtual address is reserved at the specified base address, 00250 // return an error. 00251 // 00252 00253 Status = STATUS_INVALID_PARAMETER_1; 00254 goto ErrorReturn; 00255 } 00256 00257 // 00258 // Ensure the PFN element corresponding to each specified page is owned 00259 // by the specified VAD. 00260 // 00261 // Since this ownership can only be changed while holding this process' 00262 // working set lock, the PFN can be scanned here without holding the PFN 00263 // lock. 00264 // 00265 // Note the PFN lock is not needed because any race with MmProbeAndLockPages 00266 // can only result in the I/O going to the old page or the new page. 00267 // If the user breaks the rules, the PFN database (and any pages being 00268 // windowed here) are still protected because of the reference counts 00269 // on the pages with inprogress I/O. This is possible because NO pages 00270 // are actually freed here - they are just windowed. 00271 // 00272 00273 PteFlushList.Count = 0; 00274 00275 if (ARGUMENT_PRESENT(UserPfnArray)) { 00276 00277 // 00278 // By keeping the PFN bitmap in the VAD (instead of in the PFN 00279 // database itself), a few benefits are realized: 00280 // 00281 // 1. No need to acquire the PFN lock here. 00282 // 2. Faster handling of PFN databases with holes. 00283 // 3. Transparent support for dynamic PFN database growth. 00284 // 4. Less nonpaged memory is used (for the bitmap vs adding a 00285 // field to the PFN) on systems with no unused pack space in 00286 // the PFN database, presuming not many of these VADs get 00287 // allocated. 00288 // 00289 00290 // 00291 // The first pass here ensures all the frames are secure. 00292 // 00293 00294 // 00295 // N.B. This implies that PFN_NUMBER is always ULONG_PTR in width 00296 // as PFN_NUMBER is not exposed to application code today. 00297 // 00298 00299 FrameList = (PPFN_NUMBER)PoolArea; 00300 00301 for (i = 0; i < NumberOfPages; i += 1, FrameList += 1) { 00302 00303 PageFrameIndex = *FrameList; 00304 00305 // 00306 // Frames past the end of the bitmap are not allowed. 00307 // 00308 00309 BitMapIndex = MI_FRAME_TO_BITMAP_INDEX(PageFrameIndex); 00310 00311 #if defined (_WIN64) 00312 // 00313 // Ensure the frame is a 32-bit number. 00314 // 00315 00316 if (BitMapIndex != PageFrameIndex) { 00317 Status = STATUS_CONFLICTING_ADDRESSES; 00318 goto ErrorReturn0; 00319 } 00320 #endif 00321 00322 if (BitMapIndex >= BitMap->SizeOfBitMap) { 00323 Status = STATUS_CONFLICTING_ADDRESSES; 00324 goto ErrorReturn0; 00325 } 00326 00327 // 00328 // Frames not in the bitmap are not allowed. 00329 // 00330 00331 if (RtlCheckBit (BitMap, BitMapIndex) == 0) { 00332 Status = STATUS_CONFLICTING_ADDRESSES; 00333 goto ErrorReturn0; 00334 } 00335 00336 // 00337 // The frame must not be already mapped anywhere. 00338 // Or be passed in twice in different spots in the array. 
00339 // 00340 00341 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 00342 00343 if (Pfn1->u2.ShareCount != 1) { 00344 Status = STATUS_INVALID_PARAMETER_3; 00345 goto ErrorReturn0; 00346 } 00347 00348 ASSERT (MI_PFN_IS_AWE (Pfn1)); 00349 00350 // 00351 // Mark the frame as "about to be mapped". 00352 // 00353 00354 Pfn1->u2.ShareCount = 3; 00355 00356 ASSERT (PageFrameIndex >= LOWEST_USABLE_PHYSICAL_PAGE); 00357 } 00358 00359 // 00360 // This pass actually inserts them all into the page table pages and 00361 // the TBs now that we know the frames are good. 00362 // 00363 00364 FrameList = (PPFN_NUMBER)PoolArea; 00365 00366 MI_MAKE_VALID_PTE (NewPteContents, 00367 PageFrameIndex, 00368 MM_READWRITE, 00369 PointerPte); 00370 00371 MI_SET_PTE_DIRTY (NewPteContents); 00372 00373 for (i = 0; i < NumberOfPages; i += 1) { 00374 00375 PageFrameIndex = *FrameList; 00376 NewPteContents.u.Hard.PageFrameNumber = PageFrameIndex; 00377 00378 OldPteContents = *PointerPte; 00379 00380 // 00381 // Flush the TB entry for this page if it's valid. 00382 // 00383 00384 if (OldPteContents.u.Hard.Valid == 1) { 00385 Pfn1 = MI_PFN_ELEMENT (OldPteContents.u.Hard.PageFrameNumber); 00386 ASSERT (Pfn1->PteAddress != (PMMPTE)0); 00387 ASSERT (Pfn1->u2.ShareCount == 2); 00388 Pfn1->u2.ShareCount -= 1; 00389 Pfn1->PteAddress = (PMMPTE)0; 00390 00391 #if defined (_X86PAE_) 00392 (VOID)KeInterlockedSwapPte ((PHARDWARE_PTE)PointerPte, 00393 (PHARDWARE_PTE)&NewPteContents); 00394 #else 00395 *PointerPte = NewPteContents; 00396 #endif 00397 00398 if (PteFlushList.Count != MM_MAXIMUM_FLUSH_COUNT) { 00399 PteFlushList.FlushVa[PteFlushList.Count] = VirtualAddress; 00400 PteFlushList.FlushPte[PteFlushList.Count] = PointerPte; 00401 PteFlushList.Count += 1; 00402 } 00403 } 00404 else { 00405 MI_WRITE_VALID_PTE (PointerPte, NewPteContents); 00406 } 00407 00408 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 00409 ASSERT (Pfn1->PteAddress == (PMMPTE)0); 00410 ASSERT (Pfn1->u2.ShareCount == 3); 00411 Pfn1->u2.ShareCount = 2; 00412 Pfn1->PteAddress = PointerPte; 00413 00414 VirtualAddress = (PVOID)((PCHAR)VirtualAddress + PAGE_SIZE); 00415 PointerPte += 1; 00416 FrameList += 1; 00417 } 00418 } 00419 else { 00420 00421 // 00422 // Set the specified virtual address range to no access. 00423 // 00424 00425 for (i = 0; i < NumberOfPages; i += 1) { 00426 00427 OldPteContents = *PointerPte; 00428 00429 // 00430 // Flush the TB entry for this page if it's valid. 00431 // 00432 00433 if (OldPteContents.u.Hard.Valid == 1) { 00434 00435 Pfn1 = MI_PFN_ELEMENT (OldPteContents.u.Hard.PageFrameNumber); 00436 ASSERT (Pfn1->PteAddress != (PMMPTE)0); 00437 ASSERT (Pfn1->u2.ShareCount == 2); 00438 ASSERT (MI_PFN_IS_AWE (Pfn1)); 00439 Pfn1->u2.ShareCount -= 1; 00440 Pfn1->PteAddress = (PMMPTE)0; 00441 00442 if (PteFlushList.Count != MM_MAXIMUM_FLUSH_COUNT) { 00443 PteFlushList.FlushVa[PteFlushList.Count] = VirtualAddress; 00444 PteFlushList.FlushPte[PteFlushList.Count] = PointerPte; 00445 PteFlushList.Count += 1; 00446 } 00447 } 00448 00449 MI_WRITE_INVALID_PTE (PointerPte, ZeroPte); 00450 00451 VirtualAddress = (PVOID)((PCHAR)VirtualAddress + PAGE_SIZE); 00452 PointerPte += 1; 00453 } 00454 } 00455 00456 // 00457 // Flush the TB entries for these pages. Note ZeroPte is only used 00458 // when the FlushPte[0] field is nonzero or if only a single PTE is 00459 // being flushed. 
00460 // 00461 00462 if (PteFlushList.Count != 0) { 00463 MiFlushUserPhysicalPteList (&PteFlushList); 00464 } 00465 00466 UNLOCK_AWE (Process, OldIrql); 00467 00468 if (PoolArea != (PVOID)&StackArray[0]) { 00469 ExFreePool (PoolArea); 00470 } 00471 00472 return STATUS_SUCCESS; 00473 00474 ErrorReturn0: 00475 00476 while (i != 0) { 00477 FrameList -= 1; 00478 PageFrameIndex = *FrameList; 00479 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 00480 ASSERT (Pfn1->u2.ShareCount == 3); 00481 Pfn1->u2.ShareCount = 1; 00482 i -= 1; 00483 } 00484 00485 ErrorReturn: 00486 00487 UNLOCK_AWE (Process, OldIrql); 00488 00489 if (PoolArea != (PVOID)&StackArray[0]) { 00490 ExFreePool (PoolArea); 00491 } 00492 00493 return Status; 00494 }
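
The user-mode path into this service is the Win32 AWE API; MapUserPhysicalPages is generally understood to reach NtMapUserPhysicalPages above. The following is a minimal sketch (not taken from this source) of the calling sequence an application would use, assuming the process has been granted the "Lock pages in memory" privilege; the page count, variable names, and omitted error handling are illustrative only.

#include <windows.h>

// Minimal AWE sketch: reserve a MEM_PHYSICAL region, allocate physical
// pages, window them into the reserved range, then tear everything down.
// Assumes the "Lock pages in memory" privilege is held; error paths just
// return for brevity.
int main (void)
{
    SYSTEM_INFO SystemInfo;
    ULONG_PTR NumberOfPages;
    PULONG_PTR PfnArray;
    PVOID Region;

    GetSystemInfo (&SystemInfo);
    NumberOfPages = 16;

    PfnArray = (PULONG_PTR) HeapAlloc (GetProcessHeap (),
                                       0,
                                       NumberOfPages * sizeof (ULONG_PTR));
    if (PfnArray == NULL) {
        return 1;
    }

    //
    // Allocate the physical frames; the count may be adjusted downward.
    //

    if (!AllocateUserPhysicalPages (GetCurrentProcess (),
                                    &NumberOfPages,
                                    PfnArray)) {
        return 1;
    }

    //
    // Reserve the window. This creates the UserPhysicalPages VAD that the
    // service searches PhysicalVadList for.
    //

    Region = VirtualAlloc (NULL,
                           NumberOfPages * SystemInfo.dwPageSize,
                           MEM_RESERVE | MEM_PHYSICAL,
                           PAGE_READWRITE);
    if (Region == NULL) {
        return 1;
    }

    //
    // Window the frames into the reserved range.
    //

    if (!MapUserPhysicalPages (Region, NumberOfPages, PfnArray)) {
        return 1;
    }

    ((char *) Region)[0] = 1;       // the pages are now addressable

    //
    // Unmap (a NULL frame array unmaps the range), free the frames and
    // release the reservation.
    //

    MapUserPhysicalPages (Region, NumberOfPages, NULL);
    FreeUserPhysicalPages (GetCurrentProcess (), &NumberOfPages, PfnArray);
    VirtualFree (Region, 0, MEM_RELEASE);
    HeapFree (GetProcessHeap (), 0, PfnArray);
    return 0;
}

Note that the listing above validates every frame against the process' VadPhysicalPagesBitMap (marking it with the ShareCount sentinel) before touching any PTE, so a bad entry anywhere in the array fails the whole call without leaving a partial mapping behind.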

NTSTATUS NtMapUserPhysicalPagesScatter (IN PVOID * VirtualAddresses, IN ULONG_PTR NumberOfPages, IN PULONG_PTR UserPfnArray OPTIONAL)

Definition at line 498 of file physical.c.

References ASSERT, BitMap, COPY_STACK_SIZE, _MMPTE_FLUSH_LIST::Count, _MI_PHYSICAL_VIEW::EndVa, ExAllocatePoolWithTag, EXCEPTION_EXECUTE_HANDLER, ExFreePool(), _MMPTE_FLUSH_LIST::FlushPte, _MMPTE_FLUSH_LIST::FlushVa, LOCK_AWE, LOWEST_USABLE_PHYSICAL_PAGE, MI_FRAME_TO_BITMAP_INDEX, MI_MAKE_VALID_PTE, MI_PFN_ELEMENT, MI_PFN_IS_AWE, MI_SET_PTE_DIRTY, MI_WRITE_INVALID_PTE, MI_WRITE_VALID_PTE, MiFlushUserPhysicalPteList(), MiGetPteAddress, MM_MAXIMUM_FLUSH_COUNT, MM_READWRITE, NonPagedPool, NTSTATUS(), NULL, PAGE_SIZE, PASSIVE_LEVEL, _EPROCESS::PhysicalVadList, ProbeForRead, PsGetCurrentProcess, _MMPFN::PteAddress, _MI_PHYSICAL_VIEW::StartVa, Status, _MMPTE::u, _MMVAD::u, _MMPFN::u2, UNLOCK_AWE, _MI_PHYSICAL_VIEW::Vad, _EPROCESS::VadPhysicalPagesBitMap, VOID(), and ZeroPte.

00506 : 00507 00508 This function maps the specified nonpaged physical pages into the specified 00509 user address range. 00510 00511 Note no WSLEs are maintained for this range as it is all nonpaged. 00512 00513 Arguments: 00514 00515 VirtualAddresses - Supplies a pointer to an array of user virtual addresses 00516 within UserPhysicalPages Vads. Each array entry is 00517 presumed to map a single page. 00518 00519 NumberOfPages - Supplies the number of pages to map. 00520 00521 UserPfnArray - Supplies a pointer to the page frame numbers to map in. 00522 If this is zero, then the virtual addresses are set to 00523 NO_ACCESS. If the array entry is zero then just the 00524 corresponding virtual address is set to NO_ACCESS. 00525 00526 Return Value: 00527 00528 Various NTSTATUS codes. 00529 00530 --*/ 00531 00532 { 00533 PMMVAD FoundVad; 00534 KIRQL OldIrql; 00535 ULONG_PTR i; 00536 PEPROCESS Process; 00537 PMMPTE PointerPte; 00538 PFN_NUMBER PageFrameIndex; 00539 PMMPFN Pfn1; 00540 NTSTATUS Status; 00541 MMPTE_FLUSH_LIST PteFlushList; 00542 PVOID PoolArea; 00543 PVOID *PoolVirtualArea; 00544 PPFN_NUMBER FrameList; 00545 ULONG BitMapIndex; 00546 PVOID StackVirtualArray[COPY_STACK_SIZE]; 00547 ULONG_PTR StackArray[COPY_STACK_SIZE]; 00548 MMPTE OldPteContents; 00549 MMPTE NewPteContents; 00550 ULONG_PTR NumberOfBytes; 00551 PRTL_BITMAP BitMap; 00552 PLIST_ENTRY NextEntry; 00553 PMI_PHYSICAL_VIEW PhysicalView; 00554 PVOID VirtualAddress; 00555 00556 ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL); 00557 00558 if (NumberOfPages > (MAXULONG_PTR / PAGE_SIZE)) { 00559 return STATUS_INVALID_PARAMETER_2; 00560 } 00561 00562 // 00563 // Carefully probe and capture the user virtual address array. 00564 // 00565 00566 PoolArea = (PVOID)&StackArray[0]; 00567 PoolVirtualArea = (PVOID)&StackVirtualArray[0]; 00568 00569 NumberOfBytes = NumberOfPages * sizeof(PVOID); 00570 00571 if (NumberOfPages > COPY_STACK_SIZE) { 00572 PoolVirtualArea = ExAllocatePoolWithTag (NonPagedPool, 00573 NumberOfBytes, 00574 'wRmM'); 00575 00576 if (PoolVirtualArea == NULL) { 00577 return STATUS_INSUFFICIENT_RESOURCES; 00578 } 00579 } 00580 00581 try { 00582 ProbeForRead (VirtualAddresses, 00583 NumberOfBytes, 00584 sizeof(PVOID)); 00585 00586 RtlCopyMemory (PoolVirtualArea, VirtualAddresses, NumberOfBytes); 00587 00588 } except(EXCEPTION_EXECUTE_HANDLER) { 00589 Status = GetExceptionCode(); 00590 goto ErrorReturn2; 00591 } 00592 00593 // 00594 // Carefully probe and capture the user PFN array. 00595 // 00596 00597 if (ARGUMENT_PRESENT(UserPfnArray)) { 00598 00599 NumberOfBytes = NumberOfPages * sizeof(ULONG_PTR); 00600 00601 if (NumberOfPages > COPY_STACK_SIZE) { 00602 PoolArea = ExAllocatePoolWithTag (NonPagedPool, 00603 NumberOfBytes, 00604 'wRmM'); 00605 00606 if (PoolArea == NULL) { 00607 Status = STATUS_INSUFFICIENT_RESOURCES; 00608 goto ErrorReturn2; 00609 } 00610 } 00611 00612 // 00613 // Capture the specified page frame numbers. 00614 // 00615 00616 try { 00617 ProbeForRead (UserPfnArray, 00618 NumberOfBytes, 00619 sizeof(ULONG_PTR)); 00620 00621 RtlCopyMemory (PoolArea, UserPfnArray, NumberOfBytes); 00622 00623 } except(EXCEPTION_EXECUTE_HANDLER) { 00624 Status = GetExceptionCode(); 00625 goto ErrorReturn2; 00626 } 00627 } 00628 00629 Process = PsGetCurrentProcess(); 00630 00631 // 00632 // The AWE lock protects insertion/removal of Vads into each process' 00633 // PhysicalVadList. It also protects creation/deletion and adds/removes 00634 // of the VadPhysicalPagesBitMap. 
Finally, it protects the PFN 00635 // modifications for pages in the bitmap. 00636 // 00637 00638 PhysicalView = NULL; 00639 00640 LOCK_AWE (Process, OldIrql); 00641 00642 // 00643 // The physical pages bitmap must exist. 00644 // 00645 00646 BitMap = Process->VadPhysicalPagesBitMap; 00647 00648 if (BitMap == NULL) { 00649 Status = STATUS_INVALID_PARAMETER_1; 00650 goto ErrorReturn; 00651 } 00652 00653 // 00654 // Note that the PFN lock is not needed to traverse this list (even though 00655 // MmProbeAndLockPages uses it), because all modifications are made while 00656 // also holding the AWE lock. 00657 // 00658 // The PhysicalVadList should typically have just one entry - the view 00659 // we're looking for, so this traverse should be quick. 00660 // 00661 00662 for (i = 0; i < NumberOfPages; i += 1) { 00663 00664 VirtualAddress = PoolVirtualArea[i]; 00665 00666 if (PhysicalView != NULL) { 00667 ASSERT (PhysicalView->Vad->u.VadFlags.UserPhysicalPages == 1); 00668 if ((VirtualAddress >= (PVOID)PhysicalView->StartVa) && 00669 (VirtualAddress <= (PVOID)PhysicalView->EndVa)) { 00670 continue; 00671 } 00672 } 00673 00674 FoundVad = NULL; 00675 NextEntry = Process->PhysicalVadList.Flink; 00676 while (NextEntry != &Process->PhysicalVadList) { 00677 00678 PhysicalView = CONTAINING_RECORD(NextEntry, 00679 MI_PHYSICAL_VIEW, 00680 ListEntry); 00681 00682 if (PhysicalView->Vad->u.VadFlags.UserPhysicalPages == 1) { 00683 00684 if ((VirtualAddress >= (PVOID)PhysicalView->StartVa) && 00685 (VirtualAddress <= (PVOID)PhysicalView->EndVa)) { 00686 00687 FoundVad = PhysicalView->Vad; 00688 break; 00689 } 00690 } 00691 00692 NextEntry = NextEntry->Flink; 00693 continue; 00694 } 00695 00696 if (FoundVad == (PMMVAD)NULL) { 00697 00698 // 00699 // No virtual address is reserved at the specified base address, 00700 // return an error. 00701 // 00702 00703 Status = STATUS_INVALID_PARAMETER_1; 00704 goto ErrorReturn; 00705 } 00706 } 00707 00708 // 00709 // Ensure the PFN element corresponding to each specified page is owned 00710 // by the specified VAD. 00711 // 00712 // Since this ownership can only be changed while holding this process' 00713 // working set lock, the PFN can be scanned here without holding the PFN 00714 // lock. 00715 // 00716 // Note the PFN lock is not needed because any race with MmProbeAndLockPages 00717 // can only result in the I/O going to the old page or the new page. 00718 // If the user breaks the rules, the PFN database (and any pages being 00719 // windowed here) are still protected because of the reference counts 00720 // on the pages with inprogress I/O. This is possible because NO pages 00721 // are actually freed here - they are just windowed. 00722 // 00723 00724 PteFlushList.Count = 0; 00725 00726 if (ARGUMENT_PRESENT(UserPfnArray)) { 00727 00728 // 00729 // By keeping the PFN bitmap in the VAD (instead of in the PFN 00730 // database itself), a few benefits are realized: 00731 // 00732 // 1. No need to acquire the PFN lock here. 00733 // 2. Faster handling of PFN databases with holes. 00734 // 3. Transparent support for dynamic PFN database growth. 00735 // 4. Less nonpaged memory is used (for the bitmap vs adding a 00736 // field to the PFN) on systems with no unused pack space in 00737 // the PFN database, presuming not many of these VADs get 00738 // allocated. 00739 // 00740 00741 // 00742 // The first pass here ensures all the frames are secure. 00743 // 00744 00745 // 00746 // N.B. 
This implies that PFN_NUMBER is always ULONG_PTR in width 00747 // as PFN_NUMBER is not exposed to application code today. 00748 // 00749 00750 FrameList = (PPFN_NUMBER)PoolArea; 00751 00752 for (i = 0; i < NumberOfPages; i += 1, FrameList += 1) { 00753 00754 PageFrameIndex = *FrameList; 00755 00756 // 00757 // Zero entries are treated as a command to unmap. 00758 // 00759 00760 if (PageFrameIndex == 0) { 00761 continue; 00762 } 00763 00764 // 00765 // Frames past the end of the bitmap are not allowed. 00766 // 00767 00768 BitMapIndex = MI_FRAME_TO_BITMAP_INDEX(PageFrameIndex); 00769 00770 #if defined (_WIN64) 00771 // 00772 // Ensure the frame is a 32-bit number. 00773 // 00774 00775 if (BitMapIndex != PageFrameIndex) { 00776 Status = STATUS_CONFLICTING_ADDRESSES; 00777 goto ErrorReturn0; 00778 } 00779 #endif 00780 00781 if (BitMapIndex >= BitMap->SizeOfBitMap) { 00782 Status = STATUS_CONFLICTING_ADDRESSES; 00783 goto ErrorReturn0; 00784 } 00785 00786 // 00787 // Frames not in the bitmap are not allowed. 00788 // 00789 00790 if (RtlCheckBit (BitMap, BitMapIndex) == 0) { 00791 Status = STATUS_CONFLICTING_ADDRESSES; 00792 goto ErrorReturn0; 00793 } 00794 00795 // 00796 // The frame must not be already mapped anywhere. 00797 // Or be passed in twice in different spots in the array. 00798 // 00799 00800 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 00801 00802 if (Pfn1->u2.ShareCount != 1) { 00803 Status = STATUS_INVALID_PARAMETER_3; 00804 goto ErrorReturn0; 00805 } 00806 00807 ASSERT (MI_PFN_IS_AWE (Pfn1)); 00808 00809 // 00810 // Mark the frame as "about to be mapped". 00811 // 00812 00813 Pfn1->u2.ShareCount = 3; 00814 00815 ASSERT (PageFrameIndex >= LOWEST_USABLE_PHYSICAL_PAGE); 00816 } 00817 00818 // 00819 // This pass actually inserts them all into the page table pages and 00820 // the TBs now that we know the frames are good. 00821 // 00822 00823 FrameList = (PPFN_NUMBER)PoolArea; 00824 00825 MI_MAKE_VALID_PTE (NewPteContents, 00826 PageFrameIndex, 00827 MM_READWRITE, 00828 0); 00829 00830 MI_SET_PTE_DIRTY (NewPteContents); 00831 00832 for (i = 0; i < NumberOfPages; i += 1, FrameList += 1) { 00833 00834 PageFrameIndex = *FrameList; 00835 00836 VirtualAddress = PoolVirtualArea[i]; 00837 PointerPte = MiGetPteAddress (VirtualAddress); 00838 OldPteContents = *PointerPte; 00839 00840 // 00841 // Flush the TB entry for this page if it's valid. 
00842 // 00843 00844 if (OldPteContents.u.Hard.Valid == 1) { 00845 Pfn1 = MI_PFN_ELEMENT (OldPteContents.u.Hard.PageFrameNumber); 00846 ASSERT (Pfn1->PteAddress != (PMMPTE)0); 00847 ASSERT (Pfn1->u2.ShareCount == 2); 00848 ASSERT (MI_PFN_IS_AWE (Pfn1)); 00849 Pfn1->u2.ShareCount -= 1; 00850 Pfn1->PteAddress = (PMMPTE)0; 00851 00852 if (PageFrameIndex != 0) { 00853 00854 NewPteContents.u.Hard.PageFrameNumber = PageFrameIndex; 00855 #if defined (_X86PAE_) 00856 (VOID)KeInterlockedSwapPte ((PHARDWARE_PTE)PointerPte, 00857 (PHARDWARE_PTE)&NewPteContents); 00858 #else 00859 *PointerPte = NewPteContents; 00860 #endif 00861 } 00862 else { 00863 MI_WRITE_INVALID_PTE (PointerPte, ZeroPte); 00864 } 00865 00866 if (PteFlushList.Count != MM_MAXIMUM_FLUSH_COUNT) { 00867 PteFlushList.FlushVa[PteFlushList.Count] = VirtualAddress; 00868 PteFlushList.FlushPte[PteFlushList.Count] = PointerPte; 00869 PteFlushList.Count += 1; 00870 } 00871 } 00872 else { 00873 if (PageFrameIndex != 0) { 00874 NewPteContents.u.Hard.PageFrameNumber = PageFrameIndex; 00875 MI_WRITE_VALID_PTE (PointerPte, NewPteContents); 00876 } 00877 } 00878 00879 if (PageFrameIndex != 0) { 00880 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 00881 ASSERT (Pfn1->PteAddress == (PMMPTE)0); 00882 ASSERT (Pfn1->u2.ShareCount == 3); 00883 Pfn1->u2.ShareCount = 2; 00884 Pfn1->PteAddress = PointerPte; 00885 } 00886 } 00887 } 00888 else { 00889 00890 // 00891 // Set the specified virtual address range to no access. 00892 // 00893 00894 for (i = 0; i < NumberOfPages; i += 1) { 00895 00896 VirtualAddress = PoolVirtualArea[i]; 00897 PointerPte = MiGetPteAddress (VirtualAddress); 00898 OldPteContents = *PointerPte; 00899 00900 // 00901 // Flush the TB entry for this page if it's valid. 00902 // 00903 00904 if (OldPteContents.u.Hard.Valid == 1) { 00905 00906 Pfn1 = MI_PFN_ELEMENT (OldPteContents.u.Hard.PageFrameNumber); 00907 ASSERT (Pfn1->PteAddress != (PMMPTE)0); 00908 ASSERT (Pfn1->u2.ShareCount == 2); 00909 ASSERT (MI_PFN_IS_AWE (Pfn1)); 00910 Pfn1->u2.ShareCount -= 1; 00911 Pfn1->PteAddress = (PMMPTE)0; 00912 00913 if (PteFlushList.Count != MM_MAXIMUM_FLUSH_COUNT) { 00914 PteFlushList.FlushVa[PteFlushList.Count] = VirtualAddress; 00915 PteFlushList.FlushPte[PteFlushList.Count] = PointerPte; 00916 PteFlushList.Count += 1; 00917 } 00918 } 00919 00920 MI_WRITE_INVALID_PTE (PointerPte, ZeroPte); 00921 } 00922 } 00923 00924 // 00925 // Flush the TB entries for these pages. Note ZeroPte is only used 00926 // when the FlushPte[0] field is nonzero or if only a single PTE is 00927 // being flushed. 00928 // 00929 00930 if (PteFlushList.Count != 0) { 00931 MiFlushUserPhysicalPteList (&PteFlushList); 00932 } 00933 00934 Status = STATUS_SUCCESS; 00935 00936 ErrorReturn: 00937 00938 UNLOCK_AWE (Process, OldIrql); 00939 00940 ErrorReturn2: 00941 00942 if (PoolArea != (PVOID)&StackArray[0]) { 00943 ExFreePool (PoolArea); 00944 } 00945 00946 if (PoolVirtualArea != (PVOID)&StackVirtualArray[0]) { 00947 ExFreePool (PoolVirtualArea); 00948 } 00949 00950 return Status; 00951 00952 ErrorReturn0: 00953 00954 while (i != 0) { 00955 FrameList -= 1; 00956 PageFrameIndex = *FrameList; 00957 if (PageFrameIndex != 0) { 00958 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 00959 ASSERT (Pfn1->u2.ShareCount == 3); 00960 ASSERT (MI_PFN_IS_AWE (Pfn1)); 00961 Pfn1->u2.ShareCount = 1; 00962 } 00963 i -= 1; 00964 } 00965 goto ErrorReturn; 00966 }
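
NtMapUserPhysicalPagesScatter takes a per-entry virtual address and, as the listing above shows, treats a zero frame number as an unmap request for just that entry. Below is a hedged user-mode sketch of the corresponding Win32 call, MapUserPhysicalPagesScatter; Region, PfnArray and PageSize are illustrative assumptions (PfnArray[0..1] assumed currently windowed at the first two pages of Region, PfnArray[2..3] allocated but currently unmapped), and error handling is elided.

#include <windows.h>

// Sketch: window a different bank of AWE frames over two pages of a
// MEM_RESERVE | MEM_PHYSICAL region, then unmap one of those pages with a
// zero frame entry. All parameters are assumed to have been set up as
// described in the lead-in; return values are ignored for brevity.
void RotateWindow (PVOID Region, PULONG_PTR PfnArray, SIZE_T PageSize)
{
    PVOID VirtualAddresses[2];
    ULONG_PTR Frames[2];

    VirtualAddresses[0] = (PCHAR) Region;
    VirtualAddresses[1] = (PCHAR) Region + PageSize;

    //
    // Window the spare frames over the same virtual pages. The frames being
    // mapped in must not be mapped anywhere else (the validation pass above
    // rejects frames whose share count is not 1); the frames they displace
    // are unmapped automatically.
    //

    Frames[0] = PfnArray[2];
    Frames[1] = PfnArray[3];
    MapUserPhysicalPagesScatter (VirtualAddresses, 2, Frames);

    //
    // A zero entry unmaps just that entry's virtual address. PfnArray[0]
    // was displaced (unmapped) by the previous call, so it is eligible to
    // be windowed at a new address here.
    //

    Frames[0] = 0;
    Frames[1] = PfnArray[0];
    MapUserPhysicalPagesScatter (VirtualAddresses, 2, Frames);
}

Because the validation pass rejects any frame whose share count is not 1, a frame must already be unmapped before the call in which it is windowed at a new address; unmapping and remapping the same frame cannot be combined in a single call.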


Variable Documentation

ULONG_PTR MmVadPhysicalPages
 

Definition at line 76 of file physical.c.

Referenced by MiUpdateVadPhysicalPages(), NtAllocateUserPhysicalPages(), and NtFreeUserPhysicalPages().

