Main Page | Class Hierarchy | Class List | File List | Class Members | File Members

iosup.c File Reference

#include "mi.h"

Go to the source code of this file.

Classes

struct  _PTE_TRACKER
struct  _SYSPTES_HEADER

Defines

#define MI_PROBE_RAISE_SIZE   10
#define MI_INSTRUMENT_PROBE_RAISES(i)

Typedefs

typedef _PTE_TRACKER PTE_TRACKER
typedef _PTE_TRACKER * PPTE_TRACKER
typedef _SYSPTES_HEADER SYSPTES_HEADER
typedef _SYSPTES_HEADER * PSYSPTES_HEADER

Functions

BOOLEAN MmIsRecursiveIoFault (VOID)
PVOID MiAllocateContiguousMemory (IN SIZE_T NumberOfBytes, IN PFN_NUMBER LowestAcceptablePfn, IN PFN_NUMBER HighestAcceptablePfn, IN PFN_NUMBER BoundaryPfn, PVOID CallingAddress)
PVOID MiMapLockedPagesInUserSpace (IN PMDL MemoryDescriptorList, IN PVOID StartingVa, IN MEMORY_CACHING_TYPE CacheType, IN PVOID BaseVa)
VOID MiUnmapLockedPagesInUserSpace (IN PVOID BaseAddress, IN PMDL MemoryDescriptorList)
LOGICAL MiGetSystemPteAvailability (IN ULONG NumberOfPtes, IN MM_PAGE_PRIORITY Priority)
VOID MiAddMdlTracker (IN PMDL MemoryDescriptorList, IN PVOID CallingAddress, IN PVOID CallersCaller, IN PFN_NUMBER NumberOfPagesToLock, IN ULONG Who)
VOID MiInsertPteTracker (IN PVOID PoolBlock, IN PMDL MemoryDescriptorList, IN PFN_NUMBER NumberOfPtes, IN PVOID MyCaller, IN PVOID MyCallersCaller)
PVOID MiRemovePteTracker (IN PMDL MemoryDescriptorList, IN PVOID PteAddress, IN PFN_NUMBER NumberOfPtes)
VOID MiReleaseDeadPteTrackers (VOID)
VOID MiInsertDeadPteTrackingBlock (IN PVOID PoolBlock)
VOID MiProtectFreeNonPagedPool (IN PVOID VirtualAddress, IN ULONG SizeInPages)
LOGICAL MiUnProtectFreeNonPagedPool (IN PVOID VirtualAddress, IN ULONG SizeInPages)
PVOID MiAllocateLowMemory (IN SIZE_T NumberOfBytes, IN PFN_NUMBER LowestAcceptablePfn, IN PFN_NUMBER HighestAcceptablePfn, IN PFN_NUMBER BoundaryPfn, IN PVOID CallingAddress, IN ULONG Tag)
LOGICAL MiFreeLowMemory (IN PVOID BaseAddress, IN ULONG Tag)
VOID MmProbeAndLockPages (IN OUT PMDL MemoryDescriptorList, IN KPROCESSOR_MODE AccessMode, IN LOCK_OPERATION Operation)
NTKERNELAPI VOID MmProbeAndLockProcessPages (IN OUT PMDL MemoryDescriptorList, IN PEPROCESS Process, IN KPROCESSOR_MODE AccessMode, IN LOCK_OPERATION Operation)
LOGICAL MiFreeMdlTracker (IN OUT PMDL MemoryDescriptorList, IN PFN_NUMBER NumberOfPages)
NTKERNELAPI VOID MmProbeAndLockSelectedPages (IN OUT PMDL MemoryDescriptorList, IN PFILE_SEGMENT_ELEMENT SegmentArray, IN KPROCESSOR_MODE AccessMode, IN LOCK_OPERATION Operation)
VOID MmUnlockPages (IN OUT PMDL MemoryDescriptorList)
VOID MmBuildMdlForNonPagedPool (IN OUT PMDL MemoryDescriptorList)
VOID MiInitializeIoTrackers (VOID)
PVOID MiGetHighestPteConsumer (OUT PULONG_PTR NumberOfPtes)
PVOID MmMapLockedPages (IN PMDL MemoryDescriptorList, IN KPROCESSOR_MODE AccessMode)
PVOID MmMapLockedPagesSpecifyCache (IN PMDL MemoryDescriptorList, IN KPROCESSOR_MODE AccessMode, IN MEMORY_CACHING_TYPE CacheType, IN PVOID RequestedAddress, IN ULONG BugCheckOnFailure, IN MM_PAGE_PRIORITY Priority)
PVOID MiMapSinglePage (IN PVOID VirtualAddress OPTIONAL, IN PFN_NUMBER PageFrameIndex, IN MEMORY_CACHING_TYPE CacheType, IN MM_PAGE_PRIORITY Priority)
VOID MiUnmapSinglePage (IN PVOID VirtualAddress)
VOID MiPhysicalViewInserter (IN PEPROCESS Process, IN PMI_PHYSICAL_VIEW PhysicalView)
VOID MiPhysicalViewRemover (IN PEPROCESS Process, IN PMMVAD Vad)
VOID MiPhysicalViewAdjuster (IN PEPROCESS Process, IN PMMVAD OldVad, IN PMMVAD NewVad)
VOID MmUnmapLockedPages (IN PVOID BaseAddress, IN PMDL MemoryDescriptorList)
PVOID MmMapIoSpace (IN PHYSICAL_ADDRESS PhysicalAddress, IN SIZE_T NumberOfBytes, IN MEMORY_CACHING_TYPE CacheType)
VOID MmUnmapIoSpace (IN PVOID BaseAddress, IN SIZE_T NumberOfBytes)
PVOID MmAllocateContiguousMemorySpecifyCache (IN SIZE_T NumberOfBytes, IN PHYSICAL_ADDRESS LowestAcceptableAddress, IN PHYSICAL_ADDRESS HighestAcceptableAddress, IN PHYSICAL_ADDRESS BoundaryAddressMultiple OPTIONAL, IN MEMORY_CACHING_TYPE CacheType)
PVOID MmAllocateContiguousMemory (IN SIZE_T NumberOfBytes, IN PHYSICAL_ADDRESS HighestAcceptableAddress)
PVOID MmAllocateIndependentPages (IN SIZE_T NumberOfBytes)
BOOLEAN MmSetPageProtection (IN PVOID VirtualAddress, IN SIZE_T NumberOfBytes, IN ULONG NewProtect)
PMDL MmAllocatePagesForMdl (IN PHYSICAL_ADDRESS LowAddress, IN PHYSICAL_ADDRESS HighAddress, IN PHYSICAL_ADDRESS SkipBytes, IN SIZE_T TotalBytes)
VOID MmFreePagesFromMdl (IN PMDL MemoryDescriptorList)
NTSTATUS MmMapUserAddressesToPage (IN PVOID BaseAddress, IN SIZE_T NumberOfBytes, IN PVOID PageAddress)
VOID MmFreeContiguousMemory (IN PVOID BaseAddress)
VOID MmFreeContiguousMemorySpecifyCache (IN PVOID BaseAddress, IN SIZE_T NumberOfBytes, IN MEMORY_CACHING_TYPE CacheType)
PHYSICAL_ADDRESS MmGetPhysicalAddress (IN PVOID BaseAddress)
PVOID MmGetVirtualForPhysical (IN PHYSICAL_ADDRESS PhysicalAddress)
PVOID MmAllocateNonCachedMemory (IN SIZE_T NumberOfBytes)
VOID MmFreeNonCachedMemory (IN PVOID BaseAddress, IN SIZE_T NumberOfBytes)
SIZE_T MmSizeOfMdl (IN PVOID Base, IN SIZE_T Length)
PMDL MmCreateMdl (IN PMDL MemoryDescriptorList OPTIONAL, IN PVOID Base, IN SIZE_T Length)
BOOLEAN MmSetAddressRangeModified (IN PVOID Address, IN SIZE_T Length)
PVOID MiCheckForContiguousMemory (IN PVOID BaseAddress, IN PFN_NUMBER BaseAddressPages, IN PFN_NUMBER SizeInPages, IN PFN_NUMBER LowestPfn, IN PFN_NUMBER HighestPfn, IN PFN_NUMBER BoundaryPfn)
VOID MmLockPagableSectionByHandle (IN PVOID ImageSectionHandle)
VOID MiLockCode (IN PMMPTE FirstPte, IN PMMPTE LastPte, IN ULONG LockType)
NTSTATUS MmGetSectionRange (IN PVOID AddressWithinSection, OUT PVOID *StartingSectionAddress, OUT PULONG SizeofSection)
PVOID MmLockPagableDataSection (IN PVOID AddressWithinSection)
PLDR_DATA_TABLE_ENTRY MiLookupDataTableEntry (IN PVOID AddressWithinSection, IN ULONG ResourceHeld)
VOID MmUnlockPagableImageSection (IN PVOID ImageSectionHandle)
VOID MmMapMemoryDumpMdl (IN OUT PMDL MemoryDumpMdl)
VOID MmReleaseDumpAddresses (IN PFN_NUMBER Pages)
NTSTATUS MmSetBankedSection (IN HANDLE ProcessHandle, IN PVOID VirtualAddress, IN ULONG BankLength, IN BOOLEAN ReadWriteBank, IN PBANKED_SECTION_ROUTINE BankRoutine, IN PVOID Context)
PVOID MmMapVideoDisplay (IN PHYSICAL_ADDRESS PhysicalAddress, IN SIZE_T NumberOfBytes, IN MEMORY_CACHING_TYPE CacheType)
VOID MmUnmapVideoDisplay (IN PVOID BaseAddress, IN SIZE_T NumberOfBytes)
VOID MmLockPagedPool (IN PVOID Address, IN SIZE_T SizeInBytes)
NTKERNELAPI VOID MmUnlockPagedPool (IN PVOID Address, IN SIZE_T SizeInBytes)
NTKERNELAPI ULONG MmGatherMemoryForHibernate (IN PMDL Mdl, IN BOOLEAN Wait)
NTKERNELAPI VOID MmReturnMemoryForHibernate (IN PMDL Mdl)
VOID MmSetKernelDumpRange (IN OUT PVOID pDumpContext)

Variables

PFN_NUMBER MmSystemLockPagesCount
ULONG MmTotalSystemDriverPages
LOGICAL MmTrackPtes = FALSE
BOOLEAN MiTrackPtesAborted = FALSE
SYSPTES_HEADER MiPteHeader
LIST_ENTRY MiDeadPteTrackerListHead
KSPIN_LOCK MiPteTrackerLock
LOCK_HEADER MmLockedPagesHead
BOOLEAN MiTrackingAborted = FALSE
LOGICAL MiNoLowMemory
POOL_DESCRIPTOR NonPagedPoolDescriptor
PFN_NUMBER MmMdlPagesAllocated
KEVENT MmCollidedLockEvent
ULONG MmCollidedLockWait
SIZE_T MmLockedCode
BOOLEAN MiWriteCombiningPtes = FALSE
ULONG MiProbeRaises [MI_PROBE_RAISE_SIZE]
ULONG MmReferenceCountCheck = 2500
PFN_NUMBER MiLastCallLowPage
PFN_NUMBER MiLastCallHighPage
ULONG MiLastCallColor


Define Documentation

#define MI_INSTRUMENT_PROBE_RAISES( i )
 

Value:

ASSERT (i < MI_PROBE_RAISE_SIZE); \ MiProbeRaises[i] += 1;

Definition at line 225 of file iosup.c.

Referenced by MmProbeAndLockPages().

#define MI_PROBE_RAISE_SIZE   10
 

Definition at line 221 of file iosup.c.


Typedef Documentation

typedef struct _PTE_TRACKER * PPTE_TRACKER
 

Referenced by MiGetHighestPteConsumer(), MiInsertPteTracker(), and MiRemovePteTracker().

typedef struct _SYSPTES_HEADER * PSYSPTES_HEADER
 

typedef struct _PTE_TRACKER PTE_TRACKER
 

typedef struct _SYSPTES_HEADER SYSPTES_HEADER
 


Function Documentation

VOID MiAddMdlTracker IN PMDL  MemoryDescriptorList,
IN PVOID  CallingAddress,
IN PVOID  CallersCaller,
IN PFN_NUMBER  NumberOfPagesToLock,
IN ULONG  Who
 

Definition at line 879 of file iosup.c.

References ASSERT, _LOCK_TRACKER::CallersCaller, _LOCK_TRACKER::CallingAddress, _LOCK_HEADER::Count, _LOCK_TRACKER::Count, ExAllocatePoolWithTag, _LOCK_TRACKER::GlobalListEntry, KeBugCheckEx(), _LOCK_TRACKER::Length, _LOCK_TRACKER::ListEntry, _LOCK_HEADER::ListHead, LOCK_PFN2, LOCK_TRACKER, _EPROCESS::LockedPagesList, _LOCK_TRACKER::Mdl, MiTrackingAborted, MmLockedPagesHead, MmTrackLockedPages, NonPagedPool, NULL, _LOCK_TRACKER::Offset, _LOCK_TRACKER::Page, PLOCK_HEADER, PLOCK_TRACKER, _LOCK_TRACKER::Process, _LOCK_TRACKER::StartVa, TRUE, UNLOCK_PFN2, and _LOCK_TRACKER::Who.

Referenced by MmProbeAndLockPages(), and MmProbeAndLockSelectedPages().

00889 : 00890 00891 This routine adds an MDL to the specified process' chain. 00892 00893 Arguments: 00894 00895 MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List 00896 (MDL). The MDL must supply the length. The 00897 physical page portion of the MDL is updated when 00898 the pages are locked in memory. 00899 00900 CallingAddress - Supplies the address of the caller of our caller. 00901 00902 CallersCaller - Supplies the address of the caller of CallingAddress. 00903 00904 NumberOfPagesToLock - Specifies the number of pages to lock. 00905 00906 Who - Specifies which routine is adding the entry. 00907 00908 Return Value: 00909 00910 None - exceptions are raised. 00911 00912 Environment: 00913 00914 Kernel mode. APC_LEVEL and below. 00915 00916 --*/ 00917 00918 { 00919 KIRQL OldIrql; 00920 PEPROCESS Process; 00921 PLOCK_HEADER LockedPagesHeader; 00922 PLOCK_TRACKER Tracker; 00923 PLOCK_TRACKER P; 00924 PLIST_ENTRY NextEntry; 00925 00926 ASSERT (MmTrackLockedPages == TRUE); 00927 00928 Process = MemoryDescriptorList->Process; 00929 00930 if (Process == NULL) { 00931 return; 00932 } 00933 00934 LockedPagesHeader = Process->LockedPagesList; 00935 00936 if (LockedPagesHeader == NULL) { 00937 return; 00938 } 00939 00940 // 00941 // It's ok to check unsynchronized for aborted tracking as the worst case 00942 // is just that one more entry gets added which will be freed later anyway. 00943 // The main purpose behind aborted tracking is that frees and exits don't 00944 // mistakenly bugcheck when an entry cannot be found. 00945 // 00946 00947 if (MiTrackingAborted == TRUE) { 00948 return; 00949 } 00950 00951 Tracker = ExAllocatePoolWithTag (NonPagedPool, 00952 sizeof (LOCK_TRACKER), 00953 'kLmM'); 00954 00955 if (Tracker == NULL) { 00956 00957 // 00958 // It's ok to set this without synchronization as the worst case 00959 // is just that a few more entries gets added which will be freed 00960 // later anyway. 
The main purpose behind aborted tracking is that 00961 // frees and exits don't mistakenly bugcheck when an entry cannot 00962 // be found. 00963 // 00964 00965 MiTrackingAborted = TRUE; 00966 00967 return; 00968 } 00969 00970 Tracker->Mdl = MemoryDescriptorList; 00971 Tracker->Count = NumberOfPagesToLock; 00972 Tracker->StartVa = MemoryDescriptorList->StartVa; 00973 Tracker->Offset = MemoryDescriptorList->ByteOffset; 00974 Tracker->Length = MemoryDescriptorList->ByteCount; 00975 Tracker->Page = *(PPFN_NUMBER)(MemoryDescriptorList + 1); 00976 00977 Tracker->CallingAddress = CallingAddress; 00978 Tracker->CallersCaller = CallersCaller; 00979 00980 Tracker->Who = Who; 00981 Tracker->Process = Process; 00982 00983 LOCK_PFN2 (OldIrql); 00984 00985 // 00986 // Update the list for this process. First make sure it's not already 00987 // inserted. 00988 // 00989 00990 NextEntry = LockedPagesHeader->ListHead.Flink; 00991 while (NextEntry != &LockedPagesHeader->ListHead) { 00992 00993 P = CONTAINING_RECORD (NextEntry, 00994 LOCK_TRACKER, 00995 ListEntry); 00996 00997 if (P->Mdl == MemoryDescriptorList) { 00998 KeBugCheckEx (LOCKED_PAGES_TRACKER_CORRUPTION, 00999 0x1, 01000 (ULONG_PTR)P, 01001 (ULONG_PTR)MemoryDescriptorList, 01002 (ULONG_PTR)MmLockedPagesHead.Count); 01003 } 01004 NextEntry = NextEntry->Flink; 01005 } 01006 01007 InsertHeadList (&LockedPagesHeader->ListHead, &Tracker->ListEntry); 01008 LockedPagesHeader->Count += NumberOfPagesToLock; 01009 01010 // 01011 // Update the systemwide global list. First make sure it's not 01012 // already inserted. 
01013 // 01014 01015 NextEntry = MmLockedPagesHead.ListHead.Flink; 01016 while (NextEntry != &MmLockedPagesHead.ListHead) { 01017 01018 P = CONTAINING_RECORD(NextEntry, 01019 LOCK_TRACKER, 01020 GlobalListEntry); 01021 01022 if (P->Mdl == MemoryDescriptorList) { 01023 KeBugCheckEx (LOCKED_PAGES_TRACKER_CORRUPTION, 01024 0x2, 01025 (ULONG_PTR)P, 01026 (ULONG_PTR)MemoryDescriptorList, 01027 (ULONG_PTR)MmLockedPagesHead.Count); 01028 } 01029 01030 NextEntry = NextEntry->Flink; 01031 } 01032 01033 InsertHeadList (&MmLockedPagesHead.ListHead, 01034 &Tracker->GlobalListEntry); 01035 MmLockedPagesHead.Count += NumberOfPagesToLock; 01036 01037 UNLOCK_PFN2 (OldIrql); 01038 }

PVOID MiAllocateContiguousMemory IN SIZE_T  NumberOfBytes,
IN PFN_NUMBER  LowestAcceptablePfn,
IN PFN_NUMBER  HighestAcceptablePfn,
IN PFN_NUMBER  BoundaryPfn,
PVOID  CallingAddress
 

Definition at line 4107 of file iosup.c.

References APC_LEVEL, ASSERT, BYTES_TO_PAGES, ExAllocatePoolWithTag, ExFreePool(), FALSE, KeDelayExecutionThread(), KernelMode, MiAllocateLowMemory(), MiCheckForContiguousMemory(), MiDelayPageFaults, MiEmptyAllWorkingSets(), MiFindContiguousMemory(), MiFlushAllPages(), MiNoLowMemory, MmHalfSecond, NonPagedPoolCacheAligned, NULL, and TRUE.

Referenced by MmAllocateContiguousMemory(), and MmAllocateContiguousMemorySpecifyCache().

04117 : 04118 04119 This function allocates a range of physically contiguous non-paged 04120 pool. It relies on the fact that non-paged pool is built at 04121 system initialization time from a contiguous range of physical 04122 memory. It allocates the specified size of non-paged pool and 04123 then checks to ensure it is contiguous as pool expansion does 04124 not maintain the contiguous nature of non-paged pool. 04125 04126 This routine is designed to be used by a driver's initialization 04127 routine to allocate a contiguous block of physical memory for 04128 issuing DMA requests from. 04129 04130 Arguments: 04131 04132 NumberOfBytes - Supplies the number of bytes to allocate. 04133 04134 LowestAcceptablePfn - Supplies the lowest page frame number 04135 which is valid for the allocation. 04136 04137 HighestAcceptablePfn - Supplies the highest page frame number 04138 which is valid for the allocation. 04139 04140 BoundaryPfn - Supplies the page frame number multiple the allocation must 04141 not cross. 0 indicates it can cross any boundary. 04142 04143 CallingAddress - Supplies the calling address of the allocator. 04144 04145 Return Value: 04146 04147 NULL - a contiguous range could not be found to satisfy the request. 04148 04149 NON-NULL - Returns a pointer (virtual address in the nonpaged portion 04150 of the system) to the allocated physically contiguous 04151 memory. 04152 04153 Environment: 04154 04155 Kernel mode, IRQL of DISPATCH_LEVEL or below. 
04156 04157 --*/ 04158 04159 { 04160 PVOID BaseAddress; 04161 PFN_NUMBER SizeInPages; 04162 PFN_NUMBER LowestPfn; 04163 PFN_NUMBER HighestPfn; 04164 PFN_NUMBER i; 04165 04166 ASSERT (NumberOfBytes != 0); 04167 04168 #if defined (_X86PAE_) 04169 if (MiNoLowMemory == TRUE) { 04170 if (HighestAcceptablePfn <= 0xFFFFF) { 04171 return MiAllocateLowMemory (NumberOfBytes, 04172 LowestAcceptablePfn, 04173 HighestAcceptablePfn, 04174 BoundaryPfn, 04175 CallingAddress, 04176 'tnoC'); 04177 } 04178 LowestPfn = 0x100000; 04179 } 04180 #endif 04181 04182 BaseAddress = ExAllocatePoolWithTag (NonPagedPoolCacheAligned, 04183 NumberOfBytes, 04184 'mCmM'); 04185 04186 // 04187 // N.B. This setting of SizeInPages to exactly the request size means the 04188 // non-NULL return value from MiCheckForContiguousMemory is guaranteed to 04189 // be the BaseAddress. If this size is ever changed, then the non-NULL 04190 // return value must be checked and split/returned accordingly. 04191 // 04192 04193 SizeInPages = BYTES_TO_PAGES (NumberOfBytes); 04194 04195 LowestPfn = LowestAcceptablePfn; 04196 HighestPfn = HighestAcceptablePfn; 04197 04198 if (BaseAddress != NULL) { 04199 if (MiCheckForContiguousMemory( BaseAddress, 04200 SizeInPages, 04201 SizeInPages, 04202 LowestPfn, 04203 HighestPfn, 04204 BoundaryPfn)) { 04205 04206 return BaseAddress; 04207 } 04208 04209 // 04210 // The allocation from pool does not meet the contiguous 04211 // requirements. Free the page and see if any of the free 04212 // pool pages meet the requirement. 04213 // 04214 04215 ExFreePool (BaseAddress); 04216 04217 } else { 04218 04219 // 04220 // No pool was available, return NULL. 
04221 // 04222 04223 return NULL; 04224 } 04225 04226 if (KeGetCurrentIrql() > APC_LEVEL) { 04227 return NULL; 04228 } 04229 04230 BaseAddress = NULL; 04231 04232 i = 3; 04233 04234 InterlockedIncrement (&MiDelayPageFaults); 04235 04236 for (; ; ) { 04237 BaseAddress = MiFindContiguousMemory (LowestPfn, 04238 HighestPfn, 04239 BoundaryPfn, 04240 SizeInPages, 04241 CallingAddress); 04242 04243 if ((BaseAddress != NULL) || (i == 0)) { 04244 break; 04245 } 04246 04247 // 04248 // Attempt to move pages to the standby list. 04249 // 04250 04251 MiEmptyAllWorkingSets (); 04252 MiFlushAllPages(); 04253 04254 KeDelayExecutionThread (KernelMode, 04255 FALSE, 04256 (PLARGE_INTEGER)&MmHalfSecond); 04257 04258 i -= 1; 04259 } 04260 InterlockedDecrement (&MiDelayPageFaults); 04261 return BaseAddress; 04262 }

PVOID MiAllocateLowMemory IN SIZE_T  NumberOfBytes,
IN PFN_NUMBER  LowestAcceptablePfn,
IN PFN_NUMBER  HighestAcceptablePfn,
IN PFN_NUMBER  BoundaryPfn,
IN PVOID  CallingAddress,
IN ULONG  Tag
 

PVOID MiCheckForContiguousMemory IN PVOID  BaseAddress,
IN PFN_NUMBER  BaseAddressPages,
IN PFN_NUMBER  SizeInPages,
IN PFN_NUMBER  LowestPfn,
IN PFN_NUMBER  HighestPfn,
IN PFN_NUMBER  BoundaryPfn
 

Definition at line 6052 of file iosup.c.

References ASSERT, MI_CONVERT_PHYSICAL_TO_PFN, MI_GET_PAGE_FRAME_FROM_PTE, MI_IS_PHYSICAL_ADDRESS, MiGetPteAddress, MiGetVirtualAddressMappedByPte, NULL, PAGE_SHIFT, and _MMPTE::u.

Referenced by MiAllocateContiguousMemory(), and MiFindContiguousMemory().

06063 : 06064 06065 This routine checks to see if the physical memory mapped 06066 by the specified BaseAddress for the specified size is 06067 contiguous and that the first page is greater than or equal to 06068 the specified LowestPfn and that the last page of the physical memory is 06069 less than or equal to the specified HighestPfn. 06070 06071 Arguments: 06072 06073 BaseAddress - Supplies the base address to start checking at. 06074 06075 BaseAddressPages - Supplies the number of pages to scan from the 06076 BaseAddress. 06077 06078 SizeInPages - Supplies the number of pages in the range. 06079 06080 LowestPfn - Supplies lowest PFN acceptable as a physical page. 06081 06082 HighestPfn - Supplies the highest PFN acceptable as a physical page. 06083 06084 BoundaryPfn - Supplies the PFN multiple the allocation must 06085 not cross. 0 indicates it can cross any boundary. 06086 06087 Return Value: 06088 06089 Returns the usable virtual address within the argument range that the 06090 caller should return to his caller. NULL if there is no usable address. 06091 06092 Environment: 06093 06094 Kernel mode, memory management internal. 
06095 06096 --*/ 06097 06098 { 06099 PMMPTE PointerPte; 06100 PMMPTE LastPte; 06101 PFN_NUMBER PreviousPage; 06102 PFN_NUMBER Page; 06103 PFN_NUMBER HighestStartPage; 06104 PFN_NUMBER LastPage; 06105 PFN_NUMBER OriginalPage; 06106 PFN_NUMBER OriginalLastPage; 06107 PVOID BoundaryAllocation; 06108 PFN_NUMBER BoundaryMask; 06109 ULONG PageCount; 06110 MMPTE PteContents; 06111 06112 BoundaryMask = ~(BoundaryPfn - 1); 06113 06114 if (LowestPfn > HighestPfn) { 06115 return NULL; 06116 } 06117 06118 if (LowestPfn + SizeInPages <= LowestPfn) { 06119 return NULL; 06120 } 06121 06122 if (LowestPfn + SizeInPages > HighestPfn + 1) { 06123 return NULL; 06124 } 06125 06126 if (BaseAddressPages < SizeInPages) { 06127 return NULL; 06128 } 06129 06130 if (MI_IS_PHYSICAL_ADDRESS (BaseAddress)) { 06131 06132 OriginalPage = MI_CONVERT_PHYSICAL_TO_PFN(BaseAddress); 06133 OriginalLastPage = OriginalPage + BaseAddressPages; 06134 06135 Page = OriginalPage; 06136 LastPage = OriginalLastPage; 06137 06138 // 06139 // Close the gaps, then examine the range for a fit. 06140 // 06141 06142 if (Page < LowestPfn) { 06143 Page = LowestPfn; 06144 } 06145 06146 if (LastPage > HighestPfn + 1) { 06147 LastPage = HighestPfn + 1; 06148 } 06149 06150 HighestStartPage = LastPage - SizeInPages; 06151 06152 if (Page > HighestStartPage) { 06153 return NULL; 06154 } 06155 06156 if (BoundaryPfn != 0) { 06157 do { 06158 if (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask) == 0) { 06159 06160 // 06161 // This portion of the range meets the alignment 06162 // requirements. 06163 // 06164 06165 break; 06166 } 06167 Page |= (BoundaryPfn - 1); 06168 Page += 1; 06169 } while (Page <= HighestStartPage); 06170 06171 if (Page > HighestStartPage) { 06172 return NULL; 06173 } 06174 BoundaryAllocation = (PVOID)((PCHAR)BaseAddress + ((Page - OriginalPage) << PAGE_SHIFT)); 06175 06176 // 06177 // The request can be satisfied. Since specific alignment was 06178 // requested, return the fit now without getting fancy. 
06179 // 06180 06181 return BoundaryAllocation; 06182 } 06183 06184 // 06185 // If possible return a chunk on the end to reduce fragmentation. 06186 // 06187 06188 if (LastPage == OriginalLastPage) { 06189 return (PVOID)((PCHAR)BaseAddress + ((BaseAddressPages - SizeInPages) << PAGE_SHIFT)); 06190 } 06191 06192 // 06193 // The end chunk did not satisfy the requirements. The next best option 06194 // is to return a chunk from the beginning. Since that's where the search 06195 // began, just return the current chunk. 06196 // 06197 06198 return (PVOID)((PCHAR)BaseAddress + ((Page - OriginalPage) << PAGE_SHIFT)); 06199 } 06200 06201 // 06202 // Check the virtual addresses for physical contiguity. 06203 // 06204 06205 PointerPte = MiGetPteAddress (BaseAddress); 06206 LastPte = PointerPte + BaseAddressPages; 06207 06208 HighestStartPage = HighestPfn + 1 - SizeInPages; 06209 PageCount = 0; 06210 06211 while (PointerPte < LastPte) { 06212 06213 PteContents = *PointerPte; 06214 ASSERT (PteContents.u.Hard.Valid == 1); 06215 Page = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents); 06216 06217 // 06218 // Before starting a new run, ensure that it 06219 // can satisfy the location & boundary requirements (if any). 06220 // 06221 06222 if (PageCount == 0) { 06223 06224 if ((Page >= LowestPfn) && (Page <= HighestStartPage)) { 06225 06226 if (BoundaryPfn == 0) { 06227 PageCount += 1; 06228 } 06229 else if (((Page ^ (Page + SizeInPages - 1)) & BoundaryMask) == 0) { 06230 // 06231 // This run's physical address meets the alignment 06232 // requirement. 06233 // 06234 06235 PageCount += 1; 06236 } 06237 } 06238 06239 if (PageCount == SizeInPages) { 06240 06241 // 06242 // Success - found a single page satifying the requirements. 
06243 // 06244 06245 BaseAddress = MiGetVirtualAddressMappedByPte (PointerPte); 06246 return BaseAddress; 06247 } 06248 06249 PreviousPage = Page; 06250 PointerPte += 1; 06251 continue; 06252 } 06253 06254 if (Page != PreviousPage + 1) { 06255 06256 // 06257 // This page is not physically contiguous. Start over. 06258 // 06259 06260 PageCount = 0; 06261 continue; 06262 } 06263 06264 PageCount += 1; 06265 06266 if (PageCount == SizeInPages) { 06267 06268 // 06269 // Success - found a page range satifying the requirements. 06270 // 06271 06272 BaseAddress = MiGetVirtualAddressMappedByPte (PointerPte - PageCount + 1); 06273 return BaseAddress; 06274 } 06275 06276 PointerPte += 1; 06277 } 06278 06279 return NULL; 06280 }

LOGICAL MiFreeLowMemory IN PVOID  BaseAddress,
IN ULONG  Tag
 

LOGICAL MiFreeMdlTracker IN OUT PMDL  MemoryDescriptorList,
IN PFN_NUMBER  NumberOfPages
 

Definition at line 1041 of file iosup.c.

References ASSERT, _LOCK_HEADER::Count, _LOCK_TRACKER::Count, ExFreePool(), _LOCK_TRACKER::GlobalListEntry, KeBugCheckEx(), _LOCK_TRACKER::ListEntry, _LOCK_HEADER::ListHead, LOCK_PFN2, _LOCK_TRACKER::Mdl, MiTrackingAborted, MmLockedPagesHead, NULL, _LOCK_TRACKER::Page, TRUE, and UNLOCK_PFN2.

Referenced by MmProbeAndLockSelectedPages(), and MmUnlockPages().

01048 : 01049 01050 This deletes an MDL from the specified process' chain. Used specifically 01051 by MmProbeAndLockSelectedPages () because it builds an MDL in its local 01052 stack and then copies the requested pages into the real MDL. this lets 01053 us track these pages. 01054 01055 Arguments: 01056 01057 MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List 01058 (MDL). The MDL must supply the length. 01059 01060 NumberOfPages - Supplies the number of pages to be freed. 01061 01062 Return Value: 01063 01064 TRUE. 01065 01066 Environment: 01067 01068 Kernel mode. APC_LEVEL and below. 01069 01070 --*/ 01071 { 01072 KIRQL OldIrql; 01073 PLOCK_TRACKER Tracker; 01074 PLIST_ENTRY NextEntry; 01075 PLOCK_HEADER LockedPagesHeader; 01076 PPFN_NUMBER Page; 01077 PLOCK_TRACKER Found; 01078 PVOID PoolToFree; 01079 01080 ASSERT (MemoryDescriptorList->Process != NULL); 01081 01082 LockedPagesHeader = (PLOCK_HEADER)MemoryDescriptorList->Process->LockedPagesList; 01083 01084 if (LockedPagesHeader == NULL) { 01085 return TRUE; 01086 } 01087 01088 Found = NULL; 01089 Page = (PPFN_NUMBER) (MemoryDescriptorList + 1); 01090 01091 LOCK_PFN2 (OldIrql); 01092 01093 NextEntry = LockedPagesHeader->ListHead.Flink; 01094 while (NextEntry != &LockedPagesHeader->ListHead) { 01095 01096 Tracker = CONTAINING_RECORD (NextEntry, 01097 LOCK_TRACKER, 01098 ListEntry); 01099 01100 if (MemoryDescriptorList == Tracker->Mdl) { 01101 01102 if (Found != NULL) { 01103 KeBugCheckEx (LOCKED_PAGES_TRACKER_CORRUPTION, 01104 0x3, 01105 (ULONG_PTR)Found, 01106 (ULONG_PTR)Tracker, 01107 (ULONG_PTR)MemoryDescriptorList); 01108 } 01109 01110 ASSERT (Tracker->Page == *Page); 01111 ASSERT (NumberOfPages == Tracker->Count); 01112 Tracker->Count = (PFN_NUMBER)-1; 01113 RemoveEntryList (NextEntry); 01114 LockedPagesHeader->Count -= NumberOfPages; 01115 01116 RemoveEntryList (&Tracker->GlobalListEntry); 01117 MmLockedPagesHead.Count -= NumberOfPages; 01118 01119 Found = Tracker; 01120 PoolToFree = 
(PVOID)NextEntry; 01121 } 01122 NextEntry = Tracker->ListEntry.Flink; 01123 } 01124 01125 UNLOCK_PFN2 (OldIrql); 01126 01127 if (Found == NULL) { 01128 01129 // 01130 // A driver is trying to unlock pages that aren't locked. 01131 // 01132 01133 if (MiTrackingAborted == TRUE) { 01134 return TRUE; 01135 } 01136 01137 KeBugCheckEx (PROCESS_HAS_LOCKED_PAGES, 01138 1, 01139 (ULONG_PTR)MemoryDescriptorList, 01140 MemoryDescriptorList->Process->NumberOfLockedPages, 01141 (ULONG_PTR)MemoryDescriptorList->Process->LockedPagesList); 01142 } 01143 01144 ExFreePool (PoolToFree); 01145 01146 return TRUE; 01147 }

PVOID MiGetHighestPteConsumer OUT PULONG_PTR  NumberOfPtes  ) 
 

Definition at line 1901 of file iosup.c.

References _PTE_TRACKER::CallingAddress, _PTE_TRACKER::Count, FALSE, _SYSPTES_HEADER::ListHead, MiPteHeader, MiTrackPtesAborted, MmTrackPtes, NULL, PPTE_TRACKER, PsLoadedModuleList, and TRUE.

Referenced by MiReserveSystemPtes2().

01907 : 01908 01909 This function examines the PTE tracking blocks and returns the biggest 01910 consumer. 01911 01912 Arguments: 01913 01914 None. 01915 01916 Return Value: 01917 01918 The loaded module entry of the biggest consumer. 01919 01920 Environment: 01921 01922 Kernel mode, called during bugcheck only. Many locks may be held. 01923 01924 --*/ 01925 01926 { 01927 PPTE_TRACKER Tracker; 01928 PVOID BaseAddress; 01929 PFN_NUMBER NumberOfPages; 01930 PLIST_ENTRY NextEntry; 01931 PLIST_ENTRY NextEntry2; 01932 PLDR_DATA_TABLE_ENTRY DataTableEntry; 01933 ULONG_PTR Highest; 01934 ULONG_PTR PagesByThisModule; 01935 PLDR_DATA_TABLE_ENTRY HighDataTableEntry; 01936 01937 *NumberOfPtes = 0; 01938 01939 // 01940 // No locks are acquired as this is only called during a bugcheck. 01941 // 01942 01943 if (MmTrackPtes == FALSE) { 01944 return NULL; 01945 } 01946 01947 if (MiTrackPtesAborted == TRUE) { 01948 return NULL; 01949 } 01950 01951 if (IsListEmpty(&MiPteHeader.ListHead)) { 01952 return NULL; 01953 } 01954 01955 if (PsLoadedModuleList.Flink == NULL) { 01956 return NULL; 01957 } 01958 01959 Highest = 0; 01960 HighDataTableEntry = NULL; 01961 01962 NextEntry = PsLoadedModuleList.Flink; 01963 while (NextEntry != &PsLoadedModuleList) { 01964 01965 DataTableEntry = CONTAINING_RECORD(NextEntry, 01966 LDR_DATA_TABLE_ENTRY, 01967 InLoadOrderLinks); 01968 01969 PagesByThisModule = 0; 01970 01971 // 01972 // Walk the PTE mapping list and update each driver's counts. 
01973 // 01974 01975 NextEntry2 = MiPteHeader.ListHead.Flink; 01976 while (NextEntry2 != &MiPteHeader.ListHead) { 01977 01978 Tracker = (PPTE_TRACKER) CONTAINING_RECORD (NextEntry2, 01979 PTE_TRACKER, 01980 ListEntry.Flink); 01981 01982 BaseAddress = Tracker->CallingAddress; 01983 NumberOfPages = Tracker->Count; 01984 01985 if ((BaseAddress >= DataTableEntry->DllBase) && 01986 (BaseAddress < (PVOID)((ULONG_PTR)(DataTableEntry->DllBase) + DataTableEntry->SizeOfImage))) { 01987 01988 PagesByThisModule += NumberOfPages; 01989 } 01990 01991 NextEntry2 = NextEntry2->Flink; 01992 01993 } 01994 01995 if (PagesByThisModule > Highest) { 01996 Highest = PagesByThisModule; 01997 HighDataTableEntry = DataTableEntry; 01998 } 01999 02000 NextEntry = NextEntry->Flink; 02001 } 02002 02003 *NumberOfPtes = Highest; 02004 02005 return (PVOID)HighDataTableEntry; 02006 }

LOGICAL MiGetSystemPteAvailability IN ULONG  NumberOfPtes,
IN MM_PAGE_PRIORITY  Priority
 

Definition at line 2030 of file sysptes.c.

References FALSE, HighPagePriority, Index, MM_PTE_TABLE_LIMIT, MmSysPteListBySizeCount, MmSysPteMinimumFree, MmSysPteTables, MmTotalFreeSystemPtes, NormalPagePriority, SystemPteSpace, and TRUE.

Referenced by MiMapSinglePage(), and MmMapLockedPagesSpecifyCache().

02037 : 02038 02039 This routine checks how many SystemPteSpace PTEs are available for the 02040 requested size. If plenty are available then TRUE is returned. 02041 If we are reaching a low resource situation, then the request is evaluated 02042 based on the argument priority. 02043 02044 Arguments: 02045 02046 NumberOfPtes - Supplies the number of PTEs needed. 02047 02048 Priority - Supplies the priority of the request. 02049 02050 Return Value: 02051 02052 TRUE if the caller should allocate the PTEs, FALSE if not. 02053 02054 Environment: 02055 02056 Kernel mode. 02057 02058 --*/ 02059 02060 { 02061 ULONG Index; 02062 ULONG FreePtes; 02063 ULONG FreeBinnedPtes; 02064 02065 if (Priority == HighPagePriority) { 02066 return TRUE; 02067 } 02068 02069 #ifdef _MI_GUARD_PTE_ 02070 NumberOfPtes += 1; 02071 #endif 02072 02073 FreePtes = MmTotalFreeSystemPtes[SystemPteSpace]; 02074 02075 if (NumberOfPtes <= MM_PTE_TABLE_LIMIT) { 02076 Index = MmSysPteTables [NumberOfPtes]; 02077 FreeBinnedPtes = MmSysPteListBySizeCount[Index]; 02078 02079 if (FreeBinnedPtes > MmSysPteMinimumFree[Index]) { 02080 return TRUE; 02081 } 02082 if (FreeBinnedPtes != 0) { 02083 if (Priority == NormalPagePriority) { 02084 if (FreeBinnedPtes > 1 || FreePtes > 512) { 02085 return TRUE; 02086 } 02087 return FALSE; 02088 } 02089 if (FreePtes > 2048) { 02090 return TRUE; 02091 } 02092 return FALSE; 02093 } 02094 } 02095 02096 if (Priority == NormalPagePriority) { 02097 if ((LONG)NumberOfPtes < (LONG)FreePtes - 512) { 02098 return TRUE; 02099 } 02100 return FALSE; 02101 } 02102 02103 if ((LONG)NumberOfPtes < (LONG)FreePtes - 2048) { 02104 return TRUE; 02105 } 02106 return FALSE; 02107 }

VOID MiInitializeIoTrackers VOID   ) 
 

Definition at line 1595 of file iosup.c.

References KeInitializeSpinLock(), _SYSPTES_HEADER::ListHead, _LOCK_HEADER::ListHead, MiDeadPteTrackerListHead, MiPteHeader, MiPteTrackerLock, MmLockedPagesHead, MmTrackLockedPages, MmTrackPtes, and TRUE.

Referenced by MmInitSystem().

VOID
MiInitializeIoTrackers (
    VOID
    )

/*++

Routine Description:

    Initializes the debug tracking structures used to audit system PTE
    mappings and locked pages.  Each set of structures is only set up
    when its corresponding tracking option is enabled.

Arguments:

    None.

Return Value:

    None.

Environment:

    Kernel mode, system initialization (called from MmInitSystem).

--*/

{
    //
    // PTE-mapping tracking: dead-tracker list, its guarding spinlock,
    // and the live tracker list header.
    //

    if (MmTrackPtes != 0) {
        InitializeListHead (&MiDeadPteTrackerListHead);
        KeInitializeSpinLock (&MiPteTrackerLock);
        InitializeListHead (&MiPteHeader.ListHead);
    }

    //
    // Locked-page tracking keeps its own list of MDL tracker entries.
    //

    if (MmTrackLockedPages == TRUE) {
        InitializeListHead (&MmLockedPagesHead.ListHead);
    }
}

VOID MiInsertDeadPteTrackingBlock (IN PVOID PoolBlock)
 

Definition at line 1823 of file iosup.c.

References MiDeadPteTrackerListHead, and MiPteTrackerLock.

Referenced by MmUnmapIoSpace(), and MmUnmapLockedPages().

01829 : 01830 01831 This routine inserts a tracking block into the dead PTE list for later 01832 release. Locks (including the PFN lock) may be held on entry, thus the 01833 block cannot be directly freed to pool at this time. 01834 01835 Arguments: 01836 01837 PoolBlock - Supplies the base pool address to free. 01838 01839 Return Value: 01840 01841 None. 01842 01843 Environment: 01844 01845 Kernel mode. DISPATCH_LEVEL or below, locks may be held. 01846 01847 --*/ 01848 { 01849 KIRQL OldIrql; 01850 01851 ExAcquireSpinLock (&MiPteTrackerLock, &OldIrql); 01852 01853 InsertTailList (&MiDeadPteTrackerListHead, (PLIST_ENTRY)PoolBlock); 01854 01855 ExReleaseSpinLock (&MiPteTrackerLock, OldIrql); 01856 }

VOID MiInsertPteTracker IN PVOID  PoolBlock,
IN PMDL  MemoryDescriptorList,
IN PFN_NUMBER  NumberOfPtes,
IN PVOID  MyCaller,
IN PVOID  MyCallersCaller
 

Definition at line 1611 of file iosup.c.

References _PTE_TRACKER::CallersCaller, _PTE_TRACKER::CallingAddress, _SYSPTES_HEADER::Count, _PTE_TRACKER::Count, _PTE_TRACKER::Length, _PTE_TRACKER::ListEntry, _SYSPTES_HEADER::ListHead, _PTE_TRACKER::Mdl, MiGetPteAddress, MiPteHeader, _PTE_TRACKER::Offset, _PTE_TRACKER::Page, PPTE_TRACKER, _PTE_TRACKER::PteAddress, _PTE_TRACKER::StartVa, and _PTE_TRACKER::SystemVa.

Referenced by MmMapIoSpace(), and MmMapLockedPagesSpecifyCache().

01620 : 01621 01622 This function inserts a PTE tracking block as the caller has just 01623 consumed system PTEs. 01624 01625 Arguments: 01626 01627 PoolBlock - Supplies a tracker pool block. This is supplied by the caller 01628 since the MmSystemSpaceLock is held on entry hence pool 01629 allocations may not be done here. 01630 01631 MemoryDescriptorList - Supplies a valid Memory Descriptor List. 01632 01633 NumberOfPtes - Supplies the number of system PTEs allocated. 01634 01635 MyCaller - Supplies the return address of the caller who consumed the 01636 system PTEs to map this MDL. 01637 01638 MyCallersCaller - Supplies the return address of the caller of the caller 01639 who consumed the system PTEs to map this MDL. 01640 01641 Return Value: 01642 01643 None. 01644 01645 Environment: 01646 01647 Kernel mode, protected by MmSystemSpaceLock at DISPATCH_LEVEL. 01648 01649 --*/ 01650 01651 { 01652 PPTE_TRACKER Tracker; 01653 01654 Tracker = (PPTE_TRACKER)PoolBlock; 01655 01656 Tracker->Mdl = MemoryDescriptorList; 01657 Tracker->SystemVa = MemoryDescriptorList->MappedSystemVa; 01658 Tracker->Count = NumberOfPtes; 01659 01660 Tracker->StartVa = MemoryDescriptorList->StartVa; 01661 Tracker->Offset = MemoryDescriptorList->ByteOffset; 01662 Tracker->Length = MemoryDescriptorList->ByteCount; 01663 Tracker->Page = *(PPFN_NUMBER)(MemoryDescriptorList + 1); 01664 01665 Tracker->CallingAddress = MyCaller; 01666 Tracker->CallersCaller = MyCallersCaller; 01667 Tracker->PteAddress = MiGetPteAddress (Tracker->SystemVa); 01668 01669 MiPteHeader.Count += NumberOfPtes; 01670 01671 InsertHeadList (&MiPteHeader.ListHead, &Tracker->ListEntry); 01672 }

VOID MiLockCode IN PMMPTE  FirstPte,
IN PMMPTE  LastPte,
IN ULONG  LockType
 

Definition at line 6461 of file iosup.c.

References ActiveAndValid, APC_LEVEL, ASSERT, FALSE, _MMWSL::FirstDynamic, LOCK_PFN, MI_ADD_LOCKED_PAGE_CHARGE, MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE, MI_IS_PHYSICAL_ADDRESS, MI_IS_SESSION_IMAGE_ADDRESS, MI_MAKE_VALID_PTE, MI_PFN_ELEMENT, MI_REMOVE_LOCKED_PAGE_CHARGE, MI_SET_PTE_IN_WORKING_SET, MI_WRITE_VALID_PTE, MI_ZERO_WSINDEX, MiGetPteAddress, MiGetVirtualAddressMappedByPte, MiLocateAndReserveWsle(), MiLocateWsle(), MiMakeSystemAddressValidPfnSystemWs(), MiReleaseWsle(), MiRemoveWsle(), MiSwapWslEntries(), MiUnlinkPageFromList(), MiUpdateWsle(), MM_BUMP_COUNTER, MM_LOCK_BY_REFCOUNT, MM_PFN_LOCK_ASSERT, MmLockedCode, MmPagedPoolEnd, MmPagedPoolStart, MmResidentAvailablePages, MmSessionSpace, MmSpecialPoolEnd, MmSpecialPoolStart, MmSystemCacheWorkingSetList, MmSystemCacheWs, MmTotalSystemDriverPages, _MMPFN::OriginalPte, PMMSUPPORT, PsGetCurrentThread, _MMPFN::PteAddress, TRUE, _MMPTE::u, _MMPFN::u1, _MMWSLE::u1, _MMPFN::u2, _MMPFN::u3, UNLOCK_PFN, _MM_SESSION_SPACE::Vm, _MMSUPPORT::VmWorkingSetList, _MM_SESSION_SPACE::Wsle, WSLE_NUMBER, and ZeroKernelPte.

Referenced by MmLockPagableSectionByHandle(), MmLockPagedPool(), and MmResetDriverPaging().

VOID
MiLockCode (
    IN PMMPTE FirstPte,
    IN PMMPTE LastPte,
    IN ULONG LockType
    )

/*++

Routine Description:

    This routine checks to see if the specified pages are resident in
    the process's working set and if so the reference count for the
    page is incremented.  This allows the virtual address to be accessed
    without getting a hard page fault (have to go to the disk...) except
    for the extremely rare case when the page table page is removed from
    the working set and migrates to the disk.

    If the virtual address is that of the system wide global "cache",
    the virtual address of the "locked" pages is always guaranteed to
    be valid.

    NOTE: This routine is not to be used for general locking of user
    addresses - use MmProbeAndLockPages.  This routine is intended for
    well behaved system code like the file system caches which allocates
    virtual addresses for mapping files AND guarantees that the mapping
    will not be modified (deleted or changed) while the pages are locked.

Arguments:

    FirstPte - Supplies the base address to begin locking.

    LastPte - The last PTE to lock.

    LockType - Supplies either MM_LOCK_BY_REFCOUNT or MM_LOCK_NONPAGE.
               LOCK_BY_REFCOUNT increments the reference count to keep
               the page in memory, LOCK_NONPAGE removes the page from
               the working set so it's locked just like nonpaged pool.

Return Value:

    None.

Environment:

    Kernel mode, system working set mutex and PFN lock held.
    NOTE(review): the PFN lock is dropped and reacquired around working
    set list operations below - statement ordering here is significant.

--*/

{
    PMMPFN Pfn1;
    PMMPTE PointerPte;
    MMPTE TempPte;
    MMPTE PteContents;
    WSLE_NUMBER WorkingSetIndex;
    WSLE_NUMBER SwapEntry;
    PFN_NUMBER PageFrameIndex;
    KIRQL OldIrql;
    LOGICAL SessionSpace;
    PMMWSL WorkingSetList;
    PMMSUPPORT Vm;
#if PFN_CONSISTENCY
    KIRQL PfnIrql;
#endif

    MM_PFN_LOCK_ASSERT();

    SessionSpace = MI_IS_SESSION_IMAGE_ADDRESS (MiGetVirtualAddressMappedByPte(FirstPte));

    // Session image addresses are charged against the session's working
    // set rather than the system cache working set.
    if (SessionSpace == TRUE) {
        Vm = &MmSessionSpace->Vm;
        WorkingSetList = MmSessionSpace->Vm.VmWorkingSetList;
    }

    //
    // Session space is never locked by refcount.
    //

    ASSERT ((SessionSpace == FALSE) || (LockType != MM_LOCK_BY_REFCOUNT));

    ASSERT (!MI_IS_PHYSICAL_ADDRESS(MiGetVirtualAddressMappedByPte(FirstPte)));
    PointerPte = FirstPte;

    // Systemwide accounting of locked code pages (range is inclusive).
    MmLockedCode += 1 + LastPte - FirstPte;

    do {

        PteContents = *PointerPte;
        ASSERT (PteContents.u.Long != ZeroKernelPte.u.Long);
        if (PteContents.u.Hard.Valid == 0) {

            if (PteContents.u.Soft.Prototype == 1) {

                //
                // Page is not in memory and it is a prototype - fault it
                // in and retry this PTE (note: no PointerPte advance).
                //

                MiMakeSystemAddressValidPfnSystemWs (
                                MiGetVirtualAddressMappedByPte(PointerPte));

                continue;
            }
            else if (PteContents.u.Soft.Transition == 1) {

                PageFrameIndex = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (&PteContents);

                Pfn1 = MI_PFN_ELEMENT (PageFrameIndex);
                if ((Pfn1->u3.e1.ReadInProgress) ||
                    (Pfn1->u3.e1.InPageError)) {

                    //
                    // Page read is ongoing, force a collided fault and
                    // retry this PTE.
                    //

                    MiMakeSystemAddressValidPfnSystemWs (
                                MiGetVirtualAddressMappedByPte(PointerPte));

                    continue;
                }

                //
                // Paged pool is trimmed without regard to sharecounts.
                // This means a paged pool PTE can be in transition while
                // the page is still marked active.
                //

                if (Pfn1->u3.e1.PageLocation == ActiveAndValid) {

                    ASSERT (((Pfn1->PteAddress >= MiGetPteAddress(MmPagedPoolStart)) &&
                            (Pfn1->PteAddress <= MiGetPteAddress(MmPagedPoolEnd))) ||
                            ((Pfn1->PteAddress >= MiGetPteAddress(MmSpecialPoolStart)) &&
                            (Pfn1->PteAddress <= MiGetPteAddress(MmSpecialPoolEnd))));

                    //
                    // Don't increment the valid PTE count for the
                    // paged pool page.
                    //

                    ASSERT (Pfn1->u2.ShareCount != 0);
                    ASSERT (Pfn1->u3.e2.ReferenceCount != 0);
                    Pfn1->u2.ShareCount += 1;
                }
                else {

                    MiUnlinkPageFromList (Pfn1);

                    //
                    // Set the reference count and share counts to 1.  Note the
                    // reference count may be 1 already if a modified page
                    // write is underway.  The systemwide locked page charges
                    // are correct in either case and nothing needs to be done
                    // just yet.
                    //

                    Pfn1->u3.e2.ReferenceCount += 1;
                    Pfn1->u2.ShareCount = 1;
                }

                Pfn1->u3.e1.PageLocation = ActiveAndValid;

                MI_MAKE_VALID_PTE (TempPte,
                                   PageFrameIndex,
                                   Pfn1->OriginalPte.u.Soft.Protection,
                                   PointerPte);

                MI_WRITE_VALID_PTE (PointerPte, TempPte);

                //
                // Increment the reference count one for putting it in the
                // working set list and one for locking it for I/O.
                //

                if (LockType == MM_LOCK_BY_REFCOUNT) {

                    //
                    // Lock the page in the working set by upping the
                    // reference count.
                    //

                    MI_ADD_LOCKED_PAGE_CHARGE (Pfn1, 34);
                    Pfn1->u3.e2.ReferenceCount += 1;
                    Pfn1->u1.Event = (PVOID)PsGetCurrentThread();

                    // The PFN lock must be dropped before touching the
                    // working set list; the extra reference taken above
                    // keeps the page pinned meanwhile.
                    UNLOCK_PFN (APC_LEVEL);
                    WorkingSetIndex = MiLocateAndReserveWsle (&MmSystemCacheWs);

                    MiUpdateWsle (&WorkingSetIndex,
                                  MiGetVirtualAddressMappedByPte (PointerPte),
                                  MmSystemCacheWorkingSetList,
                                  Pfn1);

                    MI_SET_PTE_IN_WORKING_SET (PointerPte, WorkingSetIndex);

                    LOCK_PFN (OldIrql);

                } else {

                    //
                    // The wsindex field must be zero because the
                    // page is not in the system (or session) working set.
                    //

                    ASSERT (Pfn1->u1.WsIndex == 0);

                    //
                    // Adjust available pages as this page is now not in any
                    // working set, just like a non-paged pool page.  On entry
                    // this page was in transition so it was part of the
                    // available pages by definition.
                    //

                    MmResidentAvailablePages -= 1;
                    if (Pfn1->u3.e1.PrototypePte == 0) {
                        MmTotalSystemDriverPages -= 1;
                    }
                    MM_BUMP_COUNTER(29, 1);
                }
            } else {

                //
                // Page is not in memory - fault it in and retry this PTE.
                //

                MiMakeSystemAddressValidPfnSystemWs (
                                MiGetVirtualAddressMappedByPte(PointerPte));

                continue;
            }

        }
        else {

            //
            // This address is already in the system (or session) working set.
            //

            Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);

            //
            // Up the reference count so the page cannot be released.
            //

            MI_ADD_LOCKED_PAGE_CHARGE (Pfn1, 36);
            Pfn1->u3.e2.ReferenceCount += 1;

            if (LockType != MM_LOCK_BY_REFCOUNT) {

                //
                // If the page is in the system working set, remove it.
                // The system working set lock MUST be owned to check to
                // see if this page is in the working set or not.  This
                // is because the pager may have just released the PFN lock,
                // acquired the system lock and is now trying to add the
                // page to the system working set.
                //
                // If the page is in the SESSION working set, it cannot be
                // removed as all these pages are carefully accounted for.
                // Instead move it to the locked portion of the working set
                // if it is not there already.
                //

                if (Pfn1->u1.WsIndex != 0) {

                    UNLOCK_PFN (APC_LEVEL);

                    if (SessionSpace == TRUE) {

                        WorkingSetIndex = MiLocateWsle (
                                        MiGetVirtualAddressMappedByPte(PointerPte),
                                        WorkingSetList,
                                        Pfn1->u1.WsIndex);

                        if (WorkingSetIndex >= WorkingSetList->FirstDynamic) {

                            SwapEntry = WorkingSetList->FirstDynamic;

                            if (WorkingSetIndex != WorkingSetList->FirstDynamic) {

                                //
                                // Swap this entry with the one at first
                                // dynamic.  Note that the working set index
                                // in the PTE is updated here as well.
                                //

                                MiSwapWslEntries (WorkingSetIndex,
                                                  SwapEntry,
                                                  Vm);
                            }

                            WorkingSetList->FirstDynamic += 1;
                        }
                        else {
                            SwapEntry = WorkingSetIndex;
                        }

                        //
                        // Indicate that the page is locked.
                        //

                        MmSessionSpace->Wsle[SwapEntry].u1.e1.LockedInWs = 1;
                    }
                    else {
                        MiRemoveWsle (Pfn1->u1.WsIndex, MmSystemCacheWorkingSetList);
                        MiReleaseWsle (Pfn1->u1.WsIndex, &MmSystemCacheWs);

                        MI_SET_PTE_IN_WORKING_SET (PointerPte, 0);
                    }

                    LOCK_PFN (OldIrql);

                    MI_ZERO_WSINDEX (Pfn1);

                    //
                    // Adjust available pages as this page is now not in any
                    // working set, just like a non-paged pool page.
                    //

                    MmResidentAvailablePages -= 1;
                    MM_BUMP_COUNTER(29, 1);
                    if (Pfn1->u3.e1.PrototypePte == 0) {
                        MmTotalSystemDriverPages -= 1;
                    }
                }
                // Drop the transient reference taken above; for
                // LOCK_NONPAGE the page is held by working set removal,
                // not by refcount.
                ASSERT (Pfn1->u3.e2.ReferenceCount > 1);
                MI_REMOVE_LOCKED_PAGE_CHARGE (Pfn1, 37);
                Pfn1->u3.e2.ReferenceCount -= 1;
            }
        }

        PointerPte += 1;
    } while (PointerPte <= LastPte);

    return;
}

PLDR_DATA_TABLE_ENTRY MiLookupDataTableEntry IN PVOID  AddressWithinSection,
IN ULONG  ResourceHeld
 

Definition at line 6991 of file iosup.c.

References ExAcquireResourceShared, ExReleaseResource, KeEnterCriticalRegion, KeLeaveCriticalRegion, NULL, PAGED_CODE, PsLoadedModuleList, PsLoadedModuleResource, and TRUE.

Referenced by MmAddVerifierThunks(), MmGetSectionRange(), MmLockPagableDataSection(), MmPageEntireDriver(), and MmResetDriverPaging().

06998 : 06999 07000 This functions locates the data table entry that maps the specified address. 07001 07002 Arguments: 07003 07004 AddressWithinSection - Supplies the address of a function contained 07005 within the desired module. 07006 07007 ResourceHeld - Supplies TRUE if the loaded module resource is already held, 07008 FALSE if not. 07009 07010 Return Value: 07011 07012 The address of the loaded module list data table entry that maps the 07013 argument address. 07014 07015 --*/ 07016 07017 { 07018 PLDR_DATA_TABLE_ENTRY DataTableEntry; 07019 PLDR_DATA_TABLE_ENTRY FoundEntry = NULL; 07020 PLIST_ENTRY NextEntry; 07021 07022 PAGED_CODE(); 07023 07024 // 07025 // Search the loaded module list for the data table entry that describes 07026 // the DLL that was just unloaded. It is possible that an entry is not in 07027 // the list if a failure occurred at a point in loading the DLL just before 07028 // the data table entry was generated. 07029 // 07030 07031 if (!ResourceHeld) { 07032 KeEnterCriticalRegion(); 07033 ExAcquireResourceShared (&PsLoadedModuleResource, TRUE); 07034 } 07035 07036 NextEntry = PsLoadedModuleList.Flink; 07037 do { 07038 07039 DataTableEntry = CONTAINING_RECORD(NextEntry, 07040 LDR_DATA_TABLE_ENTRY, 07041 InLoadOrderLinks); 07042 07043 // 07044 // Locate the loaded module that contains this address. 07045 // 07046 07047 if ( AddressWithinSection >= DataTableEntry->DllBase && 07048 AddressWithinSection < (PVOID)((PUCHAR)DataTableEntry->DllBase+DataTableEntry->SizeOfImage) ) { 07049 07050 FoundEntry = DataTableEntry; 07051 break; 07052 } 07053 07054 NextEntry = NextEntry->Flink; 07055 } while (NextEntry != &PsLoadedModuleList); 07056 07057 if (!ResourceHeld) { 07058 ExReleaseResource (&PsLoadedModuleResource); 07059 KeLeaveCriticalRegion(); 07060 } 07061 return FoundEntry; 07062 }

PVOID MiMapLockedPagesInUserSpace IN PMDL  MemoryDescriptorList,
IN PVOID  StartingVa,
IN MEMORY_CACHING_TYPE  CacheType,
IN PVOID  BaseVa
 

Definition at line 2785 of file iosup.c.

References _EPROCESS::AddressSpaceDeleted, ASSERT, COMPUTE_PAGES_SPANNED, _MMVAD::ControlArea, _MMVAD::EndingVpn, _MI_PHYSICAL_VIEW::EndVa, ExAllocatePoolWithTag, EXCEPTION_EXECUTE_HANDLER, ExFreePool(), ExRaiseStatus(), FALSE, _MMVAD::FirstPrototypePte, KeFlushEntireTb(), KeInvalidateAllCaches(), LOCK_WS_AND_ADDRESS_SPACE, MDL_IO_SPACE, MI_DISABLE_CACHING, MI_GET_USED_PTES_HANDLE, MI_INCREMENT_USED_PTES_BY_HANDLE, MI_PFN_ELEMENT, MI_PHYSICAL_VIEW_KEY, MI_SET_PTE_WRITE_COMBINE, MI_VA_TO_VPN, MI_WRITE_VALID_PTE, MiCheckForConflictingVad, MiFindEmptyAddressRange(), MiGetPdeAddress, MiGetPteAddress, MiInsertVad(), MiMakePdeExistAndMakeValid(), MiMakePpeExistAndMakeValid, MiPhysicalViewInserter(), MiSweepCacheMachineDependent(), MiWriteCombiningPtes, MM_EMPTY_LIST, MM_HIGHEST_VAD_ADDRESS, MM_READWRITE, MmCached, MmHardwareCoherentCached, MmHighestPhysicalPage, MmNonCached, MmNonCachedUnordered, MmWriteCombined, NonPagedPool, NULL, PAGE_SIZE, PAGED_CODE, PsGetCurrentProcess, _MMVAD::StartingVpn, _MI_PHYSICAL_VIEW::StartVa, TRUE, _MMPTE::u, _MMVAD::u, _MMPFN::u2, _MMVAD::u4, UNLOCK_WS_AND_ADDRESS_SPACE, _MI_PHYSICAL_VIEW::Vad, ValidUserPte, and X64K.

Referenced by MmMapLockedPagesSpecifyCache().

PVOID
MiMapLockedPagesInUserSpace (
    IN PMDL MemoryDescriptorList,
    IN PVOID StartingVa,
    IN MEMORY_CACHING_TYPE CacheType,
    IN PVOID BaseVa
    )

/*++

Routine Description:

    This function maps physical pages described by a memory descriptor
    list into the user portion of the virtual address space.

Arguments:

    MemoryDescriptorList - Supplies a valid Memory Descriptor List which has
                           been updated by MmProbeAndLockPages.

    StartingVa - Supplies the starting address.

    CacheType - Supplies the type of cache mapping to use for the MDL.
                MmCached indicates "normal" user mappings.

    BaseVa - Supplies the base address of the view.  If non-null, the view
             is mapped at that (page-aligned) address; if null, the
             operating system chooses where to allocate the view.

Return Value:

    Returns the base address where the pages are mapped.  The base address
    has the same offset as the virtual address in the MDL.

    This routine will raise an exception if the processor mode is USER_MODE
    and quota limits or VM limits are exceeded.

Environment:

    Kernel mode.  APC_LEVEL or below.

--*/

{
    PFN_NUMBER NumberOfPages;
    PPFN_NUMBER Page;
    PMMPTE PointerPte;
    PMMPTE PointerPde;
    PMMPTE PointerPpe;
    PCHAR Va;
    MMPTE TempPte;
    PVOID EndingAddress;
    PMMVAD Vad;
    PEPROCESS Process;
    PMMPFN Pfn2;
    PVOID UsedPageTableHandle;
    PMI_PHYSICAL_VIEW PhysicalView;
#if defined (_WIN64)
    PVOID UsedPageDirectoryHandle;
#endif

    PAGED_CODE ();
    // The page frame array immediately follows the MDL header.
    Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
    NumberOfPages = COMPUTE_PAGES_SPANNED (StartingVa,
                                           MemoryDescriptorList->ByteCount);

    // I/O space frames have no PFN database entries and cannot be
    // mapped into user space this way.
    if (MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) {
        ExRaiseStatus (STATUS_INVALID_ADDRESS);
        return NULL;
    }

    //
    // Map the pages into the user part of the address as user
    // read/write no-delete.
    //

    TempPte = ValidUserPte;

    switch (CacheType) {

        case MmNonCached:
            MI_DISABLE_CACHING (TempPte);
            break;

        case MmCached:
            break;

        case MmWriteCombined:
            MI_SET_PTE_WRITE_COMBINE (TempPte);
            break;

        case MmHardwareCoherentCached:
            break;

#if 0
        case MmNonCachedUnordered:
            break;
#endif

        default:
            break;
    }

    Process = PsGetCurrentProcess ();

    //
    // Make sure the specified starting and ending addresses are
    // within the user part of the virtual address space.
    //

    if (BaseVa != NULL) {

        if ((ULONG_PTR)BaseVa & (PAGE_SIZE - 1)) {

            //
            // Invalid base address (must be page aligned).
            //

            ExRaiseStatus (STATUS_INVALID_ADDRESS);
            return NULL;
        }

        EndingAddress = (PVOID)((PCHAR)BaseVa + ((ULONG_PTR)NumberOfPages * PAGE_SIZE) - 1);

        if (EndingAddress <= BaseVa) {

            //
            // Invalid region size (wrapped).
            //

            ExRaiseStatus (STATUS_INVALID_ADDRESS);
            return NULL;
        }

        if (EndingAddress > MM_HIGHEST_VAD_ADDRESS) {

            //
            // Invalid region size (extends beyond user VA space).
            //

            ExRaiseStatus (STATUS_INVALID_ADDRESS);
            return NULL;
        }

        LOCK_WS_AND_ADDRESS_SPACE (Process);

        //
        // Make sure the address space was not deleted, if so, return an error.
        //

        if (Process->AddressSpaceDeleted != 0) {
            UNLOCK_WS_AND_ADDRESS_SPACE (Process);
            ExRaiseStatus (STATUS_PROCESS_IS_TERMINATING);
            return NULL;
        }

        Vad = MiCheckForConflictingVad (BaseVa, EndingAddress);

        //
        // Make sure the address space is not already in use.
        //

        if (Vad != (PMMVAD)NULL) {
            UNLOCK_WS_AND_ADDRESS_SPACE (Process);
            ExRaiseStatus (STATUS_CONFLICTING_ADDRESSES);
            return NULL;
        }
    }
    else {

        //
        // Get the working set mutex and address creation mutex.
        //

        LOCK_WS_AND_ADDRESS_SPACE (Process);

        //
        // Make sure the address space was not deleted, if so, return an error.
        //

        if (Process->AddressSpaceDeleted != 0) {
            UNLOCK_WS_AND_ADDRESS_SPACE (Process);
            ExRaiseStatus (STATUS_PROCESS_IS_TERMINATING);
            return NULL;
        }

        // MiFindEmptyAddressRange raises on failure - treat that as
        // an out-of-resources exit.
        try {

            BaseVa = MiFindEmptyAddressRange ( (ULONG_PTR)NumberOfPages * PAGE_SIZE,
                                               X64K,
                                               0 );

            EndingAddress = (PVOID)((PCHAR)BaseVa + ((ULONG_PTR)NumberOfPages * PAGE_SIZE) - 1);

        } except (EXCEPTION_EXECUTE_HANDLER) {
            BaseVa = NULL;
            goto Done;
        }
    }

    PhysicalView = (PMI_PHYSICAL_VIEW)ExAllocatePoolWithTag (NonPagedPool,
                                                             sizeof(MI_PHYSICAL_VIEW),
                                                             MI_PHYSICAL_VIEW_KEY);
    if (PhysicalView == NULL) {
        BaseVa = NULL;
        goto Done;
    }

    Vad = ExAllocatePoolWithTag (NonPagedPool, sizeof(MMVAD), ' daV');

    if (Vad == NULL) {
        ExFreePool (PhysicalView);
        BaseVa = NULL;
        goto Done;
    }

    PhysicalView->Vad = Vad;
    PhysicalView->StartVa = BaseVa;
    PhysicalView->EndVa = EndingAddress;

    // Build a private, read/write, physically-mapped VAD for the view.
    Vad->StartingVpn = MI_VA_TO_VPN (BaseVa);
    Vad->EndingVpn = MI_VA_TO_VPN (EndingAddress);
    Vad->ControlArea = NULL;
    Vad->FirstPrototypePte = NULL;
    Vad->u.LongFlags = 0;
    Vad->u.VadFlags.Protection = MM_READWRITE;
    Vad->u.VadFlags.PhysicalMapping = 1;
    Vad->u.VadFlags.PrivateMemory = 1;
    Vad->u4.Banked = NULL;

    // MiInsertVad raises on quota/commit failure.
    try {

        MiInsertVad (Vad);

    } except (EXCEPTION_EXECUTE_HANDLER) {
        ExFreePool (PhysicalView);
        ExFreePool (Vad);
        BaseVa = NULL;
        goto Done;
    }

    MiPhysicalViewInserter (Process, PhysicalView);

#if defined(_IA64_)
    if (CacheType != MmCached) {
        KeFlushEntireTb(FALSE, TRUE);
    }
#endif

    //
    // Create a page table and fill in the mappings for the Vad.
    //

    Va = BaseVa;
    PointerPte = MiGetPteAddress (BaseVa);

    do {

        // MM_EMPTY_LIST marks the end of valid frames in the MDL array.
        if (*Page == MM_EMPTY_LIST) {
            break;
        }

        ASSERT (*Page <= MmHighestPhysicalPage);

        PointerPde = MiGetPteAddress (PointerPte);
        PointerPpe = MiGetPdeAddress (PointerPte);

#if defined (_WIN64)
        MiMakePpeExistAndMakeValid (PointerPpe, Process, FALSE);
        if (PointerPde->u.Long == 0) {
            UsedPageDirectoryHandle = MI_GET_USED_PTES_HANDLE (PointerPte);
            MI_INCREMENT_USED_PTES_BY_HANDLE (UsedPageDirectoryHandle);
        }
#endif

        MiMakePdeExistAndMakeValid(PointerPde, Process, FALSE);

        ASSERT (PointerPte->u.Hard.Valid == 0);
        TempPte.u.Hard.PageFrameNumber = *Page;
        MI_WRITE_VALID_PTE (PointerPte, TempPte);

        //
        // A PTE just went from not present, not transition to
        // present.  The share count and valid count must be
        // updated in the page table page which contains this
        // PTE.
        //

        Pfn2 = MI_PFN_ELEMENT(PointerPde->u.Hard.PageFrameNumber);
        Pfn2->u2.ShareCount += 1;

        //
        // Another zeroed PTE has become non-zero.
        //

        UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (Va);

        MI_INCREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);

        Page += 1;
        PointerPte += 1;
        NumberOfPages -= 1;
        Va += PAGE_SIZE;
    } while (NumberOfPages != 0);

#if defined(_IA64_)
    if (CacheType != MmCached) {
        MiSweepCacheMachineDependent (BaseVa, MemoryDescriptorList->ByteCount, CacheType);
    }
#endif

Done:
    UNLOCK_WS_AND_ADDRESS_SPACE (Process);
    if (BaseVa == NULL) {
        ExRaiseStatus (STATUS_INSUFFICIENT_RESOURCES);
        return NULL;
    }

#if defined(i386)
    //
    // If write combined was specified then flush all caches and TBs.
    //

    if (CacheType == MmWriteCombined && MiWriteCombiningPtes == TRUE) {
        KeFlushEntireTb (FALSE, TRUE);
        KeInvalidateAllCaches (TRUE);
    }
#endif

    // Return the caller's byte offset within the first page.
    BaseVa = (PVOID)((PCHAR)BaseVa + MemoryDescriptorList->ByteOffset);

    return BaseVa;
}

PVOID MiMapSinglePage IN PVOID VirtualAddress  OPTIONAL,
IN PFN_NUMBER  PageFrameIndex,
IN MEMORY_CACHING_TYPE  CacheType,
IN MM_PAGE_PRIORITY  Priority
 

Definition at line 2395 of file iosup.c.

References ASSERT, FALSE, HighPagePriority, KeFlushEntireTb(), KeFlushSingleTb(), KeInvalidateAllCaches(), MI_DISABLE_CACHING, MI_IS_PHYSICAL_ADDRESS, MI_SET_PTE_WRITE_COMBINE, MI_WRITE_INVALID_PTE, MI_WRITE_VALID_PTE, MiGetPteAddress, MiGetSystemPteAvailability(), MiGetVirtualAddressMappedByPte, MiReserveSystemPtes(), MiSweepCacheMachineDependent(), MiWriteCombiningPtes, MM_COLOR_ALIGNMENT, MmCached, MmHardwareCoherentCached, MmNonCached, MmNonCachedUnordered, MmWriteCombined, NULL, PAGE_SIZE, PAGED_CODE, SystemPteSpace, TRUE, _MMPTE::u, ValidKernelPte, and ZeroPte.

Referenced by MiCloneProcessAddressSpace().

PVOID
MiMapSinglePage (
    IN PVOID VirtualAddress OPTIONAL,
    IN PFN_NUMBER PageFrameIndex,
    IN MEMORY_CACHING_TYPE CacheType,
    IN MM_PAGE_PRIORITY Priority
    )

/*++

Routine Description:

    This function (re)maps a single system PTE to the specified physical
    page.

Arguments:

    VirtualAddress - Supplies the virtual address to map the page frame at.
                     NULL indicates a system PTE is needed.  Non-NULL
                     supplies the virtual address returned by an earlier
                     MiMapSinglePage call.

    PageFrameIndex - Supplies the page frame index to map.

    CacheType - Supplies the type of cache mapping to use for the MDL.
                MmCached indicates "normal" kernel or user mappings.

    Priority - Supplies an indication as to how important it is that this
               request succeed under low available PTE conditions.

Return Value:

    Returns the base address where the page is mapped, or NULL if the
    mapping failed.

Environment:

    Kernel mode.  APC_LEVEL or below.

--*/

{
    PMMPTE PointerPte;
    MMPTE TempPte;

    PAGED_CODE ();

    if (VirtualAddress == NULL) {

        //
        // Make sure there are enough PTEs of the requested size.
        //

        if ((Priority != HighPagePriority) &&
            (MiGetSystemPteAvailability (1, Priority) == FALSE)) {
            return NULL;
        }

        PointerPte = MiReserveSystemPtes (1,
                                          SystemPteSpace,
                                          MM_COLOR_ALIGNMENT,
                                          0,
                                          0);

        if (PointerPte == NULL) {

            //
            // Not enough system PTES are available.
            //

            return NULL;
        }

        ASSERT (PointerPte->u.Hard.Valid == 0);
        VirtualAddress = MiGetVirtualAddressMappedByPte (PointerPte);
    }
    else {
        // Remap path: the caller supplied an address previously
        // returned by this routine, so its PTE must be valid.
        ASSERT (MI_IS_PHYSICAL_ADDRESS (VirtualAddress) == 0);
        ASSERT (VirtualAddress >= MM_SYSTEM_RANGE_START);

        PointerPte = MiGetPteAddress (VirtualAddress);
        ASSERT (PointerPte->u.Hard.Valid == 1);

        // Invalidate the old translation before installing the new one.
        MI_WRITE_INVALID_PTE (PointerPte, ZeroPte);

        KeFlushSingleTb (VirtualAddress,
                         TRUE,
                         TRUE,
                         (PHARDWARE_PTE)PointerPte,
                         ZeroPte.u.Flush);
    }

    TempPte = ValidKernelPte;

    switch (CacheType) {

        case MmNonCached:
            MI_DISABLE_CACHING (TempPte);
            break;

        case MmCached:
            break;

        case MmWriteCombined:
            MI_SET_PTE_WRITE_COMBINE (TempPte);
            break;

        case MmHardwareCoherentCached:
            break;

#if 0
        case MmNonCachedUnordered:
            break;
#endif

        default:
            break;
    }

#if defined(_IA64_)
    if (CacheType != MmCached) {
        KeFlushEntireTb(FALSE, TRUE);
    }
#endif

    TempPte.u.Hard.PageFrameNumber = PageFrameIndex;

    MI_WRITE_VALID_PTE (PointerPte, TempPte);

#if defined(i386)
    //
    // If write combined was specified then flush all caches and TBs.
    //

    if (CacheType == MmWriteCombined && MiWriteCombiningPtes == TRUE) {
        KeFlushEntireTb (FALSE, TRUE);
        KeInvalidateAllCaches (TRUE);
    }
#endif

#if defined(_IA64_)
    if (CacheType != MmCached) {
        MiSweepCacheMachineDependent(VirtualAddress, PAGE_SIZE, CacheType);
    }
#endif

    return VirtualAddress;
}

VOID MiPhysicalViewAdjuster IN PEPROCESS  Process,
IN PMMVAD  OldVad,
IN PMMVAD  NewVad
 

Definition at line 2721 of file iosup.c.

References ASSERT, FALSE, LOCK_AWE, LOCK_PFN2, UNLOCK_AWE, UNLOCK_PFN2, and _MI_PHYSICAL_VIEW::Vad.

Referenced by MmSecureVirtualMemory().

02729 : 02730 02731 This function is a nonpaged wrapper which acquires the PFN lock to repoint 02732 a physical VAD in the process chain. 02733 02734 Arguments: 02735 02736 Process - Supplies the process in which to adjust the physical VAD. 02737 02738 Vad - Supplies the old Vad to replace. 02739 02740 NewVad - Supplies the newVad to substitute. 02741 02742 Return Value: 02743 02744 None. 02745 02746 Environment: 02747 02748 Kernel mode, called with APCs disabled, working set mutex held. 02749 02750 --*/ 02751 { 02752 KIRQL OldIrql; 02753 KIRQL OldIrql2; 02754 PLIST_ENTRY NextEntry; 02755 PMI_PHYSICAL_VIEW PhysicalView; 02756 02757 LOCK_AWE (Process, OldIrql); 02758 02759 LOCK_PFN2 (OldIrql2); 02760 02761 NextEntry = Process->PhysicalVadList.Flink; 02762 while (NextEntry != &Process->PhysicalVadList) { 02763 02764 PhysicalView = CONTAINING_RECORD(NextEntry, 02765 MI_PHYSICAL_VIEW, 02766 ListEntry); 02767 02768 if (PhysicalView->Vad == OldVad) { 02769 PhysicalView->Vad = NewVad; 02770 UNLOCK_PFN2 (OldIrql2); 02771 UNLOCK_AWE (Process, OldIrql); 02772 return; 02773 } 02774 02775 NextEntry = NextEntry->Flink; 02776 } 02777 02778 ASSERT (FALSE); 02779 02780 UNLOCK_PFN2 (OldIrql2); 02781 UNLOCK_AWE (Process, OldIrql); 02782 }

VOID
MiPhysicalViewInserter (
    IN PEPROCESS Process,
    IN PMI_PHYSICAL_VIEW PhysicalView
    )

/*++

Routine Description:

    This function is a nonpaged wrapper which acquires the PFN lock to insert
    a physical VAD into the process chain.

Arguments:

    Process - Supplies the process to add the physical VAD to.

    PhysicalView - Supplies the physical view data to link in.

Return Value:

    None.

Environment:

    Kernel mode. APC_LEVEL, working set and address space mutexes held.

    Callers: MiMapLockedPagesInUserSpace, NtAllocateVirtualMemory.

--*/

{
    KIRQL OldIrql;
    KIRQL OldIrql2;

    LOCK_AWE (Process, OldIrql);

    LOCK_PFN2 (OldIrql2);

    InsertHeadList (&Process->PhysicalVadList, &PhysicalView->ListEntry);

    //
    // Bump the global write-watch count under the PFN lock so it stays
    // consistent with the list contents.
    //

    if (PhysicalView->Vad->u.VadFlags.WriteWatch == 1) {
        MiActiveWriteWatch += 1;
    }

    UNLOCK_PFN2 (OldIrql2);

    UNLOCK_AWE (Process, OldIrql);

    if (PhysicalView->Vad->u.VadFlags.WriteWatch == 1) {

        //
        // Mark this process as forever containing write-watch
        // address space(s).  This is done after the locks are dropped.
        //

        if (Process->Vm.u.Flags.WriteWatch == 0) {
            MiMarkProcessAsWriteWatch (Process);
        }
    }
}

VOID
MiPhysicalViewRemover (
    IN PEPROCESS Process,
    IN PMMVAD Vad
    )

/*++

Routine Description:

    This function is a nonpaged wrapper which acquires the PFN lock to remove
    a physical VAD from the process chain.

Arguments:

    Process - Supplies the process to remove the physical VAD from.

    Vad - Supplies the Vad to remove.

Return Value:

    None.

Environment:

    Kernel mode, APC_LEVEL, working set and address space mutexes held.

    Callers: MiUnmapLockedPagesInUserSpace, MmCleanProcessAddressSpace,
    NtFreeVirtualMemory.

--*/

{
    KIRQL OldIrql;
    KIRQL OldIrql2;
    PRTL_BITMAP BitMap;
    PLIST_ENTRY Entry;
    PMI_PHYSICAL_VIEW PhysicalView;
    ULONG BitMapSize;

    BitMap = NULL;

    LOCK_AWE (Process, OldIrql);

    LOCK_PFN2 (OldIrql2);

    for (Entry = Process->PhysicalVadList.Flink;
         Entry != &Process->PhysicalVadList;
         Entry = Entry->Flink) {

        PhysicalView = CONTAINING_RECORD (Entry,
                                          MI_PHYSICAL_VIEW,
                                          ListEntry);

        if (PhysicalView->Vad != Vad) {
            continue;
        }

        RemoveEntryList (Entry);

        //
        // Capture the write-watch bitmap (if any) while the locks are held;
        // the pool frees and quota return are deferred until after the
        // locks are dropped.
        //

        if (Vad->u.VadFlags.WriteWatch == 1) {
            MiActiveWriteWatch -= 1;
            BitMap = PhysicalView->BitMap;
            ASSERT (BitMap != NULL);
        }

        UNLOCK_PFN2 (OldIrql2);
        UNLOCK_AWE (Process, OldIrql);

        ExFreePool (PhysicalView);

        if (BitMap != NULL) {

            //
            // Quota was charged for the bitmap header plus one ULONG per
            // 32 bits of watched range - return exactly that amount.
            //

            BitMapSize = sizeof(RTL_BITMAP) +
                (ULONG)(((BitMap->SizeOfBitMap + 31) / 32) * 4);
            PsReturnPoolQuota (Process, NonPagedPool, BitMapSize);
            ExFreePool (BitMap);
        }

        return;
    }

    //
    // The VAD must always be on the list - falling out of the loop
    // indicates a caller error.
    //

    ASSERT (FALSE);

    UNLOCK_PFN2 (OldIrql2);
    UNLOCK_AWE (Process, OldIrql);
}

VOID
MiProtectFreeNonPagedPool (
    IN PVOID VirtualAddress,
    IN ULONG SizeInPages
    )

/*++

Routine Description:

    This function protects freed nonpaged pool by marking each PTE invalid
    (with the Prototype bit set as a signature) so any stray reference to
    the freed range faults immediately.

Arguments:

    VirtualAddress - Supplies the freed pool address to protect.

    SizeInPages - Supplies the size of the request in pages.

Return Value:

    None.

Environment:

    Kernel mode.

--*/

{
    ULONG PageIndex;
    MMPTE PteContents;
    PMMPTE PointerPte;

    //
    // Physical (super-section mapped) addresses have no PTEs to invalidate,
    // so only virtually mapped pool can be protected.
    //

    if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress) != 0) {
        return;
    }

    PointerPte = MiGetPteAddress (VirtualAddress);

    for (PageIndex = 0; PageIndex < SizeInPages; PageIndex += 1) {

        //
        // Invalid + Prototype is the signature the unprotect routine
        // later recognizes.
        //

        PteContents = *PointerPte;
        PteContents.u.Hard.Valid = 0;
        PteContents.u.Soft.Prototype = 1;

        KeFlushSingleTb (VirtualAddress,
                         TRUE,
                         TRUE,
                         (PHARDWARE_PTE)PointerPte,
                         PteContents.u.Flush);

        VirtualAddress = (PVOID)((PCHAR)VirtualAddress + PAGE_SIZE);
        PointerPte += 1;
    }
}

VOID MiReleaseDeadPteTrackers VOID   ) 
 

Definition at line 1859 of file iosup.c.

References ASSERT, DISPATCH_LEVEL, ExFreePool(), MiDeadPteTrackerListHead, and MiPteTrackerLock.

Referenced by MmMapIoSpace(), and MmMapLockedPagesSpecifyCache().

01864 : 01865 01866 This routine removes tracking blocks from the dead PTE list and frees 01867 them to pool. 01868 01869 Arguments: 01870 01871 None. 01872 01873 Return Value: 01874 01875 None. 01876 01877 Environment: 01878 01879 Kernel mode. No locks held. 01880 01881 --*/ 01882 { 01883 KIRQL OldIrql; 01884 PVOID PoolBlock; 01885 01886 ASSERT (KeGetCurrentIrql() <= DISPATCH_LEVEL); 01887 01888 ExAcquireSpinLock (&MiPteTrackerLock, &OldIrql); 01889 01890 while (IsListEmpty(&MiDeadPteTrackerListHead) == 0) { 01891 PoolBlock = (PVOID)RemoveHeadList(&MiDeadPteTrackerListHead); 01892 ExReleaseSpinLock (&MiPteTrackerLock, OldIrql); 01893 ExFreePool (PoolBlock); 01894 ExAcquireSpinLock (&MiPteTrackerLock, &OldIrql); 01895 } 01896 01897 ExReleaseSpinLock (&MiPteTrackerLock, OldIrql); 01898 }

PVOID
MiRemovePteTracker (
    IN PMDL MemoryDescriptorList,
    IN PVOID PteAddress,
    IN PFN_NUMBER NumberOfPtes
    )

/*++

Routine Description:

    This function removes a PTE tracking block from the lists as the PTEs
    are being freed.  A series of consistency checks is applied and any
    mismatch between the map and unmap raises a SYSTEM_PTE_MISUSE bugcheck.

Arguments:

    MemoryDescriptorList - Supplies a valid Memory Descriptor List.

    PteAddress - Supplies the address the system PTEs were mapped to.

    NumberOfPtes - Supplies the number of system PTEs allocated.

Return Value:

    The pool block that held the tracking info that must be freed by our
    caller _AFTER_ our caller releases MmSystemSpaceLock (to prevent deadlock).

Environment:

    Kernel mode, protected by MmSystemSpaceLock at DISPATCH_LEVEL.

    Callers: MmUnmapIoSpace, MmUnmapLockedPages.

--*/

{
    PPTE_TRACKER Tracker;
    PFN_NUMBER Page;
    PVOID BaseAddress;
    PLIST_ENTRY LastFound;
    PLIST_ENTRY NextEntry;

    BaseAddress = MiGetVirtualAddressMappedByPte (PteAddress);

    //
    // NOTE(review): Page is only initialized when an MDL is supplied; every
    // consumer below is guarded by the same ARGUMENT_PRESENT check, so the
    // conditional initialization is safe.
    //

    if (ARGUMENT_PRESENT (MemoryDescriptorList)) {
        Page = *(PPFN_NUMBER)(MemoryDescriptorList + 1);
    }

    LastFound = NULL;
    NextEntry = MiPteHeader.ListHead.Flink;

    while (NextEntry != &MiPteHeader.ListHead) {

        Tracker = (PPTE_TRACKER) CONTAINING_RECORD (NextEntry,
                                                    PTE_TRACKER,
                                                    ListEntry.Flink);

        if (PteAddress == Tracker->PteAddress) {

            if (LastFound != NULL) {

                //
                // Duplicate map entry.
                //

                KeBugCheckEx (SYSTEM_PTE_MISUSE,
                              0x1,
                              (ULONG_PTR)Tracker,
                              (ULONG_PTR)MemoryDescriptorList,
                              (ULONG_PTR)LastFound);
            }

            if (Tracker->Count != NumberOfPtes) {

                //
                // Not unmapping the same number of PTEs that were mapped.
                //

                KeBugCheckEx (SYSTEM_PTE_MISUSE,
                              0x2,
                              (ULONG_PTR)Tracker,
                              Tracker->Count,
                              NumberOfPtes);
            }

            if (ARGUMENT_PRESENT (MemoryDescriptorList)) {

                if (Tracker->SystemVa != MemoryDescriptorList->MappedSystemVa) {

                    //
                    // Not unmapping the same address that was mapped.
                    //

                    KeBugCheckEx (SYSTEM_PTE_MISUSE,
                                  0x3,
                                  (ULONG_PTR)Tracker,
                                  (ULONG_PTR)Tracker->SystemVa,
                                  (ULONG_PTR)MemoryDescriptorList->MappedSystemVa);
                }

                if (Tracker->Page != Page) {

                    //
                    // The first page in the MDL has changed since it was
                    // mapped.
                    //

                    KeBugCheckEx (SYSTEM_PTE_MISUSE,
                                  0x4,
                                  (ULONG_PTR)Tracker,
                                  (ULONG_PTR)Tracker->Page,
                                  (ULONG_PTR)Page);
                }

                if (Tracker->StartVa != MemoryDescriptorList->StartVa) {

                    //
                    // Map and unmap don't match up.
                    //

                    KeBugCheckEx (SYSTEM_PTE_MISUSE,
                                  0x5,
                                  (ULONG_PTR)Tracker,
                                  (ULONG_PTR)Tracker->StartVa,
                                  (ULONG_PTR)MemoryDescriptorList->StartVa);
                }
            }

            RemoveEntryList (NextEntry);
            LastFound = NextEntry;
        }

        //
        // A removed entry's Flink is still usable here, so traversal
        // continues correctly even just after a RemoveEntryList.
        //

        NextEntry = Tracker->ListEntry.Flink;
    }

    if ((LastFound == NULL) && (MiTrackPtesAborted == FALSE)) {

        //
        // Can't unmap something that was never (or isn't currently) mapped.
        //

        KeBugCheckEx (SYSTEM_PTE_MISUSE,
                      0x6,
                      (ULONG_PTR)MemoryDescriptorList,
                      (ULONG_PTR)BaseAddress,
                      (ULONG_PTR)NumberOfPtes);
    }

    MiPteHeader.Count -= NumberOfPtes;

    return (PVOID)LastFound;
}

VOID
MiUnmapLockedPagesInUserSpace (
    IN PVOID BaseAddress,
    IN PMDL MemoryDescriptorList
    )

/*++

Routine Description:

    This routine unmaps locked pages which were previously mapped via
    a MmMapLockedPages function.  The corresponding virtual address
    descriptor is deleted and any page table (and, on 64-bit, page
    directory) pages emptied by the unmap are deleted as well.

Arguments:

    BaseAddress - Supplies the base address where the pages were previously
                  mapped.

    MemoryDescriptorList - Supplies a valid Memory Descriptor List which has
                           been updated by MmProbeAndLockPages.

Return Value:

    None.

Environment:

    Kernel mode. DISPATCH_LEVEL or below if base address is within system
    space; APC_LEVEL or below if base address is user space.

    Callers: MmUnmapLockedPages.

--*/

{
    PFN_NUMBER NumberOfPages;
    PPFN_NUMBER Page;
    PMMPTE PointerPte;
    PMMPTE PointerBase;
    PMMPTE PointerPde;
    PVOID StartingVa;
    KIRQL OldIrql;
    PMMVAD Vad;
    PVOID TempVa;
    PEPROCESS Process;
    PVOID UsedPageTableHandle;
#if defined (_WIN64)
    PVOID UsedPageDirectoryHandle;
#endif

    MmLockPagableSectionByHandle (ExPageLockHandle);

    StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa +
                    MemoryDescriptorList->ByteOffset);

    Page = (PPFN_NUMBER)(MemoryDescriptorList + 1);
    NumberOfPages = COMPUTE_PAGES_SPANNED (StartingVa,
                                           MemoryDescriptorList->ByteCount);

    PointerPte = MiGetPteAddress (BaseAddress);
    PointerBase = PointerPte;

    //
    // This was mapped into the user portion of the address space and
    // the corresponding virtual address descriptor must be deleted.
    //
    // Get the working set mutex and address creation mutex.
    //

    Process = PsGetCurrentProcess ();

    LOCK_WS_AND_ADDRESS_SPACE (Process);

    Vad = MiLocateAddress (BaseAddress);
    ASSERT (Vad != NULL);

    MiPhysicalViewRemover (Process, Vad);

    MiRemoveVad (Vad);

    //
    // Get the PFN mutex so we can safely decrement share and valid
    // counts on page table pages.
    //

    LOCK_PFN (OldIrql);

    do {

        //
        // MM_EMPTY_LIST in the frame array marks the end of the locked run.
        //

        if (*Page == MM_EMPTY_LIST) {
            break;
        }

        ASSERT64 (MiGetPdeAddress(PointerPte)->u.Hard.Valid == 1);
        ASSERT (MiGetPteAddress(PointerPte)->u.Hard.Valid == 1);
        ASSERT (PointerPte->u.Hard.Valid == 1);

        (VOID)KeFlushSingleTb (BaseAddress,
                               TRUE,
                               FALSE,
                               (PHARDWARE_PTE)PointerPte,
                               ZeroPte.u.Flush);

        PointerPde = MiGetPteAddress (PointerPte);
        MiDecrementShareAndValidCount (MI_GET_PAGE_FRAME_FROM_PTE (PointerPde));

        //
        // Another PTE has become zero.
        //

        UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (BaseAddress);

        MI_DECREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle);

        //
        // If all the entries have been eliminated from the previous
        // page table page, delete the page table page itself.  Likewise
        // with the page directory page.
        //

        if (MI_GET_USED_PTES_FROM_HANDLE (UsedPageTableHandle) == 0) {

            TempVa = MiGetVirtualAddressMappedByPte (PointerPde);
            MiDeletePte (PointerPde,
                         TempVa,
                         FALSE,
                         Process,
                         (PMMPTE)NULL,
                         NULL);

#if defined (_WIN64)
            UsedPageDirectoryHandle = MI_GET_USED_PTES_HANDLE (PointerPte);

            MI_DECREMENT_USED_PTES_BY_HANDLE (UsedPageDirectoryHandle);

            if (MI_GET_USED_PTES_FROM_HANDLE (UsedPageDirectoryHandle) == 0) {

                TempVa = MiGetVirtualAddressMappedByPte (MiGetPteAddress (PointerPde));
                MiDeletePte (MiGetPteAddress (PointerPde),
                             TempVa,
                             FALSE,
                             Process,
                             NULL,
                             NULL);
            }
#endif
        }

        Page += 1;
        PointerPte += 1;
        NumberOfPages -= 1;
        BaseAddress = (PVOID)((PCHAR)BaseAddress + PAGE_SIZE);

    } while (NumberOfPages != 0);

    UNLOCK_PFN (OldIrql);
    UNLOCK_WS_AND_ADDRESS_SPACE (Process);

    ExFreePool (Vad);
    MmUnlockPagableImageSection (ExPageLockHandle);
    return;
}

VOID
MiUnmapSinglePage (
    IN PVOID VirtualAddress
    )

/*++

Routine Description:

    This routine unmaps a single locked page which was previously mapped via
    an MiMapSinglePage call, returning the system PTE to the pool.

Arguments:

    VirtualAddress - Supplies the virtual address used to map the page.

Return Value:

    None.

Environment:

    Kernel mode. APC_LEVEL or below, base address is within system space.

    Callers: MiCloneProcessAddressSpace.

--*/

{
    PMMPTE PointerPte;

    PAGED_CODE ();

    ASSERT (MI_IS_PHYSICAL_ADDRESS (VirtualAddress) == 0);
    ASSERT (VirtualAddress >= MM_SYSTEM_RANGE_START);

    PointerPte = MiGetPteAddress (VirtualAddress);

    //
    // Releasing the single system PTE both invalidates the mapping and
    // returns the PTE to the system PTE pool.
    //

    MiReleaseSystemPtes (PointerPte, 1, SystemPteSpace);
}

LOGICAL
MiUnProtectFreeNonPagedPool (
    IN PVOID VirtualAddress,
    IN ULONG SizeInPages
    )

/*++

Routine Description:

    This function unprotects freed nonpaged pool previously protected by
    MiProtectFreeNonPagedPool, making the PTEs valid again.

Arguments:

    VirtualAddress - Supplies the freed pool address to unprotect.

    SizeInPages - Supplies the size of the request in pages - zero indicates
                  to keep going until there are no more protected PTEs (ie:
                  the caller doesn't know how many protected PTEs there are).

Return Value:

    TRUE if pages were unprotected, FALSE if not.

Environment:

    Kernel mode.

--*/

{
    PMMPTE PointerPte;
    MMPTE PteContents;
    ULONG PagesDone;

    PagesDone = 0;

    //
    // Unprotect the previously freed pool so it can be manipulated.
    // Physical (super-section mapped) addresses have no PTEs, so nothing
    // can be (or needs to be) unprotected for them.
    //

    if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress) == 0) {

        PointerPte = MiGetPteAddress ((PVOID)VirtualAddress);
        PteContents = *PointerPte;

        //
        // Invalid + Prototype is the protection signature; anything else
        // terminates the walk (which is how the SizeInPages == 0 caller
        // finds the end of the run).
        //

        while (PteContents.u.Hard.Valid == 0 &&
               PteContents.u.Soft.Prototype == 1) {

            PteContents.u.Hard.Valid = 1;
            PteContents.u.Soft.Prototype = 0;

            MI_WRITE_VALID_PTE (PointerPte, PteContents);

            PagesDone += 1;

            if (PagesDone == SizeInPages) {
                break;
            }

            PointerPte += 1;
            PteContents = *PointerPte;
        }
    }

    return (PagesDone != 0) ? TRUE : FALSE;
}

PVOID
MmAllocateContiguousMemory (
    IN SIZE_T NumberOfBytes,
    IN PHYSICAL_ADDRESS HighestAcceptableAddress
    )

/*++

Routine Description:

    This function allocates a range of physically contiguous non-paged pool.

    This routine is designed to be used by a driver's initialization
    routine to allocate a contiguous block of physical memory for
    issuing DMA requests from.

Arguments:

    NumberOfBytes - Supplies the number of bytes to allocate.

    HighestAcceptableAddress - Supplies the highest physical address
                               which is valid for the allocation.  For
                               example, if the device can only reference
                               physical memory in the lower 16MB this
                               value would be set to 0xFFFFFF (16Mb - 1).

Return Value:

    NULL - a contiguous range could not be found to satisfy the request.

    NON-NULL - Returns a pointer (virtual address in the nonpaged portion
               of the system) to the allocated physically contiguous
               memory.

Environment:

    Kernel mode, IRQL of DISPATCH_LEVEL or below.

--*/

{
    PFN_NUMBER HighestPfn;
    PVOID CallingAddress;
#if defined (_X86_)
    PVOID CallersCaller;

    //
    // x86 has no intrinsic return-address support here, so walk the stack.
    //

    RtlGetCallersAddress (&CallingAddress, &CallersCaller);
#else
    CallingAddress = (PVOID)_ReturnAddress ();
#endif

    HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);

    //
    // No lower bound, no boundary constraint - just cap the highest frame.
    //

    return MiAllocateContiguousMemory (NumberOfBytes,
                                       0,
                                       HighestPfn,
                                       0,
                                       CallingAddress);
}

PVOID
MmAllocateContiguousMemorySpecifyCache (
    IN SIZE_T NumberOfBytes,
    IN PHYSICAL_ADDRESS LowestAcceptableAddress,
    IN PHYSICAL_ADDRESS HighestAcceptableAddress,
    IN PHYSICAL_ADDRESS BoundaryAddressMultiple OPTIONAL,
    IN MEMORY_CACHING_TYPE CacheType
    )

/*++

Routine Description:

    This function allocates a range of physically contiguous non-cached,
    non-paged memory.  This is accomplished by using MmAllocateContiguousMemory
    which uses nonpaged pool virtual addresses to map the found memory chunk.

    Then this function establishes another map to the same physical addresses,
    but this alternate map is initialized as non-cached.  All references by
    our caller will be done through this alternate map.

    This routine is designed to be used by a driver's initialization
    routine to allocate a contiguous block of noncached physical memory for
    things like the AGP GART.

Arguments:

    NumberOfBytes - Supplies the number of bytes to allocate.

    LowestAcceptableAddress - Supplies the lowest physical address
                              which is valid for the allocation.  For
                              example, if the device can only reference
                              physical memory in the 8M to 16MB range, this
                              value would be set to 0x800000 (8Mb).

    HighestAcceptableAddress - Supplies the highest physical address
                               which is valid for the allocation.  For
                               example, if the device can only reference
                               physical memory below 16MB, this
                               value would be set to 0xFFFFFF (16Mb - 1).

    BoundaryAddressMultiple - Supplies the physical address multiple this
                              allocation must not cross.

    CacheType - Supplies the caching type for the alternate mapping.

Return Value:

    NULL - a contiguous range could not be found to satisfy the request.

    NON-NULL - Returns a pointer (virtual address in the nonpaged portion
               of the system) to the allocated physically contiguous
               memory.

Environment:

    Kernel mode, IRQL of DISPATCH_LEVEL or below.

--*/

{
    PVOID BaseAddress;
    PVOID NewVa;
    PFN_NUMBER LowestPfn;
    PFN_NUMBER HighestPfn;
    PFN_NUMBER BoundaryPfn;
    PMMPTE PointerPte;
    PHYSICAL_ADDRESS PhysicalAddress;
    PVOID CallingAddress;
#if defined (_X86_)
    PVOID CallersCaller;

    RtlGetCallersAddress (&CallingAddress, &CallersCaller);
#else
    CallingAddress = (PVOID)_ReturnAddress ();
#endif

    ASSERT (NumberOfBytes != 0);

    //
    // Round the lower bound up to a whole frame; the boundary multiple
    // must itself be page aligned or the request cannot be honored.
    //

    LowestPfn = (PFN_NUMBER)(LowestAcceptableAddress.QuadPart >> PAGE_SHIFT);
    if (BYTE_OFFSET(LowestAcceptableAddress.LowPart)) {
        LowestPfn += 1;
    }

    if (BYTE_OFFSET(BoundaryAddressMultiple.LowPart)) {
        return NULL;
    }

    BoundaryPfn = (PFN_NUMBER)(BoundaryAddressMultiple.QuadPart >> PAGE_SHIFT);

    HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);

    BaseAddress = MiAllocateContiguousMemory (NumberOfBytes,
                                              LowestPfn,
                                              HighestPfn,
                                              BoundaryPfn,
                                              CallingAddress);

    if (BaseAddress == NULL) {
        return NULL;
    }

    if (CacheType != MmCached) {

        //
        // We have an address range but it's cached.  Create an uncached
        // alternate mapping now.  Stash the original virtual address at the
        // end of the mapped range so we can unmap the nonpaged pool VAs and
        // the actual pages when the caller frees the memory.
        //

        PhysicalAddress = MmGetPhysicalAddress (BaseAddress);

        NewVa = MmMapIoSpace (PhysicalAddress,
                              NumberOfBytes + (2 * PAGE_SIZE),
                              CacheType);

        if (NewVa != NULL) {

            //
            // The two PTEs just past the data carry the original pool VA
            // and the byte count for the eventual free.
            //

            PointerPte = MiGetPteAddress (NewVa);

            PointerPte += ((NumberOfBytes + PAGE_SIZE - 1) >> PAGE_SHIFT);
            PointerPte->u.Long = (ULONG_PTR)BaseAddress;

            PointerPte += 1;
            PointerPte->u.Long = NumberOfBytes;

            KeSweepDcache (TRUE);
            BaseAddress = NewVa;
        }
        else {
            MmFreeContiguousMemory (BaseAddress);
            BaseAddress = NULL;
        }
    }

    return BaseAddress;
}

PVOID
MmAllocateIndependentPages (
    IN SIZE_T NumberOfBytes
    )

/*++

Routine Description:

    This function allocates a range of virtually contiguous nonpaged pages
    that can have independent page protections applied to each page.

Arguments:

    NumberOfBytes - Supplies the number of bytes to allocate.

Return Value:

    The virtual address of the memory or NULL if none could be allocated.

Environment:

    Kernel mode, IRQL of APC_LEVEL or below.

    Callers: KiI386PentiumLockErrataFixup.

--*/

{
    PFN_NUMBER NumberOfPages;
    PFN_NUMBER PagesLeft;
    PMMPTE PointerPte;
    MMPTE TempPte;
    PFN_NUMBER PageFrameIndex;
    PVOID BaseAddress;
    KIRQL OldIrql;

    NumberOfPages = BYTES_TO_PAGES (NumberOfBytes);

    PointerPte = MiReserveSystemPtes ((ULONG)NumberOfPages,
                                      SystemPteSpace,
                                      0,
                                      0,
                                      FALSE);
    if (PointerPte == NULL) {
        return NULL;
    }

    BaseAddress = (PVOID)MiGetVirtualAddressMappedByPte (PointerPte);

    LOCK_PFN (OldIrql);

    if ((SPFN_NUMBER)NumberOfPages > MI_NONPAGABLE_MEMORY_AVAILABLE()) {
        UNLOCK_PFN (OldIrql);
        MiReleaseSystemPtes (PointerPte, (ULONG)NumberOfPages, SystemPteSpace);
        return NULL;
    }

    MmResidentAvailablePages -= NumberOfPages;
    MM_BUMP_COUNTER(28, NumberOfPages);

    //
    // Fill each reserved PTE with a fresh page.  A separate loop counter
    // is used so NumberOfPages survives the loop: the original code reused
    // NumberOfPages here and consequently charged commitment (and tracked
    // commit) for zero pages after the loop drained it.
    //

    PagesLeft = NumberOfPages;

    do {
        ASSERT (PointerPte->u.Hard.Valid == 0);
        MiEnsureAvailablePageOrWait (NULL, NULL);
        PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));

        MI_MAKE_VALID_PTE (TempPte,
                           PageFrameIndex,
                           MM_READWRITE,
                           PointerPte);

        MI_SET_PTE_DIRTY (TempPte);
        MI_WRITE_VALID_PTE (PointerPte, TempPte);
        MiInitializePfn (PageFrameIndex, PointerPte, 1);

        PointerPte += 1;
        PagesLeft -= 1;
    } while (PagesLeft != 0);

    UNLOCK_PFN (OldIrql);

    //
    // Charge commitment for the full allocation (forced - the pages are
    // already in use at this point).
    //

    MiChargeCommitmentCantExpand (NumberOfPages, TRUE);

    MM_TRACK_COMMIT (MM_DBG_COMMIT_INDEPENDENT_PAGES, NumberOfPages);

    return BaseAddress;
}

PVOID
MmAllocateNonCachedMemory (
    IN SIZE_T NumberOfBytes
    )

/*++

Routine Description:

    This function allocates a range of noncached memory in
    the non-paged portion of the system address space.

    This routine is designed to be used by a driver's initialization
    routine to allocate a noncached block of virtual memory for
    various device specific buffers.

Arguments:

    NumberOfBytes - Supplies the number of bytes to allocate.

Return Value:

    NON-NULL - Returns a pointer (virtual address in the nonpaged portion
               of the system) to the allocated memory.

    NULL - The specified request could not be satisfied.

Environment:

    Kernel mode, IRQL of APC_LEVEL or below.

    Callers: IopGetDumpStack.

--*/

{
    PMMPTE PointerPte;
    MMPTE TempPte;
    PFN_NUMBER PagesWanted;
    PFN_NUMBER PagesLeft;
    PFN_NUMBER PageFrameIndex;
    PVOID BaseAddress;
    KIRQL OldIrql;

    ASSERT (NumberOfBytes != 0);

    PagesWanted = BYTES_TO_PAGES (NumberOfBytes);

    //
    // Obtain enough virtual space to map the pages.
    //

    PointerPte = MiReserveSystemPtes ((ULONG)PagesWanted,
                                      SystemPteSpace,
                                      0,
                                      0,
                                      FALSE);

    if (PointerPte == NULL) {
        return NULL;
    }

    //
    // Obtain backing commitment for the pages.
    //

    if (MiChargeCommitmentCantExpand (PagesWanted, FALSE) == FALSE) {
        MiReleaseSystemPtes (PointerPte, (ULONG)PagesWanted, SystemPteSpace);
        return NULL;
    }

    MM_TRACK_COMMIT (MM_DBG_COMMIT_NONCACHED_PAGES, PagesWanted);

    MmLockPagableSectionByHandle (ExPageLockHandle);

    //
    // Acquire the PFN mutex to synchronize access to the PFN database.
    //

    LOCK_PFN (OldIrql);

    //
    // Check to make sure the physical pages are available; unwind the
    // PTE reservation and commitment charge if not.
    //

    if ((SPFN_NUMBER)PagesWanted > MI_NONPAGABLE_MEMORY_AVAILABLE()) {
        UNLOCK_PFN (OldIrql);
        MmUnlockPagableImageSection (ExPageLockHandle);
        MiReleaseSystemPtes (PointerPte, (ULONG)PagesWanted, SystemPteSpace);
        MiReturnCommitment (PagesWanted);
        return NULL;
    }

#if defined(_IA64_)
    KeFlushEntireTb (FALSE, TRUE);
#endif

    MmResidentAvailablePages -= PagesWanted;
    MM_BUMP_COUNTER(4, PagesWanted);

    BaseAddress = (PVOID)MiGetVirtualAddressMappedByPte (PointerPte);

    //
    // Obtain enough pages to contain the allocation, mapping each one
    // through a noncached PTE.
    //

    PagesLeft = PagesWanted;

    do {
        ASSERT (PointerPte->u.Hard.Valid == 0);
        MiEnsureAvailablePageOrWait (NULL, NULL);
        PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE (PointerPte));

        MI_MAKE_VALID_PTE (TempPte,
                           PageFrameIndex,
                           MM_READWRITE,
                           PointerPte);

        MI_SET_PTE_DIRTY (TempPte);
        MI_DISABLE_CACHING (TempPte);
        MI_WRITE_VALID_PTE (PointerPte, TempPte);
        MiInitializePfn (PageFrameIndex, PointerPte, 1);

        PointerPte += 1;
        PagesLeft -= 1;
    } while (PagesLeft != 0);

    //
    // Flush any data for these pages out of the dcaches.
    //

#if !defined(_IA64_)
    KeSweepDcache (TRUE);
#else
    MiSweepCacheMachineDependent (BaseAddress, NumberOfBytes, MmNonCached);
#endif

    UNLOCK_PFN (OldIrql);
    MmUnlockPagableImageSection (ExPageLockHandle);

    return BaseAddress;
}

PMDL MmAllocatePagesForMdl IN PHYSICAL_ADDRESS  LowAddress,
IN PHYSICAL_ADDRESS  HighAddress,
IN PHYSICAL_ADDRESS  SkipBytes,
IN SIZE_T  TotalBytes
 

Definition at line 4270 of file iosup.c.

References ActiveAndValid, ADDRESS_AND_SIZE_TO_SPAN_PAGES, APC_LEVEL, ASSERT, _PHYSICAL_MEMORY_RUN::BasePage, _MMPFNLIST::Blink, _MMCOLOR_TABLES::Blink, BYTE_OFFSET, _MDL::ByteCount, DbgPrint, ExAllocatePoolWithTag, ExFreePool(), ExPageLockHandle, FALSE, _MMPFNLIST::Flink, _MMCOLOR_TABLES::Flink, FreePageList, LOCK_PFN, MI_MAGIC_AWE_PTEFRAME, MI_NONPAGABLE_MEMORY_AVAILABLE, MI_PFN_ELEMENT, MI_SET_PFN_DELETED, MiChargeCommitmentCantExpand(), MiLastCallColor, MiLastCallHighPage, MiLastCallLowPage, MiRestoreTransitionPte(), MiReturnCommitment(), MiUnlinkFreeOrZeroedPage(), MiUnlinkPageFromList(), MiZeroPhysicalPage(), MM_BUMP_COUNTER, MM_DBG_COMMIT_MDL_PAGES, MM_DEMAND_ZERO_WRITE_PTE, MM_EMPTY_LIST, MM_TRACK_COMMIT, MmCreateMdl(), MmDynamicMemoryMutex, MmFreePagesByColor, MmHighestPhysicalPage, MMLISTS, MmLockPagableSectionByHandle(), MmMdlPagesAllocated, MmPageLocationList, MmPfnDatabase, MmPhysicalMemoryBlock, MmResidentAvailablePages, MmSecondaryColors, MmUnlockPagableImageSection(), NonPagedPool, NULL, _PHYSICAL_MEMORY_DESCRIPTOR::NumberOfRuns, _MMPFN::OriginalPte, PAGE_SHIFT, PAGE_SIZE, _PHYSICAL_MEMORY_RUN::PageCount, _MMPFN::PteFrame, _PHYSICAL_MEMORY_DESCRIPTOR::Run, StandbyPageList, TRUE, _MMPTE::u, _MMPFN::u1, _MMPFN::u2, _MMPFN::u3, UNLOCK_PFN, and ZeroedPageList.

Referenced by NtAllocateUserPhysicalPages().

04279 : 04280 04281 This routine searches the PFN database for free, zeroed or standby pages 04282 to satisfy the request. This does not map the pages - it just allocates 04283 them and puts them into an MDL. It is expected that our caller will 04284 map the MDL as needed. 04285 04286 NOTE: this routine may return an MDL mapping a smaller number of bytes 04287 than the amount requested. It is the caller's responsibility to check the 04288 MDL upon return for the size actually allocated. 04289 04290 These pages comprise physical non-paged memory and are zero-filled. 04291 04292 This routine is designed to be used by an AGP driver to obtain physical 04293 memory in a specified range since hardware may provide substantial 04294 performance wins depending on where the backing memory is allocated. 04295 04296 Arguments: 04297 04298 LowAddress - Supplies the low physical address of the first range that 04299 the allocated pages can come from. 04300 04301 HighAddress - Supplies the high physical address of the first range that 04302 the allocated pages can come from. 04303 04304 SkipBytes - Number of bytes to skip (from the Low Address) to get to the 04305 next physical address range that allocated pages can come from. 04306 04307 TotalBytes - Supplies the number of bytes to allocate. 04308 04309 Return Value: 04310 04311 MDL - An MDL mapping a range of pages in the specified range. 04312 This may map less memory than the caller requested if the full amount 04313 is not currently available. 04314 04315 NULL - No pages in the specified range OR not enough virtually contiguous 04316 nonpaged pool for the MDL is available at this time. 04317 04318 Environment: 04319 04320 Kernel mode, IRQL of APC_LEVEL or below. 
04321 04322 --*/ 04323 04324 { 04325 PMDL MemoryDescriptorList; 04326 PMDL MemoryDescriptorList2; 04327 PMMPFN Pfn1; 04328 PMMPFN PfnNextColored; 04329 PMMPFN PfnNextFlink; 04330 PMMPFN PfnLastColored; 04331 KIRQL OldIrql; 04332 PFN_NUMBER start; 04333 PFN_NUMBER count; 04334 PFN_NUMBER Page; 04335 PFN_NUMBER LastPage; 04336 PFN_NUMBER found; 04337 PFN_NUMBER BasePage; 04338 PFN_NUMBER LowPage; 04339 PFN_NUMBER HighPage; 04340 PFN_NUMBER SizeInPages; 04341 PFN_NUMBER MdlPageSpan; 04342 PFN_NUMBER SkipPages; 04343 PFN_NUMBER MaxPages; 04344 PPFN_NUMBER MdlPage; 04345 PPFN_NUMBER LastMdlPage; 04346 ULONG Color; 04347 PMMCOLOR_TABLES ColorHead; 04348 MMLISTS MemoryList; 04349 PPFN_NUMBER FirstMdlPageToZero; 04350 PFN_NUMBER LowPage1; 04351 PFN_NUMBER HighPage1; 04352 LOGICAL PagePlacementOk; 04353 PFN_NUMBER PageNextColored; 04354 PFN_NUMBER PageNextFlink; 04355 PFN_NUMBER PageLastColored; 04356 PMMPFNLIST ListHead; 04357 PPFN_NUMBER ColorAnchorsHead; 04358 PPFN_NUMBER ColorAnchor; 04359 ULONG FullAnchorCount; 04360 #if DBG 04361 ULONG FinishedCount; 04362 #endif 04363 04364 ASSERT (KeGetCurrentIrql() <= APC_LEVEL); 04365 04366 // 04367 // The skip increment must be a page-size multiple. 04368 // 04369 04370 if (BYTE_OFFSET(SkipBytes.LowPart)) { 04371 return (PMDL)0; 04372 } 04373 04374 MmLockPagableSectionByHandle (ExPageLockHandle); 04375 04376 LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT); 04377 HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT); 04378 04379 // 04380 // Maximum allocation size is constrained by the MDL ByteCount field. 
04381 // 04382 04383 if (TotalBytes > (SIZE_T)((ULONG)(MAXULONG - PAGE_SIZE))) { 04384 TotalBytes = (SIZE_T)((ULONG)(MAXULONG - PAGE_SIZE)); 04385 } 04386 04387 SizeInPages = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes); 04388 04389 SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT); 04390 04391 BasePage = LowPage; 04392 04393 LOCK_PFN (OldIrql); 04394 04395 MaxPages = MI_NONPAGABLE_MEMORY_AVAILABLE() - 1024; 04396 04397 if ((SPFN_NUMBER)MaxPages <= 0) { 04398 SizeInPages = 0; 04399 } 04400 else if (SizeInPages > MaxPages) { 04401 SizeInPages = MaxPages; 04402 } 04403 04404 if (SizeInPages == 0) { 04405 UNLOCK_PFN (OldIrql); 04406 MmUnlockPagableImageSection (ExPageLockHandle); 04407 return (PMDL)0; 04408 } 04409 04410 UNLOCK_PFN (OldIrql); 04411 04412 #if DBG 04413 if (SizeInPages < (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes)) { 04414 if (MiPrintAwe != 0) { 04415 DbgPrint("MmAllocatePagesForMdl1: unable to get %p pages, trying for %p instead\n", 04416 ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes), 04417 SizeInPages); 04418 } 04419 } 04420 #endif 04421 04422 // 04423 // Allocate an MDL to return the pages in. 04424 // 04425 04426 do { 04427 MemoryDescriptorList = MmCreateMdl ((PMDL)0, 04428 (PVOID)0, 04429 SizeInPages << PAGE_SHIFT); 04430 04431 if (MemoryDescriptorList != (PMDL)0) { 04432 break; 04433 } 04434 SizeInPages -= (SizeInPages >> 4); 04435 } while (SizeInPages != 0); 04436 04437 if (MemoryDescriptorList == (PMDL)0) { 04438 MmUnlockPagableImageSection (ExPageLockHandle); 04439 return (PMDL)0; 04440 } 04441 04442 // 04443 // Allocate a list of colored anchors. 
04444 // 04445 04446 ColorAnchorsHead = (PPFN_NUMBER) ExAllocatePoolWithTag (NonPagedPool, 04447 MmSecondaryColors * sizeof (PFN_NUMBER), 04448 'ldmM'); 04449 04450 if (ColorAnchorsHead == NULL) { 04451 MmUnlockPagableImageSection (ExPageLockHandle); 04452 ExFreePool (MemoryDescriptorList); 04453 return (PMDL)0; 04454 } 04455 04456 MdlPageSpan = SizeInPages; 04457 04458 // 04459 // Recalculate as the PFN lock was dropped. 04460 // 04461 04462 start = 0; 04463 found = 0; 04464 04465 MdlPage = (PPFN_NUMBER)(MemoryDescriptorList + 1); 04466 04467 ExAcquireFastMutex (&MmDynamicMemoryMutex); 04468 04469 LOCK_PFN (OldIrql); 04470 04471 MaxPages = MI_NONPAGABLE_MEMORY_AVAILABLE() - 1024; 04472 04473 if ((SPFN_NUMBER)MaxPages <= 0) { 04474 SizeInPages = 0; 04475 } 04476 else if (SizeInPages > MaxPages) { 04477 SizeInPages = MaxPages; 04478 } 04479 04480 if (SizeInPages == 0) { 04481 UNLOCK_PFN (OldIrql); 04482 ExReleaseFastMutex (&MmDynamicMemoryMutex); 04483 MmUnlockPagableImageSection (ExPageLockHandle); 04484 ExFreePool (MemoryDescriptorList); 04485 ExFreePool (ColorAnchorsHead); 04486 return (PMDL)0; 04487 } 04488 04489 // 04490 // Ensure there is enough commit prior to allocating the pages as this 04491 // is not a nonpaged pool allocation but rather a dynamic MDL allocation. 
04492 // 04493 04494 if (MiChargeCommitmentCantExpand (SizeInPages, FALSE) == FALSE) { 04495 UNLOCK_PFN (OldIrql); 04496 ExReleaseFastMutex (&MmDynamicMemoryMutex); 04497 MmUnlockPagableImageSection (ExPageLockHandle); 04498 ExFreePool (MemoryDescriptorList); 04499 ExFreePool (ColorAnchorsHead); 04500 return (PMDL)0; 04501 } 04502 04503 MM_TRACK_COMMIT (MM_DBG_COMMIT_MDL_PAGES, SizeInPages); 04504 04505 if ((MiLastCallLowPage != LowPage) || (MiLastCallHighPage != HighPage)) { 04506 MiLastCallColor = 0; 04507 } 04508 04509 MiLastCallLowPage = LowPage; 04510 MiLastCallHighPage = HighPage; 04511 04512 FirstMdlPageToZero = MdlPage; 04513 04514 do { 04515 // 04516 // Grab all zeroed (and then free) pages first directly from the 04517 // colored lists to avoid multiple walks down these singly linked lists. 04518 // Then snatch transition pages as needed. In addition to optimizing 04519 // the speed of the removals this also avoids cannibalizing the page 04520 // cache unless it's absolutely needed. 04521 // 04522 04523 for (MemoryList = ZeroedPageList; MemoryList <= FreePageList; MemoryList += 1) { 04524 04525 ListHead = MmPageLocationList[MemoryList]; 04526 04527 FullAnchorCount = 0; 04528 04529 for (Color = 0; Color < MmSecondaryColors; Color += 1) { 04530 ColorAnchorsHead[Color] = MM_EMPTY_LIST; 04531 } 04532 04533 Color = MiLastCallColor; 04534 ASSERT (Color < MmSecondaryColors); 04535 04536 do { 04537 04538 ColorHead = &MmFreePagesByColor[MemoryList][Color]; 04539 ColorAnchor = &ColorAnchorsHead[Color]; 04540 04541 Color += 1; 04542 if (Color >= MmSecondaryColors) { 04543 Color = 0; 04544 } 04545 04546 if (*ColorAnchor == (MM_EMPTY_LIST - 1)) { 04547 04548 // 04549 // This colored list has already been completely searched. 04550 // 04551 04552 continue; 04553 } 04554 04555 if (ColorHead->Flink == MM_EMPTY_LIST) { 04556 04557 // 04558 // This colored list is empty. 
04559 // 04560 04561 FullAnchorCount += 1; 04562 *ColorAnchor = (MM_EMPTY_LIST - 1); 04563 continue; 04564 } 04565 04566 while (ColorHead->Flink != MM_EMPTY_LIST) { 04567 04568 Page = ColorHead->Flink; 04569 04570 Pfn1 = MI_PFN_ELEMENT(Page); 04571 04572 ASSERT ((MMLISTS)Pfn1->u3.e1.PageLocation == MemoryList); 04573 04574 // 04575 // See if the page is within the caller's page constraints. 04576 // 04577 04578 PagePlacementOk = FALSE; 04579 04580 LowPage1 = LowPage; 04581 HighPage1 = HighPage; 04582 04583 do { 04584 if ((Page >= LowPage1) && (Page <= HighPage1)) { 04585 PagePlacementOk = TRUE; 04586 break; 04587 } 04588 04589 if (SkipPages == 0) { 04590 break; 04591 } 04592 04593 LowPage1 += SkipPages; 04594 HighPage1 += SkipPages; 04595 04596 if (LowPage1 > MmHighestPhysicalPage) { 04597 break; 04598 } 04599 if (HighPage1 > MmHighestPhysicalPage) { 04600 HighPage1 = MmHighestPhysicalPage; 04601 } 04602 } while (TRUE); 04603 04604 // 04605 // The Flink and Blink must be nonzero here for the page 04606 // to be on the listhead. Only code that scans the 04607 // MmPhysicalMemoryBlock has to check for the zero case. 04608 // 04609 04610 ASSERT (Pfn1->u1.Flink != 0); 04611 ASSERT (Pfn1->u2.Blink != 0); 04612 04613 if (PagePlacementOk == FALSE) { 04614 04615 // 04616 // Put page on end of list and if first time, save pfn. 04617 // 04618 04619 if (*ColorAnchor == MM_EMPTY_LIST) { 04620 *ColorAnchor = Page; 04621 } 04622 else if (Page == *ColorAnchor) { 04623 04624 // 04625 // No more pages available in this colored chain. 04626 // 04627 04628 FullAnchorCount += 1; 04629 *ColorAnchor = (MM_EMPTY_LIST - 1); 04630 break; 04631 } 04632 04633 // 04634 // If the colored chain has more than one entry then 04635 // put this page on the end. 04636 // 04637 04638 PageNextColored = (PFN_NUMBER)Pfn1->OriginalPte.u.Long; 04639 04640 if (PageNextColored == MM_EMPTY_LIST) { 04641 04642 // 04643 // No more pages available in this colored chain. 
04644 // 04645 04646 FullAnchorCount += 1; 04647 *ColorAnchor = (MM_EMPTY_LIST - 1); 04648 break; 04649 } 04650 04651 ASSERT (Pfn1->u1.Flink != 0); 04652 ASSERT (Pfn1->u1.Flink != MM_EMPTY_LIST); 04653 ASSERT (Pfn1->PteFrame != MI_MAGIC_AWE_PTEFRAME); 04654 04655 PfnNextColored = MI_PFN_ELEMENT(PageNextColored); 04656 ASSERT ((MMLISTS)PfnNextColored->u3.e1.PageLocation == MemoryList); 04657 ASSERT (PfnNextColored->PteFrame != MI_MAGIC_AWE_PTEFRAME); 04658 04659 // 04660 // Adjust the free page list so Page 04661 // follows PageNextFlink. 04662 // 04663 04664 PageNextFlink = Pfn1->u1.Flink; 04665 PfnNextFlink = MI_PFN_ELEMENT(PageNextFlink); 04666 04667 ASSERT ((MMLISTS)PfnNextFlink->u3.e1.PageLocation == MemoryList); 04668 ASSERT (PfnNextFlink->PteFrame != MI_MAGIC_AWE_PTEFRAME); 04669 04670 PfnLastColored = ColorHead->Blink; 04671 ASSERT (PfnLastColored != (PMMPFN)MM_EMPTY_LIST); 04672 ASSERT (PfnLastColored->OriginalPte.u.Long == MM_EMPTY_LIST); 04673 ASSERT (PfnLastColored->PteFrame != MI_MAGIC_AWE_PTEFRAME); 04674 ASSERT (PfnLastColored->u2.Blink != MM_EMPTY_LIST); 04675 04676 ASSERT ((MMLISTS)PfnLastColored->u3.e1.PageLocation == MemoryList); 04677 PageLastColored = PfnLastColored - MmPfnDatabase; 04678 04679 if (ListHead->Flink == Page) { 04680 04681 ASSERT (Pfn1->u2.Blink == MM_EMPTY_LIST); 04682 ASSERT (ListHead->Blink != Page); 04683 04684 ListHead->Flink = PageNextFlink; 04685 04686 PfnNextFlink->u2.Blink = MM_EMPTY_LIST; 04687 } 04688 else { 04689 04690 ASSERT (Pfn1->u2.Blink != MM_EMPTY_LIST); 04691 ASSERT ((MMLISTS)(MI_PFN_ELEMENT((MI_PFN_ELEMENT(Pfn1->u2.Blink)->u1.Flink)))->PteFrame != MI_MAGIC_AWE_PTEFRAME); 04692 ASSERT ((MMLISTS)(MI_PFN_ELEMENT((MI_PFN_ELEMENT(Pfn1->u2.Blink)->u1.Flink)))->u3.e1.PageLocation == MemoryList); 04693 04694 MI_PFN_ELEMENT(Pfn1->u2.Blink)->u1.Flink = PageNextFlink; 04695 PfnNextFlink->u2.Blink = Pfn1->u2.Blink; 04696 } 04697 04698 #if DBG 04699 if (PfnLastColored->u1.Flink == MM_EMPTY_LIST) { 04700 ASSERT 
(ListHead->Blink == PageLastColored); 04701 } 04702 #endif 04703 04704 Pfn1->u1.Flink = PfnLastColored->u1.Flink; 04705 Pfn1->u2.Blink = PageLastColored; 04706 04707 if (ListHead->Blink == PageLastColored) { 04708 ListHead->Blink = Page; 04709 } 04710 04711 // 04712 // Adjust the colored chains. 04713 // 04714 04715 if (PfnLastColored->u1.Flink != MM_EMPTY_LIST) { 04716 ASSERT (MI_PFN_ELEMENT(PfnLastColored->u1.Flink)->PteFrame != MI_MAGIC_AWE_PTEFRAME); 04717 ASSERT ((MMLISTS)(MI_PFN_ELEMENT(PfnLastColored->u1.Flink)->u3.e1.PageLocation) == MemoryList); 04718 MI_PFN_ELEMENT(PfnLastColored->u1.Flink)->u2.Blink = Page; 04719 } 04720 04721 PfnLastColored->u1.Flink = Page; 04722 04723 ColorHead->Flink = PageNextColored; 04724 Pfn1->OriginalPte.u.Long = MM_EMPTY_LIST; 04725 04726 ASSERT (PfnLastColored->OriginalPte.u.Long == MM_EMPTY_LIST); 04727 PfnLastColored->OriginalPte.u.Long = Page; 04728 ColorHead->Blink = Pfn1; 04729 04730 continue; 04731 } 04732 04733 found += 1; 04734 ASSERT (Pfn1->u3.e1.ReadInProgress == 0); 04735 MiUnlinkFreeOrZeroedPage (Page); 04736 Pfn1->u3.e1.PageColor = 0; 04737 04738 Pfn1->u3.e2.ReferenceCount = 1; 04739 Pfn1->u2.ShareCount = 1; 04740 MI_SET_PFN_DELETED(Pfn1); 04741 Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE; 04742 #if DBG 04743 Pfn1->PteFrame = MI_MAGIC_AWE_PTEFRAME; 04744 #endif 04745 Pfn1->u3.e1.PageLocation = ActiveAndValid; 04746 04747 Pfn1->u3.e1.StartOfAllocation = 1; 04748 Pfn1->u3.e1.EndOfAllocation = 1; 04749 Pfn1->u3.e1.VerifierAllocation = 0; 04750 Pfn1->u3.e1.LargeSessionAllocation = 0; 04751 04752 *MdlPage = Page; 04753 MdlPage += 1; 04754 04755 if (found == SizeInPages) { 04756 04757 // 04758 // All the pages requested are available. 
04759 // 04760 04761 if (MemoryList == ZeroedPageList) { 04762 FirstMdlPageToZero = MdlPage; 04763 MiLastCallColor = Color; 04764 } 04765 04766 #if DBG 04767 FinishedCount = 0; 04768 for (Color = 0; Color < MmSecondaryColors; Color += 1) { 04769 if (ColorAnchorsHead[Color] == (MM_EMPTY_LIST - 1)) { 04770 FinishedCount += 1; 04771 } 04772 } 04773 ASSERT (FinishedCount == FullAnchorCount); 04774 #endif 04775 04776 goto pass2_done; 04777 } 04778 04779 // 04780 // March on to the next colored chain so the overall 04781 // allocation round-robins the page colors. 04782 // 04783 04784 break; 04785 } 04786 04787 } while (FullAnchorCount != MmSecondaryColors); 04788 04789 #if DBG 04790 FinishedCount = 0; 04791 for (Color = 0; Color < MmSecondaryColors; Color += 1) { 04792 if (ColorAnchorsHead[Color] == (MM_EMPTY_LIST - 1)) { 04793 FinishedCount += 1; 04794 } 04795 } 04796 ASSERT (FinishedCount == FullAnchorCount); 04797 #endif 04798 04799 if (MemoryList == ZeroedPageList) { 04800 FirstMdlPageToZero = MdlPage; 04801 } 04802 04803 MiLastCallColor = 0; 04804 } 04805 04806 start = 0; 04807 04808 do { 04809 04810 count = MmPhysicalMemoryBlock->Run[start].PageCount; 04811 Page = MmPhysicalMemoryBlock->Run[start].BasePage; 04812 04813 if (count != 0) { 04814 04815 // 04816 // Close the gaps, then examine the range for a fit. 
04817 // 04818 04819 LastPage = Page + count; 04820 04821 if (LastPage - 1 > HighPage) { 04822 LastPage = HighPage + 1; 04823 } 04824 04825 if (Page < LowPage) { 04826 Page = LowPage; 04827 } 04828 04829 if ((Page < LastPage) && 04830 (Page >= MmPhysicalMemoryBlock->Run[start].BasePage) && 04831 (LastPage <= MmPhysicalMemoryBlock->Run[start].BasePage + 04832 MmPhysicalMemoryBlock->Run[start].PageCount)) { 04833 04834 Pfn1 = MI_PFN_ELEMENT (Page); 04835 do { 04836 04837 if (Pfn1->u3.e1.PageLocation == StandbyPageList) { 04838 04839 if ((Pfn1->u1.Flink != 0) && 04840 (Pfn1->u2.Blink != 0) && 04841 (Pfn1->u3.e2.ReferenceCount == 0)) { 04842 04843 ASSERT (Pfn1->u3.e1.ReadInProgress == 0); 04844 04845 found += 1; 04846 04847 // 04848 // This page is in the desired range - grab it. 04849 // 04850 04851 MiUnlinkPageFromList (Pfn1); 04852 MiRestoreTransitionPte (Page); 04853 04854 Pfn1->u3.e1.PageColor = 0; 04855 04856 Pfn1->u3.e2.ReferenceCount = 1; 04857 Pfn1->u2.ShareCount = 1; 04858 MI_SET_PFN_DELETED(Pfn1); 04859 Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE; 04860 #if DBG 04861 Pfn1->PteFrame = MI_MAGIC_AWE_PTEFRAME; 04862 #endif 04863 Pfn1->u3.e1.PageLocation = ActiveAndValid; 04864 04865 Pfn1->u3.e1.StartOfAllocation = 1; 04866 Pfn1->u3.e1.EndOfAllocation = 1; 04867 Pfn1->u3.e1.VerifierAllocation = 0; 04868 Pfn1->u3.e1.LargeSessionAllocation = 0; 04869 04870 *MdlPage = Page; 04871 MdlPage += 1; 04872 04873 if (found == SizeInPages) { 04874 04875 // 04876 // All the pages requested are available. 
04877 // 04878 04879 goto pass2_done; 04880 } 04881 } 04882 } 04883 Page += 1; 04884 Pfn1 += 1; 04885 04886 } while (Page < LastPage); 04887 } 04888 } 04889 start += 1; 04890 } while (start != MmPhysicalMemoryBlock->NumberOfRuns); 04891 04892 if (SkipPages == 0) { 04893 break; 04894 } 04895 LowPage += SkipPages; 04896 HighPage += SkipPages; 04897 if (LowPage > MmHighestPhysicalPage) { 04898 break; 04899 } 04900 if (HighPage > MmHighestPhysicalPage) { 04901 HighPage = MmHighestPhysicalPage; 04902 } 04903 } while (1); 04904 04905 pass2_done: 04906 04907 MmMdlPagesAllocated += found; 04908 04909 MmResidentAvailablePages -= found; 04910 MM_BUMP_COUNTER(34, found); 04911 04912 UNLOCK_PFN (OldIrql); 04913 04914 ExReleaseFastMutex (&MmDynamicMemoryMutex); 04915 MmUnlockPagableImageSection (ExPageLockHandle); 04916 04917 ExFreePool (ColorAnchorsHead); 04918 04919 if (found != SizeInPages) { 04920 ASSERT (found < SizeInPages); 04921 MiReturnCommitment (SizeInPages - found); 04922 MM_TRACK_COMMIT (MM_DBG_COMMIT_MDL_PAGES, 0 - (SizeInPages - found)); 04923 } 04924 04925 if (found == 0) { 04926 ExFreePool (MemoryDescriptorList); 04927 return (PMDL)0; 04928 } 04929 04930 MemoryDescriptorList->ByteCount = (ULONG)(found << PAGE_SHIFT); 04931 04932 if (found != SizeInPages) { 04933 *MdlPage = MM_EMPTY_LIST; 04934 } 04935 04936 // 04937 // If the number of pages allocated was substantially less than the 04938 // initial request amount, attempt to allocate a smaller MDL to save 04939 // pool. 
04940 // 04941 04942 if ((MdlPageSpan - found) > ((4 * PAGE_SIZE) / sizeof (PFN_NUMBER))) { 04943 MemoryDescriptorList2 = MmCreateMdl ((PMDL)0, 04944 (PVOID)0, 04945 found << PAGE_SHIFT); 04946 04947 if (MemoryDescriptorList2 != (PMDL)0) { 04948 RtlMoveMemory ((PVOID)(MemoryDescriptorList2 + 1), 04949 (PVOID)(MemoryDescriptorList + 1), 04950 found * sizeof (PFN_NUMBER)); 04951 FirstMdlPageToZero = (PPFN_NUMBER)(MemoryDescriptorList2 + 1) + 04952 (FirstMdlPageToZero - 04953 (PPFN_NUMBER)(MemoryDescriptorList + 1)); 04954 ExFreePool (MemoryDescriptorList); 04955 MemoryDescriptorList = MemoryDescriptorList2; 04956 } 04957 } 04958 04959 MdlPage = (PPFN_NUMBER)(MemoryDescriptorList + 1); 04960 LastMdlPage = MdlPage + found; 04961 04962 #if DBG 04963 // 04964 // Ensure all pages are within the caller's page constraints. 04965 // 04966 04967 LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT); 04968 HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT); 04969 04970 while (MdlPage < FirstMdlPageToZero) { 04971 Page = *MdlPage; 04972 PagePlacementOk = FALSE; 04973 LowPage1 = LowPage; 04974 HighPage1 = HighPage; 04975 04976 do { 04977 if ((Page >= LowPage1) && (Page <= HighPage1)) { 04978 PagePlacementOk = TRUE; 04979 break; 04980 } 04981 04982 if (SkipPages == 0) { 04983 break; 04984 } 04985 04986 LowPage1 += SkipPages; 04987 HighPage1 += SkipPages; 04988 04989 if (LowPage1 > MmHighestPhysicalPage) { 04990 break; 04991 } 04992 if (HighPage1 > MmHighestPhysicalPage) { 04993 HighPage1 = MmHighestPhysicalPage; 04994 } 04995 } while (TRUE); 04996 04997 ASSERT (PagePlacementOk == TRUE); 04998 Pfn1 = MI_PFN_ELEMENT(*MdlPage); 04999 ASSERT (Pfn1->PteFrame == MI_MAGIC_AWE_PTEFRAME); 05000 MdlPage += 1; 05001 } 05002 #endif 05003 05004 while (FirstMdlPageToZero < LastMdlPage) { 05005 05006 #if DBG 05007 // 05008 // Ensure all pages are within the caller's page constraints. 
05009 // 05010 05011 Page = *FirstMdlPageToZero; 05012 05013 PagePlacementOk = FALSE; 05014 LowPage1 = LowPage; 05015 HighPage1 = HighPage; 05016 05017 do { 05018 if ((Page >= LowPage1) && (Page <= HighPage1)) { 05019 PagePlacementOk = TRUE; 05020 break; 05021 } 05022 05023 if (SkipPages == 0) { 05024 break; 05025 } 05026 05027 LowPage1 += SkipPages; 05028 HighPage1 += SkipPages; 05029 05030 if (LowPage1 > MmHighestPhysicalPage) { 05031 break; 05032 } 05033 if (HighPage1 > MmHighestPhysicalPage) { 05034 HighPage1 = MmHighestPhysicalPage; 05035 } 05036 } while (TRUE); 05037 05038 ASSERT (PagePlacementOk == TRUE); 05039 Pfn1 = MI_PFN_ELEMENT(*FirstMdlPageToZero); 05040 ASSERT (Pfn1->PteFrame == MI_MAGIC_AWE_PTEFRAME); 05041 #endif 05042 MiZeroPhysicalPage (*FirstMdlPageToZero, 0); 05043 FirstMdlPageToZero += 1; 05044 } 05045 05046 return MemoryDescriptorList; 05047 }

VOID MmBuildMdlForNonPagedPool ( IN OUT PMDL  MemoryDescriptorList  ) 
 

Definition at line 1500 of file iosup.c.

References ASSERT, MDL_MAPPED_TO_SYSTEM_VA, MDL_PAGES_LOCKED, MDL_PARTIAL, MDL_SOURCE_IS_NONPAGED_POOL, MI_CONVERT_PHYSICAL_TO_PFN, MI_GET_PAGE_FRAME_FROM_PTE, MI_IS_PHYSICAL_ADDRESS, MiGetPteAddress, MmIsNonPagedSystemAddressValid(), and NULL.

Referenced by CcZeroData(), MiCreateImageFileMap(), and UdfPrepareBuffers().

01506 : 01507 01508 This routine fills in the "pages" portion of the MDL using the PFN 01509 numbers corresponding the buffers which resides in non-paged pool. 01510 01511 Unlike MmProbeAndLockPages, there is no corresponding unlock as no 01512 reference counts are incremented as the buffers being in nonpaged 01513 pool are always resident. 01514 01515 Arguments: 01516 01517 MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List 01518 (MDL). The supplied MDL must supply a virtual 01519 address, byte offset and length field. The 01520 physical page portion of the MDL is updated when 01521 the pages are locked in memory. The virtual 01522 address must be within the non-paged portion 01523 of the system space. 01524 01525 Return Value: 01526 01527 None. 01528 01529 Environment: 01530 01531 Kernel mode, IRQL of DISPATCH_LEVEL or below. 01532 01533 --*/ 01534 01535 { 01536 PPFN_NUMBER Page; 01537 PMMPTE PointerPte; 01538 PMMPTE LastPte; 01539 PVOID EndVa; 01540 PFN_NUMBER PageFrameIndex; 01541 01542 Page = (PPFN_NUMBER)(MemoryDescriptorList + 1); 01543 01544 ASSERT (MemoryDescriptorList->ByteCount != 0); 01545 ASSERT ((MemoryDescriptorList->MdlFlags & ( 01546 MDL_PAGES_LOCKED | 01547 MDL_MAPPED_TO_SYSTEM_VA | 01548 MDL_SOURCE_IS_NONPAGED_POOL | 01549 MDL_PARTIAL)) == 0); 01550 01551 MemoryDescriptorList->Process = (PEPROCESS)NULL; 01552 01553 // 01554 // Endva is last byte of the buffer. 
01555 // 01556 01557 MemoryDescriptorList->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL; 01558 01559 MemoryDescriptorList->MappedSystemVa = 01560 (PVOID)((PCHAR)MemoryDescriptorList->StartVa + 01561 MemoryDescriptorList->ByteOffset); 01562 01563 EndVa = (PVOID)(((PCHAR)MemoryDescriptorList->MappedSystemVa + 01564 MemoryDescriptorList->ByteCount - 1)); 01565 01566 LastPte = MiGetPteAddress (EndVa); 01567 01568 ASSERT (MmIsNonPagedSystemAddressValid (MemoryDescriptorList->StartVa)); 01569 01570 PointerPte = MiGetPteAddress (MemoryDescriptorList->StartVa); 01571 01572 if (MI_IS_PHYSICAL_ADDRESS(EndVa)) { 01573 PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN ( 01574 MemoryDescriptorList->StartVa); 01575 01576 do { 01577 *Page = PageFrameIndex; 01578 Page += 1; 01579 PageFrameIndex += 1; 01580 PointerPte += 1; 01581 } while (PointerPte <= LastPte); 01582 } else { 01583 do { 01584 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 01585 *Page = PageFrameIndex; 01586 Page += 1; 01587 PointerPte += 1; 01588 } while (PointerPte <= LastPte); 01589 } 01590 01591 return; 01592 }

PMDL MmCreateMdl ( IN PMDL  MemoryDescriptorList  OPTIONAL,
IN PVOID  Base,
IN SIZE_T  Length
 ) 

Definition at line 5852 of file iosup.c.

References ExAllocatePoolWithTag, MmInitializeMdl, MmSizeOfMdl(), NonPagedPool, NonPagedPoolMustSucceed, and POOL_BUDDY_MAX.

Referenced by IoWriteCrashDump(), MiCheckForCrashDump(), MiCreateImageFileMap(), MmAllocatePagesForMdl(), and NtFreeUserPhysicalPages().

05860 : 05861 05862 This function optionally allocates and initializes an MDL. 05863 05864 Arguments: 05865 05866 MemoryDescriptorList - Optionally supplies the address of the MDL 05867 to initialize. If this address is supplied as NULL 05868 an MDL is allocated from non-paged pool and 05869 initialized. 05870 05871 Base - Supplies the base virtual address for the buffer. 05872 05873 Length - Supplies the size of the buffer in bytes. 05874 05875 Return Value: 05876 05877 Returns the address of the initialized MDL. 05878 05879 Environment: 05880 05881 Kernel mode, IRQL of DISPATCH_LEVEL or below. 05882 05883 --*/ 05884 05885 { 05886 SIZE_T MdlSize; 05887 05888 MdlSize = MmSizeOfMdl( Base, Length ); 05889 05890 if (!ARGUMENT_PRESENT( MemoryDescriptorList )) { 05891 05892 // 05893 // The pool manager doesn't like being called with large requests 05894 // marked MustSucceed, so try the normal nonpaged if the 05895 // request is large. 05896 // 05897 05898 if (MdlSize > POOL_BUDDY_MAX) { 05899 MemoryDescriptorList = (PMDL)ExAllocatePoolWithTag ( 05900 NonPagedPool, 05901 MdlSize, 05902 'ldmM'); 05903 if (MemoryDescriptorList == (PMDL)0) { 05904 return (PMDL)0; 05905 } 05906 } 05907 else { 05908 MemoryDescriptorList = (PMDL)ExAllocatePoolWithTag ( 05909 NonPagedPoolMustSucceed, 05910 MdlSize, 05911 'ldmM'); 05912 } 05913 } 05914 05915 MmInitializeMdl (MemoryDescriptorList, Base, Length); 05916 return MemoryDescriptorList; 05917 }

VOID MmFreeContiguousMemory ( IN PVOID  BaseAddress  ) 
 

Definition at line 5372 of file iosup.c.

References ExFreePool(), MiFreeLowMemory(), MiNoLowMemory, PAGED_CODE, and TRUE.

Referenced by IoFreeDumpStack(), Ki386ClearIdentityMap(), and MmAllocateContiguousMemorySpecifyCache().

05378 : 05379 05380 This function deallocates a range of physically contiguous non-paged 05381 pool which was allocated with the MmAllocateContiguousMemory function. 05382 05383 Arguments: 05384 05385 BaseAddress - Supplies the base virtual address where the physical 05386 address was previously mapped. 05387 05388 Return Value: 05389 05390 None. 05391 05392 Environment: 05393 05394 Kernel mode, IRQL of APC_LEVEL or below. 05395 05396 --*/ 05397 05398 { 05399 PAGED_CODE(); 05400 05401 #if defined (_X86PAE_) 05402 if (MiNoLowMemory == TRUE) { 05403 if (MiFreeLowMemory (BaseAddress, 'tnoC') == TRUE) { 05404 return; 05405 } 05406 } 05407 #endif 05408 05409 ExFreePool (BaseAddress); 05410 }

VOID MmFreeContiguousMemorySpecifyCache ( IN PVOID  BaseAddress,
IN SIZE_T  NumberOfBytes,
IN MEMORY_CACHING_TYPE  CacheType
 ) 

Definition at line 5414 of file iosup.c.

References ASSERT, ExFreePool(), MiFreeLowMemory(), MiGetPteAddress, MiNoLowMemory, MmCached, MmNonPagedSystemStart, MmNumberOfSystemPtes, MmUnmapIoSpace(), PAGE_SHIFT, PAGE_SIZE, PAGED_CODE, TRUE, and _MMPTE::u.

05422 : 05423 05424 This function deallocates a range of noncached memory in 05425 the non-paged portion of the system address space. 05426 05427 Arguments: 05428 05429 BaseAddress - Supplies the base virtual address where the noncached 05430 05431 NumberOfBytes - Supplies the number of bytes allocated to the request. 05432 This must be the same number that was obtained with 05433 the MmAllocateContiguousMemorySpecifyCache call. 05434 05435 CacheType - Supplies the cachetype used when the caller made the 05436 MmAllocateContiguousMemorySpecifyCache call. 05437 05438 Return Value: 05439 05440 None. 05441 05442 Environment: 05443 05444 Kernel mode, IRQL of APC_LEVEL or below. 05445 05446 --*/ 05447 05448 { 05449 PVOID PoolAddress; 05450 PMMPTE PointerPte; 05451 05452 PAGED_CODE(); 05453 05454 if (CacheType != MmCached) { 05455 05456 // 05457 // The caller was using an alternate mapping - free these PTEs too. 05458 // 05459 05460 PointerPte = MiGetPteAddress(BaseAddress); 05461 05462 PointerPte += ((NumberOfBytes + PAGE_SIZE - 1) >> PAGE_SHIFT); 05463 PoolAddress = (PVOID)(ULONG_PTR)PointerPte->u.Long; 05464 05465 PointerPte += 1; 05466 ASSERT (NumberOfBytes == PointerPte->u.Long); 05467 05468 NumberOfBytes += (2 * PAGE_SIZE); 05469 MmUnmapIoSpace (BaseAddress, NumberOfBytes); 05470 BaseAddress = PoolAddress; 05471 } 05472 else { 05473 ASSERT (BaseAddress < MmNonPagedSystemStart || 05474 BaseAddress >= (PVOID)((PCHAR)MmNonPagedSystemStart + (MmNumberOfSystemPtes << PAGE_SHIFT))); 05475 } 05476 05477 #if defined (_X86PAE_) 05478 if (MiNoLowMemory == TRUE) { 05479 if (MiFreeLowMemory (BaseAddress, 'tnoC') == TRUE) { 05480 return; 05481 } 05482 } 05483 #endif 05484 05485 ExFreePool (BaseAddress); 05486 }

VOID MmFreeNonCachedMemory ( IN PVOID  BaseAddress,
IN SIZE_T  NumberOfBytes
 ) 

Definition at line 5721 of file iosup.c.

References ASSERT, BYTES_TO_PAGES, ExPageLockHandle, LOCK_PFN, MI_GET_PAGE_FRAME_FROM_PTE, MI_MAKING_MULTIPLE_PTES_INVALID, MI_PFN_ELEMENT, MI_SET_PFN_DELETED, MiDecrementShareAndValidCount, MiDecrementShareCountOnly, MiGetPteAddress, MiReleaseSystemPtes(), MiReturnCommitment(), MM_BUMP_COUNTER, MM_DBG_COMMIT_RETURN_NONCACHED_PAGES, MM_TRACK_COMMIT, MmLockPagableSectionByHandle(), MmResidentAvailablePages, MmUnlockPagableImageSection(), PAGE_ALIGN, _MMPFN::PteFrame, SystemPteSpace, TRUE, _MMPFN::u2, and UNLOCK_PFN.

05728 : 05729 05730 This function deallocates a range of noncached memory in 05731 the non-paged portion of the system address space. 05732 05733 Arguments: 05734 05735 BaseAddress - Supplies the base virtual address where the noncached 05736 memory resides. 05737 05738 NumberOfBytes - Supplies the number of bytes allocated to the request. 05739 This must be the same number that was obtained with 05740 the MmAllocateNonCachedMemory call. 05741 05742 Return Value: 05743 05744 None. 05745 05746 Environment: 05747 05748 Kernel mode, IRQL of APC_LEVEL or below. 05749 05750 --*/ 05751 05752 { 05753 05754 PMMPTE PointerPte; 05755 PMMPFN Pfn1; 05756 PFN_NUMBER NumberOfPages; 05757 PFN_NUMBER i; 05758 PFN_NUMBER PageFrameIndex; 05759 KIRQL OldIrql; 05760 05761 ASSERT (NumberOfBytes != 0); 05762 ASSERT (PAGE_ALIGN (BaseAddress) == BaseAddress); 05763 05764 MI_MAKING_MULTIPLE_PTES_INVALID (TRUE); 05765 05766 NumberOfPages = BYTES_TO_PAGES(NumberOfBytes); 05767 05768 PointerPte = MiGetPteAddress (BaseAddress); 05769 05770 i = NumberOfPages; 05771 05772 MmLockPagableSectionByHandle (ExPageLockHandle); 05773 05774 LOCK_PFN (OldIrql); 05775 05776 do { 05777 05778 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 05779 05780 // 05781 // Mark the page for deletion when the reference count goes to zero. 05782 // 05783 05784 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 05785 ASSERT (Pfn1->u2.ShareCount == 1); 05786 MiDecrementShareAndValidCount (Pfn1->PteFrame); 05787 MI_SET_PFN_DELETED (Pfn1); 05788 MiDecrementShareCountOnly (PageFrameIndex); 05789 PointerPte += 1; 05790 i -= 1; 05791 } while (i != 0); 05792 05793 PointerPte -= NumberOfPages; 05794 05795 // 05796 // Update the count of available resident pages. 
05797 // 05798 05799 MmResidentAvailablePages += NumberOfPages; 05800 MM_BUMP_COUNTER(5, NumberOfPages); 05801 05802 UNLOCK_PFN (OldIrql); 05803 05804 MmUnlockPagableImageSection (ExPageLockHandle); 05805 05806 MiReleaseSystemPtes (PointerPte, (ULONG)NumberOfPages, SystemPteSpace); 05807 05808 MiReturnCommitment (NumberOfPages); 05809 MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_NONCACHED_PAGES, NumberOfPages); 05810 05811 return; 05812 }

VOID MmFreePagesFromMdl ( IN PMDL  MemoryDescriptorList  ) 
 

Definition at line 5051 of file iosup.c.

References ADDRESS_AND_SIZE_TO_SPAN_PAGES, APC_LEVEL, ASSERT, ExPageLockHandle, LOCK_PFN, MDL_IO_SPACE, MDL_PHYSICAL_VIEW, MI_IS_PFN_DELETED, MI_MAGIC_AWE_PTEFRAME, MI_MAKING_MULTIPLE_PTES_INVALID, MI_PFN_ELEMENT, MI_PFN_IS_AWE, MiDecrementReferenceCount(), MiReturnCommitment(), MM_BUMP_COUNTER, MM_DBG_COMMIT_RETURN_MDL_PAGES, MM_EMPTY_LIST, MM_TRACK_COMMIT, MmHighestPhysicalPage, MmLockPagableSectionByHandle(), MmMdlPagesAllocated, MmResidentAvailablePages, MmUnlockPagableImageSection(), PAGE_SIZE, StandbyPageList, TRUE, _MMPFN::u2, and UNLOCK_PFN.

Referenced by MiCleanPhysicalProcessPages(), NtAllocateUserPhysicalPages(), and NtFreeUserPhysicalPages().

05057 : 05058 05059 This routine walks the argument MDL freeing each physical page back to 05060 the PFN database. This is designed to free pages acquired via 05061 MmAllocatePagesForMdl only. 05062 05063 Arguments: 05064 05065 MemoryDescriptorList - Supplies an MDL which contains the pages to be freed. 05066 05067 Return Value: 05068 05069 None. 05070 05071 Environment: 05072 05073 Kernel mode, IRQL of APC_LEVEL or below. 05074 05075 --*/ 05076 { 05077 PMMPFN Pfn1; 05078 KIRQL OldIrql; 05079 PVOID StartingAddress; 05080 PVOID AlignedVa; 05081 PPFN_NUMBER Page; 05082 PFN_NUMBER NumberOfPages; 05083 PFN_NUMBER PagesFreed; 05084 05085 ASSERT (KeGetCurrentIrql() <= APC_LEVEL); 05086 05087 PagesFreed = 0; 05088 05089 MmLockPagableSectionByHandle (ExPageLockHandle); 05090 05091 Page = (PPFN_NUMBER)(MemoryDescriptorList + 1); 05092 05093 ASSERT ((MemoryDescriptorList->MdlFlags & (MDL_IO_SPACE | MDL_PHYSICAL_VIEW)) == 0); 05094 05095 ASSERT (((ULONG_PTR)MemoryDescriptorList->StartVa & (PAGE_SIZE - 1)) == 0); 05096 AlignedVa = (PVOID)MemoryDescriptorList->StartVa; 05097 05098 StartingAddress = (PVOID)((PCHAR)AlignedVa + 05099 MemoryDescriptorList->ByteOffset); 05100 05101 NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartingAddress, 05102 MemoryDescriptorList->ByteCount); 05103 05104 MI_MAKING_MULTIPLE_PTES_INVALID (TRUE); 05105 05106 LOCK_PFN (OldIrql); 05107 05108 do { 05109 05110 if (*Page == MM_EMPTY_LIST) { 05111 05112 // 05113 // There are no more locked pages. 
05114 // 05115 05116 break; 05117 } 05118 05119 ASSERT (*Page <= MmHighestPhysicalPage); 05120 05121 Pfn1 = MI_PFN_ELEMENT (*Page); 05122 ASSERT (Pfn1->u2.ShareCount == 1); 05123 ASSERT (MI_IS_PFN_DELETED (Pfn1) == TRUE); 05124 ASSERT (MI_PFN_IS_AWE (Pfn1) == TRUE); 05125 ASSERT (Pfn1->PteFrame == MI_MAGIC_AWE_PTEFRAME); 05126 05127 Pfn1->u3.e1.StartOfAllocation = 0; 05128 Pfn1->u3.e1.EndOfAllocation = 0; 05129 Pfn1->u2.ShareCount = 0; 05130 #if DBG 05131 Pfn1->PteFrame -= 1; 05132 Pfn1->u3.e1.PageLocation = StandbyPageList; 05133 #endif 05134 05135 MiDecrementReferenceCount (*Page); 05136 05137 PagesFreed += 1; 05138 05139 StartingAddress = (PVOID)((PCHAR)StartingAddress + PAGE_SIZE); 05140 05141 *Page++ = MM_EMPTY_LIST; 05142 NumberOfPages -= 1; 05143 05144 } while (NumberOfPages != 0); 05145 05146 MmMdlPagesAllocated -= PagesFreed; 05147 05148 MmResidentAvailablePages += PagesFreed; 05149 MM_BUMP_COUNTER(35, PagesFreed); 05150 05151 UNLOCK_PFN (OldIrql); 05152 05153 MmUnlockPagableImageSection (ExPageLockHandle); 05154 05155 MiReturnCommitment (PagesFreed); 05156 MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_MDL_PAGES, PagesFreed); 05157 }

NTKERNELAPI ULONG MmGatherMemoryForHibernate (IN PMDL Mdl, IN BOOLEAN Wait)
 

Definition at line 7930 of file iosup.c.

References APC_LEVEL, FALSE, KeDelayExecutionThread(), KernelMode, LOCK_PFN2, MDL_PAGES_LOCKED, MI_GET_PAGE_COLOR_FROM_PTE, MI_PFN_ELEMENT, MI_SET_PFN_DELETED, MiDelayPageFaults, MiEmptyAllWorkingSets(), MiFlushAllPages(), MiRemoveAnyPage(), Mm30Milliseconds, MM_DEMAND_ZERO_WRITE_PTE, MmAvailablePages, NULL, _MMPFN::OriginalPte, PAGE_SHIFT, TRUE, _MMPTE::u, _MMPFN::u3, and UNLOCK_PFN2.

07937 : 07938 07939 Finds enough memory to fill in the pages of the MDL for power management 07940 hibernate function. 07941 07942 Arguments: 07943 07944 Mdl - Supplies an MDL, the start VA field should be NULL. The length 07945 field indicates how many pages to obtain. 07946 07947 Wait - FALSE to fail immediately if the pages aren't available. 07948 07949 Return Value: 07950 07951 TRUE if the MDL could be filled in, FALSE otherwise. 07952 07953 Environment: 07954 07955 Kernel mode, IRQL of APC_LEVEL or below. 07956 07957 --*/ 07958 07959 { 07960 KIRQL OldIrql; 07961 PFN_NUMBER PagesNeeded; 07962 PPFN_NUMBER Pages; 07963 PFN_NUMBER i; 07964 PFN_NUMBER PageFrameIndex; 07965 PMMPFN Pfn1; 07966 ULONG status; 07967 07968 status = FALSE; 07969 07970 PagesNeeded = Mdl->ByteCount >> PAGE_SHIFT; 07971 Pages = (PPFN_NUMBER)(Mdl + 1); 07972 07973 i = Wait ? 100 : 1; 07974 07975 InterlockedIncrement (&MiDelayPageFaults); 07976 07977 do { 07978 07979 LOCK_PFN2 (OldIrql); 07980 if (MmAvailablePages > PagesNeeded) { 07981 07982 // 07983 // Fill in the MDL. 07984 // 07985 07986 do { 07987 PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE (NULL)); 07988 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 07989 MI_SET_PFN_DELETED (Pfn1); 07990 Pfn1->u3.e2.ReferenceCount += 1; 07991 Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE; 07992 *Pages = PageFrameIndex; 07993 Pages += 1; 07994 PagesNeeded -= 1; 07995 } while (PagesNeeded); 07996 UNLOCK_PFN2 (OldIrql); 07997 Mdl->MdlFlags |= MDL_PAGES_LOCKED; 07998 status = TRUE; 07999 break; 08000 } 08001 08002 UNLOCK_PFN2 (OldIrql); 08003 08004 // 08005 // If we're being called at DISPATCH_LEVEL we cannot move pages to 08006 // the standby list because mutexes must be acquired to do so. 08007 // 08008 08009 if (OldIrql > APC_LEVEL) { 08010 break; 08011 } 08012 08013 if (!i) { 08014 break; 08015 } 08016 08017 // 08018 // Attempt to move pages to the standby list. 
08019 // 08020 08021 MiEmptyAllWorkingSets (); 08022 MiFlushAllPages(); 08023 08024 KeDelayExecutionThread (KernelMode, 08025 FALSE, 08026 (PLARGE_INTEGER)&Mm30Milliseconds); 08027 i -= 1; 08028 08029 } while (TRUE); 08030 08031 InterlockedDecrement (&MiDelayPageFaults); 08032 08033 return status; 08034 }

PHYSICAL_ADDRESS MmGetPhysicalAddress (IN PVOID BaseAddress)
 

Definition at line 5490 of file iosup.c.

References BYTE_OFFSET, MI_CONVERT_PHYSICAL_TO_PFN, MI_GET_PAGE_FRAME_FROM_PTE, MI_IS_PHYSICAL_ADDRESS, MiGetPteAddress, PAGE_SHIFT, _MMPTE::u, and ZERO_LARGE.

Referenced by IoFreeDumpRange(), IopGetDumpStack(), IopInitializeDCB(), IopMapVirtualToPhysicalMdl(), IoSetDumpRange(), KdpStub(), KeStartAllProcessors(), Ki386BuildIdentityBuffer(), Ki386ConvertPte(), Ki386CreateIdentityMap(), KiGetPhysicalAddress(), MmAllocateContiguousMemorySpecifyCache(), MmDbgWriteCheck(), MmHibernateInformation(), and MmMapUserAddressesToPage().

05496 : 05497 05498 This function returns the corresponding physical address for a 05499 valid virtual address. 05500 05501 Arguments: 05502 05503 BaseAddress - Supplies the virtual address for which to return the 05504 physical address. 05505 05506 Return Value: 05507 05508 Returns the corresponding physical address. 05509 05510 Environment: 05511 05512 Kernel mode. Any IRQL level. 05513 05514 --*/ 05515 05516 { 05517 PMMPTE PointerPte; 05518 PHYSICAL_ADDRESS PhysicalAddress; 05519 05520 if (MI_IS_PHYSICAL_ADDRESS(BaseAddress)) { 05521 PhysicalAddress.QuadPart = MI_CONVERT_PHYSICAL_TO_PFN (BaseAddress); 05522 } else { 05523 05524 PointerPte = MiGetPteAddress(BaseAddress); 05525 05526 if (PointerPte->u.Hard.Valid == 0) { 05527 KdPrint(("MM:MmGetPhysicalAddressFailed base address was %lx", 05528 BaseAddress)); 05529 ZERO_LARGE (PhysicalAddress); 05530 return PhysicalAddress; 05531 } 05532 PhysicalAddress.QuadPart = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 05533 } 05534 05535 PhysicalAddress.QuadPart = PhysicalAddress.QuadPart << PAGE_SHIFT; 05536 PhysicalAddress.LowPart += BYTE_OFFSET(BaseAddress); 05537 05538 return PhysicalAddress; 05539 }

NTSTATUS MmGetSectionRange (IN PVOID AddressWithinSection, OUT PVOID *StartingSectionAddress, OUT PULONG SizeofSection)
 

Definition at line 6797 of file iosup.c.

References ExAcquireResourceShared, ExReleaseResource, KeEnterCriticalRegion, KeLeaveCriticalRegion, MiLookupDataTableEntry(), NTSTATUS(), PAGED_CODE, PsLoadedModuleResource, RtlImageNtHeader(), Status, and TRUE.

06802 { 06803 PLDR_DATA_TABLE_ENTRY DataTableEntry; 06804 ULONG i; 06805 PIMAGE_NT_HEADERS NtHeaders; 06806 PIMAGE_SECTION_HEADER NtSection; 06807 NTSTATUS Status; 06808 ULONG_PTR Rva; 06809 06810 PAGED_CODE(); 06811 06812 // 06813 // Search the loaded module list for the data table entry that describes 06814 // the DLL that was just unloaded. It is possible that an entry is not in 06815 // the list if a failure occurred at a point in loading the DLL just before 06816 // the data table entry was generated. 06817 // 06818 06819 Status = STATUS_NOT_FOUND; 06820 06821 KeEnterCriticalRegion(); 06822 ExAcquireResourceShared (&PsLoadedModuleResource, TRUE); 06823 06824 DataTableEntry = MiLookupDataTableEntry (AddressWithinSection, TRUE); 06825 if (DataTableEntry) { 06826 06827 Rva = (ULONG_PTR)((PUCHAR)AddressWithinSection - (ULONG_PTR)DataTableEntry->DllBase); 06828 06829 NtHeaders = (PIMAGE_NT_HEADERS)RtlImageNtHeader(DataTableEntry->DllBase); 06830 06831 NtSection = (PIMAGE_SECTION_HEADER)((PCHAR)NtHeaders + 06832 sizeof(ULONG) + 06833 sizeof(IMAGE_FILE_HEADER) + 06834 NtHeaders->FileHeader.SizeOfOptionalHeader 06835 ); 06836 06837 for (i = 0; i < NtHeaders->FileHeader.NumberOfSections; i += 1) { 06838 06839 if ( Rva >= NtSection->VirtualAddress && 06840 Rva < NtSection->VirtualAddress + NtSection->SizeOfRawData ) { 06841 06842 // 06843 // Found it 06844 // 06845 06846 *StartingSectionAddress = (PVOID) 06847 ((PCHAR) DataTableEntry->DllBase + NtSection->VirtualAddress); 06848 *SizeofSection = NtSection->SizeOfRawData; 06849 Status = STATUS_SUCCESS; 06850 break; 06851 } 06852 06853 NtSection += 1; 06854 } 06855 } 06856 06857 ExReleaseResource (&PsLoadedModuleResource); 06858 KeLeaveCriticalRegion(); 06859 return Status; 06860 }

PVOID MmGetVirtualForPhysical (IN PHYSICAL_ADDRESS PhysicalAddress)
 

Definition at line 5542 of file iosup.c.

References BYTE_OFFSET, MI_PFN_ELEMENT, MiGetVirtualAddressMappedByPte, PAGE_SHIFT, and _MMPFN::PteAddress.

Referenced by MmSetKernelDumpRange().

05548 : 05549 05550 This function returns the corresponding virtual address for a physical 05551 address whose primary virtual address is in system space. 05552 05553 Arguments: 05554 05555 PhysicalAddress - Supplies the physical address for which to return the 05556 virtual address. 05557 05558 Return Value: 05559 05560 Returns the corresponding virtual address. 05561 05562 Environment: 05563 05564 Kernel mode. Any IRQL level. 05565 05566 --*/ 05567 05568 { 05569 PFN_NUMBER PageFrameIndex; 05570 PMMPFN Pfn; 05571 05572 PageFrameIndex = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT); 05573 05574 Pfn = MI_PFN_ELEMENT (PageFrameIndex); 05575 05576 return (PVOID)((PCHAR)MiGetVirtualAddressMappedByPte (Pfn->PteAddress) + 05577 BYTE_OFFSET (PhysicalAddress.LowPart)); 05578 }

BOOLEAN MmIsRecursiveIoFault (VOID)
 

Definition at line 7167 of file iosup.c.

References PsGetCurrentThread.

07173 : 07174 07175 This function examines the thread's page fault clustering information 07176 and determines if the current page fault is occurring during an I/O 07177 operation. 07178 07179 Arguments: 07180 07181 None. 07182 07183 Return Value: 07184 07185 Returns TRUE if the fault is occurring during an I/O operation, 07186 FALSE otherwise. 07187 07188 --*/ 07189 07190 { 07191 return (BOOLEAN)(PsGetCurrentThread()->DisablePageFaultClustering | 07192 PsGetCurrentThread()->ForwardClusterOnly); 07193 }

PVOID MmLockPagableDataSection (IN PVOID AddressWithinSection)
 

Definition at line 6864 of file iosup.c.

References DbgPrint, ExAcquireResourceShared, ExReleaseResource, KeBugCheckEx(), KeEnterCriticalRegion, KeLeaveCriticalRegion, MI_IS_PHYSICAL_ADDRESS, MiLookupDataTableEntry(), MM_DBG_LOCK_CODE, MmLockPagableSectionByHandle(), NULL, PAGED_CODE, PsLoadedModuleResource, RtlImageNtHeader(), SECTION_BASE_ADDRESS, and TRUE.

Referenced by SmbTraceStart().

06870 : 06871 06872 This functions locks the entire section that contains the specified 06873 section in memory. This allows pagable code to be brought into 06874 memory and to be used as if the code was not really pagable. This 06875 should not be done with a high degree of frequency. 06876 06877 Arguments: 06878 06879 AddressWithinSection - Supplies the address of a function 06880 contained within a section that should be brought in and locked 06881 in memory. 06882 06883 Return Value: 06884 06885 This function returns a value to be used in a subsequent call to 06886 MmUnlockPagableImageSection. 06887 06888 --*/ 06889 06890 { 06891 PLDR_DATA_TABLE_ENTRY DataTableEntry; 06892 ULONG i; 06893 PIMAGE_NT_HEADERS NtHeaders; 06894 PIMAGE_SECTION_HEADER NtSection; 06895 PIMAGE_SECTION_HEADER FoundSection; 06896 ULONG_PTR Rva; 06897 06898 PAGED_CODE(); 06899 06900 if (MI_IS_PHYSICAL_ADDRESS(AddressWithinSection)) { 06901 06902 // 06903 // Physical address, just return that as the handle. 06904 // 06905 06906 return AddressWithinSection; 06907 } 06908 06909 // 06910 // Search the loaded module list for the data table entry that describes 06911 // the DLL that was just unloaded. It is possible that an entry is not in 06912 // the list if a failure occurred at a point in loading the DLL just before 06913 // the data table entry was generated. 
06914 // 06915 06916 FoundSection = NULL; 06917 06918 KeEnterCriticalRegion(); 06919 ExAcquireResourceShared (&PsLoadedModuleResource, TRUE); 06920 06921 DataTableEntry = MiLookupDataTableEntry (AddressWithinSection, TRUE); 06922 06923 Rva = (ULONG_PTR)((PUCHAR)AddressWithinSection - (ULONG_PTR)DataTableEntry->DllBase); 06924 06925 NtHeaders = (PIMAGE_NT_HEADERS)RtlImageNtHeader(DataTableEntry->DllBase); 06926 06927 NtSection = (PIMAGE_SECTION_HEADER)((ULONG_PTR)NtHeaders + 06928 sizeof(ULONG) + 06929 sizeof(IMAGE_FILE_HEADER) + 06930 NtHeaders->FileHeader.SizeOfOptionalHeader 06931 ); 06932 06933 for (i = 0; i < NtHeaders->FileHeader.NumberOfSections; i += 1) { 06934 06935 if ( Rva >= NtSection->VirtualAddress && 06936 Rva < NtSection->VirtualAddress + NtSection->SizeOfRawData ) { 06937 FoundSection = NtSection; 06938 06939 if (SECTION_BASE_ADDRESS(NtSection) != ((PUCHAR)DataTableEntry->DllBase + 06940 NtSection->VirtualAddress)) { 06941 06942 // 06943 // Overwrite the PointerToRelocations field (and on Win64, the 06944 // PointerToLinenumbers field also) so that it contains 06945 // the Va of this section and NumberOfLinenumbers so it contains 06946 // the Lock Count for the section. 
06947 // 06948 06949 SECTION_BASE_ADDRESS(NtSection) = ((PUCHAR)DataTableEntry->DllBase + 06950 NtSection->VirtualAddress); 06951 NtSection->NumberOfLinenumbers = 0; 06952 } 06953 06954 // 06955 // Now lock in the code 06956 // 06957 06958 #if DBG 06959 if (MmDebug & MM_DBG_LOCK_CODE) { 06960 DbgPrint("MM Lock %wZ %8s %p -> %p : %p %3ld.\n", 06961 &DataTableEntry->BaseDllName, 06962 NtSection->Name, 06963 AddressWithinSection, 06964 NtSection, 06965 SECTION_BASE_ADDRESS(NtSection), 06966 NtSection->NumberOfLinenumbers); 06967 } 06968 #endif //DBG 06969 06970 MmLockPagableSectionByHandle ((PVOID)NtSection); 06971 06972 break; 06973 } 06974 NtSection += 1; 06975 } 06976 06977 ExReleaseResource (&PsLoadedModuleResource); 06978 KeLeaveCriticalRegion(); 06979 if (!FoundSection) { 06980 KeBugCheckEx (MEMORY_MANAGEMENT, 06981 0x1234, 06982 (ULONG_PTR)AddressWithinSection, 06983 0, 06984 0); 06985 } 06986 return (PVOID)FoundSection; 06987 }

VOID MmLockPagableSectionByHandle (IN PVOID ImageSectionHandle)
 

Definition at line 6284 of file iosup.c.

References ASSERT, FALSE, KeEnterCriticalRegion, KeLeaveCriticalRegion, KePulseEvent(), KernelMode, KeWaitForSingleObject(), LOCK_PFN2, LOCK_SYSTEM_WS, MI_IS_PHYSICAL_ADDRESS, MI_IS_SYSTEM_CACHE_ADDRESS, MiGetPteAddress, MiLockCode(), MiMakeSystemAddressValidPfnSystemWs(), MM_LOCK_BY_REFCOUNT, MmCollidedLockEvent, MmCollidedLockWait, MmSystemRangeStart, NULL, SECTION_BASE_ADDRESS, TRUE, UNLOCK_PFN2, UNLOCK_PFN_AND_THEN_WAIT, UNLOCK_SYSTEM_WS, UNLOCK_SYSTEM_WS_NO_IRQL, and WrVirtualMemory.

Referenced by ExpGetLockInformation(), ExpGetLookasideInformation(), ExpGetPoolInformation(), ExpGetProcessInformation(), KeSetPhysicalCacheTypeRange(), KiAmdK6MtrrSetMemoryType(), MiEmptyAllWorkingSets(), MiFindContiguousMemory(), MiLoadSystemImage(), MiMapViewInSystemSpace(), MiSetPagingOfDriver(), MiShareSessionImage(), MiUnmapLockedPagesInUserSpace(), MiUnmapViewInSystemSpace(), MmAdjustWorkingSetSize(), MmAllocateNonCachedMemory(), MmAllocatePagesForMdl(), MmFreeDriverInitialization(), MmFreeNonCachedMemory(), MmFreePagesFromMdl(), MmLockPagableDataSection(), MmLockPagedPool(), MmMapViewOfSection(), MmResetDriverPaging(), MmShutdownSystem(), MmUnloadSystemImage(), MmUnlockPagedPool(), NtQueryVirtualMemory(), PspQueryPooledQuotaLimits(), PspQueryQuotaLimits(), PspQueryWorkingSetWatch(), and PspSetQuotaLimits().

06291 : 06292 06293 This routine checks to see if the specified pages are resident in 06294 the process's working set and if so the reference count for the 06295 page is incremented. The allows the virtual address to be accessed 06296 without getting a hard page fault (have to go to the disk... except 06297 for extremely rare case when the page table page is removed from the 06298 working set and migrates to the disk. 06299 06300 If the virtual address is that of the system wide global "cache" the 06301 virtual address of the "locked" pages is always guaranteed to 06302 be valid. 06303 06304 NOTE: This routine is not to be used for general locking of user 06305 addresses - use MmProbeAndLockPages. This routine is intended for 06306 well behaved system code like the file system caches which allocates 06307 virtual addresses for mapping files AND guarantees that the mapping 06308 will not be modified (deleted or changed) while the pages are locked. 06309 06310 Arguments: 06311 06312 ImageSectionHandle - Supplies the value returned by a previous call 06313 to MmLockPagableDataSection. This is a pointer to the Section 06314 header for the image. 06315 06316 Return Value: 06317 06318 None. 06319 06320 Environment: 06321 06322 Kernel mode, IRQL of DISPATCH_LEVEL or below. 06323 06324 --*/ 06325 06326 { 06327 PIMAGE_SECTION_HEADER NtSection; 06328 PVOID BaseAddress; 06329 ULONG SizeToLock; 06330 PMMPTE PointerPte; 06331 PMMPTE LastPte; 06332 KIRQL OldIrql; 06333 KIRQL OldIrqlWs; 06334 ULONG Collision; 06335 06336 if (MI_IS_PHYSICAL_ADDRESS(ImageSectionHandle)) { 06337 06338 // 06339 // No need to lock physical addresses. 
06340 // 06341 06342 return; 06343 } 06344 06345 NtSection = (PIMAGE_SECTION_HEADER)ImageSectionHandle; 06346 06347 BaseAddress = SECTION_BASE_ADDRESS(NtSection); 06348 06349 ASSERT (!MI_IS_SYSTEM_CACHE_ADDRESS(BaseAddress)); 06350 06351 ASSERT (BaseAddress >= MmSystemRangeStart); 06352 06353 SizeToLock = NtSection->SizeOfRawData; 06354 PointerPte = MiGetPteAddress(BaseAddress); 06355 LastPte = MiGetPteAddress((PCHAR)BaseAddress + SizeToLock - 1); 06356 06357 ASSERT (SizeToLock != 0); 06358 06359 // 06360 // The address must be within the system space. 06361 // 06362 06363 RetryLock: 06364 06365 LOCK_SYSTEM_WS (OldIrqlWs); 06366 LOCK_PFN2 (OldIrql); 06367 06368 MiMakeSystemAddressValidPfnSystemWs (&NtSection->NumberOfLinenumbers); 06369 06370 // 06371 // The NumberOfLinenumbers field is used to store the 06372 // lock count. 06373 // 06374 // Value of 0 means unlocked, 06375 // Value of 1 means lock in progress by another thread. 06376 // Value of 2 or more means locked. 06377 // 06378 // If the value is 1, this thread must block until the other thread's 06379 // lock operation is complete. 06380 // 06381 06382 NtSection->NumberOfLinenumbers += 1; 06383 06384 if (NtSection->NumberOfLinenumbers >= 3) { 06385 06386 // 06387 // Already locked, increment counter and return. 06388 // 06389 06390 UNLOCK_PFN2 (OldIrql); 06391 UNLOCK_SYSTEM_WS (OldIrqlWs); 06392 return; 06393 } 06394 06395 if (NtSection->NumberOfLinenumbers == 2) { 06396 06397 // 06398 // A lock is in progress. 06399 // Reset back to 1 and wait. 06400 // 06401 06402 NtSection->NumberOfLinenumbers = 1; 06403 MmCollidedLockWait = TRUE; 06404 06405 KeEnterCriticalRegion(); 06406 06407 // 06408 // The unlock IRQLs are deliberately reversed as the lock and mutex 06409 // are being released in reverse order. 
06410 // 06411 06412 UNLOCK_SYSTEM_WS_NO_IRQL (); 06413 UNLOCK_PFN_AND_THEN_WAIT (OldIrqlWs); 06414 06415 KeWaitForSingleObject(&MmCollidedLockEvent, 06416 WrVirtualMemory, 06417 KernelMode, 06418 FALSE, 06419 (PLARGE_INTEGER)NULL); 06420 KeLeaveCriticalRegion(); 06421 goto RetryLock; 06422 } 06423 06424 // 06425 // Value was 0 when the lock was obtained. It is now 1 indicating 06426 // a lock is in progress. 06427 // 06428 06429 MiLockCode (PointerPte, LastPte, MM_LOCK_BY_REFCOUNT); 06430 06431 // 06432 // Set lock count to 2 (it was 1 when this started) and check 06433 // to see if any other threads tried to lock while this was happening. 06434 // 06435 06436 MiMakeSystemAddressValidPfnSystemWs (&NtSection->NumberOfLinenumbers); 06437 NtSection->NumberOfLinenumbers += 1; 06438 06439 ASSERT (NtSection->NumberOfLinenumbers == 2); 06440 06441 Collision = MmCollidedLockWait; 06442 MmCollidedLockWait = FALSE; 06443 06444 UNLOCK_PFN2 (OldIrql); 06445 UNLOCK_SYSTEM_WS (OldIrqlWs); 06446 06447 if (Collision) { 06448 06449 // 06450 // Wake up all waiters. 06451 // 06452 06453 KePulseEvent (&MmCollidedLockEvent, 0, FALSE); 06454 } 06455 06456 return; 06457 }

VOID MmLockPagedPool (IN PVOID Address, IN SIZE_T SizeInBytes)
 

Definition at line 7821 of file iosup.c.

References ExPageLockHandle, LOCK_PFN, LOCK_SYSTEM_WS, MiGetPteAddress, MiLockCode(), MM_LOCK_BY_REFCOUNT, MmLockPagableSectionByHandle(), MmUnlockPagableImageSection(), UNLOCK_PFN, and UNLOCK_SYSTEM_WS.

Referenced by Ke386SetDescriptorProcess(), and MiSetImageProtect().

07828 : 07829 07830 Locks the specified address (which MUST reside in paged pool) into 07831 memory until MmUnlockPagedPool is called. 07832 07833 Arguments: 07834 07835 Address - Supplies the address in paged pool to lock. 07836 07837 SizeInBytes - Supplies the size in bytes to lock. 07838 07839 Return Value: 07840 07841 None. 07842 07843 Environment: 07844 07845 Kernel mode, IRQL of APC_LEVEL or below. 07846 07847 --*/ 07848 07849 { 07850 PMMPTE PointerPte; 07851 PMMPTE LastPte; 07852 KIRQL OldIrql; 07853 KIRQL OldIrqlWs; 07854 07855 MmLockPagableSectionByHandle(ExPageLockHandle); 07856 PointerPte = MiGetPteAddress (Address); 07857 LastPte = MiGetPteAddress ((PVOID)((PCHAR)Address + (SizeInBytes - 1))); 07858 LOCK_SYSTEM_WS (OldIrqlWs); 07859 LOCK_PFN (OldIrql); 07860 MiLockCode (PointerPte, LastPte, MM_LOCK_BY_REFCOUNT); 07861 UNLOCK_PFN (OldIrql); 07862 UNLOCK_SYSTEM_WS (OldIrqlWs); 07863 MmUnlockPagableImageSection(ExPageLockHandle); 07864 return; 07865 }

PVOID MmMapIoSpace (IN PHYSICAL_ADDRESS PhysicalAddress, IN SIZE_T NumberOfBytes, IN MEMORY_CACHING_TYPE CacheType)
 

Definition at line 3415 of file iosup.c.

References ASSERT, BYTE_OFFSET, _MDL::ByteCount, _MDL::ByteOffset, COMPUTE_PAGES_SPANNED, ExAllocatePoolWithTag, FALSE, KeFeatureBits, KeFlushEntireTb(), KeInvalidateAllCaches(), KeSetPhysicalCacheTypeRange(), KF_PAT, _MDL::MappedSystemVa, MI_DISABLE_CACHING, MI_SET_PTE_WRITE_COMBINE, MI_WRITE_VALID_PTE, MiGetVirtualAddressMappedByPte, MiInsertPteTracker(), MiLockSystemSpace, MiReleaseDeadPteTrackers(), MiReleaseSystemPtes(), MiReserveSystemPtes(), MiSweepCacheMachineDependent(), MiTrackPtesAborted, MiUnlockSystemSpace, MiWriteCombiningPtes, MM_COLOR_ALIGNMENT, MM_COLOR_MASK_VIRTUAL, MmCached, MmMaximumCacheType, MmNonCached, MmTrackPtes, MmUSWCCached, MmWriteCombined, NonPagedPool, NT_SUCCESS, NTSTATUS(), NULL, PAGE_SHIFT, RtlGetCallersAddress(), _MDL::StartVa, Status, SystemPteSpace, TRUE, _MMPTE::u, and ValidKernelPte.

Referenced by CmpFindACPITable(), DriverEntry(), MmAllocateContiguousMemorySpecifyCache(), MmMapVideoDisplay(), and VerifierMapIoSpace().

03423 : 03424 03425 This function maps the specified physical address into the non-pagable 03426 portion of the system address space. 03427 03428 Arguments: 03429 03430 PhysicalAddress - Supplies the starting physical address to map. 03431 03432 NumberOfBytes - Supplies the number of bytes to map. 03433 03434 CacheType - Supplies MmNonCached if the physical address is to be mapped 03435 as non-cached, MmCached if the address should be cached, and 03436 MmWriteCombined if the address should be cached and 03437 write-combined as a frame buffer which is to be used only by 03438 the video port driver. All other callers should use 03439 MmUSWCCached. MmUSWCCached is available only if the PAT 03440 feature is present and available. 03441 03442 For I/O device registers, this is usually specified 03443 as MmNonCached. 03444 03445 Return Value: 03446 03447 Returns the virtual address which maps the specified physical addresses. 03448 The value NULL is returned if sufficient virtual address space for 03449 the mapping could not be found. 03450 03451 Environment: 03452 03453 Kernel mode, Should be IRQL of APC_LEVEL or below, but unfortunately 03454 callers are coming in at DISPATCH_LEVEL and it's too late to change the 03455 rules now. This means you can never make this routine pagable. 
03456 03457 --*/ 03458 03459 { 03460 PFN_NUMBER NumberOfPages; 03461 PFN_NUMBER PageFrameIndex; 03462 PMMPTE PointerPte; 03463 PVOID BaseVa; 03464 MMPTE TempPte; 03465 KIRQL OldIrql; 03466 PMDL TempMdl; 03467 PFN_NUMBER MdlHack[(sizeof(MDL)/sizeof(PFN_NUMBER)) + 1]; 03468 PPFN_NUMBER Page; 03469 PLOCK_TRACKER Tracker; 03470 PVOID CallingAddress; 03471 PVOID CallersCaller; 03472 #ifdef i386 03473 NTSTATUS Status; 03474 #endif 03475 03476 #if !defined (_X86_) 03477 CallingAddress = (PVOID)_ReturnAddress(); 03478 CallersCaller = (PVOID)0; 03479 #endif 03480 03481 // 03482 // For compatibility for when CacheType used to be passed as a BOOLEAN 03483 // mask off the upper bits (TRUE == MmCached, FALSE == MmNonCached). 03484 // 03485 03486 CacheType &= 0xFF; 03487 03488 if (CacheType >= MmMaximumCacheType) { 03489 return (NULL); 03490 } 03491 03492 #if defined (i386) && !defined (_X86PAE_) 03493 ASSERT (PhysicalAddress.HighPart == 0); 03494 #endif 03495 03496 ASSERT (NumberOfBytes != 0); 03497 NumberOfPages = COMPUTE_PAGES_SPANNED (PhysicalAddress.LowPart, 03498 NumberOfBytes); 03499 03500 PointerPte = MiReserveSystemPtes((ULONG)NumberOfPages, 03501 SystemPteSpace, 03502 MM_COLOR_ALIGNMENT, 03503 (PhysicalAddress.LowPart & 03504 MM_COLOR_MASK_VIRTUAL), 03505 FALSE); 03506 if (PointerPte == NULL) { 03507 return(NULL); 03508 } 03509 03510 BaseVa = (PVOID)MiGetVirtualAddressMappedByPte (PointerPte); 03511 BaseVa = (PVOID)((PCHAR)BaseVa + BYTE_OFFSET(PhysicalAddress.LowPart)); 03512 03513 TempPte = ValidKernelPte; 03514 03515 #ifdef i386 03516 // 03517 // Set the physical range to proper caching type. If the PAT feature 03518 // is supported, then set the caching type in the PTE, otherwise modify 03519 // the MTRRs if applicable. If the cache type is MmUSWCCached and the 03520 // PAT is not supported then fail the call. 
03521 // 03522 03523 if (KeFeatureBits & KF_PAT) { 03524 if ((CacheType == MmWriteCombined) || (CacheType == MmUSWCCached)) { 03525 if (MiWriteCombiningPtes == TRUE) { 03526 MI_SET_PTE_WRITE_COMBINE(TempPte); 03527 Status = STATUS_SUCCESS; 03528 } else { 03529 Status = STATUS_UNSUCCESSFUL; 03530 } 03531 } else { 03532 03533 // 03534 // For Non-MmFrameBufferCaching type use existing mm macros. 03535 // 03536 03537 Status = STATUS_SUCCESS; 03538 } 03539 } else { 03540 03541 // Set the MTRRs if possible. 03542 03543 Status = KeSetPhysicalCacheTypeRange( 03544 PhysicalAddress, 03545 NumberOfBytes, 03546 CacheType 03547 ); 03548 } 03549 03550 // 03551 // If range could not be set, determine what to do 03552 // 03553 03554 if (!NT_SUCCESS(Status)) { 03555 03556 if ((Status == STATUS_NOT_SUPPORTED) && 03557 ((CacheType == MmNonCached) || (CacheType == MmCached))) { 03558 03559 // 03560 // The range may not have been set into the proper cache 03561 // type. If the range is either MmNonCached or MmCached just 03562 // continue as the PTE will be marked properly. 03563 // 03564 03565 NOTHING; 03566 03567 } else if (Status == STATUS_UNSUCCESSFUL && CacheType == MmCached) { 03568 03569 // 03570 // If setting a range to Cached was unsuccessful things are not 03571 // optimal, but not fatal. The range can be returned to the 03572 // caller and it will have whatever caching type it has - possibly 03573 // something below fully cached. 03574 // 03575 03576 NOTHING; 03577 03578 } else { 03579 03580 // 03581 // If there's still a problem, fail the request. 
03582 // 03583 03584 MiReleaseSystemPtes(PointerPte, NumberOfPages, SystemPteSpace); 03585 03586 return(NULL); 03587 } 03588 } 03589 #endif 03590 03591 if (CacheType == MmNonCached) { 03592 MI_DISABLE_CACHING (TempPte); 03593 } 03594 03595 #if defined(_IA64_) 03596 if (CacheType != MmCached) { 03597 KeFlushEntireTb(FALSE, TRUE); 03598 } 03599 #endif 03600 03601 PageFrameIndex = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT); 03602 03603 do { 03604 ASSERT (PointerPte->u.Hard.Valid == 0); 03605 TempPte.u.Hard.PageFrameNumber = PageFrameIndex; 03606 MI_WRITE_VALID_PTE (PointerPte, TempPte); 03607 PointerPte += 1; 03608 PageFrameIndex += 1; 03609 NumberOfPages -= 1; 03610 } while (NumberOfPages != 0); 03611 03612 #if defined(i386) 03613 // 03614 // WriteCombined is a non self-snooping memory type. This memory type 03615 // requires a writeback invalidation of all the caches on all processors 03616 // and each accompanying TB flush if the PAT is supported. 03617 // 03618 03619 if ((KeFeatureBits & KF_PAT) && ((CacheType == MmWriteCombined) 03620 || (CacheType == MmUSWCCached)) && (MiWriteCombiningPtes == TRUE)) { 03621 KeFlushEntireTb (FALSE, TRUE); 03622 KeInvalidateAllCaches (TRUE); 03623 } 03624 #endif 03625 03626 #if defined(_IA64_) 03627 if (CacheType != MmCached) { 03628 MiSweepCacheMachineDependent(BaseVa, NumberOfBytes, CacheType); 03629 } 03630 #endif 03631 03632 if (MmTrackPtes != 0) { 03633 03634 // 03635 // First free any zombie blocks as no locks are being held. 
03636 // 03637 03638 MiReleaseDeadPteTrackers (); 03639 03640 Tracker = ExAllocatePoolWithTag (NonPagedPool, 03641 sizeof (PTE_TRACKER), 03642 'ySmM'); 03643 03644 if (Tracker != NULL) { 03645 #if defined (_X86_) 03646 RtlGetCallersAddress(&CallingAddress, &CallersCaller); 03647 #endif 03648 03649 TempMdl = (PMDL) &MdlHack; 03650 TempMdl->MappedSystemVa = BaseVa; 03651 TempMdl->StartVa = (PVOID)(ULONG_PTR)PhysicalAddress.QuadPart; 03652 TempMdl->ByteOffset = BYTE_OFFSET(PhysicalAddress.LowPart); 03653 TempMdl->ByteCount = (ULONG)NumberOfBytes; 03654 03655 Page = (PPFN_NUMBER) (TempMdl + 1); 03656 Page = (PPFN_NUMBER)-1; 03657 03658 MiLockSystemSpace(OldIrql); 03659 03660 MiInsertPteTracker (Tracker, 03661 TempMdl, 03662 COMPUTE_PAGES_SPANNED (PhysicalAddress.LowPart, 03663 NumberOfBytes), 03664 CallingAddress, 03665 CallersCaller); 03666 03667 MiUnlockSystemSpace(OldIrql); 03668 } 03669 else { 03670 MiTrackPtesAborted = TRUE; 03671 } 03672 } 03673 03674 return BaseVa; 03675 }

PVOID MmMapLockedPages (IN PMDL MemoryDescriptorList, IN KPROCESSOR_MODE AccessMode)
 

Definition at line 2009 of file iosup.c.

References HighPagePriority, MmCached, MmMapLockedPagesSpecifyCache(), NULL, and TRUE.

Referenced by VerifierMapLockedPages().

02016 : 02017 02018 This function maps physical pages described by a memory descriptor 02019 list into the system virtual address space or the user portion of 02020 the virtual address space. 02021 02022 Arguments: 02023 02024 MemoryDescriptorList - Supplies a valid Memory Descriptor List which has 02025 been updated by MmProbeAndLockPages. 02026 02027 02028 AccessMode - Supplies an indicator of where to map the pages; 02029 KernelMode indicates that the pages should be mapped in the 02030 system part of the address space, UserMode indicates the 02031 pages should be mapped in the user part of the address space. 02032 02033 Return Value: 02034 02035 Returns the base address where the pages are mapped. The base address 02036 has the same offset as the virtual address in the MDL. 02037 02038 This routine will raise an exception if the processor mode is USER_MODE 02039 and quota limits or VM limits are exceeded. 02040 02041 Environment: 02042 02043 Kernel mode. DISPATCH_LEVEL or below if access mode is KernelMode, 02044 APC_LEVEL or below if access mode is UserMode. 02045 02046 --*/ 02047 02048 { 02049 return MmMapLockedPagesSpecifyCache (MemoryDescriptorList, 02050 AccessMode, 02051 MmCached, 02052 NULL, 02053 TRUE, 02054 HighPagePriority); 02055 }

PVOID MmMapLockedPagesSpecifyCache (IN PMDL MemoryDescriptorList,
                                    IN KPROCESSOR_MODE AccessMode,
                                    IN MEMORY_CACHING_TYPE CacheType,
                                    IN PVOID RequestedAddress,
                                    IN ULONG BugCheckOnFailure,
                                    IN MM_PAGE_PRIORITY Priority)

Definition at line 2058 of file iosup.c.

References ASSERT, COMPUTE_PAGES_SPANNED, ExAllocatePoolWithTag, ExFreePool(), FALSE, HighPagePriority, KeFlushEntireTb(), KeInvalidateAllCaches(), KernelMode, LOCK_PFN2, MDL_IO_SPACE, MDL_MAPPED_TO_SYSTEM_VA, MDL_MAPPING_CAN_FAIL, MDL_PAGES_LOCKED, MDL_PARTIAL, MDL_PARTIAL_HAS_BEEN_MAPPED, MDL_PHYSICAL_VIEW, MDL_SOURCE_IS_NONPAGED_POOL, MI_DISABLE_CACHING, MI_PFN_ELEMENT, MI_SET_PTE_WRITE_COMBINE, MI_WRITE_VALID_PTE, MiGetPteAddress, MiGetSystemPteAvailability(), MiGetVirtualAddressMappedByPte, MiInsertPteTracker(), MiLockSystemSpace, MiMapLockedPagesInUserSpace(), MiReleaseDeadPteTrackers(), MiReleaseSystemPtes(), MiReserveSystemPtes(), MiSweepCacheMachineDependent(), MiTrackPtesAborted, MiUnlockSystemSpace, MiWriteCombiningPtes, MM_COLOR_ALIGNMENT, MM_COLOR_MASK, MM_COLOR_MASK_VIRTUAL, MM_EMPTY_LIST, MM_KSEG0_BASE, MmCached, MmHardwareCoherentCached, MmNonCached, MmNonCachedUnordered, MmSystemLockPagesCount, MmTrackPtes, MmWriteCombined, NonPagedPool, NULL, PAGE_SHIFT, PAGE_SIZE, PTE_SHIFT, RtlGetCallersAddress(), SystemPteSpace, TRUE, _MMPTE::u, _MMPFN::u3, UNLOCK_PFN2, and ValidKernelPte.

Referenced by MiCloneProcessAddressSpace(), MiDoMappedCopy(), MmMapLockedPages(), NtStartProfile(), and VerifierMapLockedPagesSpecifyCache().

02069 : 02070 02071 This function maps physical pages described by a memory descriptor 02072 list into the system virtual address space or the user portion of 02073 the virtual address space. 02074 02075 Arguments: 02076 02077 MemoryDescriptorList - Supplies a valid Memory Descriptor List which has 02078 been updated by MmProbeAndLockPages. 02079 02080 AccessMode - Supplies an indicator of where to map the pages; 02081 KernelMode indicates that the pages should be mapped in the 02082 system part of the address space, UserMode indicates the 02083 pages should be mapped in the user part of the address space. 02084 02085 CacheType - Supplies the type of cache mapping to use for the MDL. 02086 MmCached indicates "normal" kernel or user mappings. 02087 02088 RequestedAddress - Supplies the base user address of the view. This is only 02089 used if the AccessMode is UserMode. If the initial 02090 value of this argument is not null, then the view will 02091 be allocated starting at the specified virtual 02092 address rounded down to the next 64kb address 02093 boundary. If the initial value of this argument is 02094 null, then the operating system will determine 02095 where to allocate the view. 02096 02097 BugCheckOnFailure - Supplies whether to bugcheck if the mapping cannot be 02098 obtained. This flag is only checked if the MDL's 02099 MDL_MAPPING_CAN_FAIL is zero, which implies that the 02100 default MDL behavior is to bugcheck. This flag then 02101 provides an additional avenue to avoid the bugcheck. 02102 Done this way in order to provide WDM compatibility. 02103 02104 Priority - Supplies an indication as to how important it is that this 02105 request succeed under low available PTE conditions. 02106 02107 Return Value: 02108 02109 Returns the base address where the pages are mapped. The base address 02110 has the same offset as the virtual address in the MDL. 
02111 02112 This routine will raise an exception if the processor mode is USER_MODE 02113 and quota limits or VM limits are exceeded. 02114 02115 Environment: 02116 02117 Kernel mode. DISPATCH_LEVEL or below if access mode is KernelMode, 02118 APC_LEVEL or below if access mode is UserMode. 02119 02120 --*/ 02121 02122 { 02123 PFN_NUMBER NumberOfPages; 02124 PFN_NUMBER SavedPageCount; 02125 PPFN_NUMBER Page; 02126 PMMPTE PointerPte; 02127 PVOID BaseVa; 02128 MMPTE TempPte; 02129 PVOID StartingVa; 02130 PMMPFN Pfn2; 02131 KIRQL OldIrql; 02132 PFN_NUMBER NumberOfPtes; 02133 PVOID CallingAddress; 02134 PVOID CallersCaller; 02135 PVOID Tracker; 02136 02137 #if !defined (_X86_) 02138 CallingAddress = (PVOID)_ReturnAddress(); 02139 CallersCaller = (PVOID)0; 02140 #endif 02141 02142 StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa + 02143 MemoryDescriptorList->ByteOffset); 02144 02145 ASSERT (MemoryDescriptorList->ByteCount != 0); 02146 02147 if (AccessMode == KernelMode) { 02148 02149 Page = (PPFN_NUMBER)(MemoryDescriptorList + 1); 02150 NumberOfPages = COMPUTE_PAGES_SPANNED (StartingVa, 02151 MemoryDescriptorList->ByteCount); 02152 SavedPageCount = NumberOfPages; 02153 02154 // 02155 // Map the pages into the system part of the address space as 02156 // kernel read/write. 02157 // 02158 02159 ASSERT ((MemoryDescriptorList->MdlFlags & ( 02160 MDL_MAPPED_TO_SYSTEM_VA | 02161 MDL_SOURCE_IS_NONPAGED_POOL | 02162 MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0); 02163 ASSERT ((MemoryDescriptorList->MdlFlags & ( 02164 MDL_PAGES_LOCKED | 02165 MDL_PARTIAL)) != 0); 02166 02167 // 02168 // Map this with KSEG0 if possible. 
02169 // 02170 02171 #if defined(_ALPHA_) 02172 #define KSEG0_MAXPAGE ((PFN_NUMBER)((KSEG2_BASE - KSEG0_BASE) >> PAGE_SHIFT)) 02173 #endif 02174 02175 #if defined(_X86_) || defined(_IA64_) 02176 #define KSEG0_MAXPAGE MmKseg2Frame 02177 #endif 02178 02179 #if defined(_IA64_) 02180 #define MM_KSEG0_BASE KSEG0_BASE 02181 #endif 02182 02183 if ((NumberOfPages == 1) && (CacheType == MmCached) && 02184 (*Page < KSEG0_MAXPAGE)) { 02185 BaseVa = (PVOID)(MM_KSEG0_BASE + (*Page << PAGE_SHIFT) + 02186 MemoryDescriptorList->ByteOffset); 02187 MemoryDescriptorList->MappedSystemVa = BaseVa; 02188 MemoryDescriptorList->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA; 02189 02190 goto Update; 02191 } 02192 02193 // 02194 // Make sure there are enough PTEs of the requested size. 02195 // 02196 02197 if ((Priority != HighPagePriority) && 02198 (MiGetSystemPteAvailability ((ULONG)NumberOfPages, Priority) == FALSE)) { 02199 return NULL; 02200 } 02201 02202 PointerPte = MiReserveSystemPtes ( 02203 (ULONG)NumberOfPages, 02204 SystemPteSpace, 02205 MM_COLOR_ALIGNMENT, 02206 (PtrToUlong(StartingVa) & 02207 MM_COLOR_MASK_VIRTUAL), 02208 MemoryDescriptorList->MdlFlags & MDL_MAPPING_CAN_FAIL ? 0 : BugCheckOnFailure); 02209 02210 if (PointerPte == NULL) { 02211 02212 // 02213 // Not enough system PTES are available. 
02214 // 02215 02216 return NULL; 02217 } 02218 BaseVa = (PVOID)((PCHAR)MiGetVirtualAddressMappedByPte (PointerPte) + 02219 MemoryDescriptorList->ByteOffset); 02220 02221 NumberOfPtes = NumberOfPages; 02222 02223 TempPte = ValidKernelPte; 02224 02225 switch (CacheType) { 02226 02227 case MmNonCached: 02228 MI_DISABLE_CACHING (TempPte); 02229 break; 02230 02231 case MmCached: 02232 break; 02233 02234 case MmWriteCombined: 02235 MI_SET_PTE_WRITE_COMBINE (TempPte); 02236 break; 02237 02238 case MmHardwareCoherentCached: 02239 break; 02240 02241 #if 0 02242 case MmNonCachedUnordered: 02243 break; 02244 #endif 02245 02246 default: 02247 break; 02248 } 02249 02250 #if defined(_IA64_) 02251 if (CacheType != MmCached) { 02252 KeFlushEntireTb(FALSE, TRUE); 02253 } 02254 #endif 02255 02256 #if DBG 02257 LOCK_PFN2 (OldIrql); 02258 #endif //DBG 02259 02260 do { 02261 02262 if (*Page == MM_EMPTY_LIST) { 02263 break; 02264 } 02265 TempPte.u.Hard.PageFrameNumber = *Page; 02266 ASSERT (PointerPte->u.Hard.Valid == 0); 02267 02268 #if DBG 02269 if ((MemoryDescriptorList->MdlFlags & (MDL_IO_SPACE | MDL_PHYSICAL_VIEW)) == 0) { 02270 Pfn2 = MI_PFN_ELEMENT (*Page); 02271 ASSERT (Pfn2->u3.e2.ReferenceCount != 0); 02272 ASSERT ((((ULONG_PTR)PointerPte >> PTE_SHIFT) & MM_COLOR_MASK) == 02273 (((ULONG)Pfn2->u3.e1.PageColor))); 02274 } 02275 #endif //DBG 02276 02277 MI_WRITE_VALID_PTE (PointerPte, TempPte); 02278 Page += 1; 02279 PointerPte += 1; 02280 NumberOfPages -= 1; 02281 } while (NumberOfPages != 0); 02282 02283 #if DBG 02284 UNLOCK_PFN2 (OldIrql); 02285 #endif //DBG 02286 02287 #if defined(i386) 02288 // 02289 // If write combined was specified then flush all caches and TBs. 
02290 // 02291 02292 if (CacheType == MmWriteCombined && MiWriteCombiningPtes == TRUE) { 02293 KeFlushEntireTb (FALSE, TRUE); 02294 KeInvalidateAllCaches (TRUE); 02295 } 02296 #endif 02297 02298 #if defined(_IA64_) 02299 if (CacheType != MmCached) { 02300 MiSweepCacheMachineDependent(BaseVa, SavedPageCount * PAGE_SIZE, CacheType); 02301 } 02302 #endif 02303 if (MmTrackPtes != 0) { 02304 02305 // 02306 // First free any zombie blocks as no locks are being held. 02307 // 02308 02309 MiReleaseDeadPteTrackers (); 02310 02311 Tracker = ExAllocatePoolWithTag (NonPagedPool, 02312 sizeof (PTE_TRACKER), 02313 'ySmM'); 02314 if (Tracker == NULL) { 02315 MiTrackPtesAborted = TRUE; 02316 } 02317 } 02318 02319 MiLockSystemSpace(OldIrql); 02320 if (MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) { 02321 02322 // 02323 // Another thread must have already mapped this. 02324 // Clean up the system PTES and release them. 02325 // 02326 02327 MiUnlockSystemSpace(OldIrql); 02328 02329 if (MmTrackPtes != 0) { 02330 if (Tracker != NULL) { 02331 ExFreePool(Tracker); 02332 } 02333 } 02334 02335 #if DBG 02336 if ((MemoryDescriptorList->MdlFlags & (MDL_IO_SPACE | MDL_PHYSICAL_VIEW)) == 0) { 02337 PMMPFN Pfn3; 02338 PFN_NUMBER j; 02339 PPFN_NUMBER Page1; 02340 02341 Page1 = (PPFN_NUMBER)(MemoryDescriptorList + 1); 02342 for (j = 0; j < SavedPageCount ;j += 1) { 02343 if (*Page == MM_EMPTY_LIST) { 02344 break; 02345 } 02346 Pfn3 = MI_PFN_ELEMENT (*Page1); 02347 ASSERT (Pfn3->u3.e2.ReferenceCount != 0); 02348 Page1 += 1; 02349 } 02350 } 02351 #endif //DBG 02352 PointerPte = MiGetPteAddress (BaseVa); 02353 02354 MiReleaseSystemPtes (PointerPte, 02355 (ULONG)SavedPageCount, 02356 SystemPteSpace); 02357 02358 return MemoryDescriptorList->MappedSystemVa; 02359 } 02360 02361 MemoryDescriptorList->MappedSystemVa = BaseVa; 02362 *(volatile ULONG *)&MmSystemLockPagesCount; //need to force order. 
02363 MemoryDescriptorList->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA; 02364 02365 if ((MmTrackPtes != 0) && (Tracker != NULL)) { 02366 #if defined (_X86_) 02367 RtlGetCallersAddress(&CallingAddress, &CallersCaller); 02368 #endif 02369 MiInsertPteTracker (Tracker, 02370 MemoryDescriptorList, 02371 NumberOfPtes, 02372 CallingAddress, 02373 CallersCaller); 02374 } 02375 02376 MiUnlockSystemSpace(OldIrql); 02377 02378 Update: 02379 if ((MemoryDescriptorList->MdlFlags & MDL_PARTIAL) != 0) { 02380 MemoryDescriptorList->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED; 02381 } 02382 02383 return BaseVa; 02384 02385 } else { 02386 02387 return MiMapLockedPagesInUserSpace (MemoryDescriptorList, 02388 StartingVa, 02389 CacheType, 02390 RequestedAddress); 02391 } 02392 }

VOID MmMapMemoryDumpMdl (IN OUT PMDL MemoryDumpMdl)
 

Definition at line 7197 of file iosup.c.

References ASSERT, BYTES_TO_PAGES, KiFlushSingleTb(), MiGetVirtualAddressMappedByPte, MM_KERNEL_DEMAND_ZERO_PTE, MmCrashDumpPte, PAGE_SHIFT, PAGE_SIZE, TRUE, _MMPTE::u, and ValidKernelPte.

Referenced by IopMapPhysicalMemory(), and IopWritePageToDisk().

VOID
MmMapMemoryDumpMdl (
    IN OUT PMDL MemoryDumpMdl
    )

/*++

Routine Description:

    For use by crash dump routine ONLY.  Maps an MDL into a fixed
    portion of the address space.  Only 1 MDL can be mapped at a
    time.

Arguments:

    MemoryDumpMdl - Supplies the MDL to map.

Return Value:

    None, fields in MDL updated.

--*/

{
    PFN_NUMBER NumberOfPages;
    PMMPTE PointerPte;
    PCHAR BaseVa;
    MMPTE TempPte;
    PPFN_NUMBER Page;

    //
    // The dump mapping window is 16 PTEs starting at MmCrashDumpPte;
    // the MDL must fit entirely within it (asserted below).
    //

    NumberOfPages = BYTES_TO_PAGES (MemoryDumpMdl->ByteCount + MemoryDumpMdl->ByteOffset);

    ASSERT (NumberOfPages <= 16);

    PointerPte = MmCrashDumpPte;
    BaseVa = (PCHAR)MiGetVirtualAddressMappedByPte(PointerPte);
    MemoryDumpMdl->MappedSystemVa = (PCHAR)BaseVa + MemoryDumpMdl->ByteOffset;
    TempPte = ValidKernelPte;

    // The page frame array immediately follows the MDL header.
    Page = (PPFN_NUMBER)(MemoryDumpMdl + 1);

    //
    // If the pages don't span the entire dump virtual address range,
    // build a barrier.  Otherwise use the default barrier provided at the
    // end of the dump virtual address range.
    //

    if (NumberOfPages < 16) {
        KiFlushSingleTb (TRUE, BaseVa + (NumberOfPages << PAGE_SHIFT));
        (PointerPte + NumberOfPages)->u.Long = MM_KERNEL_DEMAND_ZERO_PTE;
    }

    do {

        //
        // Flush the TB entry for this VA before (re)pointing its PTE,
        // since a previous dump mapping may still be cached in the TB.
        //

        KiFlushSingleTb (TRUE, BaseVa);

        TempPte.u.Hard.PageFrameNumber = *Page;

        //
        // Note this PTE may be valid or invalid prior to the overwriting here.
        //

        *PointerPte = TempPte;

        Page += 1;
        PointerPte += 1;
        BaseVa += PAGE_SIZE;
        NumberOfPages -= 1;
    } while (NumberOfPages != 0);

    return;
}

NTSTATUS MmMapUserAddressesToPage (IN PVOID BaseAddress,
                                   IN SIZE_T NumberOfBytes,
                                   IN PVOID PageAddress)
 

Definition at line 5161 of file iosup.c.

References _EPROCESS::AddressSpaceDeleted, ASSERT, _MMVAD::EndingVpn, KeFlushEntireTb(), KeFlushSingleTb(), LOCK_PFN, LOCK_WS_AND_ADDRESS_SPACE, MI_VA_TO_VPN, MI_VPN_TO_VA, MI_VPN_TO_VA_ENDING, MiFillMemoryPte, MiGetPteAddress, MiLocateAddress(), MmGetPhysicalAddress(), NTSTATUS(), NULL, PAGE_SHIFT, PAGED_CODE, PsGetCurrentProcess, _MMVAD::StartingVpn, Status, TRUE, _MMPTE::u, _MMVAD::u, UNLOCK_PFN, UNLOCK_WS_AND_ADDRESS_SPACE, and VOID().

05169 : 05170 05171 This function maps a range of addresses in a physical memory VAD to the 05172 specified page address. This is typically used by a driver to nicely 05173 remove an application's access to things like video memory when the 05174 application is not responding to requests to relinquish it. 05175 05176 Note the entire range must be currently mapped (ie, all the PTEs must 05177 be valid) by the caller. 05178 05179 Arguments: 05180 05181 BaseAddress - Supplies the base virtual address where the physical 05182 address is mapped. 05183 05184 NumberOfBytes - Supplies the number of bytes to remap to the new address. 05185 05186 PageAddress - Supplies the virtual address of the page this is remapped to. 05187 This must be nonpaged memory. 05188 05189 Return Value: 05190 05191 Various NTSTATUS codes. 05192 05193 Environment: 05194 05195 Kernel mode, IRQL of APC_LEVEL or below. 05196 05197 --*/ 05198 05199 { 05200 PMMVAD Vad; 05201 PMMPTE PointerPte; 05202 MMPTE PteContents; 05203 PMMPTE LastPte; 05204 PEPROCESS Process; 05205 NTSTATUS Status; 05206 PVOID EndingAddress; 05207 PFN_NUMBER PageFrameNumber; 05208 SIZE_T NumberOfPtes; 05209 PHYSICAL_ADDRESS PhysicalAddress; 05210 KIRQL OldIrql; 05211 05212 PAGED_CODE(); 05213 05214 if (BaseAddress > MM_HIGHEST_USER_ADDRESS) { 05215 return STATUS_INVALID_PARAMETER_1; 05216 } 05217 05218 if ((ULONG_PTR)BaseAddress + NumberOfBytes > (ULONG64)MM_HIGHEST_USER_ADDRESS) { 05219 return STATUS_INVALID_PARAMETER_2; 05220 } 05221 05222 Process = PsGetCurrentProcess(); 05223 05224 EndingAddress = (PVOID)((PCHAR)BaseAddress + NumberOfBytes - 1); 05225 05226 LOCK_WS_AND_ADDRESS_SPACE (Process); 05227 05228 // 05229 // Make sure the address space was not deleted. 
05230 // 05231 05232 if (Process->AddressSpaceDeleted != 0) { 05233 Status = STATUS_PROCESS_IS_TERMINATING; 05234 goto ErrorReturn; 05235 } 05236 05237 Vad = (PMMVAD)MiLocateAddress (BaseAddress); 05238 05239 if (Vad == NULL) { 05240 05241 // 05242 // No virtual address descriptor located. 05243 // 05244 05245 Status = STATUS_MEMORY_NOT_ALLOCATED; 05246 goto ErrorReturn; 05247 } 05248 05249 if (NumberOfBytes == 0) { 05250 05251 // 05252 // If the region size is specified as 0, the base address 05253 // must be the starting address for the region. The entire VAD 05254 // will then be repointed. 05255 // 05256 05257 if (MI_VA_TO_VPN (BaseAddress) != Vad->StartingVpn) { 05258 Status = STATUS_FREE_VM_NOT_AT_BASE; 05259 goto ErrorReturn; 05260 } 05261 05262 BaseAddress = MI_VPN_TO_VA (Vad->StartingVpn); 05263 EndingAddress = MI_VPN_TO_VA_ENDING (Vad->EndingVpn); 05264 NumberOfBytes = (PCHAR)EndingAddress - (PCHAR)BaseAddress + 1; 05265 } 05266 05267 // 05268 // Found the associated virtual address descriptor. 05269 // 05270 05271 if (Vad->EndingVpn < MI_VA_TO_VPN (EndingAddress)) { 05272 05273 // 05274 // The entire range to remap is not contained within a single 05275 // virtual address descriptor. Return an error. 05276 // 05277 05278 Status = STATUS_INVALID_PARAMETER_2; 05279 goto ErrorReturn; 05280 } 05281 05282 if (Vad->u.VadFlags.PhysicalMapping == 0) { 05283 05284 // 05285 // The virtual address descriptor is not a physical mapping. 
05286 // 05287 05288 Status = STATUS_INVALID_ADDRESS; 05289 goto ErrorReturn; 05290 } 05291 05292 PointerPte = MiGetPteAddress (BaseAddress); 05293 LastPte = MiGetPteAddress (EndingAddress); 05294 NumberOfPtes = LastPte - PointerPte + 1; 05295 05296 PhysicalAddress = MmGetPhysicalAddress (PageAddress); 05297 PageFrameNumber = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT); 05298 05299 PteContents = *PointerPte; 05300 PteContents.u.Hard.PageFrameNumber = PageFrameNumber; 05301 05302 #if DBG 05303 05304 // 05305 // All the PTEs must be valid or the filling will corrupt the 05306 // UsedPageTableCounts. 05307 // 05308 05309 do { 05310 ASSERT (PointerPte->u.Hard.Valid == 1); 05311 PointerPte += 1; 05312 } while (PointerPte < LastPte); 05313 PointerPte = MiGetPteAddress (BaseAddress); 05314 #endif 05315 05316 // 05317 // Fill the PTEs and flush at the end - no race here because it doesn't 05318 // matter whether the user app sees the old or the new data until we 05319 // return (writes going to either page is acceptable prior to return 05320 // from this function). There is no race with I/O and ProbeAndLockPages 05321 // because the PFN lock is acquired here. 05322 // 05323 05324 LOCK_PFN (OldIrql); 05325 05326 #if !defined (_X86PAE_) 05327 MiFillMemoryPte (PointerPte, 05328 NumberOfPtes * sizeof (MMPTE), 05329 PteContents.u.Long); 05330 #else 05331 05332 // 05333 // Note that the PAE architecture must very carefully fill these PTEs. 
05334 // 05335 05336 do { 05337 ASSERT (PointerPte->u.Hard.Valid == 1); 05338 PointerPte += 1; 05339 (VOID)KeInterlockedSwapPte ((PHARDWARE_PTE)PointerPte, 05340 (PHARDWARE_PTE)&PteContents); 05341 } while (PointerPte < LastPte); 05342 PointerPte = MiGetPteAddress (BaseAddress); 05343 05344 #endif 05345 05346 if (NumberOfPtes == 1) { 05347 05348 (VOID)KeFlushSingleTb (BaseAddress, 05349 TRUE, 05350 TRUE, 05351 (PHARDWARE_PTE)PointerPte, 05352 PteContents.u.Flush); 05353 } 05354 else { 05355 KeFlushEntireTb (TRUE, TRUE); 05356 } 05357 05358 UNLOCK_PFN (OldIrql); 05359 05360 Status = STATUS_SUCCESS; 05361 05362 ErrorReturn: 05363 05364 UNLOCK_WS_AND_ADDRESS_SPACE (Process); 05365 05366 return Status; 05367 }

PVOID MmMapVideoDisplay (IN PHYSICAL_ADDRESS PhysicalAddress,
                         IN SIZE_T NumberOfBytes,
                         IN MEMORY_CACHING_TYPE CacheType)
 

Definition at line 7516 of file iosup.c.

References ASSERT, BYTE_OFFSET, COMPUTE_PAGES_SPANNED, ExAllocatePoolWithTag, ExFreePool(), FALSE, MI_DISABLE_CACHING, MI_SET_GLOBAL_STATE, MI_WRITE_VALID_PTE, MiFillMemoryPte, MiGetSubsectionAddressForPte, MiGetVirtualAddressMappedByPte, MiProtoAddressForPte, MiReleaseSystemPtes(), MiReserveSystemPtes(), MM_NOCACHE, MM_READWRITE, MM_VA_MAPPED_BY_PDE, MM_ZERO_KERNEL_PTE, MmMapIoSpace(), MmPageSizeInfo, MMPTE, NonPagedPool, NULL, PAGE_SHIFT, PAGE_SIZE, PAGED_CODE, PagedPool, PTE_SHIFT, _SUBSECTION::PtesInSubsection, _SUBSECTION::StartingSector, _SUBSECTION::SubsectionBase, SystemPteSpace, TRUE, _SUBSECTION::u, _MMPTE::u, ValidKernelPte, X64K, and ZeroKernelPte.

PVOID
MmMapVideoDisplay (
    IN PHYSICAL_ADDRESS PhysicalAddress,
    IN SIZE_T NumberOfBytes,
    IN MEMORY_CACHING_TYPE CacheType
    )

/*++

Routine Description:

    This function maps the specified physical address into the non-pagable
    portion of the system address space.

Arguments:

    PhysicalAddress - Supplies the starting physical address to map.

    NumberOfBytes - Supplies the number of bytes to map.

    CacheType - Supplies MmNonCached if the physical address is to be mapped
                as non-cached, MmCached if the address should be cached, and
                MmWriteCombined if the address should be cached and
                write-combined as a frame buffer.  For I/O device registers,
                this is usually specified as MmNonCached.

Return Value:

    Returns the virtual address which maps the specified physical addresses.
    The value NULL is returned if sufficient virtual address space for
    the mapping could not be found.

Environment:

    Kernel mode, IRQL of APC_LEVEL or below.

--*/

{
    PMMPTE PointerPte;
    PVOID BaseVa;
#ifdef LARGE_PAGES
    MMPTE TempPte;
    PFN_NUMBER PageFrameIndex;
    PFN_NUMBER NumberOfPages;
    ULONG size;
    PMMPTE protoPte;
    PMMPTE largePte;
    ULONG pageSize;
    PSUBSECTION Subsection;
    ULONG Alignment;
    ULONG EmPageSize;
#endif // LARGE_PAGES
    ULONG LargePages;

    LargePages = FALSE;
    PointerPte = NULL;

#if defined (i386) && !defined (_X86PAE_)
    ASSERT (PhysicalAddress.HighPart == 0);
#endif

    PAGED_CODE();

    ASSERT (NumberOfBytes != 0);

#ifdef LARGE_PAGES
    NumberOfPages = COMPUTE_PAGES_SPANNED (PhysicalAddress.LowPart,
                                           NumberOfBytes);

    TempPte = ValidKernelPte;
    MI_DISABLE_CACHING (TempPte);
    PageFrameIndex = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);
    TempPte.u.Hard.PageFrameNumber = PageFrameIndex;

    //
    // Only one large-page video mapping may exist at a time
    // (MmLargeVideoMapped), and only ranges above 64K qualify.
    //

    if ((NumberOfBytes > X64K) && (!MmLargeVideoMapped)) {

        //
        // Grow pageSize by factors of 4 until it covers NumberOfBytes;
        // the reservation alignment is twice that (two large TLB entries
        // are built below), but never less than one PDE's span.
        //

        size = (NumberOfBytes - 1) >> (PAGE_SHIFT + 1);
        pageSize = PAGE_SIZE;

        while (size != 0) {
            size = size >> 2;
            pageSize = pageSize << 2;
        }

        Alignment = pageSize << 1;
        if (Alignment < MM_VA_MAPPED_BY_PDE) {
            Alignment = MM_VA_MAPPED_BY_PDE;
        }

#if defined(_IA64_)

        //
        // Convert pageSize to the EM specific page-size field format
        //

        EmPageSize = 0;
        size = pageSize - 1;

        while (size) {
            size = size >> 1;
            EmPageSize += 1;
        }

        if (NumberOfBytes > pageSize) {

            if (MmPageSizeInfo & (pageSize << 1)) {

                //
                // if larger page size is supported in the implementation
                //

                pageSize = pageSize << 1;
                EmPageSize += 1;

            }
            else {

                EmPageSize = EmPageSize | pageSize;

            }
        }

        pageSize = EmPageSize;
#endif

        NumberOfPages = Alignment >> PAGE_SHIFT;

        PointerPte = MiReserveSystemPtes(NumberOfPages,
                                         SystemPteSpace,
                                         Alignment,
                                         0,
                                         FALSE);

        // On any allocation failure, fall back to a small-page mapping.
        if (PointerPte == NULL) {
            goto MapWithSmallPages;
        }

        protoPte = ExAllocatePoolWithTag (PagedPool,
                                          sizeof (MMPTE),
                                          'bSmM');

        if (protoPte == NULL) {
            MiReleaseSystemPtes(PointerPte, NumberOfPages, SystemPteSpace);
            goto MapWithSmallPages;
        }

        Subsection = ExAllocatePoolWithTag (NonPagedPool,
                                            sizeof(SUBSECTION) + (4 * sizeof(MMPTE)),
                                            'bSmM');

        if (Subsection == NULL) {
            ExFreePool (protoPte);
            MiReleaseSystemPtes(PointerPte, NumberOfPages, SystemPteSpace);
            goto MapWithSmallPages;
        }

        MiFillMemoryPte (PointerPte,
                         Alignment >> (PAGE_SHIFT - PTE_SHIFT),
                         MM_ZERO_KERNEL_PTE);

        //
        // Build large page descriptor and fill in all the PTEs.
        //

        Subsection->StartingSector = pageSize;
        Subsection->EndingSector = (ULONG)NumberOfPages;
        Subsection->u.LongFlags = 0;
        Subsection->u.SubsectionFlags.LargePages = 1;
        Subsection->u.SubsectionFlags.Protection = MM_READWRITE | MM_NOCACHE;
        Subsection->PtesInSubsection = Alignment;
        Subsection->SubsectionBase = PointerPte;

        largePte = (PMMPTE)(Subsection + 1);

        //
        // Build the first 2 PTEs as entries for the TLB to
        // map the specified physical address.
        //

        *largePte = TempPte;
        largePte += 1;

        if (NumberOfBytes > pageSize) {
            *largePte = TempPte;
            largePte->u.Hard.PageFrameNumber += (pageSize >> PAGE_SHIFT);
        } else {
            *largePte = ZeroKernelPte;
        }

        //
        // Build the first prototype PTE as a paging file format PTE
        // referring to the subsection.
        //

        protoPte->u.Long = MiGetSubsectionAddressForPte(Subsection);
        protoPte->u.Soft.Prototype = 1;
        protoPte->u.Soft.Protection = MM_READWRITE | MM_NOCACHE;

        //
        // Set the PTE up for all the user's PTE entries, proto pte
        // format pointing to the 3rd prototype PTE.
        //

        TempPte.u.Long = MiProtoAddressForPte (protoPte);
        MI_SET_GLOBAL_STATE (TempPte, 1);
        LargePages = TRUE;
        MmLargeVideoMapped = TRUE;
    }

    if (PointerPte != NULL) {
        BaseVa = (PVOID)MiGetVirtualAddressMappedByPte (PointerPte);
        BaseVa = (PVOID)((PCHAR)BaseVa + BYTE_OFFSET(PhysicalAddress.LowPart));

        do {
            ASSERT (PointerPte->u.Hard.Valid == 0);
            MI_WRITE_VALID_PTE (PointerPte, TempPte);
            PointerPte += 1;
            NumberOfPages -= 1;
        } while (NumberOfPages != 0);
    } else {

MapWithSmallPages:

#endif //LARGE_PAGES

        //
        // Small-page (or non-LARGE_PAGES build) path: a plain I/O space
        // mapping with the caller's cache type.
        //

        BaseVa = MmMapIoSpace (PhysicalAddress,
                               NumberOfBytes,
                               CacheType);
#ifdef LARGE_PAGES
    }
#endif //LARGE_PAGES

    return BaseVa;
}

VOID MmProbeAndLockPages (IN OUT PMDL MemoryDescriptorList,
                          IN KPROCESSOR_MODE AccessMode,
                          IN LOCK_OPERATION Operation)
 

Definition at line 238 of file iosup.c.

References ADDRESS_AND_SIZE_TO_SPAN_PAGES, ASSERT, CHAR, COMPUTE_PAGES_SPANNED, DbgPrint, _MI_PHYSICAL_VIEW::EndVa, EXCEPTION_EXECUTE_HANDLER, ExRaiseStatus(), failure, FALSE, IoReadAccess, KernelMode, LOCK_PFN2, MDL_IO_SPACE, MDL_MAPPED_TO_SYSTEM_VA, MDL_PAGES_LOCKED, MDL_PARTIAL, MDL_PHYSICAL_VIEW, MDL_SOURCE_IS_NONPAGED_POOL, MDL_WRITE_OPERATION, MI_ADD_LOCKED_PAGE_CHARGE, MI_CONVERT_PHYSICAL_TO_PFN, MI_GET_PAGE_FRAME_FROM_PTE, MI_INSTRUMENT_PROBE_RAISES, MI_IS_PHYSICAL_ADDRESS, MI_IS_SYSTEM_CACHE_ADDRESS, MI_NONPAGABLE_MEMORY_AVAILABLE, MI_PFN_ELEMENT, MiAddMdlTracker(), MiGetPdeAddress, MiGetPpeAddress, MiGetPteAddress, MiIsPteOnPdeBoundary, MiIsPteOnPpeBoundary, MM_EMPTY_LIST, MM_PTE_WRITE_MASK, MM_READWRITE, MmAccessFault(), MmHighestPhysicalPage, MmLockPagesLimit, MmReferenceCountCheck, MmResetPageFaultReadAhead, MmSavePageFaultReadAhead, MmSetPageFaultReadAhead, MmSystemLockPagesCount, MmTrackLockedPages, MmUnlockPages(), NT_SUCCESS, NTSTATUS(), NULL, _EPROCESS::NumberOfLockedPages, PAGE_SIZE, _EPROCESS::PhysicalVadList, ProbeForWriteChar, PsGetCurrentProcess, PsGetCurrentThread, RtlGetCallersAddress(), _MI_PHYSICAL_VIEW::StartVa, TRUE, _MMPTE::u, _MMVAD::u, _MMPFN::u3, UNLOCK_PFN2, and _MI_PHYSICAL_VIEW::Vad.

Referenced by BuildQueryDirectoryIrp(), CcMdlRead(), CcPrepareMdlWrite(), CcZeroData(), ExLockUserBuffer(), IoBuildAsynchronousFsdRequest(), IoBuildDeviceIoControlRequest(), IopSetEaOrQuotaInformationFile(), IopXxxControlFile(), MiDoMappedCopy(), MiGetWorkingSetInfo(), MmProbeAndLockProcessPages(), MmProbeAndLockSelectedPages(), NtNotifyChangeDirectoryFile(), NtQueryEaFile(), NtQueryQuotaInformationFile(), NtReadFile(), NtSetEaFile(), NtStartProfile(), NtWriteFile(), UdfCreateUserMdl(), VdmQueryDirectoryFile(), and VerifierProbeAndLockPages().

00246 : 00247 00248 This routine probes the specified pages, makes the pages resident and 00249 locks the physical pages mapped by the virtual pages in memory. The 00250 Memory descriptor list is updated to describe the physical pages. 00251 00252 Arguments: 00253 00254 MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List 00255 (MDL). The supplied MDL must supply a virtual 00256 address, byte offset and length field. The 00257 physical page portion of the MDL is updated when 00258 the pages are locked in memory. 00259 00260 AccessMode - Supplies the access mode in which to probe the arguments. 00261 One of KernelMode or UserMode. 00262 00263 Operation - Supplies the operation type. One of IoReadAccess, IoWriteAccess 00264 or IoModifyAccess. 00265 00266 Return Value: 00267 00268 None - exceptions are raised. 00269 00270 Environment: 00271 00272 Kernel mode. APC_LEVEL and below for pagable addresses, 00273 DISPATCH_LEVEL and below for non-pagable addresses. 00274 00275 --*/ 00276 00277 { 00278 PPFN_NUMBER Page; 00279 MMPTE PteContents; 00280 PMMPTE PointerPte; 00281 PMMPTE PointerPde; 00282 PMMPTE PointerPpe; 00283 PVOID Va; 00284 PVOID EndVa; 00285 PVOID AlignedVa; 00286 PMMPFN Pfn1; 00287 PFN_NUMBER PageFrameIndex; 00288 PEPROCESS CurrentProcess; 00289 KIRQL OldIrql; 00290 PFN_NUMBER NumberOfPagesToLock; 00291 PFN_NUMBER NumberOfPagesSpanned; 00292 NTSTATUS status; 00293 NTSTATUS ProbeStatus; 00294 PETHREAD Thread; 00295 ULONG SavedState; 00296 LOGICAL AddressIsPhysical; 00297 PLIST_ENTRY NextEntry; 00298 PMI_PHYSICAL_VIEW PhysicalView; 00299 PCHAR StartVa; 00300 PVOID CallingAddress; 00301 PVOID CallersCaller; 00302 00303 #if !defined (_X86_) 00304 CallingAddress = (PVOID)_ReturnAddress(); 00305 CallersCaller = (PVOID)0; 00306 #endif 00307 00308 #if DBG 00309 if (MiPrintLockedPages != 0) { 00310 MiVerifyLockedPageCharges (); 00311 } 00312 #endif 00313 00314 ASSERT (MemoryDescriptorList->ByteCount != 0); 00315 ASSERT 
(((ULONG)MemoryDescriptorList->ByteOffset & ~(PAGE_SIZE - 1)) == 0); 00316 00317 Page = (PPFN_NUMBER)(MemoryDescriptorList + 1); 00318 00319 ASSERT (((ULONG_PTR)MemoryDescriptorList->StartVa & (PAGE_SIZE - 1)) == 0); 00320 AlignedVa = (PVOID)MemoryDescriptorList->StartVa; 00321 00322 ASSERT ((MemoryDescriptorList->MdlFlags & ( 00323 MDL_PAGES_LOCKED | 00324 MDL_MAPPED_TO_SYSTEM_VA | 00325 MDL_SOURCE_IS_NONPAGED_POOL | 00326 MDL_PARTIAL | 00327 MDL_IO_SPACE)) == 0); 00328 00329 Va = (PCHAR)AlignedVa + MemoryDescriptorList->ByteOffset; 00330 StartVa = Va; 00331 00332 PointerPte = MiGetPteAddress (Va); 00333 00334 // 00335 // Endva is one byte past the end of the buffer, if ACCESS_MODE is not 00336 // kernel, make sure the EndVa is in user space AND the byte count 00337 // does not cause it to wrap. 00338 // 00339 00340 EndVa = (PVOID)((PCHAR)Va + MemoryDescriptorList->ByteCount); 00341 00342 if ((AccessMode != KernelMode) && 00343 ((EndVa > (PVOID)MM_USER_PROBE_ADDRESS) || (Va >= EndVa))) { 00344 *Page = MM_EMPTY_LIST; 00345 MI_INSTRUMENT_PROBE_RAISES(0); 00346 ExRaiseStatus (STATUS_ACCESS_VIOLATION); 00347 return; 00348 } 00349 00350 // 00351 // There is an optimization which could be performed here. If 00352 // the operation is for WriteAccess and the complete page is 00353 // being modified, we can remove the current page, if it is not 00354 // resident, and substitute a demand zero page. 00355 // Note, that after analysis by marking the thread and then 00356 // noting if a page read was done, this rarely occurs. 
00357 // 00358 00359 MemoryDescriptorList->Process = (PEPROCESS)NULL; 00360 00361 Thread = PsGetCurrentThread (); 00362 00363 if (!MI_IS_PHYSICAL_ADDRESS(Va)) { 00364 00365 AddressIsPhysical = FALSE; 00366 ProbeStatus = STATUS_SUCCESS; 00367 00368 NumberOfPagesToLock = COMPUTE_PAGES_SPANNED (Va, 00369 MemoryDescriptorList->ByteCount); 00370 00371 ASSERT (NumberOfPagesToLock != 0); 00372 00373 NumberOfPagesSpanned = NumberOfPagesToLock; 00374 00375 PointerPpe = MiGetPpeAddress (Va); 00376 PointerPde = MiGetPdeAddress (Va); 00377 00378 MmSavePageFaultReadAhead (Thread, &SavedState); 00379 MmSetPageFaultReadAhead (Thread, (ULONG)(NumberOfPagesToLock - 1)); 00380 00381 try { 00382 00383 do { 00384 00385 *Page = MM_EMPTY_LIST; 00386 00387 // 00388 // Make sure the page is resident. 00389 // 00390 00391 *(volatile CHAR *)Va; 00392 00393 if ((Operation != IoReadAccess) && 00394 (Va <= MM_HIGHEST_USER_ADDRESS)) { 00395 00396 // 00397 // Probe for write access as well. 00398 // 00399 00400 ProbeForWriteChar ((PCHAR)Va); 00401 } 00402 00403 NumberOfPagesToLock -= 1; 00404 00405 MmSetPageFaultReadAhead (Thread, (ULONG)(NumberOfPagesToLock - 1)); 00406 Va = (PVOID)(((ULONG_PTR)(PCHAR)Va + PAGE_SIZE) & ~(PAGE_SIZE - 1)); 00407 Page += 1; 00408 } while (Va < EndVa); 00409 00410 ASSERT (NumberOfPagesToLock == 0); 00411 00412 } except (EXCEPTION_EXECUTE_HANDLER) { 00413 ProbeStatus = GetExceptionCode(); 00414 } 00415 00416 // 00417 // We may still fault again below but it's generally rare. 00418 // Restore this thread's normal fault behavior now. 
00419 // 00420 00421 MmResetPageFaultReadAhead (Thread, SavedState); 00422 00423 if (ProbeStatus != STATUS_SUCCESS) { 00424 MI_INSTRUMENT_PROBE_RAISES(1); 00425 ExRaiseStatus (ProbeStatus); 00426 return; 00427 } 00428 } 00429 else { 00430 AddressIsPhysical = TRUE; 00431 *Page = MM_EMPTY_LIST; 00432 } 00433 00434 Va = AlignedVa; 00435 Page = (PPFN_NUMBER)(MemoryDescriptorList + 1); 00436 00437 // 00438 // Indicate that this is a write operation. 00439 // 00440 00441 if (Operation != IoReadAccess) { 00442 MemoryDescriptorList->MdlFlags |= MDL_WRITE_OPERATION; 00443 } else { 00444 MemoryDescriptorList->MdlFlags &= ~(MDL_WRITE_OPERATION); 00445 } 00446 00447 // 00448 // Acquire the PFN database lock. 00449 // 00450 00451 LOCK_PFN2 (OldIrql); 00452 00453 if (Va <= MM_HIGHEST_USER_ADDRESS) { 00454 00455 // 00456 // These are addresses with user space, check to see if the 00457 // working set size will allow these pages to be locked. 00458 // 00459 00460 ASSERT (NumberOfPagesSpanned != 0); 00461 00462 CurrentProcess = PsGetCurrentProcess (); 00463 00464 // 00465 // Check for a transfer to/from a physical VAD - no reference counts 00466 // may be modified for these pages. 00467 // 00468 00469 NextEntry = CurrentProcess->PhysicalVadList.Flink; 00470 while (NextEntry != &CurrentProcess->PhysicalVadList) { 00471 00472 PhysicalView = CONTAINING_RECORD(NextEntry, 00473 MI_PHYSICAL_VIEW, 00474 ListEntry); 00475 00476 if ((PhysicalView->Vad->u.VadFlags.UserPhysicalPages == 0) && 00477 (PhysicalView->Vad->u.VadFlags.PhysicalMapping == 0)) { 00478 NextEntry = NextEntry->Flink; 00479 continue; 00480 } 00481 00482 if (StartVa < PhysicalView->StartVa) { 00483 00484 if ((PCHAR)EndVa - 1 >= PhysicalView->StartVa) { 00485 00486 // 00487 // The range encompasses a physical VAD. This is not 00488 // allowed. 
00489 // 00490 00491 UNLOCK_PFN2 (OldIrql); 00492 MI_INSTRUMENT_PROBE_RAISES(2); 00493 ExRaiseStatus (STATUS_ACCESS_VIOLATION); 00494 return; 00495 } 00496 00497 NextEntry = NextEntry->Flink; 00498 continue; 00499 } 00500 00501 if (StartVa <= PhysicalView->EndVa) { 00502 00503 // 00504 // Ensure that the entire range lies within the VAD. 00505 // 00506 00507 if ((PCHAR)EndVa - 1 > PhysicalView->EndVa) { 00508 00509 // 00510 // The range goes past the end of the VAD - not allowed. 00511 // 00512 00513 UNLOCK_PFN2 (OldIrql); 00514 MI_INSTRUMENT_PROBE_RAISES(3); 00515 ExRaiseStatus (STATUS_ACCESS_VIOLATION); 00516 return; 00517 } 00518 00519 if (PhysicalView->Vad->u.VadFlags.UserPhysicalPages == 1) { 00520 00521 // 00522 // All the PTEs must still be checked and reference 00523 // counts bumped on the pages. Just don't charge 00524 // against the working set. 00525 // 00526 00527 NextEntry = NextEntry->Flink; 00528 continue; 00529 } 00530 00531 // 00532 // The range lies within a physical VAD. 00533 // 00534 00535 if (Operation != IoReadAccess) { 00536 00537 // 00538 // Ensure the VAD is writable. Changing individual PTE 00539 // protections in a physical VAD is not allowed. 00540 // 00541 00542 if ((PhysicalView->Vad->u.VadFlags.Protection & MM_READWRITE) == 0) { 00543 UNLOCK_PFN2 (OldIrql); 00544 MI_INSTRUMENT_PROBE_RAISES(4); 00545 ExRaiseStatus (STATUS_ACCESS_VIOLATION); 00546 return; 00547 } 00548 } 00549 00550 // 00551 // Don't charge page locking for this transfer as it is all 00552 // physical, just initialize the MDL. Note the pages do not 00553 // have to be physically contiguous, so the frames must be 00554 // extracted from the PTEs. 
00555 // 00556 00557 MemoryDescriptorList->MdlFlags |= (MDL_PHYSICAL_VIEW | MDL_PAGES_LOCKED); 00558 MemoryDescriptorList->Process = CurrentProcess; 00559 00560 do { 00561 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 00562 *Page = PageFrameIndex; 00563 Page += 1; 00564 PointerPte += 1; 00565 Va = (PVOID)((PCHAR)Va + PAGE_SIZE); 00566 } while (Va < EndVa); 00567 00568 UNLOCK_PFN2 (OldIrql); 00569 return; 00570 } 00571 NextEntry = NextEntry->Flink; 00572 } 00573 00574 CurrentProcess->NumberOfLockedPages += NumberOfPagesSpanned; 00575 00576 MemoryDescriptorList->Process = CurrentProcess; 00577 } 00578 00579 MemoryDescriptorList->MdlFlags |= MDL_PAGES_LOCKED; 00580 00581 do { 00582 00583 if (AddressIsPhysical == TRUE) { 00584 00585 // 00586 // On certain architectures, virtual addresses 00587 // may be physical and hence have no corresponding PTE. 00588 // 00589 00590 PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (Va); 00591 00592 } else { 00593 00594 #if defined (_WIN64) 00595 while ((PointerPpe->u.Hard.Valid == 0) || 00596 (PointerPde->u.Hard.Valid == 0) || 00597 (PointerPte->u.Hard.Valid == 0)) 00598 #else 00599 while ((PointerPde->u.Hard.Valid == 0) || 00600 (PointerPte->u.Hard.Valid == 0)) 00601 #endif 00602 { 00603 00604 // 00605 // PDE is not resident, release PFN lock touch the page and make 00606 // it appear. 00607 // 00608 00609 UNLOCK_PFN2 (OldIrql); 00610 00611 MmSetPageFaultReadAhead (Thread, 0); 00612 00613 status = MmAccessFault (FALSE, Va, KernelMode, (PVOID)0); 00614 00615 MmResetPageFaultReadAhead (Thread, SavedState); 00616 00617 if (!NT_SUCCESS(status)) { 00618 00619 // 00620 // An exception occurred. Unlock the pages locked 00621 // so far. 00622 // 00623 00624 failure: 00625 if (MmTrackLockedPages == TRUE) { 00626 00627 // 00628 // Adjust the MDL length so that MmUnlockPages only 00629 // processes the part that was completed. 
00630 // 00631 00632 ULONG PagesLocked; 00633 00634 PagesLocked = ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartVa, 00635 MemoryDescriptorList->ByteCount); 00636 00637 #if defined (_X86_) 00638 RtlGetCallersAddress(&CallingAddress, &CallersCaller); 00639 #endif 00640 MiAddMdlTracker (MemoryDescriptorList, 00641 CallingAddress, 00642 CallersCaller, 00643 PagesLocked, 00644 0); 00645 } 00646 00647 MmUnlockPages (MemoryDescriptorList); 00648 00649 // 00650 // Raise an exception of access violation to the caller. 00651 // 00652 00653 MI_INSTRUMENT_PROBE_RAISES(7); 00654 ExRaiseStatus (status); 00655 return; 00656 } 00657 00658 LOCK_PFN2 (OldIrql); 00659 } 00660 00661 PteContents = *PointerPte; 00662 ASSERT (PteContents.u.Hard.Valid == 1); 00663 00664 if (Va <= MM_HIGHEST_USER_ADDRESS) { 00665 if (Operation != IoReadAccess) { 00666 00667 if ((PteContents.u.Long & MM_PTE_WRITE_MASK) == 0) { 00668 00669 // 00670 // The caller has made the page protection more 00671 // restrictive, this should never be done once the 00672 // request has been issued ! Rather than wading 00673 // through the PFN database entry to see if it 00674 // could possibly work out, give the caller an 00675 // access violation. 00676 // 00677 00678 #if DBG 00679 DbgPrint ("MmProbeAndLockPages: PTE %p %p changed\n", 00680 PointerPte, 00681 PteContents.u.Long); 00682 ASSERT (FALSE); 00683 #endif 00684 00685 UNLOCK_PFN2 (OldIrql); 00686 status = STATUS_ACCESS_VIOLATION; 00687 goto failure; 00688 } 00689 } 00690 } 00691 00692 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents); 00693 } 00694 00695 if (PageFrameIndex > MmHighestPhysicalPage) { 00696 00697 // 00698 // This is an I/O space address don't allow operations 00699 // on addresses not in the PFN database. 
00700 // 00701 00702 MemoryDescriptorList->MdlFlags |= MDL_IO_SPACE; 00703 00704 } else { 00705 ASSERT ((MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) == 0); 00706 00707 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 00708 00709 #if PFN_CONSISTENCY 00710 ASSERT(Pfn1->u3.e1.PageTablePage == 0); 00711 #endif 00712 00713 // 00714 // Check to make sure this page is not locked down an unusually 00715 // high number of times. 00716 // 00717 00718 if (Pfn1->u3.e2.ReferenceCount >= MmReferenceCountCheck) { 00719 UNLOCK_PFN2 (OldIrql); 00720 ASSERT (FALSE); 00721 status = STATUS_WORKING_SET_QUOTA; 00722 goto failure; 00723 } 00724 00725 // 00726 // Check to make sure the systemwide locked pages count is fluid. 00727 // 00728 00729 if (MI_NONPAGABLE_MEMORY_AVAILABLE() <= 0) { 00730 00731 // 00732 // If this page is for paged pool or privileged code/data, 00733 // then force it in. 00734 // 00735 00736 if ((Va > MM_HIGHEST_USER_ADDRESS) && 00737 (!MI_IS_SYSTEM_CACHE_ADDRESS(Va))) { 00738 MI_INSTRUMENT_PROBE_RAISES(8); 00739 goto ok; 00740 } 00741 00742 MI_INSTRUMENT_PROBE_RAISES(5); 00743 UNLOCK_PFN2 (OldIrql); 00744 status = STATUS_WORKING_SET_QUOTA; 00745 goto failure; 00746 } 00747 00748 // 00749 // Check to make sure any administrator-desired limit is obeyed. 00750 // 00751 00752 if (MmSystemLockPagesCount + 1 >= MmLockPagesLimit) { 00753 00754 // 00755 // If this page is for paged pool or privileged code/data, 00756 // then force it in. 
00757 // 00758 00759 if ((Va > MM_HIGHEST_USER_ADDRESS) && 00760 (!MI_IS_SYSTEM_CACHE_ADDRESS(Va))) { 00761 MI_INSTRUMENT_PROBE_RAISES(9); 00762 goto ok; 00763 } 00764 00765 MI_INSTRUMENT_PROBE_RAISES(6); 00766 UNLOCK_PFN2 (OldIrql); 00767 status = STATUS_WORKING_SET_QUOTA; 00768 goto failure; 00769 } 00770 00771 ok: 00772 MI_ADD_LOCKED_PAGE_CHARGE(Pfn1, 0); 00773 00774 Pfn1->u3.e2.ReferenceCount += 1; 00775 } 00776 00777 *Page = PageFrameIndex; 00778 00779 Page += 1; 00780 PointerPte += 1; 00781 if (MiIsPteOnPdeBoundary(PointerPte)) { 00782 PointerPde += 1; 00783 if (MiIsPteOnPpeBoundary(PointerPte)) { 00784 PointerPpe += 1; 00785 } 00786 } 00787 00788 Va = (PVOID)((PCHAR)Va + PAGE_SIZE); 00789 } while (Va < EndVa); 00790 00791 UNLOCK_PFN2 (OldIrql); 00792 00793 if ((MmTrackLockedPages == TRUE) && (AlignedVa <= MM_HIGHEST_USER_ADDRESS)) { 00794 00795 ASSERT (NumberOfPagesSpanned != 0); 00796 00797 #if defined (_X86_) 00798 RtlGetCallersAddress(&CallingAddress, &CallersCaller); 00799 #endif 00800 00801 MiAddMdlTracker (MemoryDescriptorList, 00802 CallingAddress, 00803 CallersCaller, 00804 NumberOfPagesSpanned, 00805 1); 00806 } 00807 00808 return; 00809 }

NTKERNELAPI
VOID
MmProbeAndLockProcessPages (
    IN OUT PMDL MemoryDescriptorList,
    IN PEPROCESS Process,
    IN KPROCESSOR_MODE AccessMode,
    IN LOCK_OPERATION Operation
    )

/*++

Routine Description:

    This routine probes and locks the address range described by the
    MemoryDescriptorList in the context of the specified Process for
    the given AccessMode and Operation.

Arguments:

    MemoryDescriptorList - Supplies a pre-initialized MDL that describes
        the address range to be probed and locked.

    Process - Supplies the process whose address range is to be locked.

    AccessMode - Supplies the mode for which the probe should check
        access to the range.

    Operation - Supplies the type of access for which to check the range.

Return Value:

    None.  Raises the probe status as an exception on failure.

--*/

{
    NTSTATUS ProbeStatus;
    LOGICAL AttachedToProcess;

    ProbeStatus = STATUS_SUCCESS;
    AttachedToProcess = FALSE;

    //
    // The probe must run in the target process context; attach if the
    // caller is not already there.
    //

    if (Process != PsGetCurrentProcess ()) {
        KeAttachProcess (&Process->Pcb);
        AttachedToProcess = TRUE;
    }

    try {

        MmProbeAndLockPages (MemoryDescriptorList,
                             AccessMode,
                             Operation);

    } except (EXCEPTION_EXECUTE_HANDLER) {

        //
        // Capture the failure so it can be re-raised after detaching.
        //

        ProbeStatus = GetExceptionCode ();
    }

    if (AttachedToProcess) {
        KeDetachProcess ();
    }

    //
    // Re-raise any probe failure in the caller's original context.
    //

    if (ProbeStatus != STATUS_SUCCESS) {
        ExRaiseStatus (ProbeStatus);
    }

    return;
}

NTKERNELAPI
VOID
MmProbeAndLockSelectedPages (
    IN OUT PMDL MemoryDescriptorList,
    IN PFILE_SEGMENT_ELEMENT SegmentArray,
    IN KPROCESSOR_MODE AccessMode,
    IN LOCK_OPERATION Operation
    )

/*++

Routine Description:

    This routine probes the specified pages, makes the pages resident and
    locks the physical pages mapped by the virtual pages in memory.  The
    memory descriptor list is updated to describe the physical pages.

Arguments:

    MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List
                           (MDL). The MDL must supply the length. The
                           physical page portion of the MDL is updated when
                           the pages are locked in memory.

    SegmentArray - Supplies a pointer to a list of buffer segments to be
                   probed and locked.

    AccessMode - Supplies the access mode in which to probe the arguments.
                 One of KernelMode or UserMode.

    Operation - Supplies the operation type. One of IoReadAccess,
                IoWriteAccess or IoModifyAccess.

Return Value:

    None - exceptions are raised.

Environment:

    Kernel mode.  APC_LEVEL and below.

--*/

{
    PMDL TempMdl;
    // Stack storage for a one-page MDL (MDL header plus one PFN entry).
    PFN_NUMBER MdlHack[(sizeof(MDL)/sizeof(PFN_NUMBER)) + 1];
    PPFN_NUMBER Page;
    PFILE_SEGMENT_ELEMENT LastSegment;
    PVOID CallingAddress;
    PVOID CallersCaller;
    ULONG NumberOfPagesToLock;

    PAGED_CODE();

#if !defined (_X86_)
    CallingAddress = (PVOID)_ReturnAddress();
    CallersCaller = (PVOID)0;
#endif

    NumberOfPagesToLock = 0;

    ASSERT (MemoryDescriptorList->ByteCount != 0);
    ASSERT (((ULONG_PTR)MemoryDescriptorList->ByteOffset & ~(PAGE_SIZE - 1)) == 0);

    ASSERT ((MemoryDescriptorList->MdlFlags & (
                    MDL_PAGES_LOCKED |
                    MDL_MAPPED_TO_SYSTEM_VA |
                    MDL_SOURCE_IS_NONPAGED_POOL |
                    MDL_PARTIAL |
                    MDL_IO_SPACE)) == 0);

    //
    // Initialize TempMdl.
    //

    TempMdl = (PMDL) &MdlHack;

    MmInitializeMdl( TempMdl, SegmentArray->Buffer, PAGE_SIZE );

    Page = (PPFN_NUMBER) (MemoryDescriptorList + 1);

    //
    // Calculate the end of the segment list.
    //

    LastSegment = SegmentArray +
                  BYTES_TO_PAGES(MemoryDescriptorList->ByteCount);

    ASSERT(SegmentArray < LastSegment);

    //
    // Build a small MDL for each segment and call probe and lock pages.
    // Then copy the PFNs to the real MDL.  The first page is processed
    // outside of the try/finally to ensure that the flags and process
    // field are correctly set in case MmUnlockPages needs to be called.
    //

    //
    // Even systems without 64 bit pointers are required to zero the
    // upper 32 bits of the segment address so use alignment rather
    // than the buffer pointer.
    //

    SegmentArray += 1;
    MmProbeAndLockPages( TempMdl, AccessMode, Operation );

    if (MmTrackLockedPages == TRUE) {

        //
        // Since we move the page from the temp MDL to the real one below
        // and never free the temp one, fix up our accounting now.
        //

        if (MiFreeMdlTracker (TempMdl, 1) == TRUE) {
            NumberOfPagesToLock += 1;
        }
    }

    *Page++ = *((PPFN_NUMBER) (TempMdl + 1));

    //
    // Copy the flags and process fields.
    //

    MemoryDescriptorList->MdlFlags |= TempMdl->MdlFlags;
    MemoryDescriptorList->Process = TempMdl->Process;

    try {

        while (SegmentArray < LastSegment) {

            //
            // Even systems without 64 bit pointers are required to zero the
            // upper 32 bits of the segment address so use alignment rather
            // than the buffer pointer.
            //

            TempMdl->StartVa = (PVOID)(ULONG_PTR)SegmentArray->Buffer;
            TempMdl->MdlFlags = 0;

            SegmentArray += 1;
            MmProbeAndLockPages( TempMdl, AccessMode, Operation );


            if (MmTrackLockedPages == TRUE) {

                //
                // Since we move the page from the temp MDL to the real one
                // below and never free the temp one, fix up our accounting
                // now.
                //

                if (MiFreeMdlTracker (TempMdl, 1) == TRUE) {
                    NumberOfPagesToLock += 1;
                }
            }

            *Page++ = *((PPFN_NUMBER) (TempMdl + 1));
        }
    } finally {

        if (abnormal_termination()) {

            //
            // Adjust the MDL length so that MmUnlockPages only processes
            // the part that was completed.
            //

            MemoryDescriptorList->ByteCount =
                (ULONG) (Page - (PPFN_NUMBER) (MemoryDescriptorList + 1)) << PAGE_SHIFT;

            if (MmTrackLockedPages == TRUE) {
#if defined (_X86_)
                RtlGetCallersAddress(&CallingAddress, &CallersCaller);
#endif
                MiAddMdlTracker (MemoryDescriptorList,
                                 CallingAddress,
                                 CallersCaller,
                                 NumberOfPagesToLock,
                                 2);
            }

            MmUnlockPages( MemoryDescriptorList );
        }
        else if (MmTrackLockedPages == TRUE) {
#if defined (_X86_)
            RtlGetCallersAddress(&CallingAddress, &CallersCaller);
#endif
            MiAddMdlTracker (MemoryDescriptorList,
                             CallingAddress,
                             CallersCaller,
                             NumberOfPagesToLock,
                             3);
        }
    }
}

VOID
MmReleaseDumpAddresses (
    IN PFN_NUMBER Pages
    )

/*++

Routine Description:

    For use by the hibernate routine ONLY.  Puts zeros back into the
    used dump PTEs.

Arguments:

    Pages - Supplies the number of dump PTEs to release.

Return Value:

    None.

--*/

{
    PMMPTE DumpPte;
    PCHAR FlushVa;
    PFN_NUMBER i;

    DumpPte = MmCrashDumpPte;
    FlushVa = (PCHAR) MiGetVirtualAddressMappedByPte (DumpPte);

    for (i = 0; i < Pages; i += 1) {

        //
        // Flush the translation for this address before zeroing the PTE
        // so no stale mapping survives.
        //

        KiFlushSingleTb (TRUE, FlushVa);

        DumpPte->u.Long = MM_ZERO_PTE;

        DumpPte += 1;
        FlushVa += PAGE_SIZE;
    }
}

NTKERNELAPI
VOID
MmReturnMemoryForHibernate (
    IN PMDL Mdl
    )

/*++

Routine Description:

    Returns memory obtained from MmGatherMemoryForHibernate.

Arguments:

    Mdl - Supplies an MDL, the start VA field should be NULL.  The length
          field indicates how many pages to return.

Return Value:

    None.

Environment:

    Kernel mode, IRQL of APC_LEVEL or below.

--*/

{
    KIRQL OldIrql;
    PFN_NUMBER PagesNeeded;
    PPFN_NUMBER Pages;

    PagesNeeded = (Mdl->ByteCount >> PAGE_SHIFT);
    Pages = (PPFN_NUMBER)(Mdl + 1);

    //
    // Guard against an empty MDL: the previous do/while decremented
    // PagesNeeded before testing it, so a byte count below PAGE_SIZE
    // (PagesNeeded == 0) would wrap the unsigned counter and walk far
    // past the end of the frame array.
    //

    if (PagesNeeded == 0) {
        return;
    }

    LOCK_PFN2 (OldIrql);
    do {
        MiDecrementReferenceCount (*Pages);
        Pages += 1;
        PagesNeeded -= 1;
    } while (PagesNeeded);
    UNLOCK_PFN2 (OldIrql);
    return;
}

BOOLEAN
MmSetAddressRangeModified (
    IN PVOID Address,
    IN SIZE_T Length
    )

/*++

Routine Description:

    This routine sets the modified bit in the PFN database for the
    pages that correspond to the specified address range.

    Note that the dirty bit in the PTE is cleared by this operation.

Arguments:

    Address - Supplies the address of the start of the range.  This
              range must reside within the system cache.

    Length - Supplies the length of the range.

Return Value:

    TRUE if at least one PTE was dirty in the range, FALSE otherwise.

Environment:

    Kernel mode.  APC_LEVEL and below for pagable addresses,
    DISPATCH_LEVEL and below for non-pagable addresses.

--*/

{
    PMMPTE PointerPte;
    PMMPTE LastPte;
    PMMPFN Pfn1;
    PMMPTE FlushPte;
    MMPTE PteContents;
    MMPTE FlushContents;
    KIRQL OldIrql;
    PVOID VaFlushList[MM_MAXIMUM_FLUSH_COUNT];
    ULONG Count;
    BOOLEAN Result;

    Count = 0;
    Result = FALSE;

    //
    // Loop on the copy on write case until the page is only
    // writable.
    //

    PointerPte = MiGetPteAddress (Address);
    LastPte = MiGetPteAddress ((PVOID)((PCHAR)Address + Length - 1));

    LOCK_PFN2 (OldIrql);

    do {

        PteContents = *PointerPte;

        if (PteContents.u.Hard.Valid == 1) {

            //
            // Mark the physical page modified so the page can not be
            // discarded without being written first.
            //

            Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber);
            Pfn1->u3.e1.Modified = 1;

            //
            // The page is being re-dirtied: any page-file copy is now
            // stale, so release the page-file space (unless a prototype
            // PTE backs it or a write is already in progress).
            //

            if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) &&
                (Pfn1->u3.e1.WriteInProgress == 0)) {
                MiReleasePageFileSpace (Pfn1->OriginalPte);
                Pfn1->OriginalPte.u.Soft.PageFileHigh = 0;
            }

#ifdef NT_UP
            //
            // On uniprocessor systems no need to flush if this processor
            // doesn't think the PTE is dirty.
            //

            if (MI_IS_PTE_DIRTY (PteContents)) {
                Result = TRUE;
#else //NT_UP
            Result |= (BOOLEAN)(MI_IS_PTE_DIRTY (PteContents));
#endif //NT_UP
                MI_SET_PTE_CLEAN (PteContents);
                MI_WRITE_VALID_PTE_NEW_PROTECTION (PointerPte, PteContents);
                FlushContents = PteContents;
                FlushPte = PointerPte;

                //
                // Clear the write bit in the PTE so new writes can be
                // tracked.  Accumulate addresses for a batched TB flush;
                // once the list overflows, the entire TB is flushed below.
                //

                if (Count != MM_MAXIMUM_FLUSH_COUNT) {
                    VaFlushList[Count] = Address;
                    Count += 1;
                }
#ifdef NT_UP
            }
#endif //NT_UP
        }
        PointerPte += 1;
        Address = (PVOID)((PCHAR)Address + PAGE_SIZE);
    } while (PointerPte <= LastPte);

    //
    // Flush the translation buffer: single entry, a batch of entries,
    // or the entire TB if the flush list overflowed.
    //

    if (Count != 0) {
        if (Count == 1) {

            (VOID)KeFlushSingleTb (VaFlushList[0],
                                   FALSE,
                                   TRUE,
                                   (PHARDWARE_PTE)FlushPte,
                                   FlushContents.u.Flush);

        } else if (Count != MM_MAXIMUM_FLUSH_COUNT) {

            KeFlushMultipleTb (Count,
                               &VaFlushList[0],
                               FALSE,
                               TRUE,
                               NULL,
                               *(PHARDWARE_PTE)&ZeroPte.u.Flush);

        } else {
            KeFlushEntireTb (FALSE, TRUE);
        }
    }
    UNLOCK_PFN2 (OldIrql);
    return Result;
}

NTSTATUS
MmSetBankedSection (
    IN HANDLE ProcessHandle,
    IN PVOID VirtualAddress,
    IN ULONG BankLength,
    IN BOOLEAN ReadWriteBank,
    IN PBANKED_SECTION_ROUTINE BankRoutine,
    IN PVOID Context
    )

/*++

Routine Description:

    This function declares a mapped video buffer as a banked
    section.  This allows banked video devices (i.e., even
    though the video controller has a megabyte or so of memory,
    only a small bank (like 64k) can be mapped at any one time.

    In order to overcome this problem, the pager handles faults
    to this memory, unmaps the current bank, calls off to the
    video driver and then maps in the new bank.

    This function creates the necessary structures to allow the
    video driver to be called from the pager.

    ********************* NOTE NOTE NOTE *************************
    At this time only read/write banks are supported!

Arguments:

    ProcessHandle - Supplies a handle to the process in which to
                    support the banked video function.

    VirtualAddress - Supplies the virtual address where the video
                     buffer is mapped in the specified process.

    BankLength - Supplies the size of the bank.

    ReadWriteBank - Supplies TRUE if the bank is read and write.

    BankRoutine - Supplies a pointer to the routine that should be
                  called by the pager.

    Context - Supplies a context to be passed by the pager to the
              BankRoutine.

Return Value:

    Returns the status of the function.

Environment:

    Kernel mode, APC_LEVEL or below.

--*/

{
    NTSTATUS Status;
    PEPROCESS Process;
    PMMVAD Vad;
    PMMPTE PointerPte;
    PMMPTE LastPte;
    MMPTE TempPte;
    ULONG_PTR size;
    LONG count;
    ULONG NumberOfPtes;
    PMMBANKED_SECTION Bank;

    PAGED_CODE ();

    UNREFERENCED_PARAMETER (ReadWriteBank);

    //
    // Reference the specified process handle for VM_OPERATION access.
    //

    Status = ObReferenceObjectByHandle ( ProcessHandle,
                                         PROCESS_VM_OPERATION,
                                         PsProcessType,
                                         KernelMode,
                                         (PVOID *)&Process,
                                         NULL );

    if (!NT_SUCCESS(Status)) {
        return Status;
    }

    KeAttachProcess (&Process->Pcb);

    //
    // Get the address creation mutex to block multiple threads from
    // creating or deleting address space at the same time and
    // get the working set mutex so virtual address descriptors can
    // be inserted and walked.  Block APCs so an APC which takes a page
    // fault does not corrupt various structures.
    //

    LOCK_WS_AND_ADDRESS_SPACE (Process);

    //
    // Make sure the address space was not deleted, if so, return an error.
    //

    if (Process->AddressSpaceDeleted != 0) {
        Status = STATUS_PROCESS_IS_TERMINATING;
        goto ErrorReturn;
    }

    //
    // The address must be the base of an existing physical-mapping VAD.
    //

    Vad = MiLocateAddress (VirtualAddress);

    if ((Vad == NULL) ||
        (Vad->StartingVpn != MI_VA_TO_VPN (VirtualAddress)) ||
        (Vad->u.VadFlags.PhysicalMapping == 0)) {
        Status = STATUS_NOT_MAPPED_DATA;
        goto ErrorReturn;
    }

    //
    // The view size must be an exact multiple of the bank length.
    //

    size = PAGE_SIZE + ((Vad->EndingVpn - Vad->StartingVpn) << PAGE_SHIFT);
    if ((size % BankLength) != 0) {
        Status = STATUS_INVALID_VIEW_SIZE;
        goto ErrorReturn;
    }

    //
    // Compute log2(BankLength); used below to derive BankShift.
    // NOTE(review): assumes BankLength is a power of two - the modulus
    // check above does not guarantee this.
    //

    count = -1;
    NumberOfPtes = BankLength;

    do {
        NumberOfPtes = NumberOfPtes >> 1;
        count += 1;
    } while (NumberOfPtes != 0);

    //
    // Turn VAD into Banked VAD
    //

    NumberOfPtes = BankLength >> PAGE_SHIFT;

    Bank = ExAllocatePoolWithTag (NonPagedPool,
                                  sizeof (MMBANKED_SECTION) +
                                       (NumberOfPtes - 1) * sizeof(MMPTE),
                                  ' mM');
    if (Bank == NULL) {
        Status = STATUS_INSUFFICIENT_RESOURCES;
        goto ErrorReturn;
    }

    Bank->BankShift = PTE_SHIFT + count - PAGE_SHIFT;

    PointerPte = MiGetPteAddress(MI_VPN_TO_VA (Vad->StartingVpn));
    ASSERT (PointerPte->u.Hard.Valid == 1);

    Vad->u4.Banked = Bank;
    Bank->BasePhysicalPage = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
    Bank->BasedPte = PointerPte;
    Bank->BankSize = BankLength;
    Bank->BankedRoutine = BankRoutine;
    Bank->Context = Context;
    Bank->CurrentMappedPte = PointerPte;

    //
    // Build the template PTEs structure - one valid read/write PTE per
    // page of the bank, physically contiguous from BasePhysicalPage.
    //

    count = 0;
    TempPte = ZeroPte;

    MI_MAKE_VALID_PTE (TempPte,
                       Bank->BasePhysicalPage,
                       MM_READWRITE,
                       PointerPte);

    if (TempPte.u.Hard.Write) {
        MI_SET_PTE_DIRTY (TempPte);
    }

    do {
        Bank->BankTemplate[count] = TempPte;
        TempPte.u.Hard.PageFrameNumber += 1;
        count += 1;
    } while ((ULONG)count < NumberOfPtes );

    // NOTE(review): LastPte is computed but never used below.
    LastPte = MiGetPteAddress (MI_VPN_TO_VA (Vad->EndingVpn));

    //
    // Set all PTEs within this range to zero.  Any faults within
    // this range will call the banked routine before making the
    // page valid.
    //

    RtlFillMemory (PointerPte,
                   (size >> (PAGE_SHIFT - PTE_SHIFT)),
                   (UCHAR)ZeroPte.u.Long);

    KeFlushEntireTb (TRUE, TRUE);

    Status = STATUS_SUCCESS;
ErrorReturn:

    UNLOCK_WS_AND_ADDRESS_SPACE (Process);
    KeDetachProcess();
    ObDereferenceObject (Process);
    return Status;
}

VOID MmSetKernelDumpRange IN OUT PVOID  pDumpContext  ) 
 

Definition at line 8083 of file iosup.c.

References ADDRESS_AND_SIZE_TO_SPAN_PAGES, ASSERT, COMPUTE_PAGES_SPANNED, FALSE, Index, IoFreeDumpRange(), IoSetDumpRange(), KeNumberProcessors, KiProcessorBlock, _MMFREE_POOL_ENTRY::List, List, MI_IS_PHYSICAL_ADDRESS, MI_MAX_FREE_LIST_HEADS, MiProtectFreeNonPagedPool(), MiSystemCacheEndExtra, MiSystemCacheStartExtra, MiUnProtectFreeNonPagedPool(), MM_FREE_POOL_SIGNATURE, MM_PAGES_IN_KSEG0, MM_SYSTEM_SPACE_END, MMFREE_POOL_ENTRY, MmGetVirtualForPhysical(), MmHighestPossiblePhysicalPage, MmIsAddressValid(), MmNonPagedPoolFreeListHead, MmNonPagedPoolStart, MMPFN, MmPfnDatabase, MmProtectFreedNonPagedPool, MmSizeOfNonPagedPoolInBytes, MmSystemCacheEnd, MmSystemCacheStart, MmSystemRangeStart, MmVirtualBias, NULL, PAGE_SIZE, PsLoadedModuleList, _MMFREE_POOL_ENTRY::Signature, _MMFREE_POOL_ENTRY::Size, and TRUE.

Referenced by IopCreateSummaryDump().

BOOLEAN
MmSetPageProtection (
    IN PVOID VirtualAddress,
    IN SIZE_T NumberOfBytes,
    IN ULONG NewProtect
    )

/*++

Routine Description:

    This function sets the specified virtual address range to the desired
    protection.  This assumes that the virtual addresses are backed by PTEs
    which can be set (ie: not in kseg0 or large pages).

Arguments:

    VirtualAddress - Supplies the start address to protect.

    NumberOfBytes - Supplies the number of bytes to set.

    NewProtect - Supplies the protection to set the pages to (PAGE_XX).

Return Value:

    TRUE if the protection was applied, FALSE if not.

Environment:

    Kernel mode, IRQL of APC_LEVEL or below.

--*/

{
    PFN_NUMBER PageIndex;
    PFN_NUMBER PageCount;
    PMMPTE TargetPte;
    MMPTE OldContents;
    MMPTE NewContents;
    KIRQL OldIrql;
    ULONG ProtectionMask;

    //
    // Physically-mapped addresses have no PTEs to rewrite.
    //

    if (MI_IS_PHYSICAL_ADDRESS (VirtualAddress)) {
        return FALSE;
    }

    //
    // An invalid PAGE_XX value raises - treat it as failure.
    //

    try {
        ProtectionMask = MiMakeProtectionMask (NewProtect);
    } except (EXCEPTION_EXECUTE_HANDLER) {
        return FALSE;
    }

    TargetPte = MiGetPteAddress (VirtualAddress);
    PageCount = BYTES_TO_PAGES (NumberOfBytes);

    LOCK_PFN (OldIrql);

    for (PageIndex = 0; PageIndex < PageCount; PageIndex += 1) {

        OldContents.u.Long = TargetPte->u.Long;

        //
        // Build a valid PTE carrying the same frame with the new
        // protection, then write it and flush the old translation
        // in one step.
        //

        MI_MAKE_VALID_PTE (NewContents,
                           OldContents.u.Hard.PageFrameNumber,
                           ProtectionMask,
                           TargetPte);

        KeFlushSingleTb ((PVOID)((PUCHAR)VirtualAddress + (PageIndex << PAGE_SHIFT)),
                         TRUE,
                         TRUE,
                         (PHARDWARE_PTE)TargetPte,
                         NewContents.u.Flush);

        TargetPte += 1;
    }

    UNLOCK_PFN (OldIrql);

    return TRUE;
}

SIZE_T MmSizeOfMdl IN PVOID  Base,
IN SIZE_T  Length
 

Definition at line 5815 of file iosup.c.

References ADDRESS_AND_SIZE_TO_SPAN_PAGES.

Referenced by ExLockUserBuffer(), MiCreateImageFileMap(), MmCreateMdl(), and NtStartProfile().

05822 : 05823 05824 This function returns the number of bytes required for an MDL for a 05825 given buffer and size. 05826 05827 Arguments: 05828 05829 Base - Supplies the base virtual address for the buffer. 05830 05831 Length - Supplies the size of the buffer in bytes. 05832 05833 Return Value: 05834 05835 Returns the number of bytes required to contain the MDL. 05836 05837 Environment: 05838 05839 Kernel mode. Any IRQL level. 05840 05841 --*/ 05842 05843 { 05844 return( sizeof( MDL ) + 05845 (ADDRESS_AND_SIZE_TO_SPAN_PAGES( Base, Length ) * 05846 sizeof( PFN_NUMBER )) 05847 ); 05848 }

VOID MmUnlockPagableImageSection IN PVOID  ImageSectionHandle  ) 
 

Definition at line 7065 of file iosup.c.

References ASSERT, FALSE, KePulseEvent(), LOCK_PFN2, MI_GET_PAGE_FRAME_FROM_PTE, MI_IS_PHYSICAL_ADDRESS, MI_PFN_ELEMENT, MI_REMOVE_LOCKED_PAGE_CHARGE, MiDecrementReferenceCount(), MiGetPteAddress, MmCollidedLockEvent, MmCollidedLockWait, MmLockedCode, SECTION_BASE_ADDRESS, _MMPTE::u, _MMPFN::u3, and UNLOCK_PFN2.

Referenced by ExpGetLockInformation(), ExpGetLookasideInformation(), ExpGetPoolInformation(), ExpGetProcessInformation(), IoUnregisterShutdownNotification(), Ke386ConfigureCyrixProcessor(), KeSetPhysicalCacheTypeRange(), KiAmdK6MtrrSetMemoryType(), MiEmptyAllWorkingSets(), MiFindContiguousMemory(), MiFreeInitializationCode(), MiLoadSystemImage(), MiMapViewInSystemSpace(), MiSetPagingOfDriver(), MiShareSessionImage(), MiUnmapLockedPagesInUserSpace(), MiUnmapViewInSystemSpace(), MmAdjustWorkingSetSize(), MmAllocateNonCachedMemory(), MmAllocatePagesForMdl(), MmFreeDriverInitialization(), MmFreeNonCachedMemory(), MmFreePagesFromMdl(), MmLockPagedPool(), MmMapViewOfSection(), MmResetDriverPaging(), MmShutdownSystem(), MmUnloadSystemImage(), MmUnlockPagedPool(), NtQueryVirtualMemory(), PspQueryPooledQuotaLimits(), PspQueryQuotaLimits(), PspQueryWorkingSetWatch(), PspSetQuotaLimits(), and SmbTraceStop().

07071 : 07072 07073 This function unlocks from memory, the pages locked by a preceding call to 07074 MmLockPagableDataSection. 07075 07076 Arguments: 07077 07078 ImageSectionHandle - Supplies the value returned by a previous call 07079 to MmLockPagableDataSection. 07080 07081 Return Value: 07082 07083 None. 07084 07085 --*/ 07086 07087 { 07088 PIMAGE_SECTION_HEADER NtSection; 07089 PMMPTE PointerPte; 07090 PMMPTE LastPte; 07091 PFN_NUMBER PageFrameIndex; 07092 PMMPFN Pfn1; 07093 KIRQL OldIrql; 07094 PVOID BaseAddress; 07095 ULONG SizeToUnlock; 07096 ULONG Collision; 07097 07098 if (MI_IS_PHYSICAL_ADDRESS(ImageSectionHandle)) { 07099 07100 // 07101 // No need to lock physical addresses. 07102 // 07103 07104 return; 07105 } 07106 07107 NtSection = (PIMAGE_SECTION_HEADER)ImageSectionHandle; 07108 07109 BaseAddress = SECTION_BASE_ADDRESS(NtSection); 07110 SizeToUnlock = NtSection->SizeOfRawData; 07111 07112 PointerPte = MiGetPteAddress(BaseAddress); 07113 LastPte = MiGetPteAddress((PCHAR)BaseAddress + SizeToUnlock - 1); 07114 07115 // 07116 // Address must be within the system cache. 07117 // 07118 07119 LOCK_PFN2 (OldIrql); 07120 07121 // 07122 // The NumberOfLinenumbers field is used to store the 07123 // lock count. 
07124 // 07125 07126 ASSERT (NtSection->NumberOfLinenumbers >= 2); 07127 NtSection->NumberOfLinenumbers -= 1; 07128 07129 if (NtSection->NumberOfLinenumbers != 1) { 07130 UNLOCK_PFN2 (OldIrql); 07131 return; 07132 } 07133 07134 do { 07135 ASSERT (PointerPte->u.Hard.Valid == 1); 07136 07137 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 07138 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 07139 07140 ASSERT (Pfn1->u3.e2.ReferenceCount > 1); 07141 07142 MI_REMOVE_LOCKED_PAGE_CHARGE (Pfn1, 37); 07143 07144 MiDecrementReferenceCount (PageFrameIndex); 07145 07146 PointerPte += 1; 07147 07148 } while (PointerPte <= LastPte); 07149 07150 NtSection->NumberOfLinenumbers -= 1; 07151 ASSERT (NtSection->NumberOfLinenumbers == 0); 07152 Collision = MmCollidedLockWait; 07153 MmCollidedLockWait = FALSE; 07154 MmLockedCode -= SizeToUnlock; 07155 07156 UNLOCK_PFN2 (OldIrql); 07157 07158 if (Collision) { 07159 KePulseEvent (&MmCollidedLockEvent, 0, FALSE); 07160 } 07161 07162 return; 07163 }

NTKERNELAPI VOID MmUnlockPagedPool IN PVOID  Address,
IN SIZE_T  SizeInBytes
 

Definition at line 7869 of file iosup.c.

References ASSERT, ExPageLockHandle, LOCK_PFN2, MI_GET_PAGE_FRAME_FROM_PTE, MI_PFN_ELEMENT, MI_REMOVE_LOCKED_PAGE_CHARGE, MiDecrementReferenceCount(), MiGetPteAddress, MmLockPagableSectionByHandle(), MmUnlockPagableImageSection(), _MMPTE::u, _MMPFN::u3, and UNLOCK_PFN2.

Referenced by Ke386SetDescriptorProcess(), and MiSetImageProtect().

07876 : 07877 07878 Unlocks paged pool that was locked with MmLockPagedPool. 07879 07880 Arguments: 07881 07882 Address - Supplies the address in paged pool to unlock. 07883 07884 SizeInBytes - Supplies the size, in bytes, to unlock. 07885 07886 Return Value: 07887 07888 None. 07889 07890 Environment: 07891 07892 Kernel mode, IRQL of APC_LEVEL or below. 07893 07894 --*/ 07895 07896 { 07897 PMMPTE PointerPte; 07898 PMMPTE LastPte; 07899 KIRQL OldIrql; 07900 PFN_NUMBER PageFrameIndex; 07901 PMMPFN Pfn1; 07902 07903 MmLockPagableSectionByHandle(ExPageLockHandle); 07904 PointerPte = MiGetPteAddress (Address); 07905 LastPte = MiGetPteAddress ((PVOID)((PCHAR)Address + (SizeInBytes - 1))); 07906 LOCK_PFN2 (OldIrql); 07907 07908 do { 07909 ASSERT (PointerPte->u.Hard.Valid == 1); 07910 07911 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 07912 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 07913 07914 ASSERT (Pfn1->u3.e2.ReferenceCount > 1); 07915 07916 MI_REMOVE_LOCKED_PAGE_CHARGE (Pfn1, 35); 07917 07918 MiDecrementReferenceCount (PageFrameIndex); 07919 07920 PointerPte += 1; 07921 } while (PointerPte <= LastPte); 07922 07923 UNLOCK_PFN2 (OldIrql); 07924 MmUnlockPagableImageSection(ExPageLockHandle); 07925 return; 07926 }

VOID MmUnlockPages IN OUT PMDL  MemoryDescriptorList  ) 
 

Definition at line 1347 of file iosup.c.

References ADDRESS_AND_SIZE_TO_SPAN_PAGES, ASSERT, KeBugCheckEx(), _LOCK_HEADER::ListHead, LOCK_PFN2, _LOCK_TRACKER::Mdl, MDL_IO_SPACE, MDL_MAPPED_TO_SYSTEM_VA, MDL_PAGES_LOCKED, MDL_PARTIAL, MDL_PHYSICAL_VIEW, MDL_SOURCE_IS_NONPAGED_POOL, MDL_WRITE_OPERATION, MI_PFN_ELEMENT, MI_REMOVE_LOCKED_PAGE_CHARGE, MiDecrementReferenceCount(), MiFreeMdlTracker(), MiReleasePageFileSpace(), MM_EMPTY_LIST, MmHighestPhysicalPage, MmLockedPagesHead, MmTrackLockedPages, MmUnmapLockedPages(), NULL, _MMPFN::OriginalPte, TRUE, _MMPTE::u, _MMPFN::u3, Unlock, and UNLOCK_PFN2.

Referenced by CcMdlRead(), CcMdlReadComplete2(), CcMdlWriteComplete2(), CcPrepareMdlWrite(), CcZeroData(), ExpProfileDelete(), ExUnlockUserBuffer(), MiDoMappedCopy(), MiGetWorkingSetInfo(), MmProbeAndLockPages(), MmProbeAndLockSelectedPages(), NtStartProfile(), NtStopProfile(), and VerifierUnlockPages().

01353 : 01354 01355 This routine unlocks physical pages which are described by a Memory 01356 Descriptor List. 01357 01358 Arguments: 01359 01360 MemoryDescriptorList - Supplies a pointer to a memory descriptor list 01361 (MDL). The supplied MDL must have been supplied 01362 to MmProbeAndLockPages to lock the pages down. As the 01363 pages are unlocked, the MDL is updated. 01364 01365 Return Value: 01366 01367 None. 01368 01369 Environment: 01370 01371 Kernel mode, IRQL of DISPATCH_LEVEL or below. 01372 01373 --*/ 01374 01375 { 01376 PFN_NUMBER NumberOfPages; 01377 PPFN_NUMBER Page; 01378 PVOID StartingVa; 01379 KIRQL OldIrql; 01380 PMMPFN Pfn1; 01381 LOGICAL Unlock; 01382 01383 ASSERT ((MemoryDescriptorList->MdlFlags & MDL_PAGES_LOCKED) != 0); 01384 ASSERT ((MemoryDescriptorList->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0); 01385 ASSERT ((MemoryDescriptorList->MdlFlags & MDL_PARTIAL) == 0); 01386 ASSERT (MemoryDescriptorList->ByteCount != 0); 01387 01388 if (MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) { 01389 01390 // 01391 // This MDL has been mapped into system space, unmap now.
01392 // 01393 01394 MmUnmapLockedPages (MemoryDescriptorList->MappedSystemVa, 01395 MemoryDescriptorList); 01396 } 01397 01398 Page = (PPFN_NUMBER)(MemoryDescriptorList + 1); 01399 Unlock = TRUE; 01400 StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa + 01401 MemoryDescriptorList->ByteOffset); 01402 01403 NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartingVa, 01404 MemoryDescriptorList->ByteCount); 01405 01406 if (MmTrackLockedPages == TRUE) { 01407 if ((MemoryDescriptorList->Process != NULL) && 01408 (Unlock == TRUE) && 01409 ((MemoryDescriptorList->MdlFlags & MDL_PHYSICAL_VIEW) == 0)) { 01410 MiFreeMdlTracker (MemoryDescriptorList, NumberOfPages); 01411 } 01412 } 01413 01414 ASSERT (NumberOfPages != 0); 01415 01416 LOCK_PFN2 (OldIrql); 01417 01418 if (MmLockedPagesHead.ListHead.Flink != 0) { 01419 01420 PLOCK_TRACKER P; 01421 PLIST_ENTRY NextEntry; 01422 01423 NextEntry = MmLockedPagesHead.ListHead.Flink; 01424 while (NextEntry != &MmLockedPagesHead.ListHead) { 01425 01426 P = CONTAINING_RECORD(NextEntry, 01427 LOCK_TRACKER, 01428 GlobalListEntry); 01429 01430 if (P->Mdl == MemoryDescriptorList) { 01431 KeBugCheckEx (LOCKED_PAGES_TRACKER_CORRUPTION, 01432 0x4, 01433 (ULONG_PTR)P, 01434 (ULONG_PTR)MemoryDescriptorList, 01435 0); 01436 } 01437 01438 NextEntry = NextEntry->Flink; 01439 } 01440 } 01441 01442 if ((MemoryDescriptorList->Process != NULL) && 01443 (Unlock == TRUE) && 01444 ((MemoryDescriptorList->MdlFlags & MDL_PHYSICAL_VIEW) == 0)) { 01445 01446 MemoryDescriptorList->Process->NumberOfLockedPages -= NumberOfPages; 01447 ASSERT ((SPFN_NUMBER)MemoryDescriptorList->Process->NumberOfLockedPages >= 0); 01448 } 01449 01450 if ((MemoryDescriptorList->MdlFlags & (MDL_IO_SPACE | MDL_PHYSICAL_VIEW)) == 0) { 01451 01452 // 01453 // Only unlock if not I/O or physical space. 01454 // 01455 01456 do { 01457 01458 if (*Page == MM_EMPTY_LIST) { 01459 01460 // 01461 // There are no more locked pages. 
01462 // 01463 01464 break; 01465 } 01466 ASSERT (*Page <= MmHighestPhysicalPage); 01467 01468 // 01469 // If this was a write operation set the modified bit in the 01470 // PFN database. 01471 // 01472 01473 Pfn1 = MI_PFN_ELEMENT (*Page); 01474 if (MemoryDescriptorList->MdlFlags & MDL_WRITE_OPERATION) { 01475 Pfn1->u3.e1.Modified = 1; 01476 if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) && 01477 (Pfn1->u3.e1.WriteInProgress == 0)) { 01478 MiReleasePageFileSpace (Pfn1->OriginalPte); 01479 Pfn1->OriginalPte.u.Soft.PageFileHigh = 0; 01480 } 01481 } 01482 01483 MI_REMOVE_LOCKED_PAGE_CHARGE(Pfn1, 1); 01484 01485 MiDecrementReferenceCount (*Page); 01486 01487 *Page = MM_EMPTY_LIST; 01488 Page += 1; 01489 NumberOfPages -= 1; 01490 } while (NumberOfPages != 0); 01491 } 01492 01493 MemoryDescriptorList->MdlFlags &= ~MDL_PAGES_LOCKED; 01494 UNLOCK_PFN2 (OldIrql); 01495 01496 return; 01497 }

VOID MmUnmapIoSpace IN PVOID  BaseAddress,
IN SIZE_T  NumberOfBytes
 

Definition at line 3678 of file iosup.c.

References ASSERT, COMPUTE_PAGES_SPANNED, MiGetPteAddress, MiInsertDeadPteTrackingBlock(), MiLockSystemSpace, MiReleaseSystemPtes(), MiRemovePteTracker(), MiUnlockSystemSpace, MmTrackPtes, NULL, PAGED_CODE, and SystemPteSpace.

Referenced by CmpFindACPITable(), CmpMatchAcpiCreatorIdRule(), CmpMatchAcpiCreatorRevisionRule(), CmpMatchAcpiOemIdRule(), CmpMatchAcpiOemRevisionRule(), CmpMatchAcpiOemTableIdRule(), CmpMatchAcpiRevisionRule(), DriverEntry(), MmFreeContiguousMemorySpecifyCache(), MmUnmapVideoDisplay(), and VerifierUnmapIoSpace().

03685 : 03686 03687 This function unmaps a range of physical address which were previously 03688 mapped via an MmMapIoSpace function call. 03689 03690 Arguments: 03691 03692 BaseAddress - Supplies the base virtual address where the physical 03693 address was previously mapped. 03694 03695 NumberOfBytes - Supplies the number of bytes which were mapped. 03696 03697 Return Value: 03698 03699 None. 03700 03701 Environment: 03702 03703 Kernel mode, Should be IRQL of APC_LEVEL or below, but unfortunately 03704 callers are coming in at DISPATCH_LEVEL and it's too late to change the 03705 rules now. This means you can never make this routine pagable. 03706 03707 --*/ 03708 03709 { 03710 PFN_NUMBER NumberOfPages; 03711 PMMPTE FirstPte; 03712 KIRQL OldIrql; 03713 PVOID PoolBlock; 03714 03715 PAGED_CODE(); 03716 ASSERT (NumberOfBytes != 0); 03717 NumberOfPages = COMPUTE_PAGES_SPANNED (BaseAddress, NumberOfBytes); 03718 FirstPte = MiGetPteAddress (BaseAddress); 03719 MiReleaseSystemPtes(FirstPte, (ULONG)NumberOfPages, SystemPteSpace); 03720 03721 if (MmTrackPtes != 0) { 03722 MiLockSystemSpace(OldIrql); 03723 03724 PoolBlock = MiRemovePteTracker (NULL, 03725 FirstPte, 03726 NumberOfPages); 03727 MiUnlockSystemSpace(OldIrql); 03728 03729 // 03730 // Can't free the pool block here because we may be getting called 03731 // from the fault path in MiWaitForInPageComplete holding the PFN 03732 // lock. Queue the block for later release. 03733 // 03734 03735 if (PoolBlock) { 03736 MiInsertDeadPteTrackingBlock (PoolBlock); 03737 } 03738 } 03739 03740 return; 03741 }

VOID MmUnmapLockedPages IN PVOID  BaseAddress,
IN PMDL  MemoryDescriptorList
 

Definition at line 3124 of file iosup.c.

References ASSERT, COMPUTE_PAGES_SPANNED, LOCK_PFN2, MDL_IO_SPACE, MDL_LOCK_HELD, MDL_MAPPED_TO_SYSTEM_VA, MDL_PARENT_MAPPED_SYSTEM_VA, MDL_PARTIAL_HAS_BEEN_MAPPED, MDL_PHYSICAL_VIEW, MI_GET_PAGE_FRAME_FROM_PTE, MI_IS_PHYSICAL_ADDRESS, MI_PFN_ELEMENT, MiGetPteAddress, MiInsertDeadPteTrackingBlock(), MiLockSystemSpace, MiReleaseSystemPtes(), MiRemovePteTracker(), MiUnlockSystemSpace, MiUnmapLockedPagesInUserSpace(), MmTrackPtes, SystemPteSpace, _MMPTE::u, _MMPFN::u3, and UNLOCK_PFN2.

Referenced by CcZeroData(), ExpProfileDelete(), MiCheckForCrashDump(), MiCleanSection(), MiCloneProcessAddressSpace(), MiCreateImageFileMap(), MiDoMappedCopy(), MiFlushSectionInternal(), MiMakeOutswappedPageResident(), MiWaitForInPageComplete(), MiWriteComplete(), MmShutdownSystem(), MmUnlockPages(), NtStartProfile(), NtStopProfile(), and VerifierUnmapLockedPages().

03131 : 03132 03133 This routine unmaps locked pages which were previously mapped via 03134 a MmMapLockedPages call. 03135 03136 Arguments: 03137 03138 BaseAddress - Supplies the base address where the pages were previously 03139 mapped. 03140 03141 MemoryDescriptorList - Supplies a valid Memory Descriptor List which has 03142 been updated by MmProbeAndLockPages. 03143 03144 Return Value: 03145 03146 None. 03147 03148 Environment: 03149 03150 Kernel mode. DISPATCH_LEVEL or below if base address is within 03151 system space; APC_LEVEL or below if base address is user space. 03152 03153 --*/ 03154 03155 { 03156 PFN_NUMBER NumberOfPages; 03157 PFN_NUMBER i; 03158 PPFN_NUMBER Page; 03159 PMMPTE PointerPte; 03160 PMMPTE PointerBase; 03161 PVOID StartingVa; 03162 KIRQL OldIrql; 03163 PVOID PoolBlock; 03164 03165 ASSERT (MemoryDescriptorList->ByteCount != 0); 03166 ASSERT ((MemoryDescriptorList->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0); 03167 03168 if (MI_IS_PHYSICAL_ADDRESS (BaseAddress)) { 03169 03170 // 03171 // MDL is not mapped into virtual space, just clear the fields 03172 // and return. 
03173 // 03174 03175 MemoryDescriptorList->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA | 03176 MDL_PARTIAL_HAS_BEEN_MAPPED); 03177 return; 03178 } 03179 03180 if (BaseAddress > MM_HIGHEST_USER_ADDRESS) { 03181 03182 StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa + 03183 MemoryDescriptorList->ByteOffset); 03184 03185 NumberOfPages = COMPUTE_PAGES_SPANNED (StartingVa, 03186 MemoryDescriptorList->ByteCount); 03187 03188 PointerBase = MiGetPteAddress (BaseAddress); 03189 03190 03191 ASSERT ((MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) != 0); 03192 03193 03194 #if DBG 03195 PointerPte = PointerBase; 03196 i = NumberOfPages; 03197 Page = (PPFN_NUMBER)(MemoryDescriptorList + 1); 03198 if ((MemoryDescriptorList->MdlFlags & MDL_LOCK_HELD) == 0) { 03199 LOCK_PFN2 (OldIrql); 03200 } 03201 03202 while (i != 0) { 03203 ASSERT (PointerPte->u.Hard.Valid == 1); 03204 ASSERT (*Page == MI_GET_PAGE_FRAME_FROM_PTE (PointerPte)); 03205 if ((MemoryDescriptorList->MdlFlags & (MDL_IO_SPACE | MDL_PHYSICAL_VIEW)) == 0) { 03206 PMMPFN Pfn3; 03207 Pfn3 = MI_PFN_ELEMENT (*Page); 03208 ASSERT (Pfn3->u3.e2.ReferenceCount != 0); 03209 } 03210 03211 Page += 1; 03212 PointerPte += 1; 03213 i -= 1; 03214 } 03215 03216 if ((MemoryDescriptorList->MdlFlags & MDL_LOCK_HELD) == 0) { 03217 UNLOCK_PFN2 (OldIrql); 03218 } 03219 #endif //DBG 03220 03221 MemoryDescriptorList->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA | 03222 MDL_PARTIAL_HAS_BEEN_MAPPED); 03223 03224 if (MmTrackPtes != 0) { 03225 MiLockSystemSpace(OldIrql); 03226 PoolBlock = MiRemovePteTracker (MemoryDescriptorList, 03227 PointerBase, 03228 NumberOfPages); 03229 MiUnlockSystemSpace(OldIrql); 03230 03231 // 03232 // Can't free the pool block here because we may be getting called 03233 // from the fault path in MiWaitForInPageComplete holding the PFN 03234 // lock. Queue the block for later release. 
03235 // 03236 03237 if (PoolBlock) { 03238 MiInsertDeadPteTrackingBlock (PoolBlock); 03239 } 03240 } 03241 03242 MiReleaseSystemPtes (PointerBase, (ULONG)NumberOfPages, SystemPteSpace); 03243 return; 03244 03245 } else { 03246 03247 MiUnmapLockedPagesInUserSpace (BaseAddress, 03248 MemoryDescriptorList); 03249 } 03250 }

VOID MmUnmapVideoDisplay IN PVOID  BaseAddress,
IN SIZE_T  NumberOfBytes
 

Definition at line 7751 of file iosup.c.

References ASSERT, COMPUTE_PAGES_SPANNED, ExFreePool(), FALSE, KSEG0_BASE, MiGetPteAddress, MiGetSubsectionAddress, MiPteToProto, MiReleaseSystemPtes(), MmUnmapIoSpace(), PAGED_CODE, _SUBSECTION::SubsectionBase, SystemPteSpace, _MMPTE::u, and X64K.

07758 : 07759 07760 This function unmaps a range of physical address which were previously 07761 mapped via an MmMapVideoDisplay function call. 07762 07763 Arguments: 07764 07765 BaseAddress - Supplies the base virtual address where the physical 07766 address was previously mapped. 07767 07768 NumberOfBytes - Supplies the number of bytes which were mapped. 07769 07770 Return Value: 07771 07772 None. 07773 07774 Environment: 07775 07776 Kernel mode, IRQL of APC_LEVEL or below. 07777 07778 --*/ 07779 07780 { 07781 07782 #ifdef LARGE_PAGES 07783 PFN_NUMBER NumberOfPages; 07784 ULONG i; 07785 PMMPTE FirstPte; 07786 KIRQL OldIrql; 07787 PMMPTE LargePte; 07788 PSUBSECTION Subsection; 07789 07790 PAGED_CODE(); 07791 07792 ASSERT (NumberOfBytes != 0); 07793 NumberOfPages = COMPUTE_PAGES_SPANNED (BaseAddress, NumberOfBytes); 07794 FirstPte = MiGetPteAddress (BaseAddress); 07795 07796 if ((NumberOfBytes > X64K) && (FirstPte->u.Hard.Valid == 0)) { 07797 07798 ASSERT (MmLargeVideoMapped); 07799 LargePte = MiPteToProto (FirstPte); 07800 Subsection = MiGetSubsectionAddress (LargePte); 07801 ASSERT (Subsection->SubsectionBase == FirstPte); 07802 07803 NumberOfPages = Subsection->EndingSector; 07804 ExFreePool (Subsection); 07805 ExFreePool (LargePte); 07806 MmLargeVideoMapped = FALSE; 07807 KeFillFixedEntryTb ((PHARDWARE_PTE)FirstPte, (PVOID)KSEG0_BASE, LARGE_ENTRY); 07808 } 07809 MiReleaseSystemPtes(FirstPte, NumberOfPages, SystemPteSpace); 07810 return; 07811 07812 #else // LARGE_PAGES 07813 07814 MmUnmapIoSpace (BaseAddress, NumberOfBytes); 07815 return; 07816 #endif //LARGE_PAGES 07817 }


Variable Documentation

LIST_ENTRY MiDeadPteTrackerListHead
 

Definition at line 95 of file iosup.c.

Referenced by MiInitializeIoTrackers(), MiInsertDeadPteTrackingBlock(), and MiReleaseDeadPteTrackers().

ULONG MiLastCallColor
 

Definition at line 4266 of file iosup.c.

Referenced by MmAllocatePagesForMdl().

PFN_NUMBER MiLastCallHighPage
 

Definition at line 4265 of file iosup.c.

Referenced by MmAllocatePagesForMdl().

PFN_NUMBER MiLastCallLowPage
 

Definition at line 4264 of file iosup.c.

Referenced by MmAllocatePagesForMdl().

LOGICAL MiNoLowMemory
 

Definition at line 141 of file iosup.c.

Referenced by KdpCheckLowMemory(), MiAllocateContiguousMemory(), MiInitMachineDependent(), MiReloadBootLoadedDrivers(), MmFreeContiguousMemory(), and MmFreeContiguousMemorySpecifyCache().

ULONG MiProbeRaises[MI_PROBE_RAISE_SIZE]
 

Definition at line 223 of file iosup.c.

SYSPTES_HEADER MiPteHeader
 

Definition at line 94 of file iosup.c.

Referenced by MiGetHighestPteConsumer(), MiInitializeIoTrackers(), MiInsertPteTracker(), and MiRemovePteTracker().

KSPIN_LOCK MiPteTrackerLock
 

Definition at line 96 of file iosup.c.

Referenced by MiInitializeIoTrackers(), MiInsertDeadPteTrackingBlock(), and MiReleaseDeadPteTrackers().

BOOLEAN MiTrackingAborted = FALSE
 

Definition at line 99 of file iosup.c.

Referenced by MiAddMdlTracker(), MiFreeMdlTracker(), and MmCleanProcessAddressSpace().

BOOLEAN MiTrackPtesAborted = FALSE
 

Definition at line 93 of file iosup.c.

Referenced by MiGetHighestPteConsumer(), MiRemovePteTracker(), MmMapIoSpace(), and MmMapLockedPagesSpecifyCache().

BOOLEAN MiWriteCombiningPtes = FALSE
 

Definition at line 211 of file iosup.c.

Referenced by MiMapLockedPagesInUserSpace(), MiMapSinglePage(), MiMapViewOfPhysicalSection(), MmMapIoSpace(), and MmMapLockedPagesSpecifyCache().

KEVENT MmCollidedLockEvent
 

Definition at line 206 of file iosup.c.

Referenced by MmInitSystem(), MmLockPagableSectionByHandle(), and MmUnlockPagableImageSection().

ULONG MmCollidedLockWait
 

Definition at line 207 of file iosup.c.

Referenced by MmLockPagableSectionByHandle(), and MmUnlockPagableImageSection().

SIZE_T MmLockedCode
 

Definition at line 209 of file iosup.c.

Referenced by MiLockCode(), and MmUnlockPagableImageSection().

LOCK_HEADER MmLockedPagesHead
 

Definition at line 98 of file iosup.c.

Referenced by MiAddMdlTracker(), MiFreeMdlTracker(), MiInitializeIoTrackers(), and MmUnlockPages().

PFN_NUMBER MmMdlPagesAllocated
 

Definition at line 204 of file iosup.c.

Referenced by MmAllocatePagesForMdl(), and MmFreePagesFromMdl().

ULONG MmReferenceCountCheck = 2500
 

Definition at line 234 of file iosup.c.

Referenced by MmProbeAndLockPages().

PFN_NUMBER MmSystemLockPagesCount
 

Definition at line 26 of file iosup.c.

Referenced by MmMapLockedPagesSpecifyCache(), and MmProbeAndLockPages().

ULONG MmTotalSystemDriverPages
 

Definition at line 28 of file iosup.c.

LOGICAL MmTrackPtes = FALSE
 

Definition at line 92 of file iosup.c.

POOL_DESCRIPTOR NonPagedPoolDescriptor
 

Definition at line 202 of file iosup.c.


Generated on Sat May 15 19:44:23 2004 for test by doxygen 1.3.7