
mm.h File Reference

Go to the source code of this file.

Classes

struct  _PHYSICAL_MEMORY_RUN
struct  _PHYSICAL_MEMORY_DESCRIPTOR
struct  _MMINFO_COUNTERS
struct  _MMPFNLIST
struct  _PHYSICAL_MEMORY_RANGE

Defines

#define MAX_PHYSICAL_MEMORY_FRAGMENTS   20
#define MM_ALLOCATION_GRANULARITY   ((ULONG)0x10000)
#define MM_MAXIMUM_READ_CLUSTER_SIZE   (15)
#define MM_MAXIMUM_DISK_IO_SIZE   (0x10000)
#define ROUND_TO_PAGES(Size)   (((ULONG_PTR)(Size) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define BYTES_TO_PAGES(Size)
#define BYTE_OFFSET(Va)   ((ULONG)((LONG_PTR)(Va) & (PAGE_SIZE - 1)))
#define PAGE_ALIGN(Va)   ((PVOID)((ULONG_PTR)(Va) & ~(PAGE_SIZE - 1)))
#define ADDRESS_AND_SIZE_TO_SPAN_PAGES(Va, Size)
#define COMPUTE_PAGES_SPANNED(Va, Size)   ((ULONG)((((ULONG_PTR)(Va) & (PAGE_SIZE -1)) + (Size) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
#define IS_SYSTEM_ADDRESS(VA)   ((VA) >= MM_SYSTEM_RANGE_START)
#define MmGetMdlPfnArray(Mdl)   ((PPFN_NUMBER)(Mdl + 1))
#define MmGetMdlVirtualAddress(Mdl)   ((PVOID) ((PCHAR) ((Mdl)->StartVa) + (Mdl)->ByteOffset))
#define MmGetMdlByteCount(Mdl)   ((Mdl)->ByteCount)
#define MmGetMdlByteOffset(Mdl)   ((Mdl)->ByteOffset)
#define MmGetMdlBaseVa(Mdl)   ((Mdl)->StartVa)
#define MmIsRecursiveIoFault()
#define MmDisablePageFaultClustering(SavedState)
#define MmEnablePageFaultClustering(SavedState)
#define MmSavePageFaultReadAhead(Thread, SavedState)
#define MmSetPageFaultReadAhead(Thread, ReadAhead)
#define MmResetPageFaultReadAhead(Thread, SavedState)
#define NUMBER_OF_PAGE_LISTS   8
#define MmEnoughMemoryForWrite()
#define MmInitializeMdl(MemoryDescriptorList, BaseVa, Length)
#define MmGetSystemAddressForMdlSafe(MDL, PRIORITY)
#define MmGetSystemAddressForMdl(MDL)
#define MmPrepareMdlForReuse(MDL)
#define MMNONPAGED_QUOTA_INCREASE   (64*1024)
#define MMPAGED_QUOTA_INCREASE   (512*1024)
#define MMNONPAGED_QUOTA_CHECK   (256*1024)
#define MMPAGED_QUOTA_CHECK   (4*1024*1024)
#define PERFINFO_ADDPOOLPAGE(CheckType, PoolIndex, Addr, PoolDesc)
#define PERFINFO_ADDTOWS(PageFrame, Address, Pid)
#define PERFINFO_BIGPOOLALLOC(Type, PTag, NumBytes, Addr)
#define PERFINFO_CM_CHECKCELLTYPE(Map)
#define PERFINFO_CM_HIVECELL_REFERENCE_FLAT(Hive, pcell, Cell)
#define PERFINFO_CM_HIVECELL_REFERENCE_PAGED(Hive, pcell, Cell, Type, Map)
#define PERFINFO_CONVERT_TO_GUI_THREAD(EThread)
#define PERFINFO_DECREFCNT(PageFrame, Flag, Type)
#define PERFINFO_DELETE_STACK(PointerPte, NumberOfPtes)
#define PERFINFO_DISPATCHFAULT_DECL()
#define PERFINFO_DRIVER_COMPLETIONROUTINE_CALL(irp, irpsp)
#define PERFINFO_DRIVER_COMPLETIONROUTINE_RETURN(irp, irpsp)
#define PERFINFO_DRIVER_INIT(pdo)
#define PERFINFO_DRIVER_INIT_COMPLETE(pdo)
#define PERFINFO_DRIVER_MAJORFUNCTION_CALL(irp, irpsp, pdo)
#define PERFINFO_DRIVER_MAJORFUNCTION_RETURN(irp, irpsp, pdo)
#define PERFINFO_EXALLOCATEPOOLWITHTAG_DECL()
#define PERFINFO_EXFREEPOOLWITHTAG_DECL()
#define PERFINFO_FREEPOOL(Addr)
#define PERFINFO_FREEPOOLPAGE(CheckType, PoolIndex, Addr, PoolDesc)
#define PERFINFO_GET_PAGE_INFO(PointerPte)
#define PERFINFO_GET_PAGE_INFO_REPLACEMENT(PointerPte)
#define PERFINFO_GET_PAGE_INFO_WITH_DECL(PointerPte)
#define PERFINFO_GROW_STACK(EThread)
#define PERFINFO_HARDFAULT(Address, InpageSupport)
#define PERFINFO_HARDFAULT_INFO(ProtoPte)
#define PERFINFO_HARDFAULT_IOTIME()
#define PERFINFO_HIVECELL_REFERENCE_FLAT(Hive, pcell, Cell)
#define PERFINFO_HIVECELL_REFERENCE_PAGED(Hive, pcell, Cell, Type, Map)
#define PERFINFO_IMAGE_LOAD(LdrDataTableEntry)
#define PERFINFO_IMAGE_UNLOAD(Address)
#define PERFINFO_INIT_POOLRANGE(PoolStart, PoolPages)
#define PERFINFO_INIT_PERFMEMTABLE(LoaderBlock)
#define PERFINFO_INIT_TRACEFLAGS(OptionString, SpecificOption)
#define PERFINFO_INSERTINLIST(Page, ListHead)
#define PERFINFO_INSERT_FRONT_STANDBY(Page)
#define PERFINFO_LOG_MARK(PMARK)
#define PERFINFO_LOG_MARK_SPRINTF(PMARK, VARIABLE)
#define PERFINFO_LOG_WMI_TRACE_EVENT(PData, xLength)
#define PERFINFO_LOG_WMI_TRACE_KERNEL_EVENT(GroupType, PData, xLength, Thread)
#define PERFINFO_LOG_WMI_TRACE_LONG_EVENT(GroupType, PData, xCount, Thread)
#define PERFINFO_LOG_WS_REMOVAL(Type, WsInfo)
#define PERFINFO_LOG_WS_REPLACEMENT(WsInfo)
#define PERFINFO_MIH_DECL
#define PERFINFO_MMINIT_DECL
#define PERFINFO_MMINIT_START()
#define PERFINFO_MOD_PAGE_WRITER3()
#define PERFINFO_PAGE_INFO_DECL()
#define PERFINFO_PAGE_INFO_REPLACEMENT_DECL()
#define PERFINFO_POOLALLOC(Type, PTag, NumBytes)
#define PERFINFO_POOLALLOC_ADDR(Addr)
#define PERFINFO_POOL_ALLOC_COMMON(Type, PTag, NumBytes)
#define PERFINFO_PRIVATE_COPY_ON_WRITE(CopyFrom, PAGE_SIZE)
#define PERFINFO_PRIVATE_PAGE_DEMAND_ZERO(VirtualAddress)
#define PERFINFO_PROCESS_CREATE(EProcess)
#define PERFINFO_PROCESS_DELETE(EProcess)
#define PERFINFO_DELETE_PAGE(ppfn)
#define PERFINFO_REMOVEPAGE(PageIndex, LogType)
#define PERFINFO_SECTION_CREATE(ControlArea)
#define PERFINFO_SEGMENT_DELETE(FileName)
#define PERFINFO_SOFTFAULT(PageFrame, Address, Type)
#define PERFINFO_THREAD_CREATE(EThread, ITeb)
#define PERFINFO_THREAD_DELETE(EThread)
#define PERFINFO_UNLINKFREEPAGE(Index, Location)
#define PERFINFO_UNLINKPAGE(Index, Location)
#define PERFINFO_WSMANAGE_ACTUALTRIM(Trim)
#define PERFINFO_WSMANAGE_DECL()
#define PERFINFO_WSMANAGE_DUMPENTRIES()
#define PERFINFO_WSMANAGE_DUMPENTRIES_CLAIMS()
#define PERFINFO_WSMANAGE_DUMPENTRIES_FAULTS()
#define PERFINFO_WSMANAGE_DUMPWS(VmSupport, SampledAgeCounts)
#define PERFINFO_WSMANAGE_FINALACTION(TrimAction)
#define PERFINFO_WSMANAGE_GLOBAL_DECL
#define PERFINFO_WSMANAGE_LOGINFO_CLAIMS(TrimAction)
#define PERFINFO_WSMANAGE_LOGINFO_FAULTS(TrimAction)
#define PERFINFO_WSMANAGE_PROCESS_RESET(VmSupport)
#define PERFINFO_WSMANAGE_STARTLOG()
#define PERFINFO_WSMANAGE_STARTLOG_CLAIMS()
#define PERFINFO_WSMANAGE_STARTLOG_FAULTS()
#define PERFINFO_WSMANAGE_TOTRIM(Trim)
#define PERFINFO_WSMANAGE_TRIMACTION(TrimAction)
#define PERFINFO_WSMANAGE_TRIMEND_CLAIMS(Criteria)
#define PERFINFO_WSMANAGE_TRIMEND_FAULTS(Criteria)
#define PERFINFO_WSMANAGE_TRIMWS(Process, SessionSpace, VmSupport)
#define PERFINFO_WSMANAGE_TRIMWS_CLAIMINFO(VmSupport)
#define PERFINFO_WSMANAGE_WAITFORWRITER_CLAIMS()
#define PERFINFO_WSMANAGE_WAITFORWRITER_FAULTS()
#define PERFINFO_WSMANAGE_WILLTRIM(ReductionGoal, FreeGoal)
#define PERFINFO_WSMANAGE_WILLTRIM_CLAIMS(Criteria)
#define PERFINFO_WSMANAGE_WILLTRIM_FAULTS(Criteria)
#define PERFINFO_DO_PAGEFAULT_CLUSTERING()   1

Typedefs

typedef struct _PHYSICAL_MEMORY_RUN PHYSICAL_MEMORY_RUN
typedef struct _PHYSICAL_MEMORY_RUN *PPHYSICAL_MEMORY_RUN
typedef struct _PHYSICAL_MEMORY_DESCRIPTOR PHYSICAL_MEMORY_DESCRIPTOR
typedef struct _PHYSICAL_MEMORY_DESCRIPTOR *PPHYSICAL_MEMORY_DESCRIPTOR
typedef enum _MM_SYSTEM_SIZE MM_SYSTEMSIZE
typedef enum _LOCK_OPERATION LOCK_OPERATION
typedef struct _MMINFO_COUNTERS MMINFO_COUNTERS
typedef MMINFO_COUNTERS *PMMINFO_COUNTERS
typedef NTSTATUS(* PKWIN32_CALLOUT )(IN PVOID Arg)
typedef enum _MMLISTS MMLISTS
typedef struct _MMPFNLIST MMPFNLIST
typedef MMPFNLIST *PMMPFNLIST
typedef enum _MMFLUSH_TYPE MMFLUSH_TYPE
typedef enum _MM_PAGE_PRIORITY MM_PAGE_PRIORITY
typedef struct _PHYSICAL_MEMORY_RANGE PHYSICAL_MEMORY_RANGE
typedef struct _PHYSICAL_MEMORY_RANGE *PPHYSICAL_MEMORY_RANGE
typedef NTSTATUS(* PMM_DLL_INITIALIZE )(IN PUNICODE_STRING RegistryPath)
typedef NTSTATUS(* PMM_DLL_UNLOAD )(VOID)
typedef VOID(* PBANKED_SECTION_ROUTINE )(IN ULONG ReadBank, IN ULONG WriteBank, IN PVOID Context)
typedef VOID(FASTCALL * PPAGE_FAULT_NOTIFY_ROUTINE )(IN NTSTATUS Status, IN PVOID VirtualAddress, IN PVOID TrapInformation)
typedef VOID(FASTCALL * PHARD_FAULT_NOTIFY_ROUTINE )(IN HANDLE FileObject, IN PVOID VirtualAddress)

Enumerations

enum  _MM_SYSTEM_SIZE { MmSmallSystem, MmMediumSystem, MmLargeSystem }
enum  _LOCK_OPERATION { IoReadAccess, IoWriteAccess, IoModifyAccess }
enum  _MMLISTS {
  ZeroedPageList, FreePageList, StandbyPageList, ModifiedPageList,
  ModifiedNoWritePageList, BadPageList, ActiveAndValid, TransitionPage
}
enum  _MMFLUSH_TYPE { MmFlushForDelete, MmFlushForWrite }
enum  _MM_PAGE_PRIORITY { LowPagePriority, NormalPagePriority = 16, HighPagePriority = 32 }

Functions

NTKERNELAPI MM_SYSTEMSIZE MmQuerySystemSize (VOID)
NTKERNELAPI BOOLEAN MmIsThisAnNtAsSystem (VOID)
BOOLEAN MmInitSystem (IN ULONG Phase, IN PLOADER_PARAMETER_BLOCK LoaderBlock, IN PPHYSICAL_MEMORY_DESCRIPTOR PhysicalMemoryBlock)
VOID MmInitializeMemoryLimits (IN PLOADER_PARAMETER_BLOCK LoaderBlock, IN PBOOLEAN IncludedType, OUT PPHYSICAL_MEMORY_DESCRIPTOR Memory)
VOID MmFreeLoaderBlock (IN PLOADER_PARAMETER_BLOCK LoaderBlock)
VOID MmEnablePAT (VOID)
PVOID MmAllocateIndependentPages (IN SIZE_T NumberOfBytes)
BOOLEAN MmSetPageProtection (IN PVOID VirtualAddress, IN SIZE_T NumberOfBytes, IN ULONG NewProtect)
BOOLEAN MmShutdownSystem (VOID)
LOGICAL MmAssignProcessToJob (IN PEPROCESS Process)
LOGICAL MmEnforceWorkingSetLimit (IN PMMSUPPORT WsInfo, IN LOGICAL Enable)
NTSTATUS MmSessionCreate (OUT PULONG SessionId)
NTSTATUS MmSessionDelete (IN ULONG SessionId)
NTSTATUS MmDispatchWin32Callout (IN PKWIN32_CALLOUT Function, IN PKWIN32_CALLOUT WorkerCallback OPTIONAL, IN PVOID Arg, IN PULONG SessionId OPTIONAL)
VOID MmSessionLeader (IN PEPROCESS Process)
VOID MmSessionSetUnloadAddress (IN PDRIVER_OBJECT pWin32KDevice)
LOGICAL MmResourcesAvailable (IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN EX_POOL_PRIORITY Priority)
PVOID MiAllocatePoolPages (IN POOL_TYPE PoolType, IN SIZE_T SizeInBytes, IN ULONG IsLargeSessionAllocation)
ULONG MiFreePoolPages (IN PVOID StartingAddress)
PVOID MiSessionPoolVector (VOID)
VOID MiSessionPoolAllocated (IN PVOID VirtualAddress, IN SIZE_T NumberOfBytes, IN POOL_TYPE PoolType)
VOID MiSessionPoolFreed (IN PVOID VirtualAddress, IN SIZE_T NumberOfBytes, IN POOL_TYPE PoolType)
POOL_TYPE MmDeterminePoolType (IN PVOID VirtualAddress)
LOGICAL MmIsSystemAddressLocked (IN PVOID VirtualAddress)
NTSTATUS MmAccessFault (IN BOOLEAN StoreInstruction, IN PVOID VirtualAddress, IN KPROCESSOR_MODE PreviousMode, IN PVOID TrapInformation)
BOOLEAN MmCreateProcessAddressSpace (IN ULONG MinimumWorkingSetSize, IN PEPROCESS NewProcess, OUT PULONG_PTR DirectoryTableBase)
NTSTATUS MmInitializeProcessAddressSpace (IN PEPROCESS ProcessToInitialize, IN PEPROCESS ProcessToClone OPTIONAL, IN PVOID SectionToMap OPTIONAL, OUT PUNICODE_STRING *AuditName OPTIONAL)
VOID MmDeleteProcessAddressSpace (IN PEPROCESS Process)
VOID MmCleanProcessAddressSpace (VOID)
VOID MmCleanUserProcessAddressSpace (VOID)
VOID MmCleanVirtualAddressDescriptor (VOID)
PVOID MmCreateKernelStack (BOOLEAN LargeStack)
VOID MmDeleteKernelStack (IN PVOID PointerKernelStack, IN BOOLEAN LargeStack)
NTKERNELAPI NTSTATUS MmGrowKernelStack (IN PVOID CurrentStack)
VOID MmOutPageKernelStack (IN PKTHREAD Thread)
VOID MmInPageKernelStack (IN PKTHREAD Thread)
VOID MmOutSwapProcess (IN PKPROCESS Process)
VOID MmInSwapProcess (IN PKPROCESS Process)
PTEB MmCreateTeb (IN PEPROCESS TargetProcess, IN PINITIAL_TEB InitialTeb, IN PCLIENT_ID ClientId)
PPEB MmCreatePeb (IN PEPROCESS TargetProcess, IN PINITIAL_PEB InitialPeb)
VOID MmDeleteTeb (IN PEPROCESS TargetProcess, IN PVOID TebBase)
VOID MmAllowWorkingSetExpansion (VOID)
NTKERNELAPI NTSTATUS MmAdjustWorkingSetSize (IN SIZE_T WorkingSetMinimum, IN SIZE_T WorkingSetMaximum, IN ULONG SystemCache)
VOID MmAdjustPageFileQuota (IN ULONG NewPageFileQuota)
VOID MmWorkingSetManager (VOID)
VOID MmSetMemoryPriorityProcess (IN PEPROCESS Process, IN UCHAR MemoryPriority)
NTSTATUS MmLoadSystemImage (IN PUNICODE_STRING ImageFileName, IN PUNICODE_STRING NamePrefix OPTIONAL, IN PUNICODE_STRING LoadedBaseName OPTIONAL, IN BOOLEAN LoadInSessionSpace, OUT PVOID *Section, OUT PVOID *ImageBaseAddress)
NTSTATUS MmLoadAndLockSystemImage (IN PUNICODE_STRING ImageFileName, IN PUNICODE_STRING NamePrefix OPTIONAL, IN PUNICODE_STRING LoadedBaseName OPTIONAL, OUT PVOID *Section, OUT PVOID *ImageBaseAddress)
VOID MmFreeDriverInitialization (IN PVOID Section)
NTSTATUS MmUnloadSystemImage (IN PVOID Section)
VOID MmMakeKernelResourceSectionWritable (VOID)
VOID VerifierFreeTrackedPool (IN PVOID VirtualAddress, IN SIZE_T ChargedBytes, IN LOGICAL CheckType, IN LOGICAL SpecialPool)
ULONG MmSizeOfTriageInformation (VOID)
ULONG MmSizeOfUnloadedDriverInformation (VOID)
VOID MmWriteTriageInformation (IN PVOID)
VOID MmWriteUnloadedDriverInformation (IN PVOID)
NTKERNELAPI NTSTATUS MmCreateSection (OUT PVOID *SectionObject, IN ACCESS_MASK DesiredAccess, IN POBJECT_ATTRIBUTES ObjectAttributes OPTIONAL, IN PLARGE_INTEGER MaximumSize, IN ULONG SectionPageProtection, IN ULONG AllocationAttributes, IN HANDLE FileHandle OPTIONAL, IN PFILE_OBJECT File OPTIONAL)
NTKERNELAPI NTSTATUS MmMapViewOfSection (IN PVOID SectionToMap, IN PEPROCESS Process, IN OUT PVOID *CapturedBase, IN ULONG_PTR ZeroBits, IN SIZE_T CommitSize, IN OUT PLARGE_INTEGER SectionOffset, IN OUT PSIZE_T CapturedViewSize, IN SECTION_INHERIT InheritDisposition, IN ULONG AllocationType, IN ULONG Protect)
NTKERNELAPI NTSTATUS MmUnmapViewOfSection (IN PEPROCESS Process, IN PVOID BaseAddress)
BOOLEAN MmForceSectionClosed (IN PSECTION_OBJECT_POINTERS SectionObjectPointer, IN BOOLEAN DelayClose)
NTSTATUS MmGetFileNameForSection (IN HANDLE Section, OUT PSTRING FileName)
NTSTATUS MmAddVerifierThunks (IN PVOID ThunkBuffer, IN ULONG ThunkBufferSize)
NTSTATUS MmSetVerifierInformation (IN OUT PVOID SystemInformation, IN ULONG SystemInformationLength)
NTSTATUS MmGetVerifierInformation (OUT PVOID SystemInformation, IN ULONG SystemInformationLength, OUT PULONG Length)
NTSTATUS MmGetPageFileInformation (OUT PVOID SystemInformation, IN ULONG SystemInformationLength, OUT PULONG Length)
NTSTATUS MmExtendSection (IN PVOID SectionToExtend, IN OUT PLARGE_INTEGER NewSectionSize, IN ULONG IgnoreFileSizeChecking)
NTSTATUS MmFlushVirtualMemory (IN PEPROCESS Process, IN OUT PVOID *BaseAddress, IN OUT PSIZE_T RegionSize, OUT PIO_STATUS_BLOCK IoStatus)
NTSTATUS MmMapViewInSystemCache (IN PVOID SectionToMap, OUT PVOID *CapturedBase, IN OUT PLARGE_INTEGER SectionOffset, IN OUT PULONG CapturedViewSize)
VOID MmUnmapViewInSystemCache (IN PVOID BaseAddress, IN PVOID SectionToUnmap, IN ULONG AddToFront)
BOOLEAN MmPurgeSection (IN PSECTION_OBJECT_POINTERS SectionObjectPointer, IN PLARGE_INTEGER Offset OPTIONAL, IN SIZE_T RegionSize, IN ULONG IgnoreCacheViews)
NTSTATUS MmFlushSection (IN PSECTION_OBJECT_POINTERS SectionObjectPointer, IN PLARGE_INTEGER Offset OPTIONAL, IN SIZE_T RegionSize, OUT PIO_STATUS_BLOCK IoStatus, IN ULONG AcquireFile)
NTSTATUS MmGetCrashDumpInformation (IN PSYSTEM_CRASH_DUMP_INFORMATION CrashInfo)
NTSTATUS MmGetCrashDumpStateInformation (IN PSYSTEM_CRASH_STATE_INFORMATION CrashInfo)
BOOLEAN MmFlushImageSection (IN PSECTION_OBJECT_POINTERS SectionObjectPointer, IN MMFLUSH_TYPE FlushType)
BOOLEAN MmCanFileBeTruncated (IN PSECTION_OBJECT_POINTERS SectionPointer, IN PLARGE_INTEGER NewFileSize)
BOOLEAN MmDisableModifiedWriteOfSection (IN PSECTION_OBJECT_POINTERS SectionObjectPointer)
VOID MmPurgeWorkingSet (IN PEPROCESS Process, IN PVOID BaseAddress, IN SIZE_T RegionSize)
BOOLEAN MmSetAddressRangeModified (IN PVOID Address, IN SIZE_T Length)
BOOLEAN MmCheckCachedPageState (IN PVOID Address, IN BOOLEAN SetToZero)
NTSTATUS MmCopyToCachedPage (IN PVOID Address, IN PVOID UserBuffer, IN ULONG Offset, IN SIZE_T CountInBytes, IN BOOLEAN DontZero)
VOID MmUnlockCachedPage (IN PVOID AddressInCache)
PVOID MmDbgReadCheck (IN PVOID VirtualAddress)
PVOID MmDbgWriteCheck (IN PVOID VirtualAddress, IN PHARDWARE_PTE Opaque)
VOID MmDbgReleaseAddress (IN PVOID VirtualAddress, IN PHARDWARE_PTE Opaque)
PVOID64 MmDbgReadCheck64 (IN PVOID64 VirtualAddress)
PVOID64 MmDbgWriteCheck64 (IN PVOID64 VirtualAddress)
PVOID64 MmDbgTranslatePhysicalAddress64 (IN PHYSICAL_ADDRESS PhysicalAddress)
VOID MmHibernateInformation (IN PVOID MemoryMap, OUT PULONG_PTR HiberVa, OUT PPHYSICAL_ADDRESS HiberPte)
NTKERNELAPI VOID MmProbeAndLockProcessPages (IN OUT PMDL MemoryDescriptorList, IN PEPROCESS Process, IN KPROCESSOR_MODE AccessMode, IN LOCK_OPERATION Operation)
NTKERNELAPI VOID MmProbeAndLockPages (IN OUT PMDL MemoryDescriptorList, IN KPROCESSOR_MODE AccessMode, IN LOCK_OPERATION Operation)
NTKERNELAPI VOID MmUnlockPages (IN PMDL MemoryDescriptorList)
NTKERNELAPI VOID MmBuildMdlForNonPagedPool (IN OUT PMDL MemoryDescriptorList)
NTKERNELAPI PVOID MmMapLockedPages (IN PMDL MemoryDescriptorList, IN KPROCESSOR_MODE AccessMode)
NTKERNELAPI PVOID MmGetSystemRoutineAddress (IN PUNICODE_STRING SystemRoutineName)
NTKERNELAPI NTSTATUS MmMapUserAddressesToPage (IN PVOID BaseAddress, IN SIZE_T NumberOfBytes, IN PVOID PageAddress)
NTKERNELAPI PVOID MmMapLockedPagesSpecifyCache (IN PMDL MemoryDescriptorList, IN KPROCESSOR_MODE AccessMode, IN MEMORY_CACHING_TYPE CacheType, IN PVOID BaseAddress, IN ULONG BugCheckOnFailure, IN MM_PAGE_PRIORITY Priority)
NTKERNELAPI VOID MmUnmapLockedPages (IN PVOID BaseAddress, IN PMDL MemoryDescriptorList)
NTKERNELAPI NTSTATUS MmAddPhysicalMemory (IN PPHYSICAL_ADDRESS StartAddress, IN OUT PLARGE_INTEGER NumberOfBytes)
NTKERNELAPI NTSTATUS MmRemovePhysicalMemory (IN PPHYSICAL_ADDRESS StartAddress, IN OUT PLARGE_INTEGER NumberOfBytes)
NTKERNELAPI PPHYSICAL_MEMORY_RANGE MmGetPhysicalMemoryRanges (VOID)
NTKERNELAPI PMDL MmAllocatePagesForMdl (IN PHYSICAL_ADDRESS LowAddress, IN PHYSICAL_ADDRESS HighAddress, IN PHYSICAL_ADDRESS SkipBytes, IN SIZE_T TotalBytes)
NTKERNELAPI VOID MmFreePagesFromMdl (IN PMDL MemoryDescriptorList)
NTKERNELAPI PVOID MmMapIoSpace (IN PHYSICAL_ADDRESS PhysicalAddress, IN SIZE_T NumberOfBytes, IN MEMORY_CACHING_TYPE CacheType)
NTKERNELAPI VOID MmUnmapIoSpace (IN PVOID BaseAddress, IN SIZE_T NumberOfBytes)
NTKERNELAPI VOID MmProbeAndLockSelectedPages (IN OUT PMDL MemoryDescriptorList, IN PFILE_SEGMENT_ELEMENT SegmentArray, IN KPROCESSOR_MODE AccessMode, IN LOCK_OPERATION Operation)
NTKERNELAPI PVOID MmMapVideoDisplay (IN PHYSICAL_ADDRESS PhysicalAddress, IN SIZE_T NumberOfBytes, IN MEMORY_CACHING_TYPE CacheType)
NTKERNELAPI VOID MmUnmapVideoDisplay (IN PVOID BaseAddress, IN SIZE_T NumberOfBytes)
NTKERNELAPI PHYSICAL_ADDRESS MmGetPhysicalAddress (IN PVOID BaseAddress)
NTKERNELAPI PVOID MmGetVirtualForPhysical (IN PHYSICAL_ADDRESS PhysicalAddress)
NTKERNELAPI PVOID MmAllocateContiguousMemory (IN SIZE_T NumberOfBytes, IN PHYSICAL_ADDRESS HighestAcceptableAddress)
NTKERNELAPI PVOID MmAllocateContiguousMemorySpecifyCache (IN SIZE_T NumberOfBytes, IN PHYSICAL_ADDRESS LowestAcceptableAddress, IN PHYSICAL_ADDRESS HighestAcceptableAddress, IN PHYSICAL_ADDRESS BoundaryAddressMultiple OPTIONAL, IN MEMORY_CACHING_TYPE CacheType)
NTKERNELAPI VOID MmFreeContiguousMemory (IN PVOID BaseAddress)
NTKERNELAPI VOID MmFreeContiguousMemorySpecifyCache (IN PVOID BaseAddress, IN SIZE_T NumberOfBytes, IN MEMORY_CACHING_TYPE CacheType)
NTKERNELAPI ULONG MmGatherMemoryForHibernate (IN PMDL Mdl, IN BOOLEAN Wait)
NTKERNELAPI VOID MmReturnMemoryForHibernate (IN PMDL Mdl)
VOID MmReleaseDumpAddresses (IN PFN_NUMBER Pages)
NTKERNELAPI PVOID MmAllocateNonCachedMemory (IN SIZE_T NumberOfBytes)
NTKERNELAPI VOID MmFreeNonCachedMemory (IN PVOID BaseAddress, IN SIZE_T NumberOfBytes)
NTKERNELAPI BOOLEAN MmIsAddressValid (IN PVOID VirtualAddress)
NTKERNELAPI BOOLEAN MmIsNonPagedSystemAddressValid (IN PVOID VirtualAddress)
NTKERNELAPI SIZE_T MmSizeOfMdl (IN PVOID Base, IN SIZE_T Length)
NTKERNELAPI PMDL MmCreateMdl (IN PMDL MemoryDescriptorList OPTIONAL, IN PVOID Base, IN SIZE_T Length)
NTKERNELAPI PVOID MmLockPagableDataSection (IN PVOID AddressWithinSection)
NTKERNELAPI VOID MmLockPagableSectionByHandle (IN PVOID ImageSectionHandle)
NTKERNELAPI VOID MmLockPagedPool (IN PVOID Address, IN SIZE_T Size)
NTKERNELAPI VOID MmUnlockPagedPool (IN PVOID Address, IN SIZE_T Size)
NTKERNELAPI VOID MmResetDriverPaging (IN PVOID AddressWithinSection)
NTKERNELAPI PVOID MmPageEntireDriver (IN PVOID AddressWithinSection)
NTKERNELAPI VOID MmUnlockPagableImageSection (IN PVOID ImageSectionHandle)
NTKERNELAPI HANDLE MmSecureVirtualMemory (IN PVOID Address, IN SIZE_T Size, IN ULONG ProbeMode)
NTKERNELAPI VOID MmUnsecureVirtualMemory (IN HANDLE SecureHandle)
NTKERNELAPI NTSTATUS MmMapViewInSystemSpace (IN PVOID Section, OUT PVOID *MappedBase, IN PSIZE_T ViewSize)
NTKERNELAPI NTSTATUS MmUnmapViewInSystemSpace (IN PVOID MappedBase)
NTKERNELAPI NTSTATUS MmMapViewInSessionSpace (IN PVOID Section, OUT PVOID *MappedBase, IN OUT PSIZE_T ViewSize)
NTKERNELAPI NTSTATUS MmUnmapViewInSessionSpace (IN PVOID MappedBase)
PVOID MmAllocateSpecialPool (IN SIZE_T NumberOfBytes, IN ULONG Tag, IN POOL_TYPE Type, IN ULONG SpecialPoolType)
VOID MmFreeSpecialPool (IN PVOID P)
LOGICAL MmSetSpecialPool (IN LOGICAL Enable)
LOGICAL MmProtectSpecialPool (IN PVOID VirtualAddress, IN ULONG NewProtect)
LOGICAL MmIsSpecialPoolAddressFree (IN PVOID VirtualAddress)
SIZE_T MmQuerySpecialPoolBlockSize (IN PVOID P)
LOGICAL MmIsHydraAddress (IN PVOID VirtualAddress)
PUNICODE_STRING MmLocateUnloadedDriver (IN PVOID VirtualAddress)
NTKERNELAPI LOGICAL MmIsDriverVerifying (IN struct _DRIVER_OBJECT *DriverObject)
LOGICAL MmTrimAllSystemPagableMemory (IN LOGICAL PurgeTransition)
BOOLEAN MmRaisePoolQuota (IN POOL_TYPE PoolType, IN SIZE_T OldQuotaLimit, OUT PSIZE_T NewQuotaLimit)
VOID MmReturnPoolQuota (IN POOL_TYPE PoolType, IN SIZE_T ReturnedQuota)
VOID MmZeroPageThread (VOID)
NTSTATUS MmCopyVirtualMemory (IN PEPROCESS FromProcess, IN PVOID FromAddress, IN PEPROCESS ToProcess, OUT PVOID ToAddress, IN ULONG BufferSize, IN KPROCESSOR_MODE PreviousMode, OUT PULONG NumberOfBytesCopied)
NTSTATUS MmGetSectionRange (IN PVOID AddressWithinSection, OUT PVOID *StartingSectionAddress, OUT PULONG SizeofSection)
VOID MmMapMemoryDumpMdl (IN OUT PMDL MemoryDumpMdl)
NTSTATUS MmSetBankedSection (IN HANDLE ProcessHandle, IN PVOID VirtualAddress, IN ULONG BankLength, IN BOOLEAN ReadWriteBank, IN PBANKED_SECTION_ROUTINE BankRoutine, IN PVOID Context)
NTKERNELAPI BOOLEAN MmIsSystemAddressAccessable (IN PVOID VirtualAddress)
BOOLEAN MmVerifyImageIsOkForMpUse (IN PVOID BaseAddress)
NTSTATUS MmMemoryUsage (IN PVOID Buffer, IN ULONG Size, IN ULONG Type, OUT PULONG Length)
NTKERNELAPI VOID FASTCALL MmSetPageFaultNotifyRoutine (IN PPAGE_FAULT_NOTIFY_ROUTINE NotifyRoutine)
NTKERNELAPI VOID FASTCALL MmSetHardFaultNotifyRoutine (IN PHARD_FAULT_NOTIFY_ROUTINE NotifyRoutine)
NTSTATUS MmCallDllInitialize (IN PLDR_DATA_TABLE_ENTRY DataTableEntry)
VOID MmSetKernelDumpRange (IN PVOID DumpContext)

Variables

ULONG_PTR MmVirtualBias
PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock
BOOLEAN Mm64BitPhysicalAddress
POBJECT_TYPE MmSectionObjectType
ULONG MmReadClusterSize
ULONG MmNumberOfColors
PFN_COUNT MmNumberOfPhysicalPages
PFN_COUNT MmSizeOfSystemCacheInPages
MMSUPPORT MmSystemCacheWs
KEVENT MmWorkingSetManagerEvent
ULONG MmProductType
MMINFO_COUNTERS MmInfoCounters
MMPFNLIST MmModifiedPageListHead
PFN_NUMBER MmThrottleTop
PFN_NUMBER MmThrottleBottom
ULONG MmSpecialPoolTag
PVOID MmSpecialPoolStart
PVOID MmSpecialPoolEnd


Define Documentation

#define ADDRESS_AND_SIZE_TO_SPAN_PAGES(Va, Size)

Value:

(((((Size) - 1) >> PAGE_SHIFT) + \
 (((((ULONG)(Size-1)&(PAGE_SIZE-1)) + (PtrToUlong(Va) & (PAGE_SIZE -1)))) >> PAGE_SHIFT)) + 1L)

Definition at line 217 of file mm.h.

Referenced by Ki386BuildIdentityBuffer(), MiCreatePageTablesForPhysicalRange(), MiDeletePageTablesForPhysicalRange(), MmAllocatePagesForMdl(), MmFreePagesFromMdl(), MmProbeAndLockPages(), MmSetKernelDumpRange(), MmSizeOfMdl(), and MmUnlockPages().
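
A short worked example of what this macro computes (assuming the common 4 KB page size, i.e. PAGE_SIZE == 0x1000 and PAGE_SHIFT == 12; the buffer address and length below are purely illustrative): a 0x300-byte buffer that starts 0xF00 bytes into a page crosses one page boundary, so the macro evaluates to 2.

    PVOID  Buffer = (PVOID)0x80010F00;   /* hypothetical virtual address */
    SIZE_T Length = 0x300;               /* hypothetical transfer length */
    ULONG  Pages;

    /* offset into the page is 0xF00, so bytes 0xF00..0x11FF span two pages */
    Pages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (Buffer, Length);   /* == 2 */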

#define BYTE_OFFSET(Va)   ((ULONG)((LONG_PTR)(Va) & (PAGE_SIZE - 1)))
 

Definition at line 166 of file mm.h.

Referenced by ExFreePoolSanityChecks(), IoBuildPartialMdl(), IopWriteToDisk(), KdpReadPhysicalMemory(), KdpWritePhysicalMemory(), MiCloneProcessAddressSpace(), MiCreateImageFileMap(), MiGatherMappedPages(), MiWaitForInPageComplete(), MmAddPhysicalMemory(), MmAllocateContiguousMemorySpecifyCache(), MmAllocatePagesForMdl(), MmDbgTranslatePhysicalAddress(), MmDbgTranslatePhysicalAddress64(), MmFlushSection(), MmFreeSpecialPool(), MmGetPhysicalAddress(), MmGetVirtualForPhysical(), MmMapIoSpace(), MmMapVideoDisplay(), MmPurgeSection(), MmRemovePhysicalMemory(), and ViPostPoolAllocation().

#define BYTES_TO_PAGES(Size)

Value:

((ULONG)((ULONG_PTR)(Size) >> PAGE_SHIFT) + \
 (((ULONG)(Size) & (PAGE_SIZE - 1)) != 0))

Definition at line 141 of file mm.h.

Referenced by ExAllocatePoolWithTag(), ExInsertPoolTag(), IopReadDumpRegistry(), MiAllocateContiguousMemory(), MiAllocatePoolPages(), MiBuildPagedPool(), MiCalculatePageCommitment(), MiCreateImageFileMap(), MiCreatePagingFileMap(), MiCreatePebOrTeb(), MiGatherMappedPages(), MiGetWorkingSetInfo(), MiInitializeNonPagedPool(), MiInitMachineDependent(), MiLoadImageSection(), MiMapViewInSystemSpace(), MiMapViewOfDataSection(), MiRemoveVad(), MiSessionInitializeWorkingSetList(), MiShareSessionImage(), MiWriteProtectSystemImage(), MmAllocateIndependentPages(), MmAllocateNonCachedMemory(), MmCreateKernelStack(), MmDeleteKernelStack(), MmFreeNonCachedMemory(), MmMapMemoryDumpMdl(), MmProbeAndLockSelectedPages(), MmResourcesAvailable(), MmSetPageProtection(), NtAllocateVirtualMemory(), NtReadFileScatter(), and NtWriteFileGather().
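
For comparison with ADDRESS_AND_SIZE_TO_SPAN_PAGES above, BYTES_TO_PAGES rounds a raw byte count up to whole pages and ignores any starting offset. A minimal sketch, assuming PAGE_SIZE == 0x1000 (the size is illustrative):

    SIZE_T Bytes = 0x2800;                   /* 10 KB                    */
    ULONG  Pages = BYTES_TO_PAGES (Bytes);   /* (0x2800 >> 12) + 1 == 3  */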

#define COMPUTE_PAGES_SPANNED(Va, Size)   ((ULONG)((((ULONG_PTR)(Va) & (PAGE_SIZE -1)) + (Size) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
 

Definition at line 221 of file mm.h.

Referenced by CcCopyRead(), CcFastCopyRead(), CcMapAndRead(), CcMapData(), CcMdlRead(), CcPerformReadAhead(), CcZeroData(), FsRtlCopyRead(), IoAllocateMdl(), IoBuildPartialMdl(), IopIsAddressRangeValid(), IopMapPhysicalMemory(), IopMapVirtualToPhysicalMdl(), MiInitializeSystemCache(), MiMapLockedPagesInUserSpace(), MiSweepCacheMachineDependent(), MiUnmapLockedPagesInUserSpace(), MmMapIoSpace(), MmMapLockedPagesSpecifyCache(), MmMapVideoDisplay(), MmProbeAndLockPages(), MmSetKernelDumpRange(), MmUnmapIoSpace(), MmUnmapLockedPages(), and MmUnmapVideoDisplay().

#define IS_SYSTEM_ADDRESS(VA)   ((VA) >= MM_SYSTEM_RANGE_START)
 

Definition at line 248 of file mm.h.

Referenced by KeBugCheckEx(), and xxxInterSendMsgEx().

#define MAX_PHYSICAL_MEMORY_FRAGMENTS   20
 

Definition at line 31 of file mm.h.

Referenced by IopInitializeResourceMap(), MiMergeMemoryLimit(), and MmInitSystem().

#define MM_ALLOCATION_GRANULARITY   ((ULONG)0x10000)
 

Definition at line 54 of file mm.h.

Referenced by NtQuerySystemInformation().

#define MM_MAXIMUM_DISK_IO_SIZE   (0x10000)
 

Definition at line 91 of file mm.h.

Referenced by MiFlushSectionInternal().

#define MM_MAXIMUM_READ_CLUSTER_SIZE   (15)
 

Definition at line 60 of file mm.h.

Referenced by MiResolveMappedFileFault().

#define MmDisablePageFaultClustering(SavedState)

Value:

{ \
    *(SavedState) = 2 + (ULONG)PsGetCurrentThread()->DisablePageFaultClustering;\
    PsGetCurrentThread()->DisablePageFaultClustering = TRUE; }

Definition at line 924 of file mm.h.

Referenced by CcPrepareMdlWrite(), and CcZeroData().

#define MmEnablePageFaultClustering(SavedState)

Value:

{ \
    PsGetCurrentThread()->DisablePageFaultClustering = (BOOLEAN)(SavedState - 2); }

Definition at line 953 of file mm.h.

Referenced by CcPrepareMdlWrite(), and CcZeroData().

 
#define MmEnoughMemoryForWrite()
 

Value:

Definition at line 1123 of file mm.h.

Referenced by CcCanIWrite(), and MiGatherMappedPages().

#define MmGetMdlBaseVa(Mdl)   ((Mdl)->StartVa)
 

Definition at line 373 of file mm.h.

Referenced by IoBuildPartialMdl().

#define MmGetMdlByteCount(Mdl)   ((Mdl)->ByteCount)
 

Definition at line 324 of file mm.h.

Referenced by IoBuildPartialMdl(), SmbTraceCopyMdlContiguous(), and SmbTraceMdlLength().

#define MmGetMdlByteOffset(Mdl)   ((Mdl)->ByteOffset)
 

Definition at line 348 of file mm.h.

Referenced by IoBuildPartialMdl().

#define MmGetMdlPfnArray(Mdl)   ((PPFN_NUMBER)(Mdl + 1))
 

Definition at line 275 of file mm.h.

Referenced by CcZeroData(), IoBuildPartialMdl(), IopMapPhysicalMemory(), IopMapVirtualToPhysicalMdl(), IopWritePageToDisk(), and IoWriteCrashDump().

#define MmGetMdlVirtualAddress(Mdl)   ((PVOID) ((PCHAR) ((Mdl)->StartVa) + (Mdl)->ByteOffset))
 

Definition at line 299 of file mm.h.

#define MmGetSystemAddressForMdl(MDL)

Value:

(((MDL)->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA | \
                     MDL_SOURCE_IS_NONPAGED_POOL)) ? \
                     ((MDL)->MappedSystemVa) : \
                     (MmMapLockedPages((MDL),KernelMode)))

Definition at line 1874 of file mm.h.

Referenced by CcZeroData(), ExLockUserBuffer(), FsRtlCancelNotify(), FsRtlNotifyCompleteIrp(), FsRtlNotifyFullReportChange(), and SmbTraceCopyMdlContiguous().

#define MmGetSystemAddressForMdlSafe(MDL, PRIORITY)

Value:

(((MDL)->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA | \
                     MDL_SOURCE_IS_NONPAGED_POOL)) ? \
                     ((MDL)->MappedSystemVa) : \
                     (MmMapLockedPagesSpecifyCache((MDL), \
                                                   KernelMode, \
                                                   MmCached, \
                                                   NULL, \
                                                   FALSE, \
                                                   (PRIORITY))))

Definition at line 1833 of file mm.h.

Referenced by CcZeroData(), MiCheckForCrashDump(), MiGetWorkingSetInfo(), and UdfIsVolumeDirty().
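
A hedged usage sketch of the safe variant in a driver's data path; the helper name and surrounding context are hypothetical, but the pattern (request a system-space mapping with an MM_PAGE_PRIORITY and fail gracefully on NULL) is what this macro is intended for:

    NTSTATUS
    MapCallerBuffer (IN PIRP Irp, OUT PVOID *SystemVa)   /* hypothetical helper */
    {
        PVOID Va;

        /*
         * The MDL is assumed to have been built and locked already (for
         * example by the I/O manager for a direct-I/O transfer).  Using
         * NormalPagePriority lets Mm return NULL when system PTEs are
         * scarce instead of the caller risking a bugcheck.
         */
        Va = MmGetSystemAddressForMdlSafe (Irp->MdlAddress, NormalPagePriority);
        if (Va == NULL) {
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        *SystemVa = Va;
        return STATUS_SUCCESS;
    }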

#define MmInitializeMdl(MemoryDescriptorList, BaseVa, Length)

Value:

{ \
    (MemoryDescriptorList)->Next = (PMDL) NULL; \
    (MemoryDescriptorList)->Size = (CSHORT)(sizeof(MDL) + \
        (sizeof(PFN_NUMBER) * ADDRESS_AND_SIZE_TO_SPAN_PAGES((BaseVa), (Length)))); \
    (MemoryDescriptorList)->MdlFlags = 0; \
    (MemoryDescriptorList)->StartVa = (PVOID) PAGE_ALIGN((BaseVa)); \
    (MemoryDescriptorList)->ByteOffset = BYTE_OFFSET((BaseVa)); \
    (MemoryDescriptorList)->ByteCount = (ULONG)(Length); \
}

Definition at line 1789 of file mm.h.

Referenced by ExLockUserBuffer(), IoAllocateMdl(), IopWritePageToDisk(), MiCleanPhysicalProcessPages(), MiCloneProcessAddressSpace(), MiDoMappedCopy(), MiGatherMappedPages(), MiGatherPagefilePages(), MiGetWorkingSetInfo(), MiMakeOutswappedPageResident(), MiResolveMappedFileFault(), MiResolvePageFileFault(), MmCreateMdl(), MmProbeAndLockSelectedPages(), MmShutdownSystem(), NtFreeUserPhysicalPages(), and NtStartProfile().
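
A minimal sketch of the usual pairing: MmInitializeMdl fills in the fixed MDL header fields for caller-supplied storage, after which the PFN array still has to be populated, here via MmBuildMdlForNonPagedPool because the buffer is assumed to come from nonpaged pool. The buffer length and pool tags are illustrative only:

    SIZE_T Length = 0x2000;
    PVOID  Buffer;
    PMDL   Mdl;

    Buffer = ExAllocatePoolWithTag (NonPagedPool, Length, 'fuBM');   /* illustrative tag */
    if (Buffer != NULL) {
        Mdl = ExAllocatePoolWithTag (NonPagedPool,
                                     MmSizeOfMdl (Buffer, Length),
                                     'ldMM');                        /* illustrative tag */
        if (Mdl != NULL) {
            MmInitializeMdl (Mdl, Buffer, Length);   /* StartVa, ByteOffset, ByteCount, Size */
            MmBuildMdlForNonPagedPool (Mdl);         /* fills the PFN array for a nonpaged VA */
        }
    }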

 
#define MmIsRecursiveIoFault()

Value:

((PsGetCurrentThread()->DisablePageFaultClustering) | \
 (PsGetCurrentThread()->ForwardClusterOnly))

Definition at line 893 of file mm.h.

Referenced by IoPageRead().

#define MMNONPAGED_QUOTA_CHECK   (256*1024)
 

Definition at line 2014 of file mm.h.

#define MMNONPAGED_QUOTA_INCREASE   (64*1024)
 

Definition at line 2010 of file mm.h.

Referenced by MmRaisePoolQuota(), and PsReturnPoolQuota().

#define MMPAGED_QUOTA_CHECK   (4*1024*1024)
 

Definition at line 2016 of file mm.h.

Referenced by MmRaisePoolQuota().

#define MMPAGED_QUOTA_INCREASE   (512*1024)
 

Definition at line 2012 of file mm.h.

Referenced by MmRaisePoolQuota(), and PsReturnPoolQuota().

#define MmPrepareMdlForReuse(MDL)

Value:

if (((MDL)->MdlFlags & MDL_PARTIAL_HAS_BEEN_MAPPED) != 0) { \
    ASSERT(((MDL)->MdlFlags & MDL_PARTIAL) != 0); \
    MmUnmapLockedPages( (MDL)->MappedSystemVa, (MDL) ); \
} else if (((MDL)->MdlFlags & MDL_PARTIAL) == 0) { \
    ASSERT(((MDL)->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0); \
}

Definition at line 1902 of file mm.h.

Referenced by IoFreeMdl().

#define MmResetPageFaultReadAhead(Thread, SavedState)

Value:

{ \
    (Thread)->ForwardClusterOnly = (BOOLEAN)((SavedState) & 1); \
    (Thread)->ReadClusterSize = (SavedState) / 2; }

Definition at line 1053 of file mm.h.

Referenced by CcCopyRead(), CcFastCopyRead(), CcMapAndCopy(), CcMapAndRead(), CcMapData(), CcMdlRead(), CcPerformReadAhead(), MmCopyToCachedPage(), and MmProbeAndLockPages().

#define MmSavePageFaultReadAhead(Thread, SavedState)

Value:

{ \
    *(SavedState) = (Thread)->ReadClusterSize * 2 + \
                    (Thread)->ForwardClusterOnly; }

Definition at line 983 of file mm.h.

Referenced by CcCopyRead(), CcFastCopyRead(), CcMapAndCopy(), CcMapAndRead(), CcMapData(), CcMdlRead(), CcPerformReadAhead(), MmCopyToCachedPage(), and MmProbeAndLockPages().

#define MmSetPageFaultReadAhead(Thread, ReadAhead)

Value:

{ \
    (Thread)->ForwardClusterOnly = TRUE; \
    if ((ReadAhead) > MM_MAXIMUM_READ_CLUSTER_SIZE) { \
        (Thread)->ReadClusterSize = MM_MAXIMUM_READ_CLUSTER_SIZE;\
    } else { \
        (Thread)->ReadClusterSize = (ReadAhead); \
    } }

Definition at line 1018 of file mm.h.

Referenced by CcCopyRead(), CcFastCopyRead(), CcMapAndCopy(), CcMapAndRead(), CcMapData(), CcMdlRead(), CcPerformReadAhead(), MmCopyToCachedPage(), and MmProbeAndLockPages().
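
The three read-ahead macros above are meant to be used as a bracket around a paging operation, which is the pattern in the Cc* callers listed: save the thread's current state, set a bounded cluster size, do the work, then restore. A minimal sketch (the cluster size and the bracketed work are illustrative):

    PETHREAD Thread = PsGetCurrentThread ();
    ULONG    SavedState;

    MmSavePageFaultReadAhead (Thread, &SavedState);
    MmSetPageFaultReadAhead (Thread, 4);        /* cluster at most 4 extra pages */

    /* ... touch the pageable data that benefits from clustered read-ahead ... */

    MmResetPageFaultReadAhead (Thread, SavedState);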

#define NUMBER_OF_PAGE_LISTS   8
 

Definition at line 1065 of file mm.h.

#define PAGE_ALIGN(Va)   ((PVOID)((ULONG_PTR)(Va) & ~(PAGE_SIZE - 1)))
 

Definition at line 190 of file mm.h.

Referenced by IoBuildPartialMdl(), IopIsAddressRangeValid(), IopMapPhysicalMemory(), IopMapVirtualToPhysicalMdl(), IopWritePageToDisk(), IoWriteCrashDump(), KdpReadPhysicalMemory(), KdpWritePhysicalMemory(), MiCheckForUserStackOverflow(), MiCheckVirtualAddress(), MiCloneProcessAddressSpace(), MiConvertBackToStandardPages(), MiConvertToSuperPages(), MiCreateImageFileMap(), MiDecommitPages(), MiDeleteAddressesInWorkingSet(), MiDeletePte(), MiDeleteSystemPagableVm(), MiFindContiguousMemory(), MiFindInitializationCode(), MiGatherMappedPages(), MiGrowWsleHash(), MiInitializeSystemCache(), MiInitializeWorkingSetList(), MiInitMachineDependent(), MiInsertWsle(), MiLocateWsle(), MiProtectVirtualMemory(), MiRemovePageFromWorkingSet(), MiRemoveWorkingSetPages(), MiRemoveWsle(), MiResolvePageFileFault(), MiSessionInitializeWorkingSetList(), MiSweepCacheMachineDependent(), MiUpdateWsle(), MmFlushVirtualMemory(), MmFreeNonCachedMemory(), MmFreeSpecialPool(), MmQuerySpecialPoolBlockSize(), MmRemovePhysicalMemory(), NtAllocateVirtualMemory(), NtFlushVirtualMemory(), NtFreeVirtualMemory(), NtLockVirtualMemory(), NtMapUserPhysicalPages(), NtQueryVirtualMemory(), NtUnlockVirtualMemory(), VerifierFreeTrackedPool(), and ViPostPoolAllocation().
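
PAGE_ALIGN and BYTE_OFFSET together split a virtual address into its page-aligned base and the offset within that page. A worked example, assuming 4 KB pages (the address is hypothetical):

    PVOID Va     = (PVOID)0xE1234ABC;     /* hypothetical address */
    PVOID Base   = PAGE_ALIGN (Va);       /* 0xE1234000           */
    ULONG Offset = BYTE_OFFSET (Va);      /* 0x00000ABC           */

    ASSERT ((PCHAR)Base + Offset == (PCHAR)Va);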

#define PERFINFO_ADDPOOLPAGE(CheckType, PoolIndex, Addr, PoolDesc)
 

Definition at line 2153 of file mm.h.

Referenced by ExAllocatePoolWithTag().

#define PERFINFO_ADDTOWS(PageFrame, Address, Pid)
 

Definition at line 2154 of file mm.h.

Referenced by MiAddValidPageToWorkingSet().

#define PERFINFO_BIGPOOLALLOC(Type, PTag, NumBytes, Addr)
 

Definition at line 2155 of file mm.h.

Referenced by ExAllocatePoolWithTag().

#define PERFINFO_CM_CHECKCELLTYPE(Map)
 

Definition at line 2157 of file mm.h.

#define PERFINFO_CM_HIVECELL_REFERENCE_FLAT(Hive, pcell, Cell)
 

Definition at line 2158 of file mm.h.

#define PERFINFO_CM_HIVECELL_REFERENCE_PAGED(Hive, pcell, Cell, Type, Map)
 

Definition at line 2159 of file mm.h.

#define PERFINFO_CONVERT_TO_GUI_THREAD(EThread)
 

Definition at line 2160 of file mm.h.

Referenced by PsConvertToGuiThread().

#define PERFINFO_DECREFCNT(PageFrame, Flag, Type)
 

Definition at line 2161 of file mm.h.

Referenced by MiDecrementShareCount().

#define PERFINFO_DELETE_PAGE(ppfn)
 

Definition at line 2210 of file mm.h.

#define PERFINFO_DELETE_STACK(PointerPte, NumberOfPtes)
 

Definition at line 2162 of file mm.h.

Referenced by MmDeleteKernelStack().

 
#define PERFINFO_DISPATCHFAULT_DECL()
 

Definition at line 2163 of file mm.h.

Referenced by MiDispatchFault().

 
#define PERFINFO_DO_PAGEFAULT_CLUSTERING()   1
 

Definition at line 2247 of file mm.h.

Referenced by MiResolveMappedFileFault().

#define PERFINFO_DRIVER_COMPLETIONROUTINE_CALL(irp, irpsp)
 

Definition at line 2164 of file mm.h.

#define PERFINFO_DRIVER_COMPLETIONROUTINE_RETURN(irp, irpsp)
 

Definition at line 2165 of file mm.h.

#define PERFINFO_DRIVER_INIT(pdo)
 

Definition at line 2166 of file mm.h.

Referenced by IopInitializeBuiltinDriver(), and IopLoadDriver().

#define PERFINFO_DRIVER_INIT_COMPLETE(pdo)
 

Definition at line 2167 of file mm.h.

Referenced by IopInitializeBuiltinDriver(), and IopLoadDriver().

#define PERFINFO_DRIVER_MAJORFUNCTION_CALL(irp, irpsp, pdo)
 

Definition at line 2168 of file mm.h.

Referenced by IopfCallDriver(), IopRaiseHardError(), and IovSpecialIrpCallDriver().

#define PERFINFO_DRIVER_MAJORFUNCTION_RETURN(irp, irpsp, pdo)
 

Definition at line 2169 of file mm.h.

Referenced by IopfCallDriver(), IopRaiseHardError(), and IovSpecialIrpCallDriver().

 
#define PERFINFO_EXALLOCATEPOOLWITHTAG_DECL()
 

Definition at line 2170 of file mm.h.

Referenced by ExAllocatePoolWithTag().

 
#define PERFINFO_EXFREEPOOLWITHTAG_DECL()
 

Definition at line 2171 of file mm.h.

Referenced by ExFreePoolWithTag().

#define PERFINFO_FREEPOOL(Addr)
 

Definition at line 2172 of file mm.h.

Referenced by ExFreePoolWithTag().

#define PERFINFO_FREEPOOLPAGE(CheckType, PoolIndex, Addr, PoolDesc)
 

Definition at line 2173 of file mm.h.

Referenced by ExFreePoolWithTag().

#define PERFINFO_GET_PAGE_INFO(PointerPte)
 

Definition at line 2174 of file mm.h.

Referenced by MiEmptyWorkingSet(), MiInsertWsle(), MmAdjustWorkingSetSize(), and NtUnlockVirtualMemory().

#define PERFINFO_GET_PAGE_INFO_REPLACEMENT(PointerPte)
 

Definition at line 2175 of file mm.h.

Referenced by MiReplaceWorkingSetEntryUsingFaultInfo().

#define PERFINFO_GET_PAGE_INFO_WITH_DECL(PointerPte)
 

Definition at line 2176 of file mm.h.

Referenced by MiTrimWorkingSet().

#define PERFINFO_GROW_STACK(EThread)
 

Definition at line 2177 of file mm.h.

Referenced by MmGrowKernelStack().

#define PERFINFO_HARDFAULT(Address, InpageSupport)
 

Definition at line 2178 of file mm.h.

Referenced by MiDispatchFault().

#define PERFINFO_HARDFAULT_INFO(ProtoPte)
 

Definition at line 2179 of file mm.h.

Referenced by MiDispatchFault().

 
#define PERFINFO_HARDFAULT_IOTIME()
 

Definition at line 2180 of file mm.h.

Referenced by MiDispatchFault().

#define PERFINFO_HIVECELL_REFERENCE_FLAT(Hive, pcell, Cell)
 

Definition at line 2181 of file mm.h.

#define PERFINFO_HIVECELL_REFERENCE_PAGED(Hive, pcell, Cell, Type, Map)
 

Definition at line 2182 of file mm.h.

#define PERFINFO_IMAGE_LOAD(LdrDataTableEntry)
 

Definition at line 2183 of file mm.h.

Referenced by MiLoadSystemImage().

#define PERFINFO_IMAGE_UNLOAD(Address)
 

Definition at line 2184 of file mm.h.

Referenced by MmUnloadSystemImage().

#define PERFINFO_INIT_PERFMEMTABLE(LoaderBlock)
 

Definition at line 2186 of file mm.h.

#define PERFINFO_INIT_POOLRANGE(PoolStart, PoolPages)
 

Definition at line 2185 of file mm.h.

Referenced by MiInitMachineDependent().

#define PERFINFO_INIT_TRACEFLAGS(OptionString, SpecificOption)
 

Definition at line 2187 of file mm.h.

#define PERFINFO_INSERT_FRONT_STANDBY(Page)
 

Definition at line 2189 of file mm.h.

Referenced by MiInsertStandbyListAtFront().

#define PERFINFO_INSERTINLIST(Page, ListHead)
 

Definition at line 2188 of file mm.h.

Referenced by MiInsertPageInList().

#define PERFINFO_LOG_MARK(PMARK)
 

Definition at line 2190 of file mm.h.

#define PERFINFO_LOG_MARK_SPRINTF(PMARK, VARIABLE)
 

Definition at line 2191 of file mm.h.

#define PERFINFO_LOG_WMI_TRACE_EVENT(PData, xLength)
 

Definition at line 2192 of file mm.h.

#define PERFINFO_LOG_WMI_TRACE_KERNEL_EVENT(GroupType, PData, xLength, Thread)
 

Definition at line 2193 of file mm.h.

#define PERFINFO_LOG_WMI_TRACE_LONG_EVENT(GroupType, PData, xCount, Thread)
 

Definition at line 2194 of file mm.h.

#define PERFINFO_LOG_WS_REMOVAL(Type, WsInfo)
 

Definition at line 2195 of file mm.h.

Referenced by MiEmptyWorkingSet(), MiInsertWsle(), MiTrimWorkingSet(), MmAdjustWorkingSetSize(), and NtUnlockVirtualMemory().

#define PERFINFO_LOG_WS_REPLACEMENT(WsInfo)
 

Definition at line 2196 of file mm.h.

Referenced by MiReplaceWorkingSetEntryUsingFaultInfo().

#define PERFINFO_MIH_DECL
 

Definition at line 2197 of file mm.h.

#define PERFINFO_MMINIT_DECL
 

Definition at line 2198 of file mm.h.

 
#define PERFINFO_MMINIT_START()
 

Definition at line 2199 of file mm.h.

Referenced by MiMapBBTMemory().

 
#define PERFINFO_MOD_PAGE_WRITER3()
 

Definition at line 2200 of file mm.h.

 
#define PERFINFO_PAGE_INFO_DECL()
 

Definition at line 2201 of file mm.h.

Referenced by MiEmptyWorkingSet(), MiInsertWsle(), MmAdjustWorkingSetSize(), and NtUnlockVirtualMemory().

 
#define PERFINFO_PAGE_INFO_REPLACEMENT_DECL()
 

Definition at line 2202 of file mm.h.

Referenced by MiReplaceWorkingSetEntryUsingFaultInfo().

#define PERFINFO_POOL_ALLOC_COMMON(Type, PTag, NumBytes)
 

Definition at line 2205 of file mm.h.

#define PERFINFO_POOLALLOC(Type, PTag, NumBytes)
 

Definition at line 2203 of file mm.h.

Referenced by ExAllocatePoolWithTag().

#define PERFINFO_POOLALLOC_ADDR(Addr)
 

Definition at line 2204 of file mm.h.

Referenced by ExAllocatePoolWithTag().

#define PERFINFO_PRIVATE_COPY_ON_WRITE(CopyFrom, PAGE_SIZE)
 

Definition at line 2206 of file mm.h.

Referenced by MiCopyOnWrite().

#define PERFINFO_PRIVATE_PAGE_DEMAND_ZERO(VirtualAddress)
 

Definition at line 2207 of file mm.h.

Referenced by MiResolveDemandZeroFault().

#define PERFINFO_PROCESS_CREATE(EProcess)
 

Definition at line 2208 of file mm.h.

Referenced by PspCreateThread().

#define PERFINFO_PROCESS_DELETE(EProcess)
 

Definition at line 2209 of file mm.h.

Referenced by PspProcessDelete().

#define PERFINFO_REMOVEPAGE(PageIndex, LogType)
 

Definition at line 2211 of file mm.h.

Referenced by MiRemovePageByColor(), and MiRemovePageFromList().

#define PERFINFO_SECTION_CREATE(ControlArea)
 

Definition at line 2212 of file mm.h.

Referenced by MmCreateSection().

#define PERFINFO_SEGMENT_DELETE(FileName)
 

Definition at line 2213 of file mm.h.

Referenced by MiSegmentDelete().

#define PERFINFO_SOFTFAULT(PageFrame, Address, Type)
 

Definition at line 2214 of file mm.h.

Referenced by MiCompleteProtoPteFault(), MiResolveDemandZeroFault(), MiResolveProtoPteFault(), and MiResolveTransitionFault().

#define PERFINFO_THREAD_CREATE(EThread, ITeb)
 

Definition at line 2215 of file mm.h.

Referenced by PspCreateThread().

#define PERFINFO_THREAD_DELETE(EThread)
 

Definition at line 2216 of file mm.h.

Referenced by PspThreadDelete().

#define PERFINFO_UNLINKFREEPAGE(Index, Location)
 

Definition at line 2217 of file mm.h.

Referenced by MiUnlinkFreeOrZeroedPage().

#define PERFINFO_UNLINKPAGE(Index, Location)
 

Definition at line 2218 of file mm.h.

Referenced by MiUnlinkPageFromList().

#define PERFINFO_WSMANAGE_ACTUALTRIM(Trim)
 

Definition at line 2219 of file mm.h.

Referenced by MmWorkingSetManager().

 
#define PERFINFO_WSMANAGE_DECL()
 

Definition at line 2220 of file mm.h.

Referenced by MiCheckAndSetSystemTrimCriteria(), MiCheckSystemTrimEndCriteria(), and MmWorkingSetManager().

 
#define PERFINFO_WSMANAGE_DUMPENTRIES()
 

Definition at line 2221 of file mm.h.

 
#define PERFINFO_WSMANAGE_DUMPENTRIES_CLAIMS()
 

Definition at line 2222 of file mm.h.

Referenced by MiCheckAndSetSystemTrimCriteria().

 
#define PERFINFO_WSMANAGE_DUMPENTRIES_FAULTS()
 

Definition at line 2223 of file mm.h.

Referenced by MiCheckAndSetSystemTrimCriteria().

#define PERFINFO_WSMANAGE_DUMPWS(VmSupport, SampledAgeCounts)
 

Definition at line 2224 of file mm.h.

#define PERFINFO_WSMANAGE_FINALACTION(TrimAction)
 

Definition at line 2225 of file mm.h.

Referenced by MmWorkingSetManager().

#define PERFINFO_WSMANAGE_GLOBAL_DECL
 

Definition at line 2226 of file mm.h.

#define PERFINFO_WSMANAGE_LOGINFO_CLAIMS(TrimAction)
 

Definition at line 2227 of file mm.h.

#define PERFINFO_WSMANAGE_LOGINFO_FAULTS(TrimAction)
 

Definition at line 2228 of file mm.h.

#define PERFINFO_WSMANAGE_PROCESS_RESET(VmSupport)
 

Definition at line 2230 of file mm.h.

Referenced by MiCheckProcessTrimCriteria(), and MmWorkingSetManager().

 
#define PERFINFO_WSMANAGE_STARTLOG()
 

Definition at line 2231 of file mm.h.

 
#define PERFINFO_WSMANAGE_STARTLOG_CLAIMS()
 

Definition at line 2232 of file mm.h.

Referenced by MiCheckAndSetSystemTrimCriteria().

 
#define PERFINFO_WSMANAGE_STARTLOG_FAULTS()
 

Definition at line 2233 of file mm.h.

Referenced by MiCheckAndSetSystemTrimCriteria().

#define PERFINFO_WSMANAGE_TOTRIM(Trim)
 

Definition at line 2234 of file mm.h.

Referenced by MmWorkingSetManager().

#define PERFINFO_WSMANAGE_TRIMACTION(TrimAction)
 

Definition at line 2235 of file mm.h.

Referenced by MiCheckAndSetSystemTrimCriteria(), and MiCheckSystemTrimEndCriteria().

#define PERFINFO_WSMANAGE_TRIMEND_CLAIMS(Criteria)
 

Definition at line 2236 of file mm.h.

Referenced by MmWorkingSetManager().

#define PERFINFO_WSMANAGE_TRIMEND_FAULTS(Criteria)
 

Definition at line 2237 of file mm.h.

Referenced by MmWorkingSetManager().

#define PERFINFO_WSMANAGE_TRIMWS(Process, SessionSpace, VmSupport)
 

Definition at line 2238 of file mm.h.

Referenced by MmWorkingSetManager().

#define PERFINFO_WSMANAGE_TRIMWS_CLAIMINFO(VmSupport)
 

Definition at line 2240 of file mm.h.

 
#define PERFINFO_WSMANAGE_WAITFORWRITER_CLAIMS()
 

Definition at line 2241 of file mm.h.

Referenced by MiCheckSystemTrimEndCriteria().

 
#define PERFINFO_WSMANAGE_WAITFORWRITER_FAULTS()
 

Definition at line 2242 of file mm.h.

Referenced by MiCheckSystemTrimEndCriteria().

#define PERFINFO_WSMANAGE_WILLTRIM(ReductionGoal, FreeGoal)
 

Definition at line 2243 of file mm.h.

#define PERFINFO_WSMANAGE_WILLTRIM_CLAIMS(Criteria)
 

Definition at line 2244 of file mm.h.

Referenced by MiCheckAndSetSystemTrimCriteria().

#define PERFINFO_WSMANAGE_WILLTRIM_FAULTS(Criteria)
 

Definition at line 2245 of file mm.h.

Referenced by MiCheckAndSetSystemTrimCriteria().

#define ROUND_TO_PAGES(Size)   (((ULONG_PTR)(Size) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
 

Definition at line 117 of file mm.h.

Referenced by CcCopyRead(), CcFastCopyRead(), CcPinFileData(), CcScheduleReadAhead(), ExAllocatePoolWithTag(), ExpAddTagForBigPages(), ExpInsertPoolTracker(), InitializePool(), IopCalculateRequiredDumpSpace(), MiBuildPageTableForDrivers(), MiCreateDataFileMap(), MiCreatePagingFileMap(), MiCreatePebOrTeb(), MiEnablePagingOfDriver(), MiEnablePagingTheExecutive(), MiFindInitializationCode(), MiFlushRangeFilter(), MiLocateKernelSections(), MiReloadBootLoadedDrivers(), MmDeleteTeb(), MmExtendSection(), MmFreeDriverInitialization(), MmRemovePhysicalMemory(), NtAllocateVirtualMemory(), NtCreatePagingFile(), PsReturnPoolQuota(), UdfRawBufferSize(), and UdfRawBufferSizeN().
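
ROUND_TO_PAGES returns a byte count rounded up to the next page multiple, whereas BYTES_TO_PAGES (documented above) returns the number of pages. A worked comparison, assuming PAGE_SIZE == 0x1000 (the length is illustrative):

    SIZE_T Bytes        = 0x1801;                    /* 6 KB + 1 byte */
    SIZE_T RoundedBytes = ROUND_TO_PAGES (Bytes);    /* 0x2000        */
    ULONG  PageCount    = BYTES_TO_PAGES (Bytes);    /* 2             */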


Typedef Documentation

typedef enum _LOCK_OPERATION LOCK_OPERATION
 

Referenced by IoBuildAsynchronousFsdRequest(), IoBuildDeviceIoControlRequest(), and IopXxxControlFile().

typedef enum _MM_PAGE_PRIORITY MM_PAGE_PRIORITY
 

typedef enum _MM_SYSTEM_SIZE MM_SYSTEMSIZE
 

Referenced by IoInitSystem(), ObInitSystem(), and PspInitPhase0().

typedef enum _MMFLUSH_TYPE MMFLUSH_TYPE
 

Referenced by MmFlushImageSection().

typedef struct _MMINFO_COUNTERS MMINFO_COUNTERS
 

typedef enum _MMLISTS MMLISTS
 

Referenced by MiRemovePhysicalPages(), and MmAllocatePagesForMdl().

typedef struct _MMPFNLIST MMPFNLIST
 

typedef VOID(* PBANKED_SECTION_ROUTINE)(IN ULONG ReadBank, IN ULONG WriteBank, IN PVOID Context)
 

Definition at line 2072 of file mm.h.

Referenced by MmSetBankedSection().

typedef VOID(FASTCALL * PHARD_FAULT_NOTIFY_ROUTINE)(IN HANDLE FileObject, IN PVOID VirtualAddress)
 

Definition at line 2119 of file mm.h.

Referenced by MiDispatchFault().

typedef struct _PHYSICAL_MEMORY_DESCRIPTOR PHYSICAL_MEMORY_DESCRIPTOR
 

Referenced by IoWriteCrashDump().

typedef struct _PHYSICAL_MEMORY_RANGE PHYSICAL_MEMORY_RANGE
 

Referenced by MmGetPhysicalMemoryRanges().

typedef struct _PHYSICAL_MEMORY_RUN PHYSICAL_MEMORY_RUN
 

typedef NTSTATUS(* PKWIN32_CALLOUT)(IN PVOID Arg)
 

Definition at line 559 of file mm.h.

Referenced by IopNotifyDeviceClassChange(), IopNotifyHwProfileChange(), IopNotifySetupDeviceArrival(), IopNotifyTargetDeviceChange(), and IoRegisterPlugPlayNotification().

typedef NTSTATUS(* PMM_DLL_INITIALIZE)(IN PUNICODE_STRING RegistryPath)
 

Definition at line 1910 of file mm.h.

Referenced by MiResolveImageReferences(), and MmCallDllInitialize().

typedef NTSTATUS(* PMM_DLL_UNLOAD)(VOID)
 

Definition at line 1914 of file mm.h.

Referenced by MiCallDllUnloadAndUnloadDll().

typedef MMINFO_COUNTERS* PMMINFO_COUNTERS
 

Definition at line 473 of file mm.h.

typedef MMPFNLIST* PMMPFNLIST
 

Definition at line 1085 of file mm.h.

typedef VOID(FASTCALL * PPAGE_FAULT_NOTIFY_ROUTINE)(IN NTSTATUS Status, IN PVOID VirtualAddress, IN PVOID TrapInformation)
 

Definition at line 2111 of file mm.h.

Referenced by MmAccessFault().

typedef struct _PHYSICAL_MEMORY_DESCRIPTOR * PPHYSICAL_MEMORY_DESCRIPTOR
 

typedef struct _PHYSICAL_MEMORY_RANGE * PPHYSICAL_MEMORY_RANGE
 

typedef struct _PHYSICAL_MEMORY_RUN * PPHYSICAL_MEMORY_RUN
 


Enumeration Type Documentation

enum _LOCK_OPERATION
 

Enumeration values:
IoReadAccess 
IoWriteAccess 
IoModifyAccess 

Definition at line 443 of file mm.h.

{
    IoReadAccess,
    IoWriteAccess,
    IoModifyAccess
} LOCK_OPERATION;

enum _MM_PAGE_PRIORITY
 

Enumeration values:
LowPagePriority 
NormalPagePriority 
HighPagePriority 

Definition at line 1446 of file mm.h.

{
    LowPagePriority,
    NormalPagePriority = 16,
    HighPagePriority = 32
} MM_PAGE_PRIORITY;

enum _MM_SYSTEM_SIZE
 

Enumeration values:
MmSmallSystem 
MmMediumSystem 
MmLargeSystem 

Definition at line 421 of file mm.h.

{
    MmSmallSystem,
    MmMediumSystem,
    MmLargeSystem
} MM_SYSTEMSIZE;

enum _MMFLUSH_TYPE
 

Enumeration values:
MmFlushForDelete 
MmFlushForWrite 

Definition at line 1267 of file mm.h.

{
    MmFlushForDelete,
    MmFlushForWrite
} MMFLUSH_TYPE;

enum _MMLISTS
 

Enumeration values:
ZeroedPageList 
FreePageList 
StandbyPageList 
ModifiedPageList 
ModifiedNoWritePageList 
BadPageList 
ActiveAndValid 
TransitionPage 

Definition at line 1067 of file mm.h.

{
    ZeroedPageList,
    FreePageList,
    StandbyPageList,           // this list and before make up available pages.
    ModifiedPageList,
    ModifiedNoWritePageList,
    BadPageList,
    ActiveAndValid,
    TransitionPage
} MMLISTS;


Function Documentation

PVOID MiAllocatePoolPages (IN POOL_TYPE PoolType, IN SIZE_T SizeInBytes, IN ULONG IsLargeSessionAllocation)
 

Definition at line 807 of file allocpag.c.

References ActiveAndValid, _MM_PAGED_POOL_INFO::AllocatedPagedPool, ASSERT, BASE_POOL_TYPE_MASK, BYTES_TO_PAGES, _MM_SESSION_SPACE::CommittedPages, CONSISTENCY_LOCK_PFN2, CONSISTENCY_UNLOCK_PFN2, DbgPrint, _MM_PAGED_POOL_INFO::EndOfPagedPoolBitmap, FALSE, _MMWSL::FirstDynamic, _MM_PAGED_POOL_INFO::FirstPteForPagedPool, Index, KeBugCheckEx(), KeSetEvent(), KSEG0_BASE, KSEG2_BASE, L, _MM_PAGED_POOL_INFO::LastPteForPagedPool, List, _MMFREE_POOL_ENTRY::List, LOCK_PFN, LOCK_PFN2, LOCK_SESSION_SPACE_WS, MI_CONVERT_PHYSICAL_TO_PFN, MI_GET_PAGE_COLOR_FROM_PTE, MI_GET_PAGE_FRAME_FROM_PTE, MI_IS_PHYSICAL_ADDRESS, MI_MAX_FREE_LIST_HEADS, MI_MEMORY_MAKER, MI_PFN_ELEMENT, MI_UNUSED_SEGMENTS_SURPLUS, MI_WRITE_VALID_PTE, MiAddValidPageToWorkingSet(), MiChargeCommitmentCantExpand(), MiFillMemoryPte, MiFreeNonPagedPool(), MiGetPdeAddress, MiGetPteAddress, MiGetVirtualAddressMappedByPte, MiInitializePfn(), MiInitializePfnForOtherProcess(), MiIssuePageExtendRequestNoWait(), MiLocateWsle(), MiProtectedPoolInsertList(), MiProtectedPoolRemoveEntryList(), MiProtectFreeNonPagedPool(), MiRemoveAnyPage(), MiReserveSystemPtes(), MiSessionPoolAllocated(), MiSwapWslEntries(), MiUnProtectFreeNonPagedPool(), MM_BUMP_COUNTER, MM_BUMP_SESS_COUNTER, MM_DBG_COMMIT_NONPAGED_POOL_EXPANSION, MM_DBG_COMMIT_PAGED_POOL_PAGES, MM_DBG_COMMIT_PAGED_POOL_PAGETABLE, MM_DBG_SESSION_COMMIT_PAGEDPOOL_PAGES, MM_DBG_SESSION_PAGEDPOOL_PAGETABLE_ALLOC1, MM_DEMAND_ZERO_WRITE_PTE, MM_FREE_POOL_SIGNATURE, MM_KERNEL_DEMAND_ZERO_PTE, MM_KERNEL_NOACCESS_PTE, MM_KSEG2_BASE, MM_TRACK_COMMIT, MmAllocatedNonPagedPool, MmAvailablePages, MMFREE_POOL_ENTRY, MmKseg2Frame, MmNonPagedMustSucceed, MmNonPagedPoolExpansionStart, MmNonPagedPoolFreeListHead, MmNumberOfFreeNonPagedPool, MmPageAlignedPoolBase, MmPagedPoolCommit, MmPagedPoolInfo, MmProtectFreedNonPagedPool, MmResidentAvailablePages, MmSessionBase, MmSessionSpace, MmSubsectionBase, MmSubsectionTopPage, MmSystemPageDirectory, MmSystemPagePtes, MmUnusedSegmentCleanup, MmUnusedSegmentForceFree, MmUnusedSegmentList, MUST_SUCCEED_POOL_TYPE_MASK, _MM_PAGED_POOL_INFO::NextPdeForPagedPoolExpansion, _MM_SESSION_SPACE::NonPagablePages, NonPagedPool, NonPagedPoolDescriptor, NonPagedPoolExpansion, NULL, _MMPFN::OriginalPte, PAGE_SHIFT, PAGE_SIZE, PagedPool, _MM_PAGED_POOL_INFO::PagedPoolAllocationMap, _MM_PAGED_POOL_INFO::PagedPoolCommit, _MM_PAGED_POOL_INFO::PagedPoolHint, _MM_SESSION_SPACE::PagedPoolInfo, _MM_PAGED_POOL_INFO::PagedPoolLargeSessionAllocationMap, _MM_SESSION_SPACE::PagedPoolStart, _MM_SESSION_SPACE::PageTables, PDE_PER_PAGE, PMM_PAGED_POOL_INFO, PMMFREE_POOL_ENTRY, POOL_VERIFIER_MASK, PsGetCurrentThread, PTE_PER_PAGE, _MMPFN::PteAddress, _MMPFN::PteFrame, RtlClearBits(), RtlFindClearBitsAndSet(), RtlSetBits(), SESSION_POOL_MASK, _MM_SESSION_SPACE::SessionPageDirectoryIndex, _MMFREE_POOL_ENTRY::Signature, _MMFREE_POOL_ENTRY::Size, _POOL_DESCRIPTOR::TotalBigPages, _POOL_DESCRIPTOR::TotalPages, TRUE, _MMPTE::u, _MMPFN::u1, _MMWSLE::u1, _MMPFN::u2, _MMPFN::u3, UNLOCK_PFN, UNLOCK_PFN2, UNLOCK_SESSION_SPACE_WS, ValidKernelPde, ValidKernelPdeLocal, ValidKernelPte, VerifierLargePagedPoolMap, _MM_SESSION_SPACE::Vm, _MMSUPPORT::VmWorkingSetList, and _MM_SESSION_SPACE::Wsle.

Referenced by AllocatePoolInternal(), ExAllocatePool(), ExAllocatePoolWithTag(), ExpAddTagForBigPages(), ExpInsertPoolTracker(), InitializePool(), and MiInitializeSystemPtes().

Routine Description:

    This function allocates a set of pages from the specified pool
    and returns the starting virtual address to the caller.

    For the NonPagedPoolMustSucceed case, the caller must first
    attempt to get NonPagedPool and if and ONLY IF that fails, then
    MiAllocatePoolPages should be called again with the PoolType of
    NonPagedPoolMustSucceed.

Arguments:

    PoolType - Supplies the type of pool from which to obtain pages.

    SizeInBytes - Supplies the size of the request in bytes. The actual
        size returned is rounded up to a page boundary.

    IsLargeSessionAllocation - Supplies nonzero if the allocation is a single
        large session allocation. Zero otherwise.

Return Value:

    Returns a pointer to the allocated pool, or NULL if no more pool is
    available.

Environment:

    These functions are used by the general pool allocation routines
    and should not be called directly.

    Mutexes guarding the pool databases must be held when calling
    these functions.

    Kernel mode, IRQL at DISPATCH_LEVEL.
01175 // 01176 01177 if (MmAvailablePages <= SizeInPages) { 01178 01179 UNLOCK_PFN2 (OldIrql); 01180 01181 // 01182 // There are no free physical pages to expand 01183 // nonpaged pool. 01184 // 01185 01186 return NULL; 01187 } 01188 01189 // 01190 // Try to find system PTEs to expand the pool into. 01191 // 01192 01193 StartingPte = MiReserveSystemPtes ((ULONG)SizeInPages, 01194 NonPagedPoolExpansion, 01195 0, 01196 0, 01197 FALSE); 01198 01199 if (StartingPte == NULL) { 01200 01201 // 01202 // There are no free physical PTEs to expand nonpaged pool. 01203 // If there are any cached expansion PTEs, free them now in 01204 // an attempt to get enough contiguous VA for our caller. 01205 // 01206 01207 if ((SizeInPages > 1) && (MmNumberOfFreeNonPagedPool != 0)) { 01208 01209 FreedPool = FALSE; 01210 01211 for (Index = 0; Index < MI_MAX_FREE_LIST_HEADS; Index += 1) { 01212 01213 Entry = MmNonPagedPoolFreeListHead[Index].Flink; 01214 01215 while (Entry != &MmNonPagedPoolFreeListHead[Index]) { 01216 01217 if (MmProtectFreedNonPagedPool == TRUE) { 01218 MiUnProtectFreeNonPagedPool ((PVOID)Entry, 0); 01219 } 01220 01221 // 01222 // The list is not empty, see if this one is virtually 01223 // mapped. 01224 // 01225 01226 FreePageInfo = CONTAINING_RECORD(Entry, 01227 MMFREE_POOL_ENTRY, 01228 List); 01229 01230 if ((!MI_IS_PHYSICAL_ADDRESS(FreePageInfo)) && 01231 ((PVOID)FreePageInfo >= MmNonPagedPoolExpansionStart)) { 01232 if (MmProtectFreedNonPagedPool == FALSE) { 01233 RemoveEntryList (&FreePageInfo->List); 01234 } 01235 else { 01236 MiProtectedPoolRemoveEntryList (&FreePageInfo->List); 01237 } 01238 01239 MmNumberOfFreeNonPagedPool -= FreePageInfo->Size; 01240 ASSERT ((LONG)MmNumberOfFreeNonPagedPool >= 0); 01241 01242 UNLOCK_PFN2 (OldIrql); 01243 01244 FreedPool = TRUE; 01245 01246 MiFreeNonPagedPool ((PVOID)FreePageInfo, 01247 FreePageInfo->Size); 01248 01249 LOCK_PFN2 (OldIrql); 01250 Index = 0; 01251 break; 01252 } 01253 01254 Entry = FreePageInfo->List.Flink; 01255 01256 if (MmProtectFreedNonPagedPool == TRUE) { 01257 MiProtectFreeNonPagedPool ((PVOID)FreePageInfo, 01258 (ULONG)FreePageInfo->Size); 01259 } 01260 } 01261 } 01262 01263 if (FreedPool == TRUE) { 01264 StartingPte = MiReserveSystemPtes ((ULONG)SizeInPages, 01265 NonPagedPoolExpansion, 01266 0, 01267 0, 01268 FALSE); 01269 01270 if (StartingPte != NULL) { 01271 goto gotpool; 01272 } 01273 } 01274 } 01275 01276 UNLOCK_PFN2 (OldIrql); 01277 01278 nopool: 01279 01280 // 01281 // Running low on pool - if this request is not for session pool, 01282 // force unused segment trimming when appropriate. 01283 // 01284 01285 SignalDereferenceThread = FALSE; 01286 LOCK_PFN2 (OldIrql); 01287 if (MmUnusedSegmentForceFree == 0) { 01288 if (!IsListEmpty(&MmUnusedSegmentList)) { 01289 SignalDereferenceThread = TRUE; 01290 MmUnusedSegmentForceFree = 30; 01291 } 01292 } 01293 UNLOCK_PFN2 (OldIrql); 01294 if (SignalDereferenceThread == TRUE) { 01295 KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE); 01296 } 01297 01298 return NULL; 01299 } 01300 01301 gotpool: 01302 01303 // 01304 // Update the count of available resident pages. 01305 // 01306 01307 MmResidentAvailablePages -= SizeInPages; 01308 MM_BUMP_COUNTER(0, SizeInPages); 01309 01310 // 01311 // Charge commitment as non paged pool uses physical memory. 01312 // 01313 01314 MM_TRACK_COMMIT (MM_DBG_COMMIT_NONPAGED_POOL_EXPANSION, SizeInPages); 01315 01316 MiChargeCommitmentCantExpand (SizeInPages, TRUE); 01317 01318 // 01319 // Expand the pool. 
01320 // 01321 01322 PointerPte = StartingPte; 01323 TempPte = ValidKernelPte; 01324 MmAllocatedNonPagedPool += SizeInPages; 01325 i = SizeInPages; 01326 01327 do { 01328 PageFrameIndex = MiRemoveAnyPage ( 01329 MI_GET_PAGE_COLOR_FROM_PTE (PointerPte)); 01330 01331 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 01332 01333 Pfn1->u3.e2.ReferenceCount = 1; 01334 Pfn1->u2.ShareCount = 1; 01335 Pfn1->PteAddress = PointerPte; 01336 Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE; 01337 Pfn1->PteFrame = MI_GET_PAGE_FRAME_FROM_PTE (MiGetPteAddress(PointerPte)); 01338 01339 Pfn1->u3.e1.PageLocation = ActiveAndValid; 01340 Pfn1->u3.e1.LargeSessionAllocation = 0; 01341 Pfn1->u3.e1.VerifierAllocation = 0; 01342 01343 TempPte.u.Hard.PageFrameNumber = PageFrameIndex; 01344 MI_WRITE_VALID_PTE (PointerPte, TempPte); 01345 PointerPte += 1; 01346 SizeInPages -= 1; 01347 } while (SizeInPages > 0); 01348 01349 Pfn1->u3.e1.EndOfAllocation = 1; 01350 01351 Pfn1 = MI_PFN_ELEMENT (StartingPte->u.Hard.PageFrameNumber); 01352 Pfn1->u3.e1.StartOfAllocation = 1; 01353 01354 ASSERT (Pfn1->u3.e1.VerifierAllocation == 0); 01355 01356 if (PoolType & POOL_VERIFIER_MASK) { 01357 Pfn1->u3.e1.VerifierAllocation = 1; 01358 } 01359 01360 // 01361 // Mark this as a large session allocation in the PFN database. 01362 // 01363 01364 ASSERT (Pfn1->u3.e1.LargeSessionAllocation == 0); 01365 01366 if (IsLargeSessionAllocation != 0) { 01367 Pfn1->u3.e1.LargeSessionAllocation = 1; 01368 01369 MiSessionPoolAllocated(MiGetVirtualAddressMappedByPte (StartingPte), 01370 i << PAGE_SHIFT, 01371 NonPagedPool); 01372 } 01373 01374 UNLOCK_PFN2 (OldIrql); 01375 01376 BaseVa = MiGetVirtualAddressMappedByPte (StartingPte); 01377 01378 if (i == 1) { 01379 01380 // 01381 // Map this with KSEG0 if possible. 01382 // 01383 01384 #if defined (_X86_) 01385 if ((PageFrameIndex >= MI_CONVERT_PHYSICAL_TO_PFN(MmSubsectionBase)) && 01386 (PageFrameIndex < MmSubsectionTopPage) && 01387 (MmKseg2Frame != 0)) 01388 #elif defined (_ALPHA_) 01389 if ((PageFrameIndex >= MI_CONVERT_PHYSICAL_TO_PFN(MmSubsectionBase)) && 01390 (PageFrameIndex < MmSubsectionTopPage)) 01391 #else 01392 if (PageFrameIndex < MmSubsectionTopPage) 01393 #endif 01394 { 01395 BaseVa = (PVOID)(KSEG0_BASE + (PageFrameIndex << PAGE_SHIFT)); 01396 } 01397 } 01398 01399 return BaseVa; 01400 } 01401 01402 // 01403 // Paged Pool. 01404 // 01405 01406 if ((PoolType & SESSION_POOL_MASK) == 0) { 01407 SessionSpace = (PMM_SESSION_SPACE)0; 01408 PagedPoolInfo = &MmPagedPoolInfo; 01409 } 01410 else { 01411 SessionSpace = MmSessionSpace; 01412 PagedPoolInfo = &SessionSpace->PagedPoolInfo; 01413 } 01414 01415 StartPosition = RtlFindClearBitsAndSet ( 01416 PagedPoolInfo->PagedPoolAllocationMap, 01417 (ULONG)SizeInPages, 01418 PagedPoolInfo->PagedPoolHint 01419 ); 01420 01421 if ((StartPosition == 0xFFFFFFFF) && (PagedPoolInfo->PagedPoolHint != 0)) { 01422 01423 if (MI_UNUSED_SEGMENTS_SURPLUS()) { 01424 KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE); 01425 } 01426 01427 // 01428 // No free bits were found, check from the start of 01429 // the bit map. 01430 01431 StartPosition = RtlFindClearBitsAndSet ( 01432 PagedPoolInfo->PagedPoolAllocationMap, 01433 (ULONG)SizeInPages, 01434 0 01435 ); 01436 } 01437 01438 // 01439 // If start position = -1, no room in pool. Attempt to expand PagedPool. 01440 // 01441 01442 if (StartPosition == 0xFFFFFFFF) { 01443 01444 // 01445 // Attempt to expand the paged pool. 
01446 // 01447 01448 StartPosition = (((ULONG)SizeInPages - 1) / PTE_PER_PAGE) + 1; 01449 01450 // 01451 // Make sure there is enough space to create the prototype PTEs. 01452 // 01453 01454 if (((StartPosition - 1) + PagedPoolInfo->NextPdeForPagedPoolExpansion) > 01455 MiGetPteAddress (PagedPoolInfo->LastPteForPagedPool)) { 01456 01457 // 01458 // Can't expand pool any more. If this request is not for session 01459 // pool, force unused segment trimming when appropriate. 01460 // 01461 01462 if (SessionSpace == NULL) { 01463 goto nopool; 01464 } 01465 01466 return NULL; 01467 } 01468 01469 if (SessionSpace) { 01470 TempPte = ValidKernelPdeLocal; 01471 PageTableCount = StartPosition; 01472 } 01473 else { 01474 TempPte = ValidKernelPde; 01475 } 01476 01477 LOCK_PFN (OldIrql); 01478 01479 // 01480 // Make sure we have 1 more than the number of pages 01481 // requested available. 01482 // 01483 01484 if (MmAvailablePages <= StartPosition) { 01485 01486 UNLOCK_PFN (OldIrql); 01487 01488 // 01489 // There are no free physical pages to expand 01490 // paged pool. 01491 // 01492 01493 return NULL; 01494 } 01495 01496 // 01497 // Update the count of available resident pages. 01498 // 01499 01500 MmResidentAvailablePages -= StartPosition; 01501 MM_BUMP_COUNTER(1, StartPosition); 01502 01503 // 01504 // Expand the pool. 01505 // 01506 01507 EndPosition = (ULONG)((PagedPoolInfo->NextPdeForPagedPoolExpansion - 01508 MiGetPteAddress(PagedPoolInfo->FirstPteForPagedPool)) * 01509 PTE_PER_PAGE); 01510 01511 RtlClearBits (PagedPoolInfo->PagedPoolAllocationMap, 01512 EndPosition, 01513 (ULONG) StartPosition * PTE_PER_PAGE); 01514 01515 PointerPte = PagedPoolInfo->NextPdeForPagedPoolExpansion; 01516 StartingPte = (PMMPTE)MiGetVirtualAddressMappedByPte(PointerPte); 01517 PagedPoolInfo->NextPdeForPagedPoolExpansion += StartPosition; 01518 01519 do { 01520 ASSERT (PointerPte->u.Hard.Valid == 0); 01521 01522 MM_TRACK_COMMIT (MM_DBG_COMMIT_PAGED_POOL_PAGETABLE, 1); 01523 01524 MiChargeCommitmentCantExpand (1, TRUE); 01525 01526 PageFrameIndex = MiRemoveAnyPage ( 01527 MI_GET_PAGE_COLOR_FROM_PTE (PointerPte)); 01528 01529 TempPte.u.Hard.PageFrameNumber = PageFrameIndex; 01530 MI_WRITE_VALID_PTE (PointerPte, TempPte); 01531 01532 // 01533 // Map valid PDE into system (or session) address space as well. 
01534 // 01535 01536 VirtualAddress = MiGetVirtualAddressMappedByPte (PointerPte); 01537 01538 #if defined (_WIN64) 01539 01540 MiInitializePfn (PageFrameIndex, 01541 PointerPte, 01542 1); 01543 01544 #else 01545 01546 if (SessionSpace) { 01547 01548 Index = (ULONG)(PointerPte - MiGetPdeAddress (MmSessionBase)); 01549 ASSERT (MmSessionSpace->PageTables[Index].u.Long == 0); 01550 MmSessionSpace->PageTables[Index] = TempPte; 01551 01552 MiInitializePfnForOtherProcess (PageFrameIndex, 01553 PointerPte, 01554 MmSessionSpace->SessionPageDirectoryIndex); 01555 01556 MM_BUMP_SESS_COUNTER(MM_DBG_SESSION_PAGEDPOOL_PAGETABLE_ALLOC1, 1); 01557 } 01558 else { 01559 #if !defined (_X86PAE_) 01560 MmSystemPagePtes [((ULONG_PTR)PointerPte & 01561 ((sizeof(MMPTE) * PDE_PER_PAGE) - 1)) / sizeof(MMPTE)] = 01562 TempPte; 01563 MiInitializePfnForOtherProcess (PageFrameIndex, 01564 PointerPte, 01565 MmSystemPageDirectory); 01566 #else 01567 MmSystemPagePtes [((ULONG_PTR)PointerPte & 01568 (PD_PER_SYSTEM * (sizeof(MMPTE) * PDE_PER_PAGE) - 1)) / sizeof(MMPTE)] = 01569 TempPte; 01570 MiInitializePfnForOtherProcess (PageFrameIndex, 01571 PointerPte, 01572 MmSystemPageDirectory[(PointerPte - MiGetPdeAddress(0)) / PDE_PER_PAGE]); 01573 #endif 01574 } 01575 #endif 01576 01577 KeFillEntryTb ((PHARDWARE_PTE) PointerPte, VirtualAddress, FALSE); 01578 01579 MiFillMemoryPte (StartingPte, 01580 PAGE_SIZE, 01581 MM_KERNEL_NOACCESS_PTE); 01582 01583 PointerPte += 1; 01584 StartingPte += PAGE_SIZE / sizeof(MMPTE); 01585 StartPosition -= 1; 01586 } while (StartPosition > 0); 01587 01588 UNLOCK_PFN (OldIrql); 01589 01590 if (SessionSpace) { 01591 01592 PointerPte -= PageTableCount; 01593 01594 LOCK_SESSION_SPACE_WS (SessionIrql); 01595 01596 MmSessionSpace->NonPagablePages += PageTableCount; 01597 MmSessionSpace->CommittedPages += PageTableCount; 01598 01599 do { 01600 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 01601 01602 ASSERT (Pfn1->u1.Event == 0); 01603 Pfn1->u1.Event = (PVOID) PsGetCurrentThread (); 01604 01605 SessionPte = MiGetVirtualAddressMappedByPte (PointerPte); 01606 01607 MiAddValidPageToWorkingSet (SessionPte, 01608 PointerPte, 01609 Pfn1, 01610 0); 01611 01612 WsEntry = MiLocateWsle (SessionPte, 01613 MmSessionSpace->Vm.VmWorkingSetList, 01614 Pfn1->u1.WsIndex); 01615 01616 if (WsEntry >= MmSessionSpace->Vm.VmWorkingSetList->FirstDynamic) { 01617 01618 WsSwapEntry = MmSessionSpace->Vm.VmWorkingSetList->FirstDynamic; 01619 01620 if (WsEntry != MmSessionSpace->Vm.VmWorkingSetList->FirstDynamic) { 01621 01622 // 01623 // Swap this entry with the one at first dynamic. 01624 // 01625 01626 MiSwapWslEntries (WsEntry, WsSwapEntry, &MmSessionSpace->Vm); 01627 } 01628 01629 MmSessionSpace->Vm.VmWorkingSetList->FirstDynamic += 1; 01630 } 01631 else { 01632 WsSwapEntry = WsEntry; 01633 } 01634 01635 // 01636 // Indicate that the page is locked. 01637 // 01638 01639 MmSessionSpace->Wsle[WsSwapEntry].u1.e1.LockedInWs = 1; 01640 01641 PointerPte += 1; 01642 PageTableCount -= 1; 01643 } while (PageTableCount > 0); 01644 UNLOCK_SESSION_SPACE_WS (SessionIrql); 01645 } 01646 01647 StartPosition = RtlFindClearBitsAndSet ( 01648 PagedPoolInfo->PagedPoolAllocationMap, 01649 (ULONG)SizeInPages, 01650 EndPosition 01651 ); 01652 01653 ASSERT (StartPosition != 0xffffffff); 01654 } 01655 01656 // 01657 // This is paged pool, the start and end can't be saved 01658 // in the PFN database as the page isn't always resident 01659 // in memory. 
The ideal place to save the start and end 01660 // would be in the prototype PTE, but there are no free 01661 // bits. To solve this problem, a bitmap which parallels 01662 // the allocation bitmap exists which contains set bits 01663 // in the positions where an allocation ends. This 01664 // allows pages to be deallocated with only their starting 01665 // address. 01666 // 01667 // For sanity's sake, the starting address can be verified 01668 // from the 2 bitmaps as well. If the page before the starting 01669 // address is not allocated (bit is zero in allocation bitmap) 01670 // then this page is obviously a start of an allocation block. 01671 // If the page before is allocated and the other bit map does 01672 // not indicate the previous page is the end of an allocation, 01673 // then the starting address is wrong and a bug check should 01674 // be issued. 01675 // 01676 01677 if (SizeInPages == 1) { 01678 PagedPoolInfo->PagedPoolHint = StartPosition + (ULONG)SizeInPages; 01679 } 01680 01681 if (MiChargeCommitmentCantExpand (SizeInPages, FALSE) == FALSE) { 01682 Thread = PsGetCurrentThread (); 01683 if (MI_MEMORY_MAKER(Thread)) { 01684 MiChargeCommitmentCantExpand (SizeInPages, TRUE); 01685 } 01686 else { 01687 RtlClearBits (PagedPoolInfo->PagedPoolAllocationMap, 01688 StartPosition, 01689 (ULONG)SizeInPages); 01690 01691 // 01692 // Could not commit the page(s), return NULL indicating 01693 // no pool was allocated. Note that the lack of commit may be due 01694 // to unused segments and the MmSharedCommit, prototype PTEs, etc 01695 // associated with them. So force a reduction now. 01696 // 01697 01698 MiIssuePageExtendRequestNoWait (SizeInPages); 01699 01700 SignalDereferenceThread = FALSE; 01701 LOCK_PFN (OldIrql); 01702 if (MmUnusedSegmentForceFree == 0) { 01703 if (!IsListEmpty(&MmUnusedSegmentList)) { 01704 SignalDereferenceThread = TRUE; 01705 MmUnusedSegmentForceFree = 30; 01706 } 01707 } 01708 UNLOCK_PFN (OldIrql); 01709 if (SignalDereferenceThread == TRUE) { 01710 KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE); 01711 } 01712 01713 return NULL; 01714 } 01715 } 01716 01717 MM_TRACK_COMMIT (MM_DBG_COMMIT_PAGED_POOL_PAGES, SizeInPages); 01718 01719 if (SessionSpace) { 01720 LOCK_SESSION_SPACE_WS (OldIrql); 01721 SessionSpace->CommittedPages += SizeInPages; 01722 MM_BUMP_SESS_COUNTER(MM_DBG_SESSION_COMMIT_PAGEDPOOL_PAGES, SizeInPages); 01723 UNLOCK_SESSION_SPACE_WS (OldIrql); 01724 BaseVa = (PVOID)((PCHAR)SessionSpace->PagedPoolStart + 01725 (StartPosition << PAGE_SHIFT)); 01726 } 01727 else { 01728 MmPagedPoolCommit += (ULONG)SizeInPages; 01729 BaseVa = (PVOID)((PUCHAR)MmPageAlignedPoolBase[PagedPool] + 01730 (StartPosition << PAGE_SHIFT)); 01731 } 01732 01733 #if DBG 01734 PointerPte = MiGetPteAddress (BaseVa); 01735 for (i = 0; i < SizeInPages; i += 1) { 01736 if (*(ULONG *)PointerPte != MM_KERNEL_NOACCESS_PTE) { 01737 DbgPrint("MiAllocatePoolPages: PP not zero PTE (%x %x %x)\n", 01738 BaseVa, PointerPte, *PointerPte); 01739 DbgBreakPoint(); 01740 } 01741 PointerPte += 1; 01742 } 01743 #endif 01744 PointerPte = MiGetPteAddress (BaseVa); 01745 MiFillMemoryPte (PointerPte, 01746 SizeInPages * sizeof(MMPTE), 01747 MM_KERNEL_DEMAND_ZERO_PTE); 01748 01749 PagedPoolInfo->PagedPoolCommit += SizeInPages; 01750 EndPosition = StartPosition + (ULONG)SizeInPages - 1; 01751 RtlSetBits (PagedPoolInfo->EndOfPagedPoolBitmap, EndPosition, 1L); 01752 01753 // 01754 // Mark this as a large session allocation in the PFN database. 
01755 // 01756 01757 if (IsLargeSessionAllocation != 0) { 01758 RtlSetBits (PagedPoolInfo->PagedPoolLargeSessionAllocationMap, 01759 StartPosition, 01760 1L); 01761 01762 MiSessionPoolAllocated (BaseVa, 01763 SizeInPages << PAGE_SHIFT, 01764 PagedPool); 01765 } 01766 else if (PoolType & POOL_VERIFIER_MASK) { 01767 RtlSetBits (VerifierLargePagedPoolMap, 01768 StartPosition, 01769 1L); 01770 } 01771 01772 PagedPoolInfo->AllocatedPagedPool += SizeInPages; 01773 01774 return BaseVa; 01775 }
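The nonpaged-pool path above keeps its free space threaded through the free pages themselves: each free run begins with an MMFREE_POOL_ENTRY header, and the runs sit on size-bucketed lists, with the last bucket catching every run of MI_MAX_FREE_LIST_HEADS pages or more. An allocation searches from the smallest bucket that could possibly satisfy it, carves the request off the end of the first sufficiently large run, and requeues any remainder on the list matching its new size. Below is a minimal user-mode sketch of that bucketing and carving logic only; FREE_ENTRY, MAX_FREE_LIST_HEADS, BucketIndex and AllocatePages are illustrative names, the list is singly linked for brevity, and none of the PFN, protection or expansion handling is modeled.

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT          12
#define MAX_FREE_LIST_HEADS 4            /* stand-in for MI_MAX_FREE_LIST_HEADS */

typedef struct FREE_ENTRY {
    struct FREE_ENTRY *Next;             /* the kernel uses a doubly linked LIST_ENTRY */
    size_t             SizeInPages;      /* length of this free run in pages           */
} FREE_ENTRY;

static FREE_ENTRY *FreeListHead[MAX_FREE_LIST_HEADS];

/* Runs of 1..MAX-1 pages get their own bucket; larger runs share the last one. */
static size_t BucketIndex(size_t SizeInPages)
{
    size_t Index = SizeInPages - 1;
    return (Index >= MAX_FREE_LIST_HEADS) ? MAX_FREE_LIST_HEADS - 1 : Index;
}

/*
 * Search from the smallest bucket that could hold the request upward,
 * carve the request off the END of the first run that is large enough,
 * and requeue any remainder on the bucket matching its new size.
 */
static void *AllocatePages(size_t SizeInPages)
{
    for (size_t Index = BucketIndex(SizeInPages); Index < MAX_FREE_LIST_HEADS; Index += 1) {
        for (FREE_ENTRY **Link = &FreeListHead[Index]; *Link != NULL; Link = &(*Link)->Next) {
            FREE_ENTRY *Entry = *Link;
            if (Entry->SizeInPages < SizeInPages) {
                continue;
            }
            *Link = Entry->Next;                          /* unlink the whole run   */
            Entry->SizeInPages -= SizeInPages;
            void *BaseVa = (char *)Entry + (Entry->SizeInPages << PAGE_SHIFT);
            if (Entry->SizeInPages != 0) {                /* requeue the remainder  */
                size_t NewIndex = BucketIndex(Entry->SizeInPages);
                Entry->Next = FreeListHead[NewIndex];
                FreeListHead[NewIndex] = Entry;
            }
            return BaseVa;
        }
    }
    return NULL;        /* no run fits: the real code falls through to pool expansion */
}

int main(void)
{
    static char Arena[16 << PAGE_SHIFT];  /* pretend this is a 16-page free run */
    FREE_ENTRY *Initial = (FREE_ENTRY *)Arena;
    Initial->Next = NULL;
    Initial->SizeInPages = 16;
    FreeListHead[BucketIndex(16)] = Initial;

    char *Va = AllocatePages(3);
    printf("3 pages carved at page offset %td of the run\n",
           (Va - Arena) >> PAGE_SHIFT);   /* prints 13: taken from the end */
    return 0;
}

Running the sketch shows the three-page request being carved from the tail of the 16-page run, which is exactly the "remove the pages from the end of the allocation" behavior in the listing above.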

ULONG MiFreePoolPages ( IN PVOID StartingAddress )

Definition at line 1778 of file allocpag.c.

References _MM_PAGED_POOL_INFO::AllocatedPagedPool, ASSERT, _MM_SESSION_SPACE::CommittedPages, CONSISTENCY_LOCK_PFN2, CONSISTENCY_UNLOCK_PFN2, DbgPrint, _MM_PAGED_POOL_INFO::EndOfPagedPoolBitmap, FALSE, _MM_PAGED_POOL_INFO::FirstPteForPagedPool, Index, KeBugCheckEx(), L, _MMFREE_POOL_ENTRY::List, LOCK_SESSION_SPACE_WS, MI_CONVERT_PHYSICAL_TO_PFN, MI_GET_PAGE_FRAME_FROM_PTE, MI_IS_PHYSICAL_ADDRESS, MI_IS_SESSION_POOL_ADDRESS, MI_MAGIC_AWE_PTEFRAME, MI_MAX_FREE_LIST_HEADS, MI_PFN_ELEMENT, MiDeleteSystemPagableVm(), MiEndOfInitialPoolFrame, MiFreeNonPagedPool(), MiGetPteAddress, MiGetVirtualAddressMappedByPte, MiHydra, MiProtectedPoolInsertList(), MiProtectedPoolRemoveEntryList(), MiProtectFreeNonPagedPool(), MiReturnCommitment(), MiSessionPoolFreed(), MiUnProtectFreeNonPagedPool(), MM_BUMP_SESS_COUNTER, MM_DBG_COMMIT_RETURN_PAGED_POOL_PAGES, MM_DBG_SESSION_COMMIT_POOL_FREED, MM_FREE_POOL_SIGNATURE, MM_KERNEL_NOACCESS_PTE, MM_SMALL_ALLOCATIONS, MM_TRACK_COMMIT, MmAllocatedNonPagedPool, MmExpandedPoolBitPosition, MmHighestPhysicalPage, MmMustSucceedPoolBitPosition, MmNonPagedMustSucceed, MmNonPagedPoolEnd, MmNonPagedPoolExpansionStart, MmNonPagedPoolFreeListHead, MmNonPagedPoolStart, MmNumberOfFreeNonPagedPool, MmPageAlignedPoolBase, MmPagedPoolCommit, MmPagedPoolEnd, MmPagedPoolInfo, MmPagedPoolStart, MmPfnDatabase, MmProtectFreedNonPagedPool, MmSessionSpace, MmSizeOfNonPagedPoolInBytes, MmSizeOfPagedPoolInBytes, NoAccessPte, NonPagedPool, NULL, _MMFREE_POOL_ENTRY::Owner, PAGE_SHIFT, PAGE_SIZE, PagedPool, _MM_PAGED_POOL_INFO::PagedPoolAllocationMap, _MM_PAGED_POOL_INFO::PagedPoolCommit, _MM_PAGED_POOL_INFO::PagedPoolHint, _MM_SESSION_SPACE::PagedPoolInfo, _MM_PAGED_POOL_INFO::PagedPoolLargeSessionAllocationMap, _MM_SESSION_SPACE::PagedPoolStart, POOL_TYPE, _MMPFN::PteAddress, _MMPFN::PteFrame, RtlClearBits(), _MMFREE_POOL_ENTRY::Signature, _MMFREE_POOL_ENTRY::Size, TRUE, _MMPTE::u, _MMPFN::u3, UNLOCK_SESSION_SPACE_WS, VerifierFreeTrackedPool(), and VerifierLargePagedPoolMap.

Referenced by DeallocatePoolInternal(), ExFreePool(), ExFreePoolWithTag(), ExpAddTagForBigPages(), ExpInsertPoolTracker(), and MiInitMachineDependent().

01784 : 01785 01786 This function returns a set of pages back to the pool from 01787 which they were obtained. Once the pages have been deallocated 01788 the region provided by the allocation becomes available for 01789 allocation to other callers, i.e. any data in the region is now 01790 trashed and cannot be referenced. 01791 01792 Arguments: 01793 01794 StartingAddress - Supplies the starting address which was returned 01795 in a previous call to MiAllocatePoolPages. 01796 01797 Return Value: 01798 01799 Returns the number of pages deallocated. 01800 01801 Environment: 01802 01803 These functions are used by the general pool allocation routines 01804 and should not be called directly. 01805 01806 Mutexes guarding the pool databases must be held when calling 01807 these functions. 01808 01809 --*/ 01810 01811 { 01812 ULONG StartPosition; 01813 ULONG Index; 01814 PFN_NUMBER i; 01815 PFN_NUMBER NumberOfPages; 01816 POOL_TYPE PoolType; 01817 PMMPTE PointerPte; 01818 PMMPFN Pfn1; 01819 PFN_NUMBER PageFrameIndex; 01820 KIRQL OldIrql; 01821 ULONG IsLargeSessionAllocation; 01822 ULONG IsLargeVerifierAllocation; 01823 PMMFREE_POOL_ENTRY Entry; 01824 PMMFREE_POOL_ENTRY NextEntry; 01825 PMM_PAGED_POOL_INFO PagedPoolInfo; 01826 PMM_SESSION_SPACE SessionSpace; 01827 LOGICAL SessionAllocation; 01828 MMPTE NoAccessPte; 01829 PFN_NUMBER PagesFreed; 01830 01831 NumberOfPages = 1; 01832 01833 // 01834 // Determine Pool type base on the virtual address of the block 01835 // to deallocate. 01836 // 01837 // This assumes NonPagedPool starts at a higher virtual address 01838 // then PagedPool. 01839 // 01840 01841 if ((StartingAddress >= MmPagedPoolStart) && 01842 (StartingAddress <= MmPagedPoolEnd)) { 01843 PoolType = PagedPool; 01844 SessionSpace = NULL; 01845 PagedPoolInfo = &MmPagedPoolInfo; 01846 StartPosition = (ULONG)(((PCHAR)StartingAddress - 01847 (PCHAR)MmPageAlignedPoolBase[PoolType]) >> PAGE_SHIFT); 01848 } 01849 else if (MI_IS_SESSION_POOL_ADDRESS (StartingAddress) == TRUE) { 01850 ASSERT (MiHydra == TRUE); 01851 PoolType = PagedPool; 01852 SessionSpace = MmSessionSpace; 01853 ASSERT (SessionSpace); 01854 PagedPoolInfo = &SessionSpace->PagedPoolInfo; 01855 StartPosition = (ULONG)(((PCHAR)StartingAddress - 01856 (PCHAR)SessionSpace->PagedPoolStart) >> PAGE_SHIFT); 01857 } 01858 else { 01859 01860 if (StartingAddress < MM_SYSTEM_RANGE_START) { 01861 KeBugCheckEx (BAD_POOL_CALLER, 01862 0x40, 01863 (ULONG_PTR)StartingAddress, 01864 (ULONG_PTR)MM_SYSTEM_RANGE_START, 01865 0); 01866 } 01867 01868 PoolType = NonPagedPool; 01869 SessionSpace = NULL; 01870 PagedPoolInfo = &MmPagedPoolInfo; 01871 StartPosition = (ULONG)(((PCHAR)StartingAddress - 01872 (PCHAR)MmPageAlignedPoolBase[PoolType]) >> PAGE_SHIFT); 01873 } 01874 01875 // 01876 // Check to ensure this page is really the start of an allocation. 01877 // 01878 01879 if (PoolType == NonPagedPool) { 01880 01881 if (StartPosition < MmMustSucceedPoolBitPosition) { 01882 01883 PULONG_PTR NextList; 01884 01885 // 01886 // This is must succeed pool, don't free it, just 01887 // add it to the front of the list. 01888 // 01889 // Note - only a single page can be released at a time. 
01890 // 01891 01892 if (MI_IS_PHYSICAL_ADDRESS(StartingAddress)) { 01893 PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (StartingAddress); 01894 } else { 01895 PointerPte = MiGetPteAddress(StartingAddress); 01896 ASSERT (PointerPte->u.Hard.Valid == 1); 01897 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 01898 } 01899 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 01900 01901 if (Pfn1->u3.e1.VerifierAllocation == 1) { 01902 Pfn1->u3.e1.VerifierAllocation = 0; 01903 VerifierFreeTrackedPool (StartingAddress, 01904 PAGE_SIZE, 01905 NonPagedPool, 01906 FALSE); 01907 } 01908 01909 // 01910 // Check for this being a large session allocation. If it is, 01911 // we need to return the pool charge accordingly. 01912 // 01913 01914 if (Pfn1->u3.e1.LargeSessionAllocation) { 01915 Pfn1->u3.e1.LargeSessionAllocation = 0; 01916 MiSessionPoolFreed (StartingAddress, 01917 PAGE_SIZE, 01918 NonPagedPool); 01919 } 01920 01921 NextList = (PULONG_PTR)StartingAddress; 01922 *NextList = (ULONG_PTR)MmNonPagedMustSucceed; 01923 MmNonPagedMustSucceed = StartingAddress; 01924 return (ULONG)NumberOfPages; 01925 } 01926 01927 if (MI_IS_PHYSICAL_ADDRESS (StartingAddress)) { 01928 01929 // 01930 // On certain architectures, virtual addresses 01931 // may be physical and hence have no corresponding PTE. 01932 // 01933 01934 Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (StartingAddress)); 01935 if (StartPosition >= MmExpandedPoolBitPosition) { 01936 PointerPte = Pfn1->PteAddress; 01937 StartingAddress = MiGetVirtualAddressMappedByPte (PointerPte); 01938 } 01939 } else { 01940 PointerPte = MiGetPteAddress (StartingAddress); 01941 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 01942 } 01943 01944 if (Pfn1->u3.e1.StartOfAllocation == 0) { 01945 KeBugCheckEx (BAD_POOL_CALLER, 01946 0x41, 01947 (ULONG_PTR)StartingAddress, 01948 (ULONG_PTR)(Pfn1 - MmPfnDatabase), 01949 MmHighestPhysicalPage); 01950 } 01951 01952 CONSISTENCY_LOCK_PFN2 (OldIrql); 01953 01954 ASSERT (Pfn1->PteFrame != MI_MAGIC_AWE_PTEFRAME); 01955 01956 IsLargeVerifierAllocation = Pfn1->u3.e1.VerifierAllocation; 01957 IsLargeSessionAllocation = Pfn1->u3.e1.LargeSessionAllocation; 01958 01959 Pfn1->u3.e1.StartOfAllocation = 0; 01960 Pfn1->u3.e1.VerifierAllocation = 0; 01961 Pfn1->u3.e1.LargeSessionAllocation = 0; 01962 01963 CONSISTENCY_UNLOCK_PFN2 (OldIrql); 01964 01965 #if DBG 01966 if ((Pfn1->u3.e2.ReferenceCount > 1) && 01967 (Pfn1->u3.e1.WriteInProgress == 0)) { 01968 DbgPrint ("MM: MiFreePoolPages - deleting pool locked for I/O %lx\n", 01969 Pfn1); 01970 ASSERT (Pfn1->u3.e2.ReferenceCount == 1); 01971 } 01972 #endif //DBG 01973 01974 // 01975 // Find end of allocation and release the pages. 
01976 // 01977 01978 while (Pfn1->u3.e1.EndOfAllocation == 0) { 01979 if (MI_IS_PHYSICAL_ADDRESS(StartingAddress)) { 01980 Pfn1 += 1; 01981 } else { 01982 PointerPte += 1; 01983 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 01984 } 01985 NumberOfPages += 1; 01986 #if DBG 01987 if ((Pfn1->u3.e2.ReferenceCount > 1) && 01988 (Pfn1->u3.e1.WriteInProgress == 0)) { 01989 DbgPrint ("MM:MiFreePoolPages - deleting pool locked for I/O %lx\n", 01990 Pfn1); 01991 ASSERT (Pfn1->u3.e2.ReferenceCount == 1); 01992 } 01993 #endif //DBG 01994 } 01995 01996 MmAllocatedNonPagedPool -= NumberOfPages; 01997 01998 if (IsLargeVerifierAllocation != 0) { 01999 VerifierFreeTrackedPool (StartingAddress, 02000 NumberOfPages << PAGE_SHIFT, 02001 NonPagedPool, 02002 FALSE); 02003 } 02004 02005 if (IsLargeSessionAllocation != 0) { 02006 MiSessionPoolFreed (StartingAddress, 02007 NumberOfPages << PAGE_SHIFT, 02008 NonPagedPool); 02009 } 02010 02011 CONSISTENCY_LOCK_PFN2 (OldIrql); 02012 02013 Pfn1->u3.e1.EndOfAllocation = 0; 02014 02015 CONSISTENCY_UNLOCK_PFN2 (OldIrql); 02016 02017 #if DBG 02018 if (MiFillFreedPool != 0) { 02019 RtlFillMemoryUlong (StartingAddress, 02020 PAGE_SIZE * NumberOfPages, 02021 MiFillFreedPool); 02022 } 02023 #endif //DBG 02024 02025 if (StartingAddress > MmNonPagedPoolExpansionStart) { 02026 02027 // 02028 // This page was from the expanded pool, should 02029 // it be freed? 02030 // 02031 // NOTE: all pages in the expanded pool area have PTEs 02032 // so no physical address checks need to be performed. 02033 // 02034 02035 if ((NumberOfPages > 3) || (MmNumberOfFreeNonPagedPool > 5)) { 02036 02037 // 02038 // Free these pages back to the free page list. 02039 // 02040 02041 MiFreeNonPagedPool (StartingAddress, NumberOfPages); 02042 02043 return (ULONG)NumberOfPages; 02044 } 02045 } 02046 02047 // 02048 // Add the pages to the list of free pages. 02049 // 02050 02051 MmNumberOfFreeNonPagedPool += NumberOfPages; 02052 02053 // 02054 // Check to see if the next allocation is free. 02055 // We cannot walk off the end of nonpaged initial or expansion 02056 // pages as the highest initial allocation is never freed and 02057 // the highest expansion allocation is guard-paged. 02058 // 02059 02060 i = NumberOfPages; 02061 02062 ASSERT (MiEndOfInitialPoolFrame != 0); 02063 02064 if ((PFN_NUMBER)(Pfn1 - MmPfnDatabase) == MiEndOfInitialPoolFrame) { 02065 PointerPte += 1; 02066 Pfn1 = NULL; 02067 } 02068 else if (MI_IS_PHYSICAL_ADDRESS(StartingAddress)) { 02069 Pfn1 += 1; 02070 ASSERT ((PCHAR)StartingAddress + NumberOfPages < (PCHAR)MmNonPagedPoolStart + MmSizeOfNonPagedPoolInBytes); 02071 } else { 02072 PointerPte += 1; 02073 ASSERT ((PCHAR)StartingAddress + NumberOfPages <= (PCHAR)MmNonPagedPoolEnd); 02074 02075 // 02076 // Unprotect the previously freed pool so it can be merged. 02077 // 02078 02079 if (MmProtectFreedNonPagedPool == TRUE) { 02080 MiUnProtectFreeNonPagedPool ( 02081 (PVOID)MiGetVirtualAddressMappedByPte(PointerPte), 02082 0); 02083 } 02084 02085 if (PointerPte->u.Hard.Valid == 1) { 02086 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 02087 } else { 02088 Pfn1 = NULL; 02089 } 02090 } 02091 02092 if ((Pfn1 != NULL) && (Pfn1->u3.e1.StartOfAllocation == 0)) { 02093 02094 // 02095 // This range of pages is free. Remove this entry 02096 // from the list and add these pages to the current 02097 // range being freed. 
02098 // 02099 02100 Entry = (PMMFREE_POOL_ENTRY)((PCHAR)StartingAddress 02101 + (NumberOfPages << PAGE_SHIFT)); 02102 ASSERT (Entry->Signature == MM_FREE_POOL_SIGNATURE); 02103 ASSERT (Entry->Owner == Entry); 02104 #if DBG 02105 { 02106 PMMPTE DebugPte; 02107 PMMPFN DebugPfn; 02108 02109 DebugPfn = NULL; 02110 02111 if (MI_IS_PHYSICAL_ADDRESS(StartingAddress)) { 02112 02113 // 02114 // On certain architectures, virtual addresses 02115 // may be physical and hence have no corresponding PTE. 02116 // 02117 02118 DebugPfn = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (Entry)); 02119 DebugPfn += Entry->Size; 02120 if ((PFN_NUMBER)((DebugPfn - 1) - MmPfnDatabase) != MiEndOfInitialPoolFrame) { 02121 ASSERT (DebugPfn->u3.e1.StartOfAllocation == 1); 02122 } 02123 } else { 02124 DebugPte = PointerPte + Entry->Size; 02125 if ((DebugPte-1)->u.Hard.Valid == 1) { 02126 DebugPfn = MI_PFN_ELEMENT ((DebugPte-1)->u.Hard.PageFrameNumber); 02127 if ((PFN_NUMBER)(DebugPfn - MmPfnDatabase) != MiEndOfInitialPoolFrame) { 02128 if (DebugPte->u.Hard.Valid == 1) { 02129 DebugPfn = MI_PFN_ELEMENT (DebugPte->u.Hard.PageFrameNumber); 02130 ASSERT (DebugPfn->u3.e1.StartOfAllocation == 1); 02131 } 02132 } 02133 02134 } 02135 } 02136 } 02137 #endif //DBG 02138 02139 i += Entry->Size; 02140 if (MmProtectFreedNonPagedPool == FALSE) { 02141 RemoveEntryList (&Entry->List); 02142 } 02143 else { 02144 MiProtectedPoolRemoveEntryList (&Entry->List); 02145 } 02146 } 02147 02148 // 02149 // Check to see if the previous page is the end of an allocation. 02150 // If it is not the end of an allocation, it must be free and 02151 // therefore this allocation can be tagged onto the end of 02152 // that allocation. 02153 // 02154 // We cannot walk off the beginning of expansion pool because it is 02155 // guard-paged. If the initial pool is superpaged instead, we are also 02156 // safe as the must succeed pages always have EndOfAllocation set. 02157 // 02158 02159 Entry = (PMMFREE_POOL_ENTRY)StartingAddress; 02160 02161 if (MI_IS_PHYSICAL_ADDRESS(StartingAddress)) { 02162 ASSERT (StartingAddress != MmNonPagedPoolStart); 02163 02164 Pfn1 = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN ( 02165 (PVOID)((PCHAR)Entry - PAGE_SIZE))); 02166 02167 } else { 02168 PointerPte -= NumberOfPages + 1; 02169 02170 // 02171 // Unprotect the previously freed pool so it can be merged. 02172 // 02173 02174 if (MmProtectFreedNonPagedPool == TRUE) { 02175 MiUnProtectFreeNonPagedPool ( 02176 (PVOID)MiGetVirtualAddressMappedByPte(PointerPte), 02177 0); 02178 } 02179 02180 if (PointerPte->u.Hard.Valid == 1) { 02181 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 02182 } else { 02183 Pfn1 = NULL; 02184 } 02185 } 02186 if (Pfn1 != NULL) { 02187 if (Pfn1->u3.e1.EndOfAllocation == 0) { 02188 02189 // 02190 // This range of pages is free, add these pages to 02191 // this entry. The owner field points to the address 02192 // of the list entry which is linked into the free pool 02193 // pages list. 02194 // 02195 02196 Entry = (PMMFREE_POOL_ENTRY)((PCHAR)StartingAddress - PAGE_SIZE); 02197 ASSERT (Entry->Signature == MM_FREE_POOL_SIGNATURE); 02198 Entry = Entry->Owner; 02199 02200 // 02201 // Unprotect the previously freed pool so we can merge it 02202 // 02203 02204 if (MmProtectFreedNonPagedPool == TRUE) { 02205 MiUnProtectFreeNonPagedPool ((PVOID)Entry, 0); 02206 } 02207 02208 // 02209 // If this entry became larger than MM_SMALL_ALLOCATIONS 02210 // pages, move it to the tail of the list. This keeps the 02211 // small allocations at the front of the list. 
02212 // 02213 02214 if (Entry->Size < MI_MAX_FREE_LIST_HEADS - 1) { 02215 02216 if (MmProtectFreedNonPagedPool == FALSE) { 02217 RemoveEntryList (&Entry->List); 02218 } 02219 else { 02220 MiProtectedPoolRemoveEntryList (&Entry->List); 02221 } 02222 02223 // 02224 // Add these pages to the previous entry. 02225 // 02226 02227 Entry->Size += i; 02228 02229 Index = (ULONG)(Entry->Size - 1); 02230 02231 if (Index >= MI_MAX_FREE_LIST_HEADS) { 02232 Index = MI_MAX_FREE_LIST_HEADS - 1; 02233 } 02234 02235 if (MmProtectFreedNonPagedPool == FALSE) { 02236 InsertTailList (&MmNonPagedPoolFreeListHead[Index], 02237 &Entry->List); 02238 } 02239 else { 02240 MiProtectedPoolInsertList (&MmNonPagedPoolFreeListHead[Index], 02241 &Entry->List, 02242 Entry->Size < MM_SMALL_ALLOCATIONS ? 02243 TRUE : FALSE); 02244 } 02245 } 02246 else { 02247 02248 // 02249 // Add these pages to the previous entry. 02250 // 02251 02252 Entry->Size += i; 02253 } 02254 } 02255 } 02256 02257 if (Entry == (PMMFREE_POOL_ENTRY)StartingAddress) { 02258 02259 // 02260 // This entry was not combined with the previous, insert it 02261 // into the list. 02262 // 02263 02264 Entry->Size = i; 02265 02266 Index = (ULONG)(Entry->Size - 1); 02267 02268 if (Index >= MI_MAX_FREE_LIST_HEADS) { 02269 Index = MI_MAX_FREE_LIST_HEADS - 1; 02270 } 02271 02272 if (MmProtectFreedNonPagedPool == FALSE) { 02273 InsertTailList (&MmNonPagedPoolFreeListHead[Index], 02274 &Entry->List); 02275 } 02276 else { 02277 MiProtectedPoolInsertList (&MmNonPagedPoolFreeListHead[Index], 02278 &Entry->List, 02279 Entry->Size < MM_SMALL_ALLOCATIONS ? 02280 TRUE : FALSE); 02281 } 02282 } 02283 02284 // 02285 // Set the owner field in all these pages. 02286 // 02287 02288 NextEntry = (PMMFREE_POOL_ENTRY)StartingAddress; 02289 while (i > 0) { 02290 NextEntry->Owner = Entry; 02291 #if DBG 02292 NextEntry->Signature = MM_FREE_POOL_SIGNATURE; 02293 #endif 02294 02295 NextEntry = (PMMFREE_POOL_ENTRY)((PCHAR)NextEntry + PAGE_SIZE); 02296 i -= 1; 02297 } 02298 02299 #if DBG 02300 NextEntry = Entry; 02301 for (i = 0; i < Entry->Size; i += 1) { 02302 PMMPTE DebugPte; 02303 PMMPFN DebugPfn; 02304 if (MI_IS_PHYSICAL_ADDRESS(StartingAddress)) { 02305 02306 // 02307 // On certain architectures, virtual addresses 02308 // may be physical and hence have no corresponding PTE. 02309 // 02310 02311 DebugPfn = MI_PFN_ELEMENT (MI_CONVERT_PHYSICAL_TO_PFN (NextEntry)); 02312 } else { 02313 02314 DebugPte = MiGetPteAddress (NextEntry); 02315 DebugPfn = MI_PFN_ELEMENT (DebugPte->u.Hard.PageFrameNumber); 02316 } 02317 ASSERT (DebugPfn->u3.e1.StartOfAllocation == 0); 02318 ASSERT (DebugPfn->u3.e1.EndOfAllocation == 0); 02319 ASSERT (NextEntry->Owner == Entry); 02320 NextEntry = (PMMFREE_POOL_ENTRY)((PCHAR)NextEntry + PAGE_SIZE); 02321 } 02322 #endif 02323 02324 // 02325 // Prevent anyone from touching non paged pool after freeing it. 02326 // 02327 02328 if (MmProtectFreedNonPagedPool == TRUE) { 02329 MiProtectFreeNonPagedPool ((PVOID)Entry, (ULONG)Entry->Size); 02330 } 02331 02332 return (ULONG)NumberOfPages; 02333 02334 } else { 02335 02336 // 02337 // Paged pool. Need to verify start of allocation using 02338 // end of allocation bitmap. 
02339 // 02340 02341 if (!RtlCheckBit (PagedPoolInfo->PagedPoolAllocationMap, StartPosition)) { 02342 KeBugCheckEx (BAD_POOL_CALLER, 02343 0x50, 02344 (ULONG_PTR)StartingAddress, 02345 (ULONG_PTR)StartPosition, 02346 MmSizeOfPagedPoolInBytes); 02347 } 02348 02349 #if DBG 02350 if (StartPosition > 0) { 02351 if (RtlCheckBit (PagedPoolInfo->PagedPoolAllocationMap, StartPosition - 1)) { 02352 if (!RtlCheckBit (PagedPoolInfo->EndOfPagedPoolBitmap, StartPosition - 1)) { 02353 02354 // 02355 // In the middle of an allocation... bugcheck. 02356 // 02357 02358 DbgPrint("paged pool in middle of allocation\n"); 02359 KeBugCheckEx (MEMORY_MANAGEMENT, 02360 0x41286, 02361 (ULONG_PTR)PagedPoolInfo->PagedPoolAllocationMap, 02362 (ULONG_PTR)PagedPoolInfo->EndOfPagedPoolBitmap, 02363 StartPosition); 02364 } 02365 } 02366 } 02367 #endif 02368 02369 i = StartPosition; 02370 PointerPte = PagedPoolInfo->FirstPteForPagedPool + i; 02371 02372 // 02373 // Find the last allocated page and check to see if any 02374 // of the pages being deallocated are in the paging file. 02375 // 02376 02377 while (!RtlCheckBit (PagedPoolInfo->EndOfPagedPoolBitmap, i)) { 02378 NumberOfPages += 1; 02379 i += 1; 02380 } 02381 02382 NoAccessPte.u.Long = MM_KERNEL_NOACCESS_PTE; 02383 02384 if (SessionSpace) { 02385 02386 // 02387 // This is needed purely to verify no one leaks pool. This 02388 // could be removed if we believe everyone was good. 02389 // 02390 02391 if (RtlCheckBit (PagedPoolInfo->PagedPoolLargeSessionAllocationMap, 02392 StartPosition)) { 02393 02394 RtlClearBits (PagedPoolInfo->PagedPoolLargeSessionAllocationMap, 02395 StartPosition, 02396 1L); 02397 02398 MiSessionPoolFreed (MiGetVirtualAddressMappedByPte (PointerPte), 02399 NumberOfPages << PAGE_SHIFT, 02400 PagedPool); 02401 } 02402 02403 SessionAllocation = TRUE; 02404 } 02405 else { 02406 SessionAllocation = FALSE; 02407 02408 if (VerifierLargePagedPoolMap) { 02409 02410 if (RtlCheckBit (VerifierLargePagedPoolMap, StartPosition)) { 02411 02412 RtlClearBits (VerifierLargePagedPoolMap, 02413 StartPosition, 02414 1L); 02415 02416 VerifierFreeTrackedPool (MiGetVirtualAddressMappedByPte (PointerPte), 02417 NumberOfPages << PAGE_SHIFT, 02418 PagedPool, 02419 FALSE); 02420 } 02421 } 02422 } 02423 02424 PagesFreed = MiDeleteSystemPagableVm (PointerPte, 02425 NumberOfPages, 02426 NoAccessPte, 02427 SessionAllocation, 02428 NULL); 02429 02430 ASSERT (PagesFreed == NumberOfPages); 02431 02432 if (SessionSpace) { 02433 LOCK_SESSION_SPACE_WS (OldIrql); 02434 MmSessionSpace->CommittedPages -= NumberOfPages; 02435 02436 MM_BUMP_SESS_COUNTER(MM_DBG_SESSION_COMMIT_POOL_FREED, 02437 NumberOfPages); 02438 02439 UNLOCK_SESSION_SPACE_WS (OldIrql); 02440 } 02441 else { 02442 MmPagedPoolCommit -= (ULONG)NumberOfPages; 02443 } 02444 02445 MiReturnCommitment (NumberOfPages); 02446 02447 MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_PAGED_POOL_PAGES, NumberOfPages); 02448 02449 // 02450 // Clear the end of allocation bit in the bit map. 02451 // 02452 02453 RtlClearBits (PagedPoolInfo->EndOfPagedPoolBitmap, (ULONG)i, 1L); 02454 02455 PagedPoolInfo->PagedPoolCommit -= NumberOfPages; 02456 PagedPoolInfo->AllocatedPagedPool -= NumberOfPages; 02457 02458 // 02459 // Clear the allocation bits in the bit map. 
02460 // 02461 02462 RtlClearBits (PagedPoolInfo->PagedPoolAllocationMap, 02463 StartPosition, 02464 (ULONG)NumberOfPages 02465 ); 02466 02467 if (StartPosition < PagedPoolInfo->PagedPoolHint) { 02468 PagedPoolInfo->PagedPoolHint = StartPosition; 02469 } 02470 02471 return (ULONG)NumberOfPages; 02472 } 02473 }
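For paged pool the start and end of an allocation cannot be recorded in the PFN database, so, as the comment in MiAllocatePoolPages explains, a second bitmap parallel to the allocation bitmap records the last page of every allocation, and MiFreePoolPages recovers the length from it given only the starting address. The following is a minimal user-mode model of that two-bitmap scheme; POOL_PAGES, Allocated and EndOfAlloc are illustrative names, and a plain first-fit search stands in for RtlFindClearBitsAndSet with a hint, so only the bookkeeping idea carries over, none of the kernel details.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define POOL_PAGES 64

static bool Allocated[POOL_PAGES];       /* PagedPoolAllocationMap analogue */
static bool EndOfAlloc[POOL_PAGES];      /* EndOfPagedPoolBitmap analogue   */

/* First-fit search for Pages clear bits, mark them allocated, flag the last page. */
static size_t AllocatePoolPages(size_t Pages)
{
    for (size_t Start = 0; Start + Pages <= POOL_PAGES; Start += 1) {
        size_t i;
        for (i = 0; i < Pages && !Allocated[Start + i]; i += 1) {
        }
        if (i == Pages) {
            for (i = 0; i < Pages; i += 1) {
                Allocated[Start + i] = true;
            }
            EndOfAlloc[Start + Pages - 1] = true;
            return Start;
        }
    }
    return (size_t)-1;                   /* no room: the kernel would try to expand the pool */
}

/* Free by starting page only: the length is recovered from the end-of-allocation map. */
static size_t FreePoolPages(size_t Start)
{
    /* Sanity check from the comment above: the previous page must either be
     * unallocated or be the end of some other allocation.                    */
    assert(Start == 0 || !Allocated[Start - 1] || EndOfAlloc[Start - 1]);

    size_t End = Start;
    while (!EndOfAlloc[End]) {
        End += 1;
    }
    EndOfAlloc[End] = false;
    for (size_t i = Start; i <= End; i += 1) {
        Allocated[i] = false;
    }
    return End - Start + 1;              /* number of pages released */
}

int main(void)
{
    size_t a = AllocatePoolPages(3);
    size_t b = AllocatePoolPages(2);
    assert(FreePoolPages(a) == 3);       /* length recovered from EndOfAlloc alone */
    assert(FreePoolPages(b) == 2);
    return 0;
}

The assert in FreePoolPages is the same sanity check described in the allocation routine's comment: a valid starting page is preceded either by a free page or by the end of another allocation, and anything else indicates a corrupt or bogus free address.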

VOID MiSessionPoolAllocated ( IN PVOID VirtualAddress, IN SIZE_T NumberOfBytes, IN POOL_TYPE PoolType )

Definition at line 505 of file allocpag.c.

References ASSERT, BASE_POOL_TYPE_MASK, FALSE, MI_IS_SESSION_POOL_ADDRESS, MmSessionSpace, NonPagedPool, _MM_SESSION_SPACE::NonPagedPoolAllocations, _MM_SESSION_SPACE::NonPagedPoolBytes, _MM_SESSION_SPACE::PagedPoolAllocations, _MM_SESSION_SPACE::PagedPoolBytes, and TRUE.

Referenced by ExAllocatePoolWithTag(), and MiAllocatePoolPages().

00513 : 00514 00515 This function charges the new pool allocation for the current session. 00516 On session exit, this charge must be zero. 00517 00518 Arguments: 00519 00520 VirtualAddress - Supplies the allocated pool address. 00521 00522 NumberOfBytes - Supplies the number of bytes allocated. 00523 00524 PoolType - Supplies the type of the above pool allocation. 00525 00526 Return Value: 00527 00528 None. 00529 00530 --*/ 00531 00532 { 00533 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) { 00534 ASSERT (MI_IS_SESSION_POOL_ADDRESS(VirtualAddress) == FALSE); 00535 MmSessionSpace->NonPagedPoolBytes += NumberOfBytes; 00536 MmSessionSpace->NonPagedPoolAllocations += 1; 00537 } 00538 else { 00539 ASSERT (MI_IS_SESSION_POOL_ADDRESS(VirtualAddress) == TRUE); 00540 MmSessionSpace->PagedPoolBytes += NumberOfBytes; 00541 MmSessionSpace->PagedPoolAllocations += 1; 00542 } 00543 }

VOID MiSessionPoolFreed ( IN PVOID VirtualAddress, IN SIZE_T NumberOfBytes, IN POOL_TYPE PoolType )

Definition at line 547 of file allocpag.c.

References ASSERT, BASE_POOL_TYPE_MASK, FALSE, MI_IS_SESSION_POOL_ADDRESS, MmSessionSpace, NonPagedPool, _MM_SESSION_SPACE::NonPagedPoolAllocations, _MM_SESSION_SPACE::NonPagedPoolBytes, _MM_SESSION_SPACE::PagedPoolAllocations, _MM_SESSION_SPACE::PagedPoolBytes, and TRUE.

Referenced by ExFreePoolWithTag(), and MiFreePoolPages().

00555 : 00556 00557 This function returns the specified pool allocation for the current session. 00558 On session exit, this charge must be zero. 00559 00560 Arguments: 00561 00562 VirtualAddress - Supplies the pool address being freed. 00563 00564 NumberOfBytes - Supplies the number of bytes being freed. 00565 00566 PoolType - Supplies the type of the above pool allocation. 00567 00568 Return Value: 00569 00570 None. 00571 00572 --*/ 00573 00574 { 00575 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) { 00576 ASSERT (MI_IS_SESSION_POOL_ADDRESS(VirtualAddress) == FALSE); 00577 MmSessionSpace->NonPagedPoolBytes -= NumberOfBytes; 00578 MmSessionSpace->NonPagedPoolAllocations -= 1; 00579 } 00580 else { 00581 ASSERT (MI_IS_SESSION_POOL_ADDRESS(VirtualAddress) == TRUE); 00582 MmSessionSpace->PagedPoolBytes -= NumberOfBytes; 00583 MmSessionSpace->PagedPoolAllocations -= 1; 00584 } 00585 }
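MiSessionPoolAllocated and MiSessionPoolFreed form a charge/credit pair over per-session counters, and the requirement that "on session exit, this charge must be zero" makes them a simple leak detector for session pool. A toy user-mode model of that accounting is shown below; SESSION_CHARGES and the function names are illustrative, not the kernel's MM_SESSION_SPACE fields.

#include <assert.h>
#include <stddef.h>

typedef struct SESSION_CHARGES {
    size_t NonPagedPoolBytes;
    size_t NonPagedPoolAllocations;
    size_t PagedPoolBytes;
    size_t PagedPoolAllocations;
} SESSION_CHARGES;

static void SessionPoolAllocated(SESSION_CHARGES *s, size_t Bytes, int NonPaged)
{
    if (NonPaged) {
        s->NonPagedPoolBytes += Bytes;
        s->NonPagedPoolAllocations += 1;
    } else {
        s->PagedPoolBytes += Bytes;
        s->PagedPoolAllocations += 1;
    }
}

static void SessionPoolFreed(SESSION_CHARGES *s, size_t Bytes, int NonPaged)
{
    if (NonPaged) {
        s->NonPagedPoolBytes -= Bytes;
        s->NonPagedPoolAllocations -= 1;
    } else {
        s->PagedPoolBytes -= Bytes;
        s->PagedPoolAllocations -= 1;
    }
}

int main(void)
{
    SESSION_CHARGES s = {0};
    SessionPoolAllocated(&s, 0x2000, 0);   /* paged session allocation */
    SessionPoolFreed(&s, 0x2000, 0);       /* matching free            */

    /* "On session exit, this charge must be zero." */
    assert(s.PagedPoolBytes == 0 && s.PagedPoolAllocations == 0);
    assert(s.NonPagedPoolBytes == 0 && s.NonPagedPoolAllocations == 0);
    return 0;
}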

PVOID MiSessionPoolVector ( VOID )

Definition at line 479 of file allocpag.c.

References MmSessionSpace, and _MM_SESSION_SPACE::PagedPool.

Referenced by ExpInitializePoolDescriptor().

00485 : 00486 00487 This function returns the session pool descriptor for the current session. 00488 00489 Arguments: 00490 00491 None. 00492 00493 Return Value: 00494 00495 Pool descriptor. 00496 00497 --*/ 00498 00499 { 00500 return (PVOID)&MmSessionSpace->PagedPool; 00501 }

NTSTATUS MmAccessFault ( IN BOOLEAN StoreInstruction, IN PVOID VirtualAddress, IN KPROCESSOR_MODE PreviousMode, IN PVOID TrapInformation )

Definition at line 41 of file mmfault.c.

References _MMSUPPORT::AllowWorkingSetAdjustment, APC_LEVEL, ASSERT, _KPROCESS::BasePriority, CONSISTENCY_LOCK_PFN, CONSISTENCY_UNLOCK_PFN, DbgPrint, _MMINFO_COUNTERS::DemandZeroCount, DemandZeroPde, DISPATCH_LEVEL, FALSE, _EPROCESS::ForkInProgress, HYDRA_PROCESS, HYPER_SPACE, IoRetryIrpCompletions(), KeBugCheckEx(), KeDelayExecutionThread(), KeInvalidAccessAllowed(), KeLowerIrql(), KeRaiseIrql(), KernelMode, LOCK_EXPANSION_IF_ALPHA, LOCK_PFN, LOCK_SESSION_SPACE_WS, LOCK_SYSTEM_WS, LOCK_WS, MI_ADD_LOCKED_PAGE_CHARGE, MI_BARRIER_STAMP_ZEROED_PAGE, MI_BARRIER_SYNCHRONIZE, MI_GET_PROTECTION_FROM_SOFT_PTE, MI_GET_USED_PTES_HANDLE, MI_INCREMENT_USED_PTES_BY_HANDLE, MI_IS_HYPER_SPACE_ADDRESS, MI_IS_PAGE_TABLE_ADDRESS, MI_IS_PHYSICAL_ADDRESS, MI_IS_SESSION_ADDRESS, MI_IS_SESSION_IMAGE_ADDRESS, MI_MAKE_VALID_PTE, MI_NO_FAULT_FOUND, MI_PAGE_COLOR_VA_PROCESS, MI_PFN_ELEMENT, MI_PTE_LOOKUP_NEEDED, MI_REMOVE_LOCKED_PAGE_CHARGE, MI_SET_PAGE_DIRTY, MI_SET_PTE_DIRTY, MI_SET_PTE_IN_WORKING_SET, MI_WRITE_INVALID_PTE, MI_WRITE_VALID_PTE, MI_WRITE_VALID_PTE_NEW_PROTECTION, MiAccessCheck(), MiCheckForUserStackOverflow(), MiCheckPdeForPagedPool(), MiCheckPdeForSessionSpace(), MiCheckVirtualAddress(), MiCopyOnWrite(), MiDelayPageFaults, MiDispatchFault(), MiEnsureAvailablePageOrWait(), MiFormatPte(), MiGetPdeAddress, MiGetPpeAddress, MiGetPteAddress, MiGrowWsleHash(), MiHydra, MiInitializePfn(), MiLocateAndReserveWsle(), MiMakeSystemAddressValidPfn(), _MMSUPPORT::MinimumWorkingSetSize, MiProtoAddressForPte, MiPteToProto, MiRemoveAnyPage(), MiRemoveZeroPageIfAny, MiResolveDemandZeroFault(), MiSessionCopyOnWrite(), MiUpdateWsle(), MiWaitForForkToComplete(), MiZeroPhysicalPage(), Mm30Milliseconds, MM_DBG_PTE_UPDATE, MM_DBG_SHOW_FAULTS, MM_DBG_STOP_ON_ACCVIO, MM_DEMAND_ZERO_WRITE_PTE, MM_EXECUTE_WRITECOPY, MM_GROW_WSLE_HASH, MM_GUARD_PAGE, MM_KSTACK_OUTSWAPPED, MM_LARGE_PAGES, MM_NOACCESS, MM_PTE_WRITE_MASK, MM_READONLY, MM_READWRITE, MM_SESSION_SPACE_WS_LOCK_ASSERT, MM_SYSTEM_SPACE_END, MM_UNKNOWN_PROTECTION, MM_ZERO_KERNEL_PTE, MM_ZERO_PTE, MmAvailablePages, MmHalfSecond, MmInfoCounters, MmModifiedPageListHead, MmModifiedPageMaximum, MmMoreThanEnoughFreePages, MmNonPagedPoolEnd, MmNonPagedPoolExpansionStart, MmNonPagedPoolStart, MmPageFaultNotifyRoutine, MmProtectFreedNonPagedPool, MmSessionSpace, MmShortTime, MmSystemCacheWs, MmSystemLockOwner, MmSystemRangeStart, MmWorkingSetList, _EPROCESS::ModifiedPageCount, _EPROCESS::NextPageColor, NTSTATUS(), NULL, _EPROCESS::NumberOfPrivatePages, _MMPFN::OriginalPte, PAGE_SIZE, _MMSUPPORT::PageFaultCount, PASSIVE_LEVEL, _EPROCESS::Pcb, PPAGE_FAULT_NOTIFY_ROUTINE, PROCESS_FOREGROUND_PRIORITY, PrototypePte, PsGetCurrentProcess, PsGetCurrentThread, _MMPFN::PteFrame, _MMPFNLIST::Total, TRUE, _MMPTE::u, _MMPFN::u1, _MMPFN::u3, UNLOCK_EXPANSION_IF_ALPHA, UNLOCK_PFN, UNLOCK_SESSION_SPACE_WS, UNLOCK_SYSTEM_WS, UNLOCK_WS, UserMode, _MM_SESSION_SPACE::Vm, _EPROCESS::Vm, _MM_SESSION_SPACE::WorkingSetLockOwner, _MMSUPPORT::WorkingSetSize, and WSLE_NUMBER.

Referenced by KiMemoryFault(), MiMakeSystemAddressValid(), MiMakeSystemAddressValidPfn(), MiMakeSystemAddressValidPfnSystemWs(), MiMakeSystemAddressValidPfnWs(), MmCheckCachedPageState(), MmProbeAndLockPages(), and MmProbeForWrite().

00050 : 00051 00052 This function is called by the kernel on data or instruction 00053 access faults. The access fault was detected due to either 00054 an access violation, a PTE with the present bit clear, or a 00055 valid PTE with the dirty bit clear and a write operation. 00056 00057 Also note that the access violation and the page fault could 00058 occur because of the Page Directory Entry contents as well. 00059 00060 This routine determines what type of fault it is and calls 00061 the appropriate routine to handle the page fault or the write 00062 fault. 00063 00064 Arguments: 00065 00066 StoreInstruction - Supplies TRUE (1) if the operation causes a write into 00067 memory. Note this value must be 1 or 0. 00068 00069 VirtualAddress - Supplies the virtual address which caused the fault. 00070 00071 PreviousMode - Supplies the mode (kernel or user) in which the fault 00072 occurred. 00073 00074 TrapInformation - Opaque information about the trap, interpreted by the 00075 kernel, not Mm. Needed to allow fast interlocked access 00076 to operate correctly. 00077 00078 Return Value: 00079 00080 Returns the status of the fault handling operation. Can be one of: 00081 - Success. 00082 - Access Violation. 00083 - Guard Page Violation. 00084 - In-page Error. 00085 00086 Environment: 00087 00088 Kernel mode, APCs disabled. 00089 00090 --*/ 00091 00092 { 00093 PMMPTE PointerPpe; 00094 PMMPTE PointerPde; 00095 PMMPTE PointerPte; 00096 PMMPTE PointerProtoPte; 00097 ULONG ProtectionCode; 00098 MMPTE TempPte; 00099 PEPROCESS CurrentProcess; 00100 KIRQL PreviousIrql; 00101 NTSTATUS status; 00102 ULONG ProtectCode; 00103 PFN_NUMBER PageFrameIndex; 00104 WSLE_NUMBER WorkingSetIndex; 00105 KIRQL OldIrql; 00106 PMMPFN Pfn1; 00107 PPAGE_FAULT_NOTIFY_ROUTINE NotifyRoutine; 00108 NTSTATUS SessionStatus; 00109 PEPROCESS FaultProcess; 00110 PMMSUPPORT Ws; 00111 BOOLEAN SessionAddress; 00112 PVOID UsedPageTableHandle; 00113 ULONG BarrierStamp; 00114 LOGICAL ApcNeeded; 00115 00116 #if defined(_IA64_) 00117 LOGICAL ExecutionFault = FALSE; 00118 00119 // 00120 // If StoreInstruction indicates it was an execution fault, set 00121 // ExecutionFault TRUE and StoreInstruction FALSE. 00122 // 00123 00124 if (StoreInstruction == 2) { 00125 ExecutionFault = TRUE; 00126 StoreInstruction = FALSE; 00127 } 00128 #endif 00129 00130 PointerProtoPte = NULL; 00131 00132 #if defined (_WIN64) 00133 00134 // 00135 // Perform address sanity checks. 00136 // 00137 00138 if (PreviousMode == UserMode) { 00139 00140 if (VirtualAddress >= MM_HIGHEST_USER_ADDRESS) { 00141 return STATUS_ACCESS_VIOLATION; 00142 } 00143 00144 } else { 00145 00146 if (!((VirtualAddress <= (PVOID)((ULONG_PTR)MM_HIGHEST_USER_ADDRESS + 1)) || 00147 00148 #if defined (_IA64_) 00149 00150 // 00151 // Page table pages are in the user region space for IA64. 00152 // 00153 00154 (MI_IS_PAGE_TABLE_ADDRESS(VirtualAddress)) || 00155 (MI_IS_HYPER_SPACE_ADDRESS(VirtualAddress)) || 00156 (MI_IS_SESSION_ADDRESS(VirtualAddress)) || 00157 #endif 00158 00159 ((VirtualAddress >= MM_SYSTEM_RANGE_START) && 00160 (VirtualAddress < (PVOID)MM_SYSTEM_SPACE_END)))) { 00161 00162 if (KeInvalidAccessAllowed(TrapInformation) == TRUE) { 00163 return STATUS_ACCESS_VIOLATION; 00164 } 00165 00166 KeBugCheckEx (MEMORY_MANAGEMENT, 00167 (ULONG_PTR) VirtualAddress, 00168 StoreInstruction, 00169 PreviousMode, 00170 0xdead); 00171 } 00172 } 00173 00174 #endif 00175 00176 // 00177 // Block APCs and acquire the working set mutex. 
This prevents any 00178 // changes to the address space and it prevents valid PTEs from becoming 00179 // invalid. 00180 // 00181 00182 CurrentProcess = PsGetCurrentProcess (); 00183 00184 #if DBG 00185 if (MmDebug & MM_DBG_SHOW_FAULTS) { 00186 00187 PETHREAD CurThread; 00188 00189 CurThread = PsGetCurrentThread(); 00190 DbgPrint("MM:**access fault - va %p process %p thread %p\n", 00191 VirtualAddress, CurrentProcess, CurThread); 00192 } 00193 #endif //DBG 00194 00195 PreviousIrql = KeGetCurrentIrql (); 00196 00197 // 00198 // Get the pointer to the PDE and the PTE for this page. 00199 // 00200 00201 PointerPte = MiGetPteAddress (VirtualAddress); 00202 PointerPde = MiGetPdeAddress (VirtualAddress); 00203 PointerPpe = MiGetPpeAddress (VirtualAddress); 00204 00205 #if PFN_CONSISTENCY 00206 if (PointerPte >= MiPfnStartPte && PointerPte < MiPfnStartPte + MiPfnPtes) { 00207 DbgPrint("MM: Unsynchronized access to the PFN database - va %p process %p\n", 00208 VirtualAddress, CurrentProcess); 00209 00210 KeRaiseIrql (DISPATCH_LEVEL, &OldIrql); 00211 MiMapInPfnDatabase(); 00212 MiPfnProtectionEnabled = FALSE; 00213 KeLowerIrql (OldIrql); 00214 00215 DbgBreakPoint(); 00216 } 00217 #endif 00218 00219 #if DBG 00220 if (PointerPte == MmPteHit) { 00221 DbgPrint("MM:pte hit at %p\n", MmPteHit); 00222 DbgBreakPoint(); 00223 } 00224 #endif 00225 00226 ApcNeeded = FALSE; 00227 00228 if (PreviousIrql > APC_LEVEL) { 00229 00230 // 00231 // The PFN database lock is an executive spin-lock. The pager could 00232 // get dirty faults or lock faults while servicing and it already owns 00233 // the PFN database lock. 00234 // 00235 00236 #if !defined (_WIN64) 00237 MiCheckPdeForPagedPool (VirtualAddress); 00238 #endif 00239 00240 #ifdef _X86_ 00241 if (PointerPde->u.Hard.Valid == 1) { 00242 if (PointerPde->u.Hard.LargePage == 1) { 00243 #if DBG 00244 if (MmLargePageFaultError < 10) { 00245 DbgPrint ("MM - fault on Large page %p\n", VirtualAddress); 00246 } 00247 MmLargePageFaultError += 1; 00248 #endif //DBG 00249 return STATUS_SUCCESS; 00250 } 00251 } 00252 #endif //X86 00253 00254 if ( 00255 #if defined (_WIN64) 00256 (PointerPpe->u.Hard.Valid == 0) || 00257 #endif 00258 (PointerPde->u.Hard.Valid == 0) || 00259 (PointerPte->u.Hard.Valid == 0)) { 00260 00261 KdPrint(("MM:***PAGE FAULT AT IRQL > 1 Va %p, IRQL %lx\n", 00262 VirtualAddress, 00263 PreviousIrql)); 00264 00265 // 00266 // use reserved bit to signal fatal error to trap handlers 00267 // 00268 00269 return STATUS_IN_PAGE_ERROR | 0x10000000; 00270 00271 } 00272 00273 if (StoreInstruction && (PointerPte->u.Hard.CopyOnWrite != 0)) { 00274 KdPrint(("MM:***PAGE FAULT AT IRQL > 1 Va %p, IRQL %lx\n", 00275 VirtualAddress, 00276 PreviousIrql)); 00277 00278 // 00279 // use reserved bit to signal fatal error to trap handlers 00280 // 00281 00282 return STATUS_IN_PAGE_ERROR | 0x10000000; 00283 } 00284 00285 // 00286 // The PTE is valid and accessible, another thread must 00287 // have faulted the PTE in already, or the access bit 00288 // is clear and this is a access fault; Blindly set the 00289 // access bit and dismiss the fault. 
00290 // 00291 #if DBG 00292 if (MmDebug & MM_DBG_SHOW_FAULTS) { 00293 DbgPrint("MM:no fault found - pte is %p\n", PointerPte->u.Long); 00294 } 00295 #endif //DBG 00296 00297 if (StoreInstruction) { 00298 00299 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 00300 00301 if (((PointerPte->u.Long & MM_PTE_WRITE_MASK) == 0) && 00302 ((Pfn1->OriginalPte.u.Soft.Protection & MM_READWRITE) == 0)) { 00303 00304 KeBugCheckEx (ATTEMPTED_WRITE_TO_READONLY_MEMORY, 00305 (ULONG_PTR)VirtualAddress, 00306 (ULONG_PTR)PointerPte->u.Long, 00307 (ULONG_PTR)TrapInformation, 00308 10); 00309 } 00310 } 00311 00312 MI_NO_FAULT_FOUND (TempPte, PointerPte, VirtualAddress, FALSE); 00313 return STATUS_SUCCESS; 00314 } 00315 00316 if (VirtualAddress >= MmSystemRangeStart) { 00317 00318 // 00319 // This is a fault in the system address space. User 00320 // mode access is not allowed. 00321 // 00322 00323 if (PreviousMode == UserMode) { 00324 return STATUS_ACCESS_VIOLATION; 00325 } 00326 00327 #if defined (_WIN64) 00328 if (PointerPpe->u.Hard.Valid == 0) { 00329 00330 if (KeInvalidAccessAllowed(TrapInformation) == TRUE) { 00331 return STATUS_ACCESS_VIOLATION; 00332 } 00333 00334 KeBugCheckEx (PAGE_FAULT_IN_NONPAGED_AREA, 00335 (ULONG_PTR)VirtualAddress, 00336 StoreInstruction, 00337 (ULONG_PTR)TrapInformation, 00338 5); 00339 } 00340 #endif 00341 00342 RecheckPde: 00343 00344 if (PointerPde->u.Hard.Valid == 1) { 00345 #ifdef _X86_ 00346 if (PointerPde->u.Hard.LargePage == 1) { 00347 #if DBG 00348 if (MmLargePageFaultError < 10) { 00349 DbgPrint ("MM - fault on Large page %p\n",VirtualAddress); 00350 } 00351 MmLargePageFaultError += 1; 00352 #endif //DBG 00353 return STATUS_SUCCESS; 00354 } 00355 #endif //X86 00356 00357 if (PointerPte->u.Hard.Valid == 1) { 00358 00359 // 00360 // Session space faults cannot early exit here because 00361 // it may be a copy on write which must be checked for 00362 // and handled below. 00363 // 00364 00365 if (MI_IS_SESSION_ADDRESS (VirtualAddress) == FALSE) { 00366 00367 // 00368 // Acquire the PFN lock, check to see if the address is 00369 // still valid if writable, update dirty bit. 00370 // 00371 00372 LOCK_PFN (OldIrql); 00373 TempPte = *(volatile MMPTE *)PointerPte; 00374 if (TempPte.u.Hard.Valid == 1) { 00375 00376 Pfn1 = MI_PFN_ELEMENT (TempPte.u.Hard.PageFrameNumber); 00377 00378 if ((StoreInstruction) && 00379 ((TempPte.u.Long & MM_PTE_WRITE_MASK) == 0) && 00380 ((Pfn1->OriginalPte.u.Soft.Protection & MM_READWRITE) == 0)) { 00381 00382 KeBugCheckEx (ATTEMPTED_WRITE_TO_READONLY_MEMORY, 00383 (ULONG_PTR)VirtualAddress, 00384 (ULONG_PTR)TempPte.u.Long, 00385 (ULONG_PTR)TrapInformation, 00386 11); 00387 } 00388 MI_NO_FAULT_FOUND (TempPte, PointerPte, VirtualAddress, TRUE); 00389 } 00390 UNLOCK_PFN (OldIrql); 00391 return STATUS_SUCCESS; 00392 } 00393 } 00394 #if !defined (_WIN64) 00395 else { 00396 00397 // 00398 // Handle trimmer references to paged pool PTEs where the PDE 00399 // might not be present. Only needed for 00400 // MmTrimAllSystemPagable memory. 00401 // 00402 00403 MiCheckPdeForPagedPool (VirtualAddress); 00404 TempPte = *(volatile MMPTE *)PointerPte; 00405 if (TempPte.u.Hard.Valid == 1) { 00406 return STATUS_SUCCESS; 00407 } 00408 } 00409 #endif 00410 } else { 00411 00412 // 00413 // Due to G-bits in kernel mode code, accesses to paged pool 00414 // PDEs may not fault even though the PDE is not valid. Make 00415 // sure the PDE is valid so PteFrames in the PFN database are 00416 // tracked properly. 
00417 // 00418 00419 #if defined (_WIN64) 00420 if ((VirtualAddress >= (PVOID)PTE_BASE) && (VirtualAddress < (PVOID)MiGetPteAddress (HYPER_SPACE))) { 00421 // 00422 // This is a user mode PDE entry being faulted in by the Mm 00423 // referencing the page table page. This needs to be done 00424 // with the working set lock so that the PPE validity can be 00425 // relied on throughout the fault processing. 00426 // 00427 // The case when Mm faults in PPE entries by referencing the 00428 // page directory page is correctly handled by falling through 00429 // the below code. 00430 // 00431 00432 goto UserFault; 00433 } 00434 #else 00435 MiCheckPdeForPagedPool (VirtualAddress); 00436 #endif 00437 00438 if (PointerPde->u.Hard.Valid == 0) { 00439 if (KeInvalidAccessAllowed(TrapInformation) == TRUE) { 00440 return STATUS_ACCESS_VIOLATION; 00441 } 00442 KeBugCheckEx (PAGE_FAULT_IN_NONPAGED_AREA, 00443 (ULONG_PTR)VirtualAddress, 00444 StoreInstruction, 00445 (ULONG_PTR)TrapInformation, 00446 2); 00447 return STATUS_SUCCESS; 00448 } 00449 00450 // 00451 // Now that the PDE is valid, go look at the PTE again. 00452 // 00453 00454 goto RecheckPde; 00455 } 00456 00457 if (MiHydra == TRUE) { 00458 00459 #if !defined (_WIN64) 00460 00461 // 00462 // First check to see if it's in the session space data 00463 // structures or page table pages. 00464 // 00465 00466 SessionStatus = MiCheckPdeForSessionSpace (VirtualAddress); 00467 00468 if (SessionStatus == STATUS_ACCESS_VIOLATION) { 00469 00470 // 00471 // This thread faulted on a session space access, but this 00472 // process does not have one. This could be the system 00473 // process attempting to access a working buffer passed 00474 // to it from WIN32K or a driver loaded in session space 00475 // (video, printer, etc). 00476 // 00477 // The system process which contains the worker threads 00478 // NEVER has a session space - if code accidentally queues a 00479 // worker thread that points to a session space buffer, a 00480 // fault will occur. This must be bug checked since drivers 00481 // are responsible for making sure this never occurs. 00482 // 00483 // The only exception to this is when the working set manager 00484 // attaches to a session to age or trim it. However, the 00485 // working set manager will never fault and so the bugcheck 00486 // below is always valid. Note that a worker thread can get 00487 // away with a bad access if it happens while the working set 00488 // manager is attached, but there's really no way to prevent 00489 // this case which is a driver bug anyway. 00490 // 00491 00492 if (KeInvalidAccessAllowed(TrapInformation) == TRUE) { 00493 return STATUS_ACCESS_VIOLATION; 00494 } 00495 00496 KeBugCheckEx (PAGE_FAULT_IN_NONPAGED_AREA, 00497 (ULONG_PTR)VirtualAddress, 00498 StoreInstruction, 00499 (ULONG_PTR)TrapInformation, 00500 6); 00501 } 00502 00503 #endif 00504 00505 // 00506 // Fall though to further fault handling. 00507 // 00508 00509 SessionAddress = MI_IS_SESSION_ADDRESS (VirtualAddress); 00510 } 00511 else { 00512 SessionAddress = FALSE; 00513 } 00514 00515 if (SessionAddress == TRUE || 00516 ((!MI_IS_PAGE_TABLE_ADDRESS(VirtualAddress)) && 00517 (!MI_IS_HYPER_SPACE_ADDRESS(VirtualAddress)))) { 00518 00519 if (SessionAddress == FALSE) { 00520 00521 // 00522 // Acquire system working set lock. While this lock 00523 // is held, no pages may go from valid to invalid. 00524 // 00525 // HOWEVER - transition pages may go to valid, but 00526 // may not be added to the working set list. 
This 00527 // is done in the cache manager support routines to 00528 // shortcut faults on transition prototype PTEs. 00529 // 00530 00531 if (PsGetCurrentThread() == MmSystemLockOwner) { 00532 00533 // 00534 // Recursively trying to acquire the system working set 00535 // fast mutex - cause an IRQL > 1 bug check. 00536 // 00537 00538 return STATUS_IN_PAGE_ERROR | 0x10000000; 00539 } 00540 00541 LOCK_SYSTEM_WS (PreviousIrql); 00542 } 00543 00544 // 00545 // Note that for session space the below check is done without 00546 // acquiring the session WSL lock. This is because this thread 00547 // may already own it - ie: it may be adding a page to the 00548 // session space working set and the session's working set list is 00549 // not mapped in and causes a fault. The MiCheckPdeForSessionSpace 00550 // call above will fill in the PDE and then we must check the PTE 00551 // below - if that's not present then we couldn't possibly be 00552 // holding the session WSL lock, so we'll acquire it below. 00553 // 00554 00555 #if defined (_X86PAE_) 00556 // 00557 // PAE PTEs are subject to write tearing due to the cache manager 00558 // shortcut routines that insert PTEs without acquiring the working 00559 // set lock. Synchronize here via the PFN lock. 00560 // 00561 LOCK_PFN (OldIrql); 00562 #endif 00563 TempPte = *PointerPte; 00564 #if defined (_X86PAE_) 00565 UNLOCK_PFN (OldIrql); 00566 #endif 00567 00568 // 00569 // If the PTE is valid, make sure we do not have a copy on 00570 // write. 00571 // 00572 00573 if (TempPte.u.Hard.Valid != 0) { 00574 00575 // 00576 // PTE is already valid, return. Unless it's Hydra where 00577 // kernel mode copy-on-write must be handled properly. 00578 // 00579 00580 BOOLEAN FaultHandled; 00581 00582 FaultHandled = FALSE; 00583 00584 LOCK_PFN (OldIrql); 00585 TempPte = *(volatile MMPTE *)PointerPte; 00586 if (TempPte.u.Hard.Valid == 1) { 00587 00588 Pfn1 = MI_PFN_ELEMENT (TempPte.u.Hard.PageFrameNumber); 00589 00590 if ((StoreInstruction) && 00591 (TempPte.u.Hard.CopyOnWrite == 0) && 00592 ((TempPte.u.Long & MM_PTE_WRITE_MASK) == 0) && 00593 ((Pfn1->OriginalPte.u.Soft.Protection & MM_READWRITE) == 0)) { 00594 00595 KeBugCheckEx (ATTEMPTED_WRITE_TO_READONLY_MEMORY, 00596 (ULONG_PTR)VirtualAddress, 00597 (ULONG_PTR)TempPte.u.Long, 00598 (ULONG_PTR)TrapInformation, 00599 12); 00600 } 00601 00602 // 00603 // Set the dirty bit in the PTE and the page frame. 00604 // 00605 00606 #if defined(_ALPHA_) 00607 if (SessionAddress == FALSE || (TempPte.u.Hard.Write == 1 && TempPte.u.Hard.CopyOnWrite == 0)) 00608 #else 00609 if (SessionAddress == FALSE || TempPte.u.Hard.Write == 1) 00610 #endif 00611 { 00612 FaultHandled = TRUE; 00613 MI_NO_FAULT_FOUND (TempPte, PointerPte, VirtualAddress, TRUE); 00614 } 00615 } 00616 UNLOCK_PFN (OldIrql); 00617 if (SessionAddress == FALSE) { 00618 UNLOCK_SYSTEM_WS (PreviousIrql); 00619 } 00620 if (SessionAddress == FALSE || FaultHandled == TRUE) { 00621 return STATUS_SUCCESS; 00622 } 00623 } 00624 00625 if (SessionAddress == TRUE) { 00626 00627 ASSERT (MiHydra == TRUE); 00628 00629 // 00630 // Acquire the session space working set lock. While this lock 00631 // is held, no session pages may go from valid to invalid. 00632 // 00633 00634 if (PsGetCurrentThread() == MmSessionSpace->WorkingSetLockOwner) { 00635 00636 // 00637 // Recursively trying to acquire the session working set 00638 // lock - cause an IRQL > 1 bug check. 
00639 // 00640 00641 return STATUS_IN_PAGE_ERROR | 0x10000000; 00642 } 00643 00644 LOCK_SESSION_SPACE_WS (PreviousIrql); 00645 00646 TempPte = *PointerPte; 00647 00648 // 00649 // The PTE could have become valid while we waited 00650 // for the session space working set lock. 00651 // 00652 00653 if (TempPte.u.Hard.Valid == 1) { 00654 00655 LOCK_PFN (OldIrql); 00656 TempPte = *(volatile MMPTE *)PointerPte; 00657 00658 // 00659 // Check for copy-on-write. 00660 // 00661 00662 if (TempPte.u.Hard.Valid == 1) { 00663 00664 #if defined(_ALPHA_) 00665 if (StoreInstruction && TempPte.u.Hard.CopyOnWrite == 1) 00666 #else 00667 if (StoreInstruction && TempPte.u.Hard.Write == 0) 00668 #endif 00669 { 00670 #if defined(_ALPHA_) 00671 TempPte.u.Hard.Write = 0; 00672 MI_WRITE_VALID_PTE_NEW_PROTECTION (PointerPte, TempPte); 00673 #endif 00674 00675 // 00676 // Copy on write only for loaded drivers... 00677 // 00678 00679 ASSERT (MI_IS_SESSION_IMAGE_ADDRESS (VirtualAddress)); 00680 00681 UNLOCK_PFN (OldIrql); 00682 00683 if (TempPte.u.Hard.CopyOnWrite == 0) { 00684 00685 KeBugCheckEx (ATTEMPTED_WRITE_TO_READONLY_MEMORY, 00686 (ULONG_PTR)VirtualAddress, 00687 (ULONG_PTR)TempPte.u.Long, 00688 (ULONG_PTR)TrapInformation, 00689 13); 00690 } 00691 00692 MiSessionCopyOnWrite (MmSessionSpace, 00693 VirtualAddress, 00694 PointerPte); 00695 00696 UNLOCK_SESSION_SPACE_WS (PreviousIrql); 00697 00698 return STATUS_SUCCESS; 00699 } 00700 00701 #if DBG 00702 // 00703 // If we are allowing a store, it better be writable. 00704 // 00705 00706 if (StoreInstruction) { 00707 ASSERT (TempPte.u.Hard.Write == 1); 00708 } 00709 #endif 00710 // 00711 // PTE is already valid, return. 00712 // 00713 00714 MI_NO_FAULT_FOUND (TempPte, PointerPte, VirtualAddress, TRUE); 00715 } 00716 00717 UNLOCK_PFN (OldIrql); 00718 UNLOCK_SESSION_SPACE_WS (PreviousIrql); 00719 return STATUS_SUCCESS; 00720 } 00721 } 00722 00723 if (TempPte.u.Soft.Prototype != 0) { 00724 00725 if (MmProtectFreedNonPagedPool == TRUE) { 00726 00727 PVOID StartVa; 00728 00729 if (MI_IS_PHYSICAL_ADDRESS(MmNonPagedPoolStart)) { 00730 StartVa = MmNonPagedPoolExpansionStart; 00731 } 00732 else { 00733 StartVa = MmNonPagedPoolStart; 00734 } 00735 00736 if (VirtualAddress >= StartVa && VirtualAddress < MmNonPagedPoolEnd) { 00737 // 00738 // This is an access to previously freed 00739 // non paged pool - bugcheck! 00740 // 00741 00742 if (KeInvalidAccessAllowed(TrapInformation) == TRUE) { 00743 goto AccessViolation; 00744 } 00745 00746 KeBugCheckEx (DRIVER_CAUGHT_MODIFYING_FREED_POOL, 00747 (ULONG_PTR)VirtualAddress, 00748 StoreInstruction, 00749 PreviousMode, 00750 4); 00751 } 00752 } 00753 00754 // 00755 // This is a PTE in prototype format, locate the corresponding 00756 // prototype PTE. 00757 // 00758 00759 PointerProtoPte = MiPteToProto (&TempPte); 00760 00761 if (SessionAddress == TRUE) { 00762 00763 if (TempPte.u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED) { 00764 PointerProtoPte = MiCheckVirtualAddress (VirtualAddress, 00765 &ProtectionCode); 00766 if (PointerProtoPte == NULL) { 00767 UNLOCK_SESSION_SPACE_WS (PreviousIrql); 00768 return STATUS_IN_PAGE_ERROR | 0x10000000; 00769 } 00770 } 00771 else if (TempPte.u.Proto.ReadOnly == 1) { 00772 00773 // 00774 // Writes are not allowed to this page. 00775 // 00776 00777 } else if (MI_IS_SESSION_IMAGE_ADDRESS (VirtualAddress)) { 00778 00779 // 00780 // Copy on write this page. 
00781 // 00782 00783 MI_WRITE_INVALID_PTE (PointerPte, PrototypePte); 00784 PointerPte->u.Soft.Protection = MM_EXECUTE_WRITECOPY; 00785 } 00786 } 00787 } else if ((TempPte.u.Soft.Transition == 0) && 00788 (TempPte.u.Soft.Protection == 0)) { 00789 00790 // 00791 // Page file format. If the protection is ZERO, this 00792 // is a page of free system PTEs - bugcheck! 00793 // 00794 00795 if (KeInvalidAccessAllowed(TrapInformation) == TRUE) { 00796 goto AccessViolation; 00797 } 00798 00799 KeBugCheckEx (PAGE_FAULT_IN_NONPAGED_AREA, 00800 (ULONG_PTR)VirtualAddress, 00801 StoreInstruction, 00802 (ULONG_PTR)TrapInformation, 00803 0); 00804 return STATUS_SUCCESS; 00805 } 00806 else if (TempPte.u.Soft.Protection == MM_NOACCESS) { 00807 00808 if (KeInvalidAccessAllowed(TrapInformation) == TRUE) { 00809 goto AccessViolation; 00810 } 00811 00812 KeBugCheckEx (PAGE_FAULT_IN_NONPAGED_AREA, 00813 (ULONG_PTR)VirtualAddress, 00814 StoreInstruction, 00815 (ULONG_PTR)TrapInformation, 00816 1); 00817 return STATUS_SUCCESS; 00818 } 00819 00820 #ifdef PROTECT_KSTACKS 00821 else { 00822 if (TempPte.u.Soft.Protection == MM_KSTACK_OUTSWAPPED) { 00823 00824 if (KeInvalidAccessAllowed(TrapInformation) == TRUE) { 00825 goto AccessViolation; 00826 } 00827 00828 KeBugCheckEx (PAGE_FAULT_IN_NONPAGED_AREA, 00829 (ULONG_PTR)VirtualAddress, 00830 StoreInstruction, 00831 (ULONG_PTR)TrapInformation, 00832 3); 00833 } 00834 } 00835 #endif 00836 00837 if (SessionAddress == TRUE) { 00838 00839 MM_SESSION_SPACE_WS_LOCK_ASSERT (); 00840 00841 // 00842 // If it's a write to a session space page that is ultimately 00843 // mapped by a prototype PTE, it's a copy-on-write piece of 00844 // a session driver. Since the page isn't even present yet, 00845 // turn the write access into a read access to fault it in. 00846 // We'll get a write fault on the present page when we retry 00847 // the operation at which point we'll sever the copy on write. 
00848 // 00849 00850 if (PointerProtoPte && 00851 StoreInstruction && 00852 MI_IS_SESSION_IMAGE_ADDRESS (VirtualAddress)) { 00853 StoreInstruction = 0; 00854 } 00855 00856 FaultProcess = HYDRA_PROCESS; 00857 } 00858 else { 00859 FaultProcess = NULL; 00860 00861 if (StoreInstruction) { 00862 00863 if ((TempPte.u.Hard.Valid == 0) && (PointerProtoPte == NULL)) { 00864 if (TempPte.u.Soft.Transition == 1) { 00865 00866 if ((TempPte.u.Trans.Protection & MM_READWRITE) == 0) { 00867 KeBugCheckEx (ATTEMPTED_WRITE_TO_READONLY_MEMORY, 00868 (ULONG_PTR)VirtualAddress, 00869 (ULONG_PTR)TempPte.u.Long, 00870 (ULONG_PTR)TrapInformation, 00871 14); 00872 } 00873 } 00874 else { 00875 if ((TempPte.u.Soft.Protection & MM_READWRITE) == 0) { 00876 00877 KeBugCheckEx (ATTEMPTED_WRITE_TO_READONLY_MEMORY, 00878 (ULONG_PTR)VirtualAddress, 00879 (ULONG_PTR)TempPte.u.Long, 00880 (ULONG_PTR)TrapInformation, 00881 15); 00882 } 00883 } 00884 } 00885 } 00886 } 00887 00888 status = MiDispatchFault (StoreInstruction, 00889 VirtualAddress, 00890 PointerPte, 00891 PointerProtoPte, 00892 FaultProcess, 00893 &ApcNeeded); 00894 00895 ASSERT (ApcNeeded == FALSE); 00896 ASSERT (KeGetCurrentIrql() == APC_LEVEL); 00897 00898 if (SessionAddress == TRUE) { 00899 Ws = &MmSessionSpace->Vm; 00900 PageFrameIndex = Ws->PageFaultCount; 00901 MM_SESSION_SPACE_WS_LOCK_ASSERT(); 00902 } 00903 else { 00904 Ws = &MmSystemCacheWs; 00905 PageFrameIndex = MmSystemCacheWs.PageFaultCount; 00906 } 00907 00908 if (Ws->AllowWorkingSetAdjustment == MM_GROW_WSLE_HASH) { 00909 MiGrowWsleHash (Ws); 00910 LOCK_EXPANSION_IF_ALPHA (OldIrql); 00911 Ws->AllowWorkingSetAdjustment = TRUE; 00912 UNLOCK_EXPANSION_IF_ALPHA (OldIrql); 00913 } 00914 00915 if (SessionAddress == TRUE) { 00916 UNLOCK_SESSION_SPACE_WS (PreviousIrql); 00917 } 00918 else { 00919 UNLOCK_SYSTEM_WS (PreviousIrql); 00920 } 00921 00922 if ((PageFrameIndex & 0x3FFFF) == 0x30000) { 00923 00924 // 00925 // The system cache or this session is taking too many faults, 00926 // delay execution so the modified page writer gets a quick 00927 // shot and increase the working set size. 00928 // 00929 00930 KeDelayExecutionThread (KernelMode, FALSE, &MmShortTime); 00931 } 00932 NotifyRoutine = MmPageFaultNotifyRoutine; 00933 if (NotifyRoutine) { 00934 if (status != STATUS_SUCCESS) { 00935 (*NotifyRoutine) ( 00936 status, 00937 VirtualAddress, 00938 TrapInformation 00939 ); 00940 } 00941 } 00942 return status; 00943 } else { 00944 #if !defined (_WIN64) 00945 if (MiCheckPdeForPagedPool (VirtualAddress) == STATUS_WAIT_1) { 00946 return STATUS_SUCCESS; 00947 } 00948 #endif 00949 } 00950 } 00951 00952 #if defined (_WIN64) 00953 UserFault: 00954 #endif 00955 00956 if (MiDelayPageFaults || 00957 ((MmModifiedPageListHead.Total >= (MmModifiedPageMaximum + 100)) && 00958 (MmAvailablePages < (1024*1024 / PAGE_SIZE)) && 00959 (CurrentProcess->ModifiedPageCount > ((64*1024)/PAGE_SIZE)))) { 00960 00961 // 00962 // This process has placed more than 64k worth of pages on the modified 00963 // list. Delay for a short period and set the count to zero. 00964 // 00965 00966 KeDelayExecutionThread (KernelMode, 00967 FALSE, 00968 (CurrentProcess->Pcb.BasePriority < PROCESS_FOREGROUND_PRIORITY) ? 00969 &MmHalfSecond : &Mm30Milliseconds); 00970 CurrentProcess->ModifiedPageCount = 0; 00971 } 00972 00973 // 00974 // FAULT IN USER SPACE OR PAGE DIRECTORY/PAGE TABLE PAGES. 00975 // 00976 00977 // 00978 // Block APCs and acquire the working set lock. 
00979 // 00980 00981 LOCK_WS (CurrentProcess); 00982 00983 #if defined (_WIN64) 00984 00985 // 00986 // Locate the Page Directory Parent Entry which maps this virtual 00987 // address and check for accessibility and validity. The page directory 00988 // page must be made valid before any other checks are made. 00989 // 00990 00991 if (PointerPpe->u.Hard.Valid == 0) { 00992 00993 // 00994 // If the PPE is zero, check to see if there is a virtual address 00995 // mapped at this location, and if so create the necessary 00996 // structures to map it. 00997 // 00998 00999 if ((PointerPpe->u.Long == MM_ZERO_PTE) || 01000 (PointerPpe->u.Long == MM_ZERO_KERNEL_PTE)) { 01001 PointerProtoPte = MiCheckVirtualAddress (VirtualAddress, 01002 &ProtectCode); 01003 01004 #ifdef LARGE_PAGES 01005 if (ProtectCode == MM_LARGE_PAGES) { 01006 status = STATUS_SUCCESS; 01007 goto ReturnStatus2; 01008 } 01009 #endif //LARGE_PAGES 01010 01011 if (ProtectCode == MM_NOACCESS) { 01012 status = STATUS_ACCESS_VIOLATION; 01013 // MiCheckPpeForPagedPool (VirtualAddress); 01014 if (PointerPpe->u.Hard.Valid == 1) { 01015 status = STATUS_SUCCESS; 01016 } 01017 01018 #if DBG 01019 if ((MmDebug & MM_DBG_STOP_ON_ACCVIO) && 01020 (status == STATUS_ACCESS_VIOLATION)) { 01021 DbgPrint("MM:access violation - %p\n",VirtualAddress); 01022 MiFormatPte(PointerPpe); 01023 DbgBreakPoint(); 01024 } 01025 #endif //DEBUG 01026 01027 goto ReturnStatus2; 01028 01029 } else { 01030 01031 // 01032 // Build a demand zero PPE and operate on it. 01033 // 01034 01035 *PointerPpe = DemandZeroPde; 01036 } 01037 } 01038 01039 // 01040 // The PPE is not valid, call the page fault routine passing 01041 // in the address of the PPE. If the PPE is valid, determine 01042 // the status of the corresponding PDE. 01043 // 01044 // Note this call may result in ApcNeeded getting set to TRUE. 01045 // This is deliberate as there may be another call to MiDispatchFault 01046 // issued later in this routine and we don't want to lose the APC 01047 // status. 01048 // 01049 01050 status = MiDispatchFault (TRUE, //page table page always written 01051 PointerPde, //Virtual address 01052 PointerPpe, // PTE (PPE in this case) 01053 NULL, 01054 CurrentProcess, 01055 &ApcNeeded); 01056 01057 #if DBG 01058 if (ApcNeeded == TRUE) { 01059 ASSERT (PsGetCurrentThread()->NestedFaultCount == 0); 01060 ASSERT (PsGetCurrentThread()->ApcNeeded == 0); 01061 } 01062 #endif 01063 01064 ASSERT (KeGetCurrentIrql() == APC_LEVEL); 01065 if (PointerPpe->u.Hard.Valid == 0) { 01066 01067 // 01068 // The PPE is not valid, return the status. 01069 // 01070 goto ReturnStatus1; 01071 } 01072 01073 #if PFN_CONSISTENCY 01074 { 01075 PMMPFN Pfn1; 01076 01077 LOCK_PFN (OldIrql); 01078 Pfn1 = MI_PFN_ELEMENT (PointerPpe->u.Hard.PageFrameNumber); 01079 Pfn1->u3.e1.PageTablePage = 1; 01080 UNLOCK_PFN (OldIrql); 01081 } 01082 #endif 01083 //KeFillEntryTb ((PHARDWARE_PTE)PointerPpe, (PVOID)PointerPde, TRUE); 01084 01085 MI_SET_PAGE_DIRTY (PointerPpe, PointerPde, FALSE); 01086 01087 // 01088 // Now that the PPE is accessible, get the PDE - let this fall 01089 // through. 01090 // 01091 } 01092 #endif 01093 01094 // 01095 // Locate the Page Directory Entry which maps this virtual 01096 // address and check for accessibility and validity. 01097 // 01098 01099 // 01100 // Check to see if the page table page (PDE entry) is valid. 01101 // If not, the page table page must be made valid first. 
01102 // 01103 01104 if (PointerPde->u.Hard.Valid == 0) { 01105 01106 // 01107 // If the PDE is zero, check to see if there is a virtual address 01108 // mapped at this location, and if so create the necessary 01109 // structures to map it. 01110 // 01111 01112 if ((PointerPde->u.Long == MM_ZERO_PTE) || 01113 (PointerPde->u.Long == MM_ZERO_KERNEL_PTE)) { 01114 PointerProtoPte = MiCheckVirtualAddress (VirtualAddress, 01115 &ProtectCode); 01116 01117 #ifdef LARGE_PAGES 01118 if (ProtectCode == MM_LARGE_PAGES) { 01119 status = STATUS_SUCCESS; 01120 goto ReturnStatus2; 01121 } 01122 #endif //LARGE_PAGES 01123 01124 if (ProtectCode == MM_NOACCESS) { 01125 status = STATUS_ACCESS_VIOLATION; 01126 #if !defined (_WIN64) 01127 MiCheckPdeForPagedPool (VirtualAddress); 01128 #endif 01129 01130 if (PointerPde->u.Hard.Valid == 1) { 01131 status = STATUS_SUCCESS; 01132 } 01133 01134 #if DBG 01135 if ((MmDebug & MM_DBG_STOP_ON_ACCVIO) && 01136 (status == STATUS_ACCESS_VIOLATION)) { 01137 DbgPrint("MM:access violation - %p\n",VirtualAddress); 01138 MiFormatPte(PointerPde); 01139 DbgBreakPoint(); 01140 } 01141 #endif //DEBUG 01142 01143 goto ReturnStatus2; 01144 01145 } 01146 01147 // 01148 // Build a demand zero PDE and operate on it. 01149 // 01150 01151 MI_WRITE_INVALID_PTE (PointerPde, DemandZeroPde); 01152 01153 #if defined (_WIN64) 01154 01155 // 01156 // Increment the count of non-zero page directory entries for this 01157 // page directory. 01158 // 01159 01160 if (VirtualAddress <= MM_HIGHEST_USER_ADDRESS) { 01161 UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (PointerPte); 01162 MI_INCREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle); 01163 } 01164 #endif 01165 01166 } 01167 01168 // 01169 // The PDE is not valid, call the page fault routine passing 01170 // in the address of the PDE. If the PDE is valid, determine 01171 // the status of the corresponding PTE. 01172 // 01173 01174 status = MiDispatchFault (TRUE, //page table page always written 01175 PointerPte, //Virtual address 01176 PointerPde, // PTE (PDE in this case) 01177 NULL, 01178 CurrentProcess, 01179 &ApcNeeded); 01180 01181 #if DBG 01182 if (ApcNeeded == TRUE) { 01183 ASSERT (PsGetCurrentThread()->NestedFaultCount == 0); 01184 ASSERT (PsGetCurrentThread()->ApcNeeded == 0); 01185 } 01186 #endif 01187 01188 ASSERT (KeGetCurrentIrql() == APC_LEVEL); 01189 if (PointerPde->u.Hard.Valid == 0) { 01190 01191 // 01192 // The PDE is not valid, return the status. 01193 // 01194 goto ReturnStatus1; 01195 } 01196 01197 #if PFN_CONSISTENCY 01198 { 01199 PMMPFN Pfn1; 01200 01201 LOCK_PFN (OldIrql); 01202 Pfn1 = MI_PFN_ELEMENT (PointerPde->u.Hard.PageFrameNumber); 01203 Pfn1->u3.e1.PageTablePage = 1; 01204 UNLOCK_PFN (OldIrql); 01205 } 01206 #endif 01207 //KeFillEntryTb ((PHARDWARE_PTE)PointerPde, (PVOID)PointerPte, TRUE); 01208 01209 MI_SET_PAGE_DIRTY (PointerPde, PointerPte, FALSE); 01210 01211 // 01212 // Now that the PDE is accessible, get the PTE - let this fall 01213 // through. 01214 // 01215 } 01216 01217 // 01218 // The PDE is valid and accessible, get the PTE contents. 01219 // 01220 01221 TempPte = *PointerPte; 01222 if (TempPte.u.Hard.Valid != 0) { 01223 01224 // 01225 // The PTE is valid and accessible, is this a write fault 01226 // copy on write or setting of some dirty bit? 01227 // 01228 01229 #if DBG 01230 if (MmDebug & MM_DBG_PTE_UPDATE) { 01231 MiFormatPte(PointerPte); 01232 } 01233 #endif //DBG 01234 01235 status = STATUS_SUCCESS; 01236 01237 if (StoreInstruction) { 01238 01239 // 01240 // This was a write operation. 
If the copy on write 01241 // bit is set in the PTE perform the copy on write, 01242 // else check to ensure write access to the PTE. 01243 // 01244 01245 if (TempPte.u.Hard.CopyOnWrite != 0) { 01246 MiCopyOnWrite (VirtualAddress, PointerPte); 01247 status = STATUS_PAGE_FAULT_COPY_ON_WRITE; 01248 goto ReturnStatus2; 01249 01250 } else { 01251 if (TempPte.u.Hard.Write == 0) { 01252 status = STATUS_ACCESS_VIOLATION; 01253 } 01254 } 01255 #if defined(_IA64_) 01256 } else if (ExecutionFault) { 01257 01258 // 01259 // It also checks to ensure execute access to the PTE. 01260 // 01261 01262 if (TempPte.u.Hard.Execute == 0) { 01263 status = STATUS_ACCESS_VIOLATION; 01264 } 01265 #endif 01266 #if DBG 01267 } else { 01268 01269 // 01270 // The PTE is valid and accessible, another thread must 01271 // have faulted the PTE in already, or the access bit 01272 // is clear and this is a access fault; Blindly set the 01273 // access bit and dismiss the fault. 01274 // 01275 01276 if (MmDebug & MM_DBG_SHOW_FAULTS) { 01277 DbgPrint("MM:no fault found - pte is %p\n", PointerPte->u.Long); 01278 } 01279 #endif //DBG 01280 } 01281 01282 if (status == STATUS_SUCCESS) { 01283 LOCK_PFN (OldIrql); 01284 if (PointerPte->u.Hard.Valid != 0) { 01285 MI_NO_FAULT_FOUND (TempPte, PointerPte, VirtualAddress, TRUE); 01286 } 01287 UNLOCK_PFN (OldIrql); 01288 } 01289 01290 goto ReturnStatus2; 01291 } 01292 01293 // 01294 // If the PTE is zero, check to see if there is a virtual address 01295 // mapped at this location, and if so create the necessary 01296 // structures to map it. 01297 // 01298 01299 // 01300 // Check explicitly for demand zero pages. 01301 // 01302 01303 if (TempPte.u.Long == MM_DEMAND_ZERO_WRITE_PTE) { 01304 MiResolveDemandZeroFault (VirtualAddress, 01305 PointerPte, 01306 CurrentProcess, 01307 0); 01308 01309 status = STATUS_PAGE_FAULT_DEMAND_ZERO; 01310 goto ReturnStatus1; 01311 } 01312 01313 if ((TempPte.u.Long == MM_ZERO_PTE) || 01314 (TempPte.u.Long == MM_ZERO_KERNEL_PTE)) { 01315 01316 // 01317 // PTE is needs to be evaluated with respect to its virtual 01318 // address descriptor (VAD). At this point there are 3 01319 // possibilities, bogus address, demand zero, or refers to 01320 // a prototype PTE. 01321 // 01322 01323 PointerProtoPte = MiCheckVirtualAddress (VirtualAddress, 01324 &ProtectionCode); 01325 if (ProtectionCode == MM_NOACCESS) { 01326 status = STATUS_ACCESS_VIOLATION; 01327 01328 // 01329 // Check to make sure this is not a page table page for 01330 // paged pool which needs extending. 01331 // 01332 01333 #if !defined (_WIN64) 01334 MiCheckPdeForPagedPool (VirtualAddress); 01335 #endif 01336 01337 if (PointerPte->u.Hard.Valid == 1) { 01338 status = STATUS_SUCCESS; 01339 } 01340 01341 #if DBG 01342 if ((MmDebug & MM_DBG_STOP_ON_ACCVIO) && 01343 (status == STATUS_ACCESS_VIOLATION)) { 01344 DbgPrint("MM:access vio - %p\n",VirtualAddress); 01345 MiFormatPte(PointerPte); 01346 DbgBreakPoint(); 01347 } 01348 #endif //DEBUG 01349 goto ReturnStatus2; 01350 } 01351 01352 // 01353 // Increment the count of non-zero page table entries for this 01354 // page table. 01355 // 01356 01357 if (VirtualAddress <= MM_HIGHEST_USER_ADDRESS) { 01358 UsedPageTableHandle = MI_GET_USED_PTES_HANDLE (VirtualAddress); 01359 MI_INCREMENT_USED_PTES_BY_HANDLE (UsedPageTableHandle); 01360 } 01361 01362 // 01363 // Is this page a guard page? 01364 // 01365 01366 if (ProtectionCode & MM_GUARD_PAGE) { 01367 01368 // 01369 // This is a guard page exception. 
01370 // 01371 01372 PointerPte->u.Soft.Protection = ProtectionCode & ~MM_GUARD_PAGE; 01373 01374 if (PointerProtoPte != NULL) { 01375 01376 // 01377 // This is a prototype PTE, build the PTE to not 01378 // be a guard page. 01379 // 01380 01381 PointerPte->u.Soft.PageFileHigh = MI_PTE_LOOKUP_NEEDED; 01382 PointerPte->u.Soft.Prototype = 1; 01383 } 01384 01385 UNLOCK_WS (CurrentProcess); 01386 ASSERT (KeGetCurrentIrql() == PreviousIrql); 01387 01388 if (ApcNeeded == TRUE) { 01389 ASSERT (PsGetCurrentThread()->NestedFaultCount == 0); 01390 ASSERT (PsGetCurrentThread()->ApcNeeded == 0); 01391 ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL); 01392 KeRaiseIrql (APC_LEVEL, &PreviousIrql); 01393 IoRetryIrpCompletions (); 01394 KeLowerIrql (PreviousIrql); 01395 } 01396 01397 return MiCheckForUserStackOverflow (VirtualAddress); 01398 } 01399 01400 if (PointerProtoPte == NULL) { 01401 01402 //ASSERT (KeReadStateMutant (&CurrentProcess->WorkingSetLock) == 0); 01403 01404 // 01405 // Assert that this is not for a PDE. 01406 // 01407 01408 if (PointerPde == MiGetPdeAddress(PTE_BASE)) { 01409 01410 // 01411 // This PTE is really a PDE, set contents as such. 01412 // 01413 01414 MI_WRITE_INVALID_PTE (PointerPte, DemandZeroPde); 01415 } else { 01416 PointerPte->u.Soft.Protection = ProtectionCode; 01417 } 01418 01419 LOCK_PFN (OldIrql); 01420 01421 // 01422 // If a fork operation is in progress and the faulting thread 01423 // is not the thread performing the fork operation, block until 01424 // the fork is completed. 01425 // 01426 01427 if ((CurrentProcess->ForkInProgress != NULL) && 01428 (CurrentProcess->ForkInProgress != PsGetCurrentThread())) { 01429 MiWaitForForkToComplete (CurrentProcess); 01430 status = STATUS_SUCCESS; 01431 UNLOCK_PFN (OldIrql); 01432 goto ReturnStatus1; 01433 } 01434 01435 if (!MiEnsureAvailablePageOrWait (CurrentProcess, 01436 VirtualAddress)) { 01437 01438 ULONG Color; 01439 Color = MI_PAGE_COLOR_VA_PROCESS (VirtualAddress, 01440 &CurrentProcess->NextPageColor); 01441 PageFrameIndex = MiRemoveZeroPageIfAny (Color); 01442 if (PageFrameIndex == 0) { 01443 PageFrameIndex = MiRemoveAnyPage (Color); 01444 UNLOCK_PFN (OldIrql); 01445 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 01446 MiZeroPhysicalPage (PageFrameIndex, Color); 01447 01448 // 01449 // Note the stamping must occur after the page is zeroed. 01450 // 01451 01452 MI_BARRIER_STAMP_ZEROED_PAGE (&Pfn1->PteFrame); 01453 01454 LOCK_PFN (OldIrql); 01455 } 01456 01457 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 01458 01459 CurrentProcess->NumberOfPrivatePages += 1; 01460 MmInfoCounters.DemandZeroCount += 1; 01461 01462 // 01463 // This barrier check is needed after zeroing the page and 01464 // before setting the PTE valid. 01465 // Capture it now, check it at the last possible moment. 01466 // 01467 01468 BarrierStamp = (ULONG)Pfn1->PteFrame; 01469 01470 MiInitializePfn (PageFrameIndex, PointerPte, 1); 01471 01472 UNLOCK_PFN (OldIrql); 01473 01474 // 01475 // As this page is demand zero, set the modified bit in the 01476 // PFN database element and set the dirty bit in the PTE. 
01477 // 01478 01479 #if PFN_CONSISTENCY 01480 if (PointerPde == MiGetPdeAddress(PTE_BASE)) { 01481 LOCK_PFN (OldIrql); 01482 Pfn1->u3.e1.PageTablePage = 1; 01483 UNLOCK_PFN (OldIrql); 01484 } 01485 #endif 01486 01487 MI_MAKE_VALID_PTE (TempPte, 01488 PageFrameIndex, 01489 PointerPte->u.Soft.Protection, 01490 PointerPte); 01491 01492 if (TempPte.u.Hard.Write != 0) { 01493 MI_SET_PTE_DIRTY (TempPte); 01494 } 01495 01496 MI_BARRIER_SYNCHRONIZE (BarrierStamp); 01497 01498 MI_WRITE_VALID_PTE (PointerPte, TempPte); 01499 01500 ASSERT (Pfn1->u1.Event == 0); 01501 01502 CONSISTENCY_LOCK_PFN (OldIrql); 01503 01504 Pfn1->u1.Event = (PVOID)PsGetCurrentThread(); 01505 01506 CONSISTENCY_UNLOCK_PFN (OldIrql); 01507 01508 WorkingSetIndex = MiLocateAndReserveWsle (&CurrentProcess->Vm); 01509 MiUpdateWsle (&WorkingSetIndex, 01510 VirtualAddress, 01511 MmWorkingSetList, 01512 Pfn1); 01513 01514 MI_SET_PTE_IN_WORKING_SET (PointerPte, WorkingSetIndex); 01515 01516 KeFillEntryTb ((PHARDWARE_PTE)PointerPte, 01517 VirtualAddress, 01518 FALSE); 01519 } else { 01520 UNLOCK_PFN (OldIrql); 01521 } 01522 01523 status = STATUS_PAGE_FAULT_DEMAND_ZERO; 01524 goto ReturnStatus1; 01525 01526 } else { 01527 01528 // 01529 // This is a prototype PTE. 01530 // 01531 01532 if (ProtectionCode == MM_UNKNOWN_PROTECTION) { 01533 01534 // 01535 // The protection field is stored in the prototype PTE. 01536 // 01537 01538 PointerPte->u.Long = MiProtoAddressForPte (PointerProtoPte); 01539 01540 } else { 01541 01542 MI_WRITE_INVALID_PTE (PointerPte, PrototypePte); 01543 PointerPte->u.Soft.Protection = ProtectionCode; 01544 } 01545 TempPte = *PointerPte; 01546 } 01547 01548 } else { 01549 01550 // 01551 // The PTE is non-zero and not valid, see if it is a prototype PTE. 01552 // 01553 01554 ProtectionCode = MI_GET_PROTECTION_FROM_SOFT_PTE(&TempPte); 01555 01556 if (TempPte.u.Soft.Prototype != 0) { 01557 if (TempPte.u.Soft.PageFileHigh == MI_PTE_LOOKUP_NEEDED) { 01558 #if DBG 01559 MmProtoPteVadLookups += 1; 01560 #endif //DBG 01561 PointerProtoPte = MiCheckVirtualAddress (VirtualAddress, 01562 &ProtectCode); 01563 if (PointerProtoPte == NULL) { 01564 status = STATUS_ACCESS_VIOLATION; 01565 goto ReturnStatus1; 01566 } 01567 01568 } else { 01569 #if DBG 01570 MmProtoPteDirect += 1; 01571 #endif //DBG 01572 01573 // 01574 // Protection is in the prototype PTE, indicate an 01575 // access check should not be performed on the current PTE. 01576 // 01577 01578 PointerProtoPte = MiPteToProto (&TempPte); 01579 ProtectionCode = MM_UNKNOWN_PROTECTION; 01580 01581 // 01582 // Check to see if the proto protection has been overridden. 
01583 // 01584 01585 if (TempPte.u.Proto.ReadOnly != 0) { 01586 ProtectionCode = MM_READONLY; 01587 } 01588 } 01589 } 01590 } 01591 01592 if (ProtectionCode != MM_UNKNOWN_PROTECTION) { 01593 status = MiAccessCheck (PointerPte, 01594 StoreInstruction, 01595 PreviousMode, 01596 ProtectionCode, 01597 FALSE ); 01598 01599 if (status != STATUS_SUCCESS) { 01600 #if DBG 01601 if ((MmDebug & MM_DBG_STOP_ON_ACCVIO) && (status == STATUS_ACCESS_VIOLATION)) { 01602 DbgPrint("MM:access violate - %p\n",VirtualAddress); 01603 MiFormatPte(PointerPte); 01604 DbgBreakPoint(); 01605 } 01606 #endif //DEBUG 01607 01608 UNLOCK_WS (CurrentProcess); 01609 ASSERT (KeGetCurrentIrql() == PreviousIrql); 01610 01611 if (ApcNeeded == TRUE) { 01612 ASSERT (PsGetCurrentThread()->NestedFaultCount == 0); 01613 ASSERT (PsGetCurrentThread()->ApcNeeded == 0); 01614 ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL); 01615 KeRaiseIrql (APC_LEVEL, &PreviousIrql); 01616 IoRetryIrpCompletions (); 01617 KeLowerIrql (PreviousIrql); 01618 } 01619 01620 // 01621 // Check to see if this is a guard page violation 01622 // and if so, should the user's stack be extended. 01623 // 01624 01625 if (status == STATUS_GUARD_PAGE_VIOLATION) { 01626 return MiCheckForUserStackOverflow (VirtualAddress); 01627 } 01628 01629 return status; 01630 } 01631 } 01632 01633 // 01634 // This is a page fault, invoke the page fault handler. 01635 // 01636 01637 if (PointerProtoPte != NULL) { 01638 01639 // 01640 // Lock page containing prototype PTEs in memory by 01641 // incrementing the reference count for the page. 01642 // 01643 01644 01645 if (!MI_IS_PHYSICAL_ADDRESS(PointerProtoPte)) { 01646 PointerPde = MiGetPteAddress (PointerProtoPte); 01647 LOCK_PFN (OldIrql); 01648 if (PointerPde->u.Hard.Valid == 0) { 01649 MiMakeSystemAddressValidPfn (PointerProtoPte); 01650 } 01651 Pfn1 = MI_PFN_ELEMENT (PointerPde->u.Hard.PageFrameNumber); 01652 MI_ADD_LOCKED_PAGE_CHARGE(Pfn1, 2); 01653 Pfn1->u3.e2.ReferenceCount += 1; 01654 ASSERT (Pfn1->u3.e2.ReferenceCount > 1); 01655 UNLOCK_PFN (OldIrql); 01656 } 01657 } 01658 status = MiDispatchFault (StoreInstruction, 01659 VirtualAddress, 01660 PointerPte, 01661 PointerProtoPte, 01662 CurrentProcess, 01663 &ApcNeeded); 01664 01665 #if DBG 01666 if (ApcNeeded == TRUE) { 01667 ASSERT (PsGetCurrentThread()->NestedFaultCount == 0); 01668 ASSERT (PsGetCurrentThread()->ApcNeeded == 0); 01669 } 01670 #endif 01671 01672 if (PointerProtoPte != NULL) { 01673 01674 // 01675 // Unlock page containing prototype PTEs. 
01676 // 01677 01678 if (!MI_IS_PHYSICAL_ADDRESS(PointerProtoPte)) { 01679 LOCK_PFN (OldIrql); 01680 ASSERT (Pfn1->u3.e2.ReferenceCount > 1); 01681 MI_REMOVE_LOCKED_PAGE_CHARGE(Pfn1, 3); 01682 Pfn1->u3.e2.ReferenceCount -= 1; 01683 UNLOCK_PFN (OldIrql); 01684 } 01685 } 01686 01687 ReturnStatus1: 01688 01689 ASSERT (KeGetCurrentIrql() <= APC_LEVEL); 01690 if (CurrentProcess->Vm.AllowWorkingSetAdjustment == MM_GROW_WSLE_HASH) { 01691 MiGrowWsleHash (&CurrentProcess->Vm); 01692 LOCK_EXPANSION_IF_ALPHA (OldIrql); 01693 CurrentProcess->Vm.AllowWorkingSetAdjustment = TRUE; 01694 UNLOCK_EXPANSION_IF_ALPHA (OldIrql); 01695 } 01696 01697 ReturnStatus2: 01698 01699 PageFrameIndex = CurrentProcess->Vm.WorkingSetSize - CurrentProcess->Vm.MinimumWorkingSetSize; 01700 01701 UNLOCK_WS (CurrentProcess); 01702 ASSERT (KeGetCurrentIrql() == PreviousIrql); 01703 01704 if (ApcNeeded == TRUE) { 01705 ASSERT (PsGetCurrentThread()->NestedFaultCount == 0); 01706 ASSERT (PsGetCurrentThread()->ApcNeeded == 0); 01707 ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL); 01708 KeRaiseIrql (APC_LEVEL, &PreviousIrql); 01709 IoRetryIrpCompletions (); 01710 KeLowerIrql (PreviousIrql); 01711 } 01712 01713 if (MmAvailablePages < MmMoreThanEnoughFreePages) { 01714 01715 if (((SPFN_NUMBER)PageFrameIndex > 100) && 01716 (PsGetCurrentThread()->Tcb.Priority >= LOW_REALTIME_PRIORITY)) { 01717 01718 // 01719 // This thread is realtime and is well over the process' 01720 // working set minimum. Delay execution so the trimmer & the 01721 // modified page writer get a quick shot at making pages. 01722 // 01723 01724 KeDelayExecutionThread (KernelMode, FALSE, &MmShortTime); 01725 } 01726 } 01727 01728 NotifyRoutine = MmPageFaultNotifyRoutine; 01729 if (NotifyRoutine) { 01730 if (status != STATUS_SUCCESS) { 01731 (*NotifyRoutine) ( 01732 status, 01733 VirtualAddress, 01734 TrapInformation 01735 ); 01736 } 01737 } 01738 01739 return status; 01740 01741 AccessViolation: 01742 if (SessionAddress == TRUE) { 01743 UNLOCK_SESSION_SPACE_WS (PreviousIrql); 01744 } 01745 else { 01746 UNLOCK_SYSTEM_WS (PreviousIrql); 01747 } 01748 return STATUS_ACCESS_VIOLATION; 01749 } }
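For orientation, a minimal sketch of how a caller might act on the status values documented above. This is not the real architecture-specific trap dispatch: the SketchHandleAccessFault wrapper and the KiRaiseMemoryException helper are hypothetical placeholders, and only the MmAccessFault call itself follows the argument order given in the documentation block above.

VOID
SketchHandleAccessFault (
    IN BOOLEAN StoreInstruction,
    IN PVOID VirtualAddress,
    IN KPROCESSOR_MODE PreviousMode,
    IN PVOID TrapInformation
    )
{
    NTSTATUS Status;

    //
    // Let the memory manager try to resolve the fault (make the PTE valid,
    // set the dirty bit, perform copy on write, etc.).
    //

    Status = MmAccessFault (StoreInstruction,
                            VirtualAddress,
                            PreviousMode,
                            TrapInformation);

    if (NT_SUCCESS (Status)) {

        //
        // Fault resolved (including the informational demand-zero and
        // copy-on-write statuses) - resume the faulting instruction.
        //

        return;
    }

    //
    // Access violation, guard page violation or in-page error - reflect
    // the failure back to the thread as an exception (placeholder call).
    //

    KiRaiseMemoryException (Status, VirtualAddress, StoreInstruction);
}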

NTKERNELAPI NTSTATUS MmAddPhysicalMemory (IN PPHYSICAL_ADDRESS StartAddress,
                                          IN OUT PLARGE_INTEGER NumberOfBytes)

Definition at line 43 of file dynmem.c.

References ASSERT, ASSERT64, _PHYSICAL_MEMORY_RUN::BasePage, BYTE_OFFSET, ExAllocatePoolWithTag, ExFreePool(), FALSE, FreePageList, LOCK_PFN, MI_GET_PAGE_COLOR_FROM_PTE, MI_IS_PHYSICAL_ADDRESS, MI_PFN_ELEMENT, MiGetPteAddress, MiInitializePfn(), MiInsertPageInList(), MiRemoveZeroPage(), MmAvailablePages, MmChargeCommitmentLock, MmDynamicMemoryMutex, MmDynamicPfn, MmHighestPhysicalPage, MmHighestPossiblePhysicalPage, MmNumberOfPhysicalPages, MmPageLocationList, MmPfnDatabase, MmPhysicalMemoryBlock, MmResidentAvailablePages, MmTotalCommitLimit, MmTotalCommitLimitMaximum, MmTotalCommittedPages, NonPagedPool, NULL, _PHYSICAL_MEMORY_DESCRIPTOR::NumberOfPages, _PHYSICAL_MEMORY_DESCRIPTOR::NumberOfRuns, _MMPFN::OriginalPte, PAGE_SHIFT, PAGE_SIZE, _PHYSICAL_MEMORY_RUN::PageCount, PASSIVE_LEVEL, PFN_REMOVED, PMMPTE, PTE_SHIFT, _MMPFN::PteAddress, _MMPFN::PteFrame, _PHYSICAL_MEMORY_DESCRIPTOR::Run, TRUE, _MMPTE::u, _MMPFN::u2, _MMPFN::u3, UNLOCK_PFN, ValidKernelPte, and ZeroKernelPte.

00050 : 00051 00052 This routine adds the specified physical address range to the system. 00053 This includes initializing PFN database entries and adding it to the 00054 freelists. 00055 00056 Arguments: 00057 00058 StartAddress - Supplies the starting physical address. 00059 00060 NumberOfBytes - Supplies a pointer to the number of bytes being added. 00061 If any bytes were added (ie: STATUS_SUCCESS is being 00062 returned), the actual amount is returned here. 00063 00064 Return Value: 00065 00066 NTSTATUS. 00067 00068 Environment: 00069 00070 Kernel mode. PASSIVE level. No locks held. 00071 00072 --*/ 00073 00074 { 00075 ULONG i; 00076 PMMPFN Pfn1; 00077 KIRQL OldIrql; 00078 LOGICAL Inserted; 00079 LOGICAL Updated; 00080 MMPTE TempPte; 00081 PMMPTE PointerPte; 00082 PMMPTE LastPte; 00083 PFN_NUMBER NumberOfPages; 00084 PFN_NUMBER start; 00085 PFN_NUMBER count; 00086 PFN_NUMBER StartPage; 00087 PFN_NUMBER EndPage; 00088 PFN_NUMBER PageFrameIndex; 00089 PFN_NUMBER Page; 00090 PFN_NUMBER LastPage; 00091 PFN_COUNT PagesNeeded; 00092 PPHYSICAL_MEMORY_DESCRIPTOR OldPhysicalMemoryBlock; 00093 PPHYSICAL_MEMORY_DESCRIPTOR NewPhysicalMemoryBlock; 00094 PPHYSICAL_MEMORY_RUN NewRun; 00095 LOGICAL PfnDatabaseIsPhysical; 00096 00097 ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL); 00098 00099 ASSERT (BYTE_OFFSET(NumberOfBytes->LowPart) == 0); 00100 ASSERT (BYTE_OFFSET(StartAddress->LowPart) == 0); 00101 00102 if (MI_IS_PHYSICAL_ADDRESS(MmPfnDatabase)) { 00103 00104 // 00105 // The system must be configured for dynamic memory addition. This is 00106 // critical as only then is the database guaranteed to be non-sparse. 00107 // 00108 00109 if (MmDynamicPfn == FALSE) { 00110 return STATUS_NOT_SUPPORTED; 00111 } 00112 00113 PfnDatabaseIsPhysical = TRUE; 00114 } 00115 else { 00116 PfnDatabaseIsPhysical = FALSE; 00117 } 00118 00119 StartPage = (PFN_NUMBER)(StartAddress->QuadPart >> PAGE_SHIFT); 00120 NumberOfPages = (PFN_NUMBER)(NumberOfBytes->QuadPart >> PAGE_SHIFT); 00121 00122 EndPage = StartPage + NumberOfPages; 00123 00124 if (EndPage - 1 > MmHighestPossiblePhysicalPage) { 00125 00126 // 00127 // Truncate the request into something that can be mapped by the PFN 00128 // database. 00129 // 00130 00131 EndPage = MmHighestPossiblePhysicalPage + 1; 00132 NumberOfPages = EndPage - StartPage; 00133 } 00134 00135 // 00136 // The range cannot wrap. 00137 // 00138 00139 if (StartPage >= EndPage) { 00140 return STATUS_INVALID_PARAMETER_1; 00141 } 00142 00143 ExAcquireFastMutex (&MmDynamicMemoryMutex); 00144 00145 i = (sizeof(PHYSICAL_MEMORY_DESCRIPTOR) + 00146 (sizeof(PHYSICAL_MEMORY_RUN) * (MmPhysicalMemoryBlock->NumberOfRuns + 1))); 00147 00148 NewPhysicalMemoryBlock = ExAllocatePoolWithTag (NonPagedPool, 00149 i, 00150 ' mM'); 00151 00152 if (NewPhysicalMemoryBlock == NULL) { 00153 ExReleaseFastMutex (&MmDynamicMemoryMutex); 00154 return STATUS_INSUFFICIENT_RESOURCES; 00155 } 00156 00157 // 00158 // The range cannot overlap any ranges that are already present. 
00159 // 00160 00161 start = 0; 00162 00163 LOCK_PFN (OldIrql); 00164 00165 do { 00166 00167 count = MmPhysicalMemoryBlock->Run[start].PageCount; 00168 Page = MmPhysicalMemoryBlock->Run[start].BasePage; 00169 00170 if (count != 0) { 00171 00172 LastPage = Page + count; 00173 00174 if ((StartPage < Page) && (EndPage > Page)) { 00175 UNLOCK_PFN (OldIrql); 00176 ExReleaseFastMutex (&MmDynamicMemoryMutex); 00177 ExFreePool (NewPhysicalMemoryBlock); 00178 return STATUS_CONFLICTING_ADDRESSES; 00179 } 00180 00181 if ((StartPage >= Page) && (StartPage < LastPage)) { 00182 UNLOCK_PFN (OldIrql); 00183 ExReleaseFastMutex (&MmDynamicMemoryMutex); 00184 ExFreePool (NewPhysicalMemoryBlock); 00185 return STATUS_CONFLICTING_ADDRESSES; 00186 } 00187 } 00188 00189 start += 1; 00190 00191 } while (start != MmPhysicalMemoryBlock->NumberOfRuns); 00192 00193 // 00194 // Fill any gaps in the (sparse) PFN database needed for these pages, 00195 // unless the PFN database was physically allocated and completely 00196 // committed up front. 00197 // 00198 00199 PagesNeeded = 0; 00200 00201 if (PfnDatabaseIsPhysical == FALSE) { 00202 PointerPte = MiGetPteAddress (MI_PFN_ELEMENT(StartPage)); 00203 LastPte = MiGetPteAddress ((PCHAR)(MI_PFN_ELEMENT(EndPage)) - 1); 00204 00205 while (PointerPte <= LastPte) { 00206 if (PointerPte->u.Hard.Valid == 0) { 00207 PagesNeeded += 1; 00208 } 00209 PointerPte += 1; 00210 } 00211 00212 if (MmAvailablePages < PagesNeeded) { 00213 UNLOCK_PFN (OldIrql); 00214 ExReleaseFastMutex (&MmDynamicMemoryMutex); 00215 ExFreePool (NewPhysicalMemoryBlock); 00216 return STATUS_INSUFFICIENT_RESOURCES; 00217 } 00218 00219 TempPte = ValidKernelPte; 00220 00221 PointerPte = MiGetPteAddress (MI_PFN_ELEMENT(StartPage)); 00222 00223 while (PointerPte <= LastPte) { 00224 if (PointerPte->u.Hard.Valid == 0) { 00225 00226 PageFrameIndex = MiRemoveZeroPage(MI_GET_PAGE_COLOR_FROM_PTE (PointerPte)); 00227 00228 MiInitializePfn (PageFrameIndex, PointerPte, 0); 00229 00230 TempPte.u.Hard.PageFrameNumber = PageFrameIndex; 00231 *PointerPte = TempPte; 00232 } 00233 PointerPte += 1; 00234 } 00235 MmResidentAvailablePages -= PagesNeeded; 00236 } 00237 00238 // 00239 // If the new range is adjacent to an existing range, just merge it into 00240 // the old block. Otherwise use the new block as a new entry will have to 00241 // be used. 00242 // 00243 00244 NewPhysicalMemoryBlock->NumberOfRuns = MmPhysicalMemoryBlock->NumberOfRuns + 1; 00245 NewPhysicalMemoryBlock->NumberOfPages = MmPhysicalMemoryBlock->NumberOfPages + NumberOfPages; 00246 00247 NewRun = &NewPhysicalMemoryBlock->Run[0]; 00248 start = 0; 00249 Inserted = FALSE; 00250 Updated = FALSE; 00251 00252 do { 00253 00254 Page = MmPhysicalMemoryBlock->Run[start].BasePage; 00255 count = MmPhysicalMemoryBlock->Run[start].PageCount; 00256 00257 if (Inserted == FALSE) { 00258 00259 // 00260 // Note overlaps into adjacent ranges were already checked above. 00261 // 00262 00263 if (StartPage == Page + count) { 00264 MmPhysicalMemoryBlock->Run[start].PageCount += NumberOfPages; 00265 OldPhysicalMemoryBlock = NewPhysicalMemoryBlock; 00266 MmPhysicalMemoryBlock->NumberOfPages += NumberOfPages; 00267 00268 // 00269 // Coalesce below and above to avoid leaving zero length gaps 00270 // as these gaps would prevent callers from removing ranges 00271 // the span them. 
00272 // 00273 00274 if (start + 1 < MmPhysicalMemoryBlock->NumberOfRuns) { 00275 00276 start += 1; 00277 Page = MmPhysicalMemoryBlock->Run[start].BasePage; 00278 count = MmPhysicalMemoryBlock->Run[start].PageCount; 00279 00280 if (StartPage + NumberOfPages == Page) { 00281 MmPhysicalMemoryBlock->Run[start - 1].PageCount += 00282 count; 00283 MmPhysicalMemoryBlock->NumberOfRuns -= 1; 00284 00285 // 00286 // Copy any remaining entries. 00287 // 00288 00289 if (start != MmPhysicalMemoryBlock->NumberOfRuns) { 00290 RtlMoveMemory (&MmPhysicalMemoryBlock->Run[start], 00291 &MmPhysicalMemoryBlock->Run[start + 1], 00292 (MmPhysicalMemoryBlock->NumberOfRuns - start) * sizeof (PHYSICAL_MEMORY_RUN)); 00293 } 00294 } 00295 } 00296 Updated = TRUE; 00297 break; 00298 } 00299 00300 if (StartPage + NumberOfPages == Page) { 00301 MmPhysicalMemoryBlock->Run[start].BasePage = StartPage; 00302 MmPhysicalMemoryBlock->Run[start].PageCount += NumberOfPages; 00303 OldPhysicalMemoryBlock = NewPhysicalMemoryBlock; 00304 MmPhysicalMemoryBlock->NumberOfPages += NumberOfPages; 00305 Updated = TRUE; 00306 break; 00307 } 00308 00309 if (StartPage + NumberOfPages <= Page) { 00310 00311 if (start + 1 < MmPhysicalMemoryBlock->NumberOfRuns) { 00312 00313 if (StartPage + NumberOfPages <= MmPhysicalMemoryBlock->Run[start + 1].BasePage) { 00314 // 00315 // Don't insert here - the new entry really belongs 00316 // (at least) one entry further down. 00317 // 00318 00319 continue; 00320 } 00321 } 00322 00323 NewRun->BasePage = StartPage; 00324 NewRun->PageCount = NumberOfPages; 00325 NewRun += 1; 00326 Inserted = TRUE; 00327 Updated = TRUE; 00328 } 00329 } 00330 00331 *NewRun = MmPhysicalMemoryBlock->Run[start]; 00332 NewRun += 1; 00333 00334 start += 1; 00335 00336 } while (start != MmPhysicalMemoryBlock->NumberOfRuns); 00337 00338 // 00339 // If the memory block has not been updated, then the new entry must 00340 // be added at the very end. 00341 // 00342 00343 if (Updated == FALSE) { 00344 ASSERT (Inserted == FALSE); 00345 NewRun->BasePage = StartPage; 00346 NewRun->PageCount = NumberOfPages; 00347 Inserted = TRUE; 00348 } 00349 00350 // 00351 // Repoint the MmPhysicalMemoryBlock at the new chunk, free the old after 00352 // releasing the PFN lock. 00353 // 00354 00355 if (Inserted == TRUE) { 00356 OldPhysicalMemoryBlock = MmPhysicalMemoryBlock; 00357 MmPhysicalMemoryBlock = NewPhysicalMemoryBlock; 00358 } 00359 00360 // 00361 // Note that the page directory (page parent entries on Win64) must be 00362 // filled in at system boot so that already-created processes do not fault 00363 // when referencing the new PFNs. 00364 // 00365 00366 // 00367 // Walk through the memory descriptors and add pages to the 00368 // free list in the PFN database. 00369 // 00370 00371 PageFrameIndex = StartPage; 00372 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 00373 00374 if (EndPage - 1 > MmHighestPhysicalPage) { 00375 MmHighestPhysicalPage = EndPage - 1; 00376 } 00377 00378 while (PageFrameIndex < EndPage) { 00379 00380 ASSERT (Pfn1->u2.ShareCount == 0); 00381 ASSERT (Pfn1->u3.e2.ShortFlags == 0); 00382 ASSERT (Pfn1->u3.e2.ReferenceCount == 0); 00383 ASSERT64 (Pfn1->UsedPageTableEntries == 0); 00384 ASSERT (Pfn1->OriginalPte.u.Long == ZeroKernelPte.u.Long); 00385 ASSERT (Pfn1->PteFrame == 0); 00386 ASSERT ((Pfn1->PteAddress == PFN_REMOVED) || 00387 (Pfn1->PteAddress == (PMMPTE)(UINT_PTR)0)); 00388 00389 // 00390 // Set the PTE address to the physical page for 00391 // virtual address alignment checking. 
00392 // 00393 00394 Pfn1->PteAddress = (PMMPTE)(PageFrameIndex << PTE_SHIFT); 00395 00396 MiInsertPageInList (MmPageLocationList[FreePageList], 00397 PageFrameIndex); 00398 00399 PageFrameIndex += 1; 00400 00401 Pfn1 += 1; 00402 } 00403 00404 MmResidentAvailablePages += NumberOfPages; 00405 MmNumberOfPhysicalPages += (PFN_COUNT)NumberOfPages; 00406 00407 UNLOCK_PFN (OldIrql); 00408 00409 // 00410 // Increase all commit limits to reflect the additional memory. 00411 // 00412 00413 ExAcquireSpinLock (&MmChargeCommitmentLock, &OldIrql); 00414 00415 MmTotalCommitLimit += NumberOfPages; 00416 MmTotalCommitLimitMaximum += NumberOfPages; 00417 00418 MmTotalCommittedPages += PagesNeeded; 00419 00420 ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql); 00421 00422 ExReleaseFastMutex (&MmDynamicMemoryMutex); 00423 00424 ExFreePool (OldPhysicalMemoryBlock); 00425 00426 // 00427 // Indicate number of bytes actually added to our caller. 00428 // 00429 00430 NumberOfBytes->QuadPart = (ULONGLONG)NumberOfPages * PAGE_SIZE; 00431 00432 return STATUS_SUCCESS; 00433 }
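As a usage illustration only - the physical range below is made up, and a real caller would obtain it from its memory hot-add hardware - a sketch of a call made at PASSIVE_LEVEL with no locks held, as the environment section above requires:

NTSTATUS
SketchAddMemoryRange (
    VOID
    )
{
    NTSTATUS Status;
    PHYSICAL_ADDRESS StartAddress;
    LARGE_INTEGER NumberOfBytes;

    StartAddress.QuadPart = 0x10000000;         // illustrative, page aligned
    NumberOfBytes.QuadPart = 64 * 1024 * 1024;  // illustrative, page aligned

    Status = MmAddPhysicalMemory (&StartAddress, &NumberOfBytes);

    if (NT_SUCCESS (Status)) {

        //
        // NumberOfBytes now reports how much was actually added; the
        // request may have been truncated to fit the PFN database.
        //

        DbgPrint ("MM: added %I64x bytes at %I64x\n",
                  NumberOfBytes.QuadPart,
                  StartAddress.QuadPart);
    }

    return Status;
}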

NTSTATUS MmAddVerifierThunks (IN PVOID ThunkBuffer,
                              IN ULONG ThunkBufferSize)

Definition at line 3846 of file verifier.c.

References _DRIVER_SPECIFIED_VERIFIER_THUNKS::DataTableEntry, ExAllocatePoolWithTag, ExFreePool(), FALSE, KeEnterCriticalRegion, KeLeaveCriticalRegion, KeReleaseMutant(), KernelMode, KeWaitForSingleObject(), KSEG0_BASE, _DRIVER_SPECIFIED_VERIFIER_THUNKS::ListEntry, MiActiveVerifierThunks, MiLookupDataTableEntry(), MiVerifierDriverAddedThunkListHead, MM_BOOT_IMAGE_SIZE, MmSystemLoadLock, NULL, _DRIVER_SPECIFIED_VERIFIER_THUNKS::NumberOfThunks, PAGED_CODE, PagedPool, PDRIVER_SPECIFIED_VERIFIER_THUNKS, TRUE, and WrVirtualMemory.

Referenced by NtSetSystemInformation().

03853 : 03854 03855 This routine adds another set of thunks to the verifier list. 03856 03857 Arguments: 03858 03859 ThunkBuffer - Supplies the buffer containing the thunk pairs. 03860 03861 ThunkBufferSize - Supplies the number of bytes in the thunk buffer. 03862 03863 Return Value: 03864 03865 Returns the status of the operation. 03866 03867 Environment: 03868 03869 Kernel mode. APC_LEVEL and below. 03870 03871 --*/ 03872 03873 { 03874 ULONG i; 03875 ULONG NumberOfThunkPairs; 03876 PDRIVER_VERIFIER_THUNK_PAIRS ThunkPairs; 03877 PDRIVER_VERIFIER_THUNK_PAIRS ThunkTable; 03878 PDRIVER_SPECIFIED_VERIFIER_THUNKS ThunkTableBase; 03879 PLDR_DATA_TABLE_ENTRY DataTableEntry; 03880 PVOID DriverStartAddress; 03881 PVOID DriverEndAddress; 03882 03883 PAGED_CODE(); 03884 03885 if (MiVerifierDriverAddedThunkListHead.Flink == NULL) { 03886 return STATUS_NOT_SUPPORTED; 03887 } 03888 03889 ThunkPairs = (PDRIVER_VERIFIER_THUNK_PAIRS)ThunkBuffer; 03890 NumberOfThunkPairs = ThunkBufferSize / sizeof(DRIVER_VERIFIER_THUNK_PAIRS); 03891 03892 if (NumberOfThunkPairs == 0) { 03893 return STATUS_INVALID_PARAMETER_1; 03894 } 03895 03896 ThunkTableBase = (PDRIVER_SPECIFIED_VERIFIER_THUNKS) ExAllocatePoolWithTag ( 03897 PagedPool, 03898 sizeof (DRIVER_SPECIFIED_VERIFIER_THUNKS) + NumberOfThunkPairs * sizeof (DRIVER_VERIFIER_THUNK_PAIRS), 03899 'tVmM'); 03900 03901 if (ThunkTableBase == NULL) { 03902 return STATUS_INSUFFICIENT_RESOURCES; 03903 } 03904 03905 ThunkTable = (PDRIVER_VERIFIER_THUNK_PAIRS)(ThunkTableBase + 1); 03906 03907 RtlCopyMemory (ThunkTable, 03908 ThunkPairs, 03909 NumberOfThunkPairs * sizeof(DRIVER_VERIFIER_THUNK_PAIRS)); 03910 03911 KeEnterCriticalRegion(); 03912 03913 KeWaitForSingleObject (&MmSystemLoadLock, 03914 WrVirtualMemory, 03915 KernelMode, 03916 FALSE, 03917 (PLARGE_INTEGER)NULL); 03918 03919 // 03920 // Find and validate the image that contains the routines to be thunked. 03921 // 03922 03923 DataTableEntry = MiLookupDataTableEntry (ThunkTable->PristineRoutine, 03924 TRUE); 03925 03926 if (DataTableEntry == NULL) { 03927 KeReleaseMutant (&MmSystemLoadLock, 1, FALSE, FALSE); 03928 KeLeaveCriticalRegion(); 03929 ExFreePool (ThunkTableBase); 03930 return STATUS_INVALID_PARAMETER_2; 03931 } 03932 03933 DriverStartAddress = (PVOID)(DataTableEntry->DllBase); 03934 DriverEndAddress = (PVOID)((PCHAR)DataTableEntry->DllBase + DataTableEntry->SizeOfImage); 03935 03936 // 03937 // Don't let drivers hook calls to kernel or HAL routines. 03938 // 03939 03940 if (DriverStartAddress < (PVOID)(KSEG0_BASE + MM_BOOT_IMAGE_SIZE)) { 03941 KeReleaseMutant (&MmSystemLoadLock, 1, FALSE, FALSE); 03942 KeLeaveCriticalRegion(); 03943 ExFreePool (ThunkTableBase); 03944 return STATUS_INVALID_PARAMETER_2; 03945 } 03946 03947 for (i = 0; i < NumberOfThunkPairs; i += 1) { 03948 03949 // 03950 // Ensure all the routines being thunked are in the same driver. 03951 // 03952 03953 if (((ULONG_PTR)ThunkTable->PristineRoutine < (ULONG_PTR)DriverStartAddress) || 03954 ((ULONG_PTR)ThunkTable->PristineRoutine >= (ULONG_PTR)DriverEndAddress)) { 03955 03956 KeReleaseMutant (&MmSystemLoadLock, 1, FALSE, FALSE); 03957 KeLeaveCriticalRegion(); 03958 ExFreePool (ThunkTableBase); 03959 return STATUS_INVALID_PARAMETER_2; 03960 } 03961 ThunkTable += 1; 03962 } 03963 03964 // 03965 // Add the validated thunk table to the verifier's global list. 
03966 // 03967 03968 ThunkTableBase->DataTableEntry = DataTableEntry; 03969 ThunkTableBase->NumberOfThunks = NumberOfThunkPairs; 03970 MiActiveVerifierThunks += 1; 03971 03972 InsertTailList (&MiVerifierDriverAddedThunkListHead, 03973 &ThunkTableBase->ListEntry); 03974 03975 KeReleaseMutant (&MmSystemLoadLock, 1, FALSE, FALSE); 03976 KeLeaveCriticalRegion(); 03977 03978 return STATUS_SUCCESS; 03979 }
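A minimal registration sketch follows. Only the PristineRoutine field appears in the listing above; the NewRoutine field name and the PDRIVER_VERIFIER_THUNK_ROUTINE cast are taken on assumption from the public DDK definition, and MyPristineRoutine and MyVerifierThunk are hypothetical routines that must reside in the same driver image, per the checks shown in the listing.

NTSTATUS
SketchAddVerifierThunks (
    VOID
    )
{
    DRIVER_VERIFIER_THUNK_PAIRS ThunkPairs[1];

    //
    // Both routines must live in the same loaded driver image, and that
    // image may not be the kernel or the HAL, or the call is rejected
    // with STATUS_INVALID_PARAMETER_2.
    //

    ThunkPairs[0].PristineRoutine = (PDRIVER_VERIFIER_THUNK_ROUTINE) MyPristineRoutine;
    ThunkPairs[0].NewRoutine = (PDRIVER_VERIFIER_THUNK_ROUTINE) MyVerifierThunk;

    return MmAddVerifierThunks (ThunkPairs, (ULONG) sizeof (ThunkPairs));
}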

VOID MmAdjustPageFileQuota (IN ULONG NewPageFileQuota)
 

NTKERNELAPI NTSTATUS MmAdjustWorkingSetSize (IN SIZE_T WorkingSetMinimum,
                                             IN SIZE_T WorkingSetMaximum,
                                             IN ULONG SystemCache)

Definition at line 2450 of file wslist.c.

References _EPROCESS::AddressSpaceDeleted, _MMSUPPORT::AllowWorkingSetAdjustment, ASSERT, ExPageLockHandle, FALSE, _MMWSL::FirstDynamic, _MMWSL::FirstFree, _MMWSL::HashTable, _MMWSL::LastEntry, _MMWSL::LastInitializedWsle, LOCK_PFN, LOCK_SYSTEM_WS, LOCK_WS, _MMSUPPORT::MaximumWorkingSetSize, MI_NONPAGABLE_MEMORY_AVAILABLE, MiAddWorkingSetPage(), MiEmptyWorkingSet(), MiFreeWsle(), MiGetPteAddress, MiGrowWsleHash(), _MMSUPPORT::MinimumWorkingSetSize, MM_BUMP_COUNTER, MM_FLUID_WORKING_SET, MM_RETRY_COUNT, MmAllowWorkingSetExpansion(), MmAvailablePages, MmLockPagableSectionByHandle(), MmMaximumWorkingSetSize, MmMinimumWorkingSetSize, MmPagesAboveWsMinimum, MmResidentAvailablePages, MmSystemCacheWs, MmUnlockPagableImageSection(), NTSTATUS(), NULL, PAGE_SHIFT, PAGE_SIZE, PERFINFO_GET_PAGE_INFO, PERFINFO_LOG_WS_REMOVAL, PERFINFO_PAGE_INFO_DECL, PsGetCurrentProcess, _MMWSL::Quota, TRUE, _MMWSLE::u1, UNLOCK_PFN, UNLOCK_SYSTEM_WS, UNLOCK_WS, _MMWSLE::VirtualAddress, _EPROCESS::Vm, _MMSUPPORT::VmWorkingSetList, _MMSUPPORT::WorkingSetSize, _MMWSL::Wsle, WSLE_NULL_INDEX, and WSLE_NUMBER.

Referenced by NtSetInformationJobObject(), NtSetSystemInformation(), PspAddProcessToJob(), PspSetQuotaLimits(), and xxxMinMaximize().

02458 : 02459 02460 This routine adjusts the current size of a process's working set 02461 list. If the maximum value is above the current maximum, pages 02462 are removed from the working set list. 02463 02464 An exception is raised if the limit cannot be granted. This 02465 could occur if too many pages were locked in the process's 02466 working set. 02467 02468 Note: if the minimum and maximum are both (SIZE_T)-1, the working set 02469 is purged, but the default sizes are not changed. 02470 02471 Arguments: 02472 02473 WorkingSetMinimumInBytes - Supplies the new minimum working set size in 02474 bytes. 02475 02476 WorkingSetMaximumInBytes - Supplies the new maximum working set size in 02477 bytes. 02478 02479 SystemCache - Supplies TRUE if the system cache working set is being 02480 adjusted, FALSE for all other working sets. 02481 02482 Return Value: 02483 02484 NTSTATUS. 02485 02486 Environment: 02487 02488 Kernel mode, IRQL APC_LEVEL or below. 02489 02490 --*/ 02491 02492 02493 { 02494 PEPROCESS CurrentProcess; 02495 ULONG Entry; 02496 ULONG LastFreed; 02497 PMMWSLE Wsle; 02498 KIRQL OldIrql; 02499 KIRQL OldIrql2; 02500 SPFN_NUMBER i; 02501 PMMPTE PointerPte; 02502 NTSTATUS ReturnStatus; 02503 LONG PagesAbove; 02504 LONG NewPagesAbove; 02505 ULONG FreeTryCount; 02506 PMMSUPPORT WsInfo; 02507 PMMWSL WorkingSetList; 02508 WSLE_NUMBER WorkingSetMinimum; 02509 WSLE_NUMBER WorkingSetMaximum; 02510 02511 PERFINFO_PAGE_INFO_DECL(); 02512 02513 FreeTryCount = 0; 02514 02515 if (SystemCache) { 02516 WsInfo = &MmSystemCacheWs; 02517 } else { 02518 CurrentProcess = PsGetCurrentProcess (); 02519 WsInfo = &CurrentProcess->Vm; 02520 } 02521 02522 if ((WorkingSetMinimumInBytes == (SIZE_T)-1) && 02523 (WorkingSetMaximumInBytes == (SIZE_T)-1)) { 02524 return MiEmptyWorkingSet (WsInfo, TRUE); 02525 } 02526 02527 if (WorkingSetMinimumInBytes == 0) { 02528 WorkingSetMinimum = WsInfo->MinimumWorkingSetSize; 02529 } 02530 else { 02531 WorkingSetMinimum = (WSLE_NUMBER)(WorkingSetMinimumInBytes >> PAGE_SHIFT); 02532 } 02533 02534 if (WorkingSetMaximumInBytes == 0) { 02535 WorkingSetMaximum = WsInfo->MaximumWorkingSetSize; 02536 } 02537 else { 02538 WorkingSetMaximum = (WSLE_NUMBER)(WorkingSetMaximumInBytes >> PAGE_SHIFT); 02539 } 02540 02541 if (WorkingSetMinimum > WorkingSetMaximum) { 02542 return STATUS_BAD_WORKING_SET_LIMIT; 02543 } 02544 02545 MmLockPagableSectionByHandle(ExPageLockHandle); 02546 02547 ReturnStatus = STATUS_SUCCESS; 02548 02549 // 02550 // Get the working set lock and disable APCs. 02551 // 02552 02553 if (SystemCache) { 02554 LOCK_SYSTEM_WS (OldIrql2); 02555 } else { 02556 LOCK_WS (CurrentProcess); 02557 02558 if (CurrentProcess->AddressSpaceDeleted != 0) { 02559 ReturnStatus = STATUS_PROCESS_IS_TERMINATING; 02560 goto Returns; 02561 } 02562 } 02563 02564 if (WorkingSetMaximum > MmMaximumWorkingSetSize) { 02565 WorkingSetMaximum = MmMaximumWorkingSetSize; 02566 ReturnStatus = STATUS_WORKING_SET_LIMIT_RANGE; 02567 } 02568 02569 if (WorkingSetMinimum > MmMaximumWorkingSetSize) { 02570 WorkingSetMinimum = MmMaximumWorkingSetSize; 02571 ReturnStatus = STATUS_WORKING_SET_LIMIT_RANGE; 02572 } 02573 02574 if (WorkingSetMinimum < MmMinimumWorkingSetSize) { 02575 WorkingSetMinimum = (ULONG)MmMinimumWorkingSetSize; 02576 ReturnStatus = STATUS_WORKING_SET_LIMIT_RANGE; 02577 } 02578 02579 // 02580 // Make sure that the number of locked pages will not 02581 // make the working set not fluid. 
02582 // 02583 02584 if ((WsInfo->VmWorkingSetList->FirstDynamic + MM_FLUID_WORKING_SET) >= 02585 WorkingSetMaximum) { 02586 ReturnStatus = STATUS_BAD_WORKING_SET_LIMIT; 02587 goto Returns; 02588 } 02589 02590 WorkingSetList = WsInfo->VmWorkingSetList; 02591 Wsle = WorkingSetList->Wsle; 02592 02593 // 02594 // Check to make sure ample resident physical pages exist for 02595 // this operation. 02596 // 02597 02598 LOCK_PFN (OldIrql); 02599 02600 i = WorkingSetMinimum - WsInfo->MinimumWorkingSetSize; 02601 02602 if (i > 0) { 02603 02604 // 02605 // New minimum working set is greater than the old one. Ensure that 02606 // we don't allow this process' working set minimum to increase to 02607 // a point where subsequent nonpaged pool allocations could cause 02608 // us to run out of pages. Additionally, leave 100 extra pages around 02609 // so the user can later bring up tlist and kill processes if necessary. 02610 // 02611 02612 if (MmAvailablePages < (20 + (i / (PAGE_SIZE / sizeof (MMWSLE))))) { 02613 UNLOCK_PFN (OldIrql); 02614 ReturnStatus = STATUS_INSUFFICIENT_RESOURCES; 02615 goto Returns; 02616 } 02617 02618 if (MI_NONPAGABLE_MEMORY_AVAILABLE() - 100 < i) { 02619 UNLOCK_PFN (OldIrql); 02620 ReturnStatus = STATUS_INSUFFICIENT_RESOURCES; 02621 goto Returns; 02622 } 02623 } 02624 02625 // 02626 // Adjust the number of resident pages up or down dependent on 02627 // the size of the new minimum working set size versus the previous 02628 // minimum size. 02629 // 02630 02631 MmResidentAvailablePages -= i; 02632 MM_BUMP_COUNTER(27, i); 02633 02634 UNLOCK_PFN (OldIrql); 02635 02636 if (WsInfo->AllowWorkingSetAdjustment == FALSE) { 02637 MmAllowWorkingSetExpansion (); 02638 } 02639 02640 if (WorkingSetMaximum > WorkingSetList->LastInitializedWsle) { 02641 02642 do { 02643 02644 // 02645 // The maximum size of the working set is being increased, check 02646 // to ensure the proper number of pages are mapped to cover 02647 // the complete working set list. 02648 // 02649 02650 if (!MiAddWorkingSetPage (WsInfo)) { 02651 WorkingSetMaximum = WorkingSetList->LastInitializedWsle - 1; 02652 break; 02653 } 02654 } while (WorkingSetMaximum > WorkingSetList->LastInitializedWsle); 02655 02656 } else { 02657 02658 // 02659 // The new working set maximum is less than the current working set 02660 // maximum. 02661 // 02662 02663 if (WsInfo->WorkingSetSize > WorkingSetMaximum) { 02664 02665 // 02666 // Remove some pages from the working set. 02667 // 02668 02669 // 02670 // Make sure that the number of locked pages will not 02671 // make the working set not fluid. 02672 // 02673 02674 if ((WorkingSetList->FirstDynamic + MM_FLUID_WORKING_SET) >= 02675 WorkingSetMaximum) { 02676 02677 ReturnStatus = STATUS_BAD_WORKING_SET_LIMIT; 02678 02679 LOCK_PFN (OldIrql); 02680 02681 MmResidentAvailablePages += i; 02682 MM_BUMP_COUNTER(54, i); 02683 02684 UNLOCK_PFN (OldIrql); 02685 02686 goto Returns; 02687 } 02688 02689 // 02690 // Attempt to remove the pages from the Maximum downward. 02691 // 02692 02693 LastFreed = WorkingSetList->LastEntry; 02694 if (WorkingSetList->LastEntry > WorkingSetMaximum) { 02695 02696 while (LastFreed >= WorkingSetMaximum) { 02697 02698 PointerPte = MiGetPteAddress( 02699 Wsle[LastFreed].u1.VirtualAddress); 02700 02701 PERFINFO_GET_PAGE_INFO(PointerPte); 02702 02703 if ((Wsle[LastFreed].u1.e1.Valid != 0) && 02704 (!MiFreeWsle (LastFreed, 02705 WsInfo, 02706 PointerPte))) { 02707 02708 // 02709 // This LastFreed could not be removed. 
02710 // 02711 02712 break; 02713 } 02714 PERFINFO_LOG_WS_REMOVAL(PERFINFO_LOG_TYPE_OUTWS_ADJUSTWS, WsInfo); 02715 LastFreed -= 1; 02716 } 02717 WorkingSetList->LastEntry = LastFreed; 02718 } 02719 02720 // 02721 // Remove pages. 02722 // 02723 02724 Entry = WorkingSetList->FirstDynamic; 02725 02726 while (WsInfo->WorkingSetSize > WorkingSetMaximum) { 02727 if (Wsle[Entry].u1.e1.Valid != 0) { 02728 PointerPte = MiGetPteAddress ( 02729 Wsle[Entry].u1.VirtualAddress); 02730 PERFINFO_GET_PAGE_INFO(PointerPte); 02731 02732 if (MiFreeWsle(Entry, WsInfo, PointerPte)) { 02733 PERFINFO_LOG_WS_REMOVAL(PERFINFO_LOG_TYPE_OUTWS_ADJUSTWS, 02734 WsInfo); 02735 } 02736 } 02737 Entry += 1; 02738 if (Entry > LastFreed) { 02739 FreeTryCount += 1; 02740 if (FreeTryCount > MM_RETRY_COUNT) { 02741 02742 // 02743 // Page table pages are not becoming free, give up 02744 // and return an error. 02745 // 02746 02747 ReturnStatus = STATUS_BAD_WORKING_SET_LIMIT; 02748 02749 break; 02750 } 02751 Entry = WorkingSetList->FirstDynamic; 02752 } 02753 } 02754 02755 if (FreeTryCount <= MM_RETRY_COUNT) { 02756 WorkingSetList->Quota = WorkingSetMaximum; 02757 } 02758 } 02759 } 02760 02761 // 02762 // Adjust the number of pages above the working set minimum. 02763 // 02764 02765 PagesAbove = (LONG)WsInfo->WorkingSetSize - 02766 (LONG)WsInfo->MinimumWorkingSetSize; 02767 NewPagesAbove = (LONG)WsInfo->WorkingSetSize - 02768 (LONG)WorkingSetMinimum; 02769 02770 LOCK_PFN (OldIrql); 02771 if (PagesAbove > 0) { 02772 MmPagesAboveWsMinimum -= (ULONG)PagesAbove; 02773 } 02774 if (NewPagesAbove > 0) { 02775 MmPagesAboveWsMinimum += (ULONG)NewPagesAbove; 02776 } 02777 UNLOCK_PFN (OldIrql); 02778 02779 if (FreeTryCount <= MM_RETRY_COUNT) { 02780 WsInfo->MaximumWorkingSetSize = WorkingSetMaximum; 02781 WsInfo->MinimumWorkingSetSize = WorkingSetMinimum; 02782 02783 if (WorkingSetMinimum >= WorkingSetList->Quota) { 02784 WorkingSetList->Quota = WorkingSetMinimum; 02785 } 02786 } 02787 else { 02788 LOCK_PFN (OldIrql); 02789 02790 MmResidentAvailablePages += i; 02791 MM_BUMP_COUNTER(55, i); 02792 02793 UNLOCK_PFN (OldIrql); 02794 } 02795 02796 02797 ASSERT ((WorkingSetList->FirstFree <= WorkingSetList->LastInitializedWsle) || 02798 (WorkingSetList->FirstFree == WSLE_NULL_INDEX)); 02799 02800 if ((WorkingSetList->HashTable == NULL) && 02801 (WsInfo->MaximumWorkingSetSize > ((1536*1024) >> PAGE_SHIFT))) { 02802 02803 // 02804 // The working set list consists of more than a single page. 02805 // 02806 02807 MiGrowWsleHash (WsInfo); 02808 } 02809 02810 Returns: 02811 02812 if (SystemCache) { 02813 UNLOCK_SYSTEM_WS (OldIrql2); 02814 } else { 02815 UNLOCK_WS (CurrentProcess); 02816 } 02817 02818 MmUnlockPagableImageSection(ExPageLockHandle); 02819 02820 return ReturnStatus; 02821 }
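A hedged usage sketch based only on the behavior documented above: sizes are supplied in bytes, zero keeps the current value, and (SIZE_T)-1 for both values purges the working set of the current process (or the system cache when SystemCache is TRUE). The function and status names come from the listing; the surrounding routine is illustrative.

    #include <ntddk.h>

    VOID
    AdjustCallerWorkingSet (VOID)
    {
        NTSTATUS Status;

        /* Request a 2 MB minimum and 8 MB maximum for the current process.
           Passing 0 for either value would keep the existing setting. */
        Status = MmAdjustWorkingSetSize (2 * 1024 * 1024,    /* minimum, in bytes */
                                         8 * 1024 * 1024,    /* maximum, in bytes */
                                         FALSE);             /* a process, not the system cache */

        if (Status == STATUS_WORKING_SET_LIMIT_RANGE) {
            /* The values were clamped to the system-wide minimum/maximum,
               but the adjustment was still applied. */
        }

        /* Empty the working set without changing the default sizes. */
        MmAdjustWorkingSetSize ((SIZE_T)-1, (SIZE_T)-1, FALSE);
    }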

NTKERNELAPI PVOID MmAllocateContiguousMemory (IN SIZE_T NumberOfBytes, IN PHYSICAL_ADDRESS HighestAcceptableAddress)

Definition at line 3881 of file iosup.c.

References MiAllocateContiguousMemory(), PAGE_SHIFT, and RtlGetCallersAddress().

Referenced by IopGetDumpStack(), and Ki386AllocateContiguousMemory().

03888 : 03889 03890 This function allocates a range of physically contiguous non-paged pool. 03891 03892 This routine is designed to be used by a driver's initialization 03893 routine to allocate a contiguous block of physical memory for 03894 issuing DMA requests from. 03895 03896 Arguments: 03897 03898 NumberOfBytes - Supplies the number of bytes to allocate. 03899 03900 HighestAcceptableAddress - Supplies the highest physical address 03901 which is valid for the allocation. For 03902 example, if the device can only reference 03903 physical memory in the lower 16MB this 03904 value would be set to 0xFFFFFF (16Mb - 1). 03905 03906 Return Value: 03907 03908 NULL - a contiguous range could not be found to satisfy the request. 03909 03910 NON-NULL - Returns a pointer (virtual address in the nonpaged portion 03911 of the system) to the allocated physically contiguous 03912 memory. 03913 03914 Environment: 03915 03916 Kernel mode, IRQL of DISPATCH_LEVEL or below. 03917 03918 --*/ 03919 03920 { 03921 PFN_NUMBER HighestPfn; 03922 PVOID CallingAddress; 03923 03924 #if defined (_X86_) 03925 PVOID CallersCaller; 03926 03927 RtlGetCallersAddress(&CallingAddress, &CallersCaller); 03928 #else 03929 CallingAddress = (PVOID)_ReturnAddress(); 03930 #endif 03931 03932 HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT); 03933 03934 return MiAllocateContiguousMemory(NumberOfBytes, 03935 0, 03936 HighestPfn, 03937 0, 03938 CallingAddress); 03939 }
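A typical initialization-time use, following the 16 MB example in the comment block above; the buffer size and routine names are illustrative, and the matching MmFreeContiguousMemory call is assumed from the companion API rather than shown in this excerpt.

    #include <ntddk.h>

    #define DMA_SCRATCH_SIZE (64 * 1024)

    PVOID
    AllocateDmaScratchBuffer (VOID)
    {
        PHYSICAL_ADDRESS HighestAcceptable;

        /* The device in this sketch can only address the low 16 MB. */
        HighestAcceptable.QuadPart = 0xFFFFFF;

        /* Returns NULL if no physically contiguous run below 16 MB is available. */
        return MmAllocateContiguousMemory (DMA_SCRATCH_SIZE, HighestAcceptable);
    }

    VOID
    FreeDmaScratchBuffer (PVOID Buffer)
    {
        if (Buffer != NULL) {
            MmFreeContiguousMemory (Buffer);
        }
    }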

NTKERNELAPI PVOID MmAllocateContiguousMemorySpecifyCache (IN SIZE_T NumberOfBytes, IN PHYSICAL_ADDRESS LowestAcceptableAddress, IN PHYSICAL_ADDRESS HighestAcceptableAddress, IN PHYSICAL_ADDRESS BoundaryAddressMultiple OPTIONAL, IN MEMORY_CACHING_TYPE CacheType)

Definition at line 3744 of file iosup.c.

References ASSERT, BYTE_OFFSET, KeSweepDcache(), MiAllocateContiguousMemory(), MiGetPteAddress, MmCached, MmFreeContiguousMemory(), MmGetPhysicalAddress(), MmMapIoSpace(), NULL, PAGE_SHIFT, PAGE_SIZE, RtlGetCallersAddress(), TRUE, and _MMPTE::u.

03754 : 03755 03756 This function allocates a range of physically contiguous non-cached, 03757 non-paged memory. This is accomplished by using MmAllocateContiguousMemory 03758 which uses nonpaged pool virtual addresses to map the found memory chunk. 03759 03760 Then this function establishes another map to the same physical addresses, 03761 but this alternate map is initialized as non-cached. All references by 03762 our caller will be done through this alternate map. 03763 03764 This routine is designed to be used by a driver's initialization 03765 routine to allocate a contiguous block of noncached physical memory for 03766 things like the AGP GART. 03767 03768 Arguments: 03769 03770 NumberOfBytes - Supplies the number of bytes to allocate. 03771 03772 LowestAcceptableAddress - Supplies the lowest physical address 03773 which is valid for the allocation. For 03774 example, if the device can only reference 03775 physical memory in the 8M to 16MB range, this 03776 value would be set to 0x800000 (8Mb). 03777 03778 HighestAcceptableAddress - Supplies the highest physical address 03779 which is valid for the allocation. For 03780 example, if the device can only reference 03781 physical memory below 16MB, this 03782 value would be set to 0xFFFFFF (16Mb - 1). 03783 03784 BoundaryAddressMultiple - Supplies the physical address multiple this 03785 allocation must not cross. 03786 03787 Return Value: 03788 03789 NULL - a contiguous range could not be found to satisfy the request. 03790 03791 NON-NULL - Returns a pointer (virtual address in the nonpaged portion 03792 of the system) to the allocated physically contiguous 03793 memory. 03794 03795 Environment: 03796 03797 Kernel mode, IRQL of DISPATCH_LEVEL or below. 03798 03799 --*/ 03800 03801 { 03802 PVOID BaseAddress; 03803 PVOID NewVa; 03804 PFN_NUMBER LowestPfn; 03805 PFN_NUMBER HighestPfn; 03806 PFN_NUMBER BoundaryPfn; 03807 PMMPTE PointerPte; 03808 PHYSICAL_ADDRESS PhysicalAddress; 03809 PVOID CallingAddress; 03810 03811 #if defined (_X86_) 03812 PVOID CallersCaller; 03813 03814 RtlGetCallersAddress(&CallingAddress, &CallersCaller); 03815 #else 03816 CallingAddress = (PVOID)_ReturnAddress(); 03817 #endif 03818 03819 ASSERT (NumberOfBytes != 0); 03820 03821 LowestPfn = (PFN_NUMBER)(LowestAcceptableAddress.QuadPart >> PAGE_SHIFT); 03822 if (BYTE_OFFSET(LowestAcceptableAddress.LowPart)) { 03823 LowestPfn += 1; 03824 } 03825 03826 if (BYTE_OFFSET(BoundaryAddressMultiple.LowPart)) { 03827 return NULL; 03828 } 03829 03830 BoundaryPfn = (PFN_NUMBER)(BoundaryAddressMultiple.QuadPart >> PAGE_SHIFT); 03831 03832 HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT); 03833 03834 BaseAddress = MiAllocateContiguousMemory(NumberOfBytes, 03835 LowestPfn, 03836 HighestPfn, 03837 BoundaryPfn, 03838 CallingAddress); 03839 03840 if (BaseAddress) { 03841 03842 if (CacheType != MmCached) { 03843 03844 // 03845 // We have an address range but it's cached. Create an uncached 03846 // alternate mapping now. Stash the original virtual address at the 03847 // end of the mapped range so we can unmap the nonpaged pool VAs and 03848 // the actual pages when the caller frees the memory. 
03849 // 03850 03851 PhysicalAddress = MmGetPhysicalAddress (BaseAddress); 03852 03853 NewVa = MmMapIoSpace (PhysicalAddress, 03854 NumberOfBytes + (2 * PAGE_SIZE), 03855 CacheType); 03856 03857 if (NewVa) { 03858 03859 PointerPte = MiGetPteAddress(NewVa); 03860 03861 PointerPte += ((NumberOfBytes + PAGE_SIZE - 1) >> PAGE_SHIFT); 03862 PointerPte->u.Long = (ULONG_PTR)BaseAddress; 03863 03864 PointerPte += 1; 03865 PointerPte->u.Long = NumberOfBytes; 03866 03867 KeSweepDcache (TRUE); 03868 BaseAddress = NewVa; 03869 } 03870 else { 03871 MmFreeContiguousMemory (BaseAddress); 03872 BaseAddress = NULL; 03873 } 03874 } 03875 } 03876 03877 return BaseAddress; 03878 }
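A usage sketch matching the AGP GART case mentioned above; the 4 GB ceiling and routine names are illustrative, and MmFreeContiguousMemorySpecifyCache is assumed from the companion API rather than shown in this excerpt.

    #include <ntddk.h>

    PVOID
    AllocateGartTable (SIZE_T NumberOfBytes)
    {
        PHYSICAL_ADDRESS Lowest;
        PHYSICAL_ADDRESS Highest;
        PHYSICAL_ADDRESS Boundary;

        Lowest.QuadPart   = 0;              /* no lower bound */
        Highest.QuadPart  = 0xFFFFFFFF;     /* anywhere in the low 4 GB */
        Boundary.QuadPart = 0;              /* no boundary multiple; must be page aligned if used */

        /* Ask for an uncached mapping, as the GART example above suggests. */
        return MmAllocateContiguousMemorySpecifyCache (NumberOfBytes,
                                                       Lowest,
                                                       Highest,
                                                       Boundary,
                                                       MmNonCached);
    }

    VOID
    FreeGartTable (PVOID GartBase, SIZE_T NumberOfBytes)
    {
        if (GartBase != NULL) {
            MmFreeContiguousMemorySpecifyCache (GartBase, NumberOfBytes, MmNonCached);
        }
    }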

PVOID MmAllocateIndependentPages (IN SIZE_T NumberOfBytes)
 

Definition at line 3942 of file iosup.c.

References ASSERT, BYTES_TO_PAGES, FALSE, LOCK_PFN, MI_GET_PAGE_COLOR_FROM_PTE, MI_MAKE_VALID_PTE, MI_NONPAGABLE_MEMORY_AVAILABLE, MI_SET_PTE_DIRTY, MI_WRITE_VALID_PTE, MiChargeCommitmentCantExpand(), MiEnsureAvailablePageOrWait(), MiGetVirtualAddressMappedByPte, MiInitializePfn(), MiReleaseSystemPtes(), MiRemoveAnyPage(), MiReserveSystemPtes(), MM_BUMP_COUNTER, MM_DBG_COMMIT_INDEPENDENT_PAGES, MM_READWRITE, MM_TRACK_COMMIT, MmResidentAvailablePages, NULL, SystemPteSpace, TRUE, _MMPTE::u, and UNLOCK_PFN.

Referenced by KiI386PentiumLockErrataFixup().

03948 : 03949 03950 This function allocates a range of virtually contiguous nonpaged pages that 03951 can have independent page protections applied to each page. 03952 03953 Arguments: 03954 03955 NumberOfBytes - Supplies the number of bytes to allocate. 03956 03957 Return Value: 03958 03959 The virtual address of the memory or NULL if none could be allocated. 03960 03961 Environment: 03962 03963 Kernel mode, IRQL of APC_LEVEL or below. 03964 03965 --*/ 03966 03967 { 03968 PFN_NUMBER NumberOfPages; 03969 PMMPTE PointerPte; 03970 MMPTE TempPte; 03971 PFN_NUMBER PageFrameIndex; 03972 PVOID BaseAddress; 03973 KIRQL OldIrql; 03974 03975 NumberOfPages = BYTES_TO_PAGES (NumberOfBytes); 03976 03977 PointerPte = MiReserveSystemPtes ((ULONG)NumberOfPages, 03978 SystemPteSpace, 03979 0, 03980 0, 03981 FALSE); 03982 if (PointerPte == NULL) { 03983 return NULL; 03984 } 03985 03986 BaseAddress = (PVOID)MiGetVirtualAddressMappedByPte (PointerPte); 03987 03988 LOCK_PFN (OldIrql); 03989 03990 if ((SPFN_NUMBER)NumberOfPages > MI_NONPAGABLE_MEMORY_AVAILABLE()) { 03991 UNLOCK_PFN (OldIrql); 03992 MiReleaseSystemPtes (PointerPte, (ULONG)NumberOfPages, SystemPteSpace); 03993 return NULL; 03994 } 03995 03996 MmResidentAvailablePages -= NumberOfPages; 03997 MM_BUMP_COUNTER(28, NumberOfPages); 03998 03999 do { 04000 ASSERT (PointerPte->u.Hard.Valid == 0); 04001 MiEnsureAvailablePageOrWait (NULL, NULL); 04002 PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE (PointerPte)); 04003 04004 MI_MAKE_VALID_PTE (TempPte, 04005 PageFrameIndex, 04006 MM_READWRITE, 04007 PointerPte); 04008 04009 MI_SET_PTE_DIRTY (TempPte); 04010 MI_WRITE_VALID_PTE (PointerPte, TempPte); 04011 MiInitializePfn (PageFrameIndex, PointerPte, 1); 04012 04013 PointerPte += 1; 04014 NumberOfPages -= 1; 04015 } while (NumberOfPages != 0); 04016 04017 UNLOCK_PFN (OldIrql); 04018 04019 MiChargeCommitmentCantExpand (NumberOfPages, TRUE); 04020 04021 MM_TRACK_COMMIT (MM_DBG_COMMIT_INDEPENDENT_PAGES, NumberOfPages); 04022 04023 return BaseAddress; 04024 }
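The point of this allocator is that each page can later be given its own protection, which is how its single caller (the Pentium lock errata fixup) uses it. The sketch below assumes a companion MmSetPageProtection(VirtualAddress, NumberOfBytes, NewProtect) helper exists in this module; that call, its signature, and the wrapping routine name are assumptions, not shown in this excerpt.

    #include <ntddk.h>

    VOID
    MakeGuardedScratchPages (VOID)
    {
        PVOID Base;

        /* Two virtually contiguous nonpaged pages whose protections can be
           changed independently of one another. */
        Base = MmAllocateIndependentPages (2 * PAGE_SIZE);
        if (Base == NULL) {
            return;
        }

        /* Assumed helper: make the second page read-only so stray writes
           fault immediately, while the first page stays read/write. */
        MmSetPageProtection ((PVOID)((PCHAR)Base + PAGE_SIZE),
                             PAGE_SIZE,
                             PAGE_READONLY);
    }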

NTKERNELAPI PVOID MmAllocateNonCachedMemory (IN SIZE_T NumberOfBytes)
 

Definition at line 5581 of file iosup.c.

References ASSERT, BYTES_TO_PAGES, ExPageLockHandle, FALSE, KeFlushEntireTb(), KeSweepDcache(), LOCK_PFN, MI_DISABLE_CACHING, MI_GET_PAGE_COLOR_FROM_PTE, MI_MAKE_VALID_PTE, MI_NONPAGABLE_MEMORY_AVAILABLE, MI_SET_PTE_DIRTY, MI_WRITE_VALID_PTE, MiChargeCommitmentCantExpand(), MiEnsureAvailablePageOrWait(), MiGetVirtualAddressMappedByPte, MiInitializePfn(), MiReleaseSystemPtes(), MiRemoveAnyPage(), MiReserveSystemPtes(), MiReturnCommitment(), MiSweepCacheMachineDependent(), MM_BUMP_COUNTER, MM_DBG_COMMIT_NONCACHED_PAGES, MM_READWRITE, MM_TRACK_COMMIT, MmLockPagableSectionByHandle(), MmNonCached, MmResidentAvailablePages, MmUnlockPagableImageSection(), NULL, SystemPteSpace, TRUE, _MMPTE::u, and UNLOCK_PFN.

Referenced by IopGetDumpStack().

05587 : 05588 05589 This function allocates a range of noncached memory in 05590 the non-paged portion of the system address space. 05591 05592 This routine is designed to be used by a driver's initialization 05593 routine to allocate a noncached block of virtual memory for 05594 various device specific buffers. 05595 05596 Arguments: 05597 05598 NumberOfBytes - Supplies the number of bytes to allocate. 05599 05600 Return Value: 05601 05602 NON-NULL - Returns a pointer (virtual address in the nonpaged portion 05603 of the system) to the allocated physically contiguous 05604 memory. 05605 05606 NULL - The specified request could not be satisfied. 05607 05608 Environment: 05609 05610 Kernel mode, IRQL of APC_LEVEL or below. 05611 05612 --*/ 05613 05614 { 05615 PMMPTE PointerPte; 05616 MMPTE TempPte; 05617 PFN_NUMBER NumberOfPages; 05618 PFN_NUMBER PageFrameIndex; 05619 PVOID BaseAddress; 05620 KIRQL OldIrql; 05621 05622 ASSERT (NumberOfBytes != 0); 05623 05624 NumberOfPages = BYTES_TO_PAGES(NumberOfBytes); 05625 05626 // 05627 // Obtain enough virtual space to map the pages. 05628 // 05629 05630 PointerPte = MiReserveSystemPtes ((ULONG)NumberOfPages, 05631 SystemPteSpace, 05632 0, 05633 0, 05634 FALSE); 05635 05636 if (PointerPte == NULL) { 05637 return NULL; 05638 } 05639 05640 // 05641 // Obtain backing commitment for the pages. 05642 // 05643 05644 if (MiChargeCommitmentCantExpand (NumberOfPages, FALSE) == FALSE) { 05645 MiReleaseSystemPtes (PointerPte, (ULONG)NumberOfPages, SystemPteSpace); 05646 return NULL; 05647 } 05648 05649 MM_TRACK_COMMIT (MM_DBG_COMMIT_NONCACHED_PAGES, NumberOfPages); 05650 05651 MmLockPagableSectionByHandle (ExPageLockHandle); 05652 05653 // 05654 // Acquire the PFN mutex to synchronize access to the PFN database. 05655 // 05656 05657 LOCK_PFN (OldIrql); 05658 05659 // 05660 // Obtain enough pages to contain the allocation. 05661 // Check to make sure the physical pages are available. 05662 // 05663 05664 if ((SPFN_NUMBER)NumberOfPages > MI_NONPAGABLE_MEMORY_AVAILABLE()) { 05665 UNLOCK_PFN (OldIrql); 05666 MmUnlockPagableImageSection (ExPageLockHandle); 05667 MiReleaseSystemPtes (PointerPte, (ULONG)NumberOfPages, SystemPteSpace); 05668 MiReturnCommitment (NumberOfPages); 05669 return NULL; 05670 } 05671 05672 #if defined(_IA64_) 05673 KeFlushEntireTb(FALSE, TRUE); 05674 #endif 05675 05676 MmResidentAvailablePages -= NumberOfPages; 05677 MM_BUMP_COUNTER(4, NumberOfPages); 05678 05679 BaseAddress = (PVOID)MiGetVirtualAddressMappedByPte (PointerPte); 05680 05681 do { 05682 ASSERT (PointerPte->u.Hard.Valid == 0); 05683 MiEnsureAvailablePageOrWait (NULL, NULL); 05684 PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE (PointerPte)); 05685 05686 MI_MAKE_VALID_PTE (TempPte, 05687 PageFrameIndex, 05688 MM_READWRITE, 05689 PointerPte); 05690 05691 MI_SET_PTE_DIRTY (TempPte); 05692 MI_DISABLE_CACHING (TempPte); 05693 MI_WRITE_VALID_PTE (PointerPte, TempPte); 05694 MiInitializePfn (PageFrameIndex, PointerPte, 1); 05695 05696 PointerPte += 1; 05697 NumberOfPages -= 1; 05698 } while (NumberOfPages != 0); 05699 05700 // 05701 // Flush any data for this page out of the dcaches. 05702 // 05703 05704 #if !defined(_IA64_) 05705 // 05706 // Flush any data for this page out of the dcaches. 05707 // 05708 05709 KeSweepDcache (TRUE); 05710 #else 05711 MiSweepCacheMachineDependent(BaseAddress, NumberOfBytes, MmNonCached); 05712 #endif 05713 05714 UNLOCK_PFN (OldIrql); 05715 MmUnlockPagableImageSection (ExPageLockHandle); 05716 05717 return BaseAddress; 05718 }
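A device-buffer sketch; the status-block size and routine names are illustrative, and the MmFreeNonCachedMemory call (which needs the original length) is assumed from the companion API rather than shown in this excerpt.

    #include <ntddk.h>

    #define STATUS_BLOCK_SIZE PAGE_SIZE

    PVOID
    AllocateDeviceStatusBlock (VOID)
    {
        PVOID Block;

        /* Noncached, so device writes become visible without explicit cache flushes. */
        Block = MmAllocateNonCachedMemory (STATUS_BLOCK_SIZE);

        if (Block != NULL) {
            RtlZeroMemory (Block, STATUS_BLOCK_SIZE);
        }

        return Block;
    }

    VOID
    FreeDeviceStatusBlock (PVOID Block)
    {
        if (Block != NULL) {
            MmFreeNonCachedMemory (Block, STATUS_BLOCK_SIZE);
        }
    }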

NTKERNELAPI PMDL MmAllocatePagesForMdl (IN PHYSICAL_ADDRESS LowAddress, IN PHYSICAL_ADDRESS HighAddress, IN PHYSICAL_ADDRESS SkipBytes, IN SIZE_T TotalBytes)

Definition at line 4270 of file iosup.c.

References ActiveAndValid, ADDRESS_AND_SIZE_TO_SPAN_PAGES, APC_LEVEL, ASSERT, _PHYSICAL_MEMORY_RUN::BasePage, _MMCOLOR_TABLES::Blink, _MMPFNLIST::Blink, BYTE_OFFSET, _MDL::ByteCount, DbgPrint, ExAllocatePoolWithTag, ExFreePool(), ExPageLockHandle, FALSE, _MMCOLOR_TABLES::Flink, _MMPFNLIST::Flink, FreePageList, LOCK_PFN, MI_MAGIC_AWE_PTEFRAME, MI_NONPAGABLE_MEMORY_AVAILABLE, MI_PFN_ELEMENT, MI_SET_PFN_DELETED, MiChargeCommitmentCantExpand(), MiLastCallColor, MiLastCallHighPage, MiLastCallLowPage, MiRestoreTransitionPte(), MiReturnCommitment(), MiUnlinkFreeOrZeroedPage(), MiUnlinkPageFromList(), MiZeroPhysicalPage(), MM_BUMP_COUNTER, MM_DBG_COMMIT_MDL_PAGES, MM_DEMAND_ZERO_WRITE_PTE, MM_EMPTY_LIST, MM_TRACK_COMMIT, MmCreateMdl(), MmDynamicMemoryMutex, MmFreePagesByColor, MmHighestPhysicalPage, MMLISTS, MmLockPagableSectionByHandle(), MmMdlPagesAllocated, MmPageLocationList, MmPfnDatabase, MmPhysicalMemoryBlock, MmResidentAvailablePages, MmSecondaryColors, MmUnlockPagableImageSection(), NonPagedPool, NULL, _PHYSICAL_MEMORY_DESCRIPTOR::NumberOfRuns, _MMPFN::OriginalPte, PAGE_SHIFT, PAGE_SIZE, _PHYSICAL_MEMORY_RUN::PageCount, _MMPFN::PteFrame, _PHYSICAL_MEMORY_DESCRIPTOR::Run, StandbyPageList, TRUE, _MMPTE::u, _MMPFN::u1, _MMPFN::u2, _MMPFN::u3, UNLOCK_PFN, and ZeroedPageList.

Referenced by NtAllocateUserPhysicalPages().

04279 : 04280 04281 This routine searches the PFN database for free, zeroed or standby pages 04282 to satisfy the request. This does not map the pages - it just allocates 04283 them and puts them into an MDL. It is expected that our caller will 04284 map the MDL as needed. 04285 04286 NOTE: this routine may return an MDL mapping a smaller number of bytes 04287 than the amount requested. It is the caller's responsibility to check the 04288 MDL upon return for the size actually allocated. 04289 04290 These pages comprise physical non-paged memory and are zero-filled. 04291 04292 This routine is designed to be used by an AGP driver to obtain physical 04293 memory in a specified range since hardware may provide substantial 04294 performance wins depending on where the backing memory is allocated. 04295 04296 Arguments: 04297 04298 LowAddress - Supplies the low physical address of the first range that 04299 the allocated pages can come from. 04300 04301 HighAddress - Supplies the high physical address of the first range that 04302 the allocated pages can come from. 04303 04304 SkipBytes - Number of bytes to skip (from the Low Address) to get to the 04305 next physical address range that allocated pages can come from. 04306 04307 TotalBytes - Supplies the number of bytes to allocate. 04308 04309 Return Value: 04310 04311 MDL - An MDL mapping a range of pages in the specified range. 04312 This may map less memory than the caller requested if the full amount 04313 is not currently available. 04314 04315 NULL - No pages in the specified range OR not enough virtually contiguous 04316 nonpaged pool for the MDL is available at this time. 04317 04318 Environment: 04319 04320 Kernel mode, IRQL of APC_LEVEL or below. 04321 04322 --*/ 04323 04324 { 04325 PMDL MemoryDescriptorList; 04326 PMDL MemoryDescriptorList2; 04327 PMMPFN Pfn1; 04328 PMMPFN PfnNextColored; 04329 PMMPFN PfnNextFlink; 04330 PMMPFN PfnLastColored; 04331 KIRQL OldIrql; 04332 PFN_NUMBER start; 04333 PFN_NUMBER count; 04334 PFN_NUMBER Page; 04335 PFN_NUMBER LastPage; 04336 PFN_NUMBER found; 04337 PFN_NUMBER BasePage; 04338 PFN_NUMBER LowPage; 04339 PFN_NUMBER HighPage; 04340 PFN_NUMBER SizeInPages; 04341 PFN_NUMBER MdlPageSpan; 04342 PFN_NUMBER SkipPages; 04343 PFN_NUMBER MaxPages; 04344 PPFN_NUMBER MdlPage; 04345 PPFN_NUMBER LastMdlPage; 04346 ULONG Color; 04347 PMMCOLOR_TABLES ColorHead; 04348 MMLISTS MemoryList; 04349 PPFN_NUMBER FirstMdlPageToZero; 04350 PFN_NUMBER LowPage1; 04351 PFN_NUMBER HighPage1; 04352 LOGICAL PagePlacementOk; 04353 PFN_NUMBER PageNextColored; 04354 PFN_NUMBER PageNextFlink; 04355 PFN_NUMBER PageLastColored; 04356 PMMPFNLIST ListHead; 04357 PPFN_NUMBER ColorAnchorsHead; 04358 PPFN_NUMBER ColorAnchor; 04359 ULONG FullAnchorCount; 04360 #if DBG 04361 ULONG FinishedCount; 04362 #endif 04363 04364 ASSERT (KeGetCurrentIrql() <= APC_LEVEL); 04365 04366 // 04367 // The skip increment must be a page-size multiple. 04368 // 04369 04370 if (BYTE_OFFSET(SkipBytes.LowPart)) { 04371 return (PMDL)0; 04372 } 04373 04374 MmLockPagableSectionByHandle (ExPageLockHandle); 04375 04376 LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT); 04377 HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT); 04378 04379 // 04380 // Maximum allocation size is constrained by the MDL ByteCount field. 
04381 // 04382 04383 if (TotalBytes > (SIZE_T)((ULONG)(MAXULONG - PAGE_SIZE))) { 04384 TotalBytes = (SIZE_T)((ULONG)(MAXULONG - PAGE_SIZE)); 04385 } 04386 04387 SizeInPages = (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes); 04388 04389 SkipPages = (PFN_NUMBER)(SkipBytes.QuadPart >> PAGE_SHIFT); 04390 04391 BasePage = LowPage; 04392 04393 LOCK_PFN (OldIrql); 04394 04395 MaxPages = MI_NONPAGABLE_MEMORY_AVAILABLE() - 1024; 04396 04397 if ((SPFN_NUMBER)MaxPages <= 0) { 04398 SizeInPages = 0; 04399 } 04400 else if (SizeInPages > MaxPages) { 04401 SizeInPages = MaxPages; 04402 } 04403 04404 if (SizeInPages == 0) { 04405 UNLOCK_PFN (OldIrql); 04406 MmUnlockPagableImageSection (ExPageLockHandle); 04407 return (PMDL)0; 04408 } 04409 04410 UNLOCK_PFN (OldIrql); 04411 04412 #if DBG 04413 if (SizeInPages < (PFN_NUMBER)ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes)) { 04414 if (MiPrintAwe != 0) { 04415 DbgPrint("MmAllocatePagesForMdl1: unable to get %p pages, trying for %p instead\n", 04416 ADDRESS_AND_SIZE_TO_SPAN_PAGES(0, TotalBytes), 04417 SizeInPages); 04418 } 04419 } 04420 #endif 04421 04422 // 04423 // Allocate an MDL to return the pages in. 04424 // 04425 04426 do { 04427 MemoryDescriptorList = MmCreateMdl ((PMDL)0, 04428 (PVOID)0, 04429 SizeInPages << PAGE_SHIFT); 04430 04431 if (MemoryDescriptorList != (PMDL)0) { 04432 break; 04433 } 04434 SizeInPages -= (SizeInPages >> 4); 04435 } while (SizeInPages != 0); 04436 04437 if (MemoryDescriptorList == (PMDL)0) { 04438 MmUnlockPagableImageSection (ExPageLockHandle); 04439 return (PMDL)0; 04440 } 04441 04442 // 04443 // Allocate a list of colored anchors. 04444 // 04445 04446 ColorAnchorsHead = (PPFN_NUMBER) ExAllocatePoolWithTag (NonPagedPool, 04447 MmSecondaryColors * sizeof (PFN_NUMBER), 04448 'ldmM'); 04449 04450 if (ColorAnchorsHead == NULL) { 04451 MmUnlockPagableImageSection (ExPageLockHandle); 04452 ExFreePool (MemoryDescriptorList); 04453 return (PMDL)0; 04454 } 04455 04456 MdlPageSpan = SizeInPages; 04457 04458 // 04459 // Recalculate as the PFN lock was dropped. 04460 // 04461 04462 start = 0; 04463 found = 0; 04464 04465 MdlPage = (PPFN_NUMBER)(MemoryDescriptorList + 1); 04466 04467 ExAcquireFastMutex (&MmDynamicMemoryMutex); 04468 04469 LOCK_PFN (OldIrql); 04470 04471 MaxPages = MI_NONPAGABLE_MEMORY_AVAILABLE() - 1024; 04472 04473 if ((SPFN_NUMBER)MaxPages <= 0) { 04474 SizeInPages = 0; 04475 } 04476 else if (SizeInPages > MaxPages) { 04477 SizeInPages = MaxPages; 04478 } 04479 04480 if (SizeInPages == 0) { 04481 UNLOCK_PFN (OldIrql); 04482 ExReleaseFastMutex (&MmDynamicMemoryMutex); 04483 MmUnlockPagableImageSection (ExPageLockHandle); 04484 ExFreePool (MemoryDescriptorList); 04485 ExFreePool (ColorAnchorsHead); 04486 return (PMDL)0; 04487 } 04488 04489 // 04490 // Ensure there is enough commit prior to allocating the pages as this 04491 // is not a nonpaged pool allocation but rather a dynamic MDL allocation. 
04492 // 04493 04494 if (MiChargeCommitmentCantExpand (SizeInPages, FALSE) == FALSE) { 04495 UNLOCK_PFN (OldIrql); 04496 ExReleaseFastMutex (&MmDynamicMemoryMutex); 04497 MmUnlockPagableImageSection (ExPageLockHandle); 04498 ExFreePool (MemoryDescriptorList); 04499 ExFreePool (ColorAnchorsHead); 04500 return (PMDL)0; 04501 } 04502 04503 MM_TRACK_COMMIT (MM_DBG_COMMIT_MDL_PAGES, SizeInPages); 04504 04505 if ((MiLastCallLowPage != LowPage) || (MiLastCallHighPage != HighPage)) { 04506 MiLastCallColor = 0; 04507 } 04508 04509 MiLastCallLowPage = LowPage; 04510 MiLastCallHighPage = HighPage; 04511 04512 FirstMdlPageToZero = MdlPage; 04513 04514 do { 04515 // 04516 // Grab all zeroed (and then free) pages first directly from the 04517 // colored lists to avoid multiple walks down these singly linked lists. 04518 // Then snatch transition pages as needed. In addition to optimizing 04519 // the speed of the removals this also avoids cannibalizing the page 04520 // cache unless it's absolutely needed. 04521 // 04522 04523 for (MemoryList = ZeroedPageList; MemoryList <= FreePageList; MemoryList += 1) { 04524 04525 ListHead = MmPageLocationList[MemoryList]; 04526 04527 FullAnchorCount = 0; 04528 04529 for (Color = 0; Color < MmSecondaryColors; Color += 1) { 04530 ColorAnchorsHead[Color] = MM_EMPTY_LIST; 04531 } 04532 04533 Color = MiLastCallColor; 04534 ASSERT (Color < MmSecondaryColors); 04535 04536 do { 04537 04538 ColorHead = &MmFreePagesByColor[MemoryList][Color]; 04539 ColorAnchor = &ColorAnchorsHead[Color]; 04540 04541 Color += 1; 04542 if (Color >= MmSecondaryColors) { 04543 Color = 0; 04544 } 04545 04546 if (*ColorAnchor == (MM_EMPTY_LIST - 1)) { 04547 04548 // 04549 // This colored list has already been completely searched. 04550 // 04551 04552 continue; 04553 } 04554 04555 if (ColorHead->Flink == MM_EMPTY_LIST) { 04556 04557 // 04558 // This colored list is empty. 04559 // 04560 04561 FullAnchorCount += 1; 04562 *ColorAnchor = (MM_EMPTY_LIST - 1); 04563 continue; 04564 } 04565 04566 while (ColorHead->Flink != MM_EMPTY_LIST) { 04567 04568 Page = ColorHead->Flink; 04569 04570 Pfn1 = MI_PFN_ELEMENT(Page); 04571 04572 ASSERT ((MMLISTS)Pfn1->u3.e1.PageLocation == MemoryList); 04573 04574 // 04575 // See if the page is within the caller's page constraints. 04576 // 04577 04578 PagePlacementOk = FALSE; 04579 04580 LowPage1 = LowPage; 04581 HighPage1 = HighPage; 04582 04583 do { 04584 if ((Page >= LowPage1) && (Page <= HighPage1)) { 04585 PagePlacementOk = TRUE; 04586 break; 04587 } 04588 04589 if (SkipPages == 0) { 04590 break; 04591 } 04592 04593 LowPage1 += SkipPages; 04594 HighPage1 += SkipPages; 04595 04596 if (LowPage1 > MmHighestPhysicalPage) { 04597 break; 04598 } 04599 if (HighPage1 > MmHighestPhysicalPage) { 04600 HighPage1 = MmHighestPhysicalPage; 04601 } 04602 } while (TRUE); 04603 04604 // 04605 // The Flink and Blink must be nonzero here for the page 04606 // to be on the listhead. Only code that scans the 04607 // MmPhysicalMemoryBlock has to check for the zero case. 04608 // 04609 04610 ASSERT (Pfn1->u1.Flink != 0); 04611 ASSERT (Pfn1->u2.Blink != 0); 04612 04613 if (PagePlacementOk == FALSE) { 04614 04615 // 04616 // Put page on end of list and if first time, save pfn. 04617 // 04618 04619 if (*ColorAnchor == MM_EMPTY_LIST) { 04620 *ColorAnchor = Page; 04621 } 04622 else if (Page == *ColorAnchor) { 04623 04624 // 04625 // No more pages available in this colored chain. 
04626 // 04627 04628 FullAnchorCount += 1; 04629 *ColorAnchor = (MM_EMPTY_LIST - 1); 04630 break; 04631 } 04632 04633 // 04634 // If the colored chain has more than one entry then 04635 // put this page on the end. 04636 // 04637 04638 PageNextColored = (PFN_NUMBER)Pfn1->OriginalPte.u.Long; 04639 04640 if (PageNextColored == MM_EMPTY_LIST) { 04641 04642 // 04643 // No more pages available in this colored chain. 04644 // 04645 04646 FullAnchorCount += 1; 04647 *ColorAnchor = (MM_EMPTY_LIST - 1); 04648 break; 04649 } 04650 04651 ASSERT (Pfn1->u1.Flink != 0); 04652 ASSERT (Pfn1->u1.Flink != MM_EMPTY_LIST); 04653 ASSERT (Pfn1->PteFrame != MI_MAGIC_AWE_PTEFRAME); 04654 04655 PfnNextColored = MI_PFN_ELEMENT(PageNextColored); 04656 ASSERT ((MMLISTS)PfnNextColored->u3.e1.PageLocation == MemoryList); 04657 ASSERT (PfnNextColored->PteFrame != MI_MAGIC_AWE_PTEFRAME); 04658 04659 // 04660 // Adjust the free page list so Page 04661 // follows PageNextFlink. 04662 // 04663 04664 PageNextFlink = Pfn1->u1.Flink; 04665 PfnNextFlink = MI_PFN_ELEMENT(PageNextFlink); 04666 04667 ASSERT ((MMLISTS)PfnNextFlink->u3.e1.PageLocation == MemoryList); 04668 ASSERT (PfnNextFlink->PteFrame != MI_MAGIC_AWE_PTEFRAME); 04669 04670 PfnLastColored = ColorHead->Blink; 04671 ASSERT (PfnLastColored != (PMMPFN)MM_EMPTY_LIST); 04672 ASSERT (PfnLastColored->OriginalPte.u.Long == MM_EMPTY_LIST); 04673 ASSERT (PfnLastColored->PteFrame != MI_MAGIC_AWE_PTEFRAME); 04674 ASSERT (PfnLastColored->u2.Blink != MM_EMPTY_LIST); 04675 04676 ASSERT ((MMLISTS)PfnLastColored->u3.e1.PageLocation == MemoryList); 04677 PageLastColored = PfnLastColored - MmPfnDatabase; 04678 04679 if (ListHead->Flink == Page) { 04680 04681 ASSERT (Pfn1->u2.Blink == MM_EMPTY_LIST); 04682 ASSERT (ListHead->Blink != Page); 04683 04684 ListHead->Flink = PageNextFlink; 04685 04686 PfnNextFlink->u2.Blink = MM_EMPTY_LIST; 04687 } 04688 else { 04689 04690 ASSERT (Pfn1->u2.Blink != MM_EMPTY_LIST); 04691 ASSERT ((MMLISTS)(MI_PFN_ELEMENT((MI_PFN_ELEMENT(Pfn1->u2.Blink)->u1.Flink)))->PteFrame != MI_MAGIC_AWE_PTEFRAME); 04692 ASSERT ((MMLISTS)(MI_PFN_ELEMENT((MI_PFN_ELEMENT(Pfn1->u2.Blink)->u1.Flink)))->u3.e1.PageLocation == MemoryList); 04693 04694 MI_PFN_ELEMENT(Pfn1->u2.Blink)->u1.Flink = PageNextFlink; 04695 PfnNextFlink->u2.Blink = Pfn1->u2.Blink; 04696 } 04697 04698 #if DBG 04699 if (PfnLastColored->u1.Flink == MM_EMPTY_LIST) { 04700 ASSERT (ListHead->Blink == PageLastColored); 04701 } 04702 #endif 04703 04704 Pfn1->u1.Flink = PfnLastColored->u1.Flink; 04705 Pfn1->u2.Blink = PageLastColored; 04706 04707 if (ListHead->Blink == PageLastColored) { 04708 ListHead->Blink = Page; 04709 } 04710 04711 // 04712 // Adjust the colored chains. 
04713 // 04714 04715 if (PfnLastColored->u1.Flink != MM_EMPTY_LIST) { 04716 ASSERT (MI_PFN_ELEMENT(PfnLastColored->u1.Flink)->PteFrame != MI_MAGIC_AWE_PTEFRAME); 04717 ASSERT ((MMLISTS)(MI_PFN_ELEMENT(PfnLastColored->u1.Flink)->u3.e1.PageLocation) == MemoryList); 04718 MI_PFN_ELEMENT(PfnLastColored->u1.Flink)->u2.Blink = Page; 04719 } 04720 04721 PfnLastColored->u1.Flink = Page; 04722 04723 ColorHead->Flink = PageNextColored; 04724 Pfn1->OriginalPte.u.Long = MM_EMPTY_LIST; 04725 04726 ASSERT (PfnLastColored->OriginalPte.u.Long == MM_EMPTY_LIST); 04727 PfnLastColored->OriginalPte.u.Long = Page; 04728 ColorHead->Blink = Pfn1; 04729 04730 continue; 04731 } 04732 04733 found += 1; 04734 ASSERT (Pfn1->u3.e1.ReadInProgress == 0); 04735 MiUnlinkFreeOrZeroedPage (Page); 04736 Pfn1->u3.e1.PageColor = 0; 04737 04738 Pfn1->u3.e2.ReferenceCount = 1; 04739 Pfn1->u2.ShareCount = 1; 04740 MI_SET_PFN_DELETED(Pfn1); 04741 Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE; 04742 #if DBG 04743 Pfn1->PteFrame = MI_MAGIC_AWE_PTEFRAME; 04744 #endif 04745 Pfn1->u3.e1.PageLocation = ActiveAndValid; 04746 04747 Pfn1->u3.e1.StartOfAllocation = 1; 04748 Pfn1->u3.e1.EndOfAllocation = 1; 04749 Pfn1->u3.e1.VerifierAllocation = 0; 04750 Pfn1->u3.e1.LargeSessionAllocation = 0; 04751 04752 *MdlPage = Page; 04753 MdlPage += 1; 04754 04755 if (found == SizeInPages) { 04756 04757 // 04758 // All the pages requested are available. 04759 // 04760 04761 if (MemoryList == ZeroedPageList) { 04762 FirstMdlPageToZero = MdlPage; 04763 MiLastCallColor = Color; 04764 } 04765 04766 #if DBG 04767 FinishedCount = 0; 04768 for (Color = 0; Color < MmSecondaryColors; Color += 1) { 04769 if (ColorAnchorsHead[Color] == (MM_EMPTY_LIST - 1)) { 04770 FinishedCount += 1; 04771 } 04772 } 04773 ASSERT (FinishedCount == FullAnchorCount); 04774 #endif 04775 04776 goto pass2_done; 04777 } 04778 04779 // 04780 // March on to the next colored chain so the overall 04781 // allocation round-robins the page colors. 04782 // 04783 04784 break; 04785 } 04786 04787 } while (FullAnchorCount != MmSecondaryColors); 04788 04789 #if DBG 04790 FinishedCount = 0; 04791 for (Color = 0; Color < MmSecondaryColors; Color += 1) { 04792 if (ColorAnchorsHead[Color] == (MM_EMPTY_LIST - 1)) { 04793 FinishedCount += 1; 04794 } 04795 } 04796 ASSERT (FinishedCount == FullAnchorCount); 04797 #endif 04798 04799 if (MemoryList == ZeroedPageList) { 04800 FirstMdlPageToZero = MdlPage; 04801 } 04802 04803 MiLastCallColor = 0; 04804 } 04805 04806 start = 0; 04807 04808 do { 04809 04810 count = MmPhysicalMemoryBlock->Run[start].PageCount; 04811 Page = MmPhysicalMemoryBlock->Run[start].BasePage; 04812 04813 if (count != 0) { 04814 04815 // 04816 // Close the gaps, then examine the range for a fit. 04817 // 04818 04819 LastPage = Page + count; 04820 04821 if (LastPage - 1 > HighPage) { 04822 LastPage = HighPage + 1; 04823 } 04824 04825 if (Page < LowPage) { 04826 Page = LowPage; 04827 } 04828 04829 if ((Page < LastPage) && 04830 (Page >= MmPhysicalMemoryBlock->Run[start].BasePage) && 04831 (LastPage <= MmPhysicalMemoryBlock->Run[start].BasePage + 04832 MmPhysicalMemoryBlock->Run[start].PageCount)) { 04833 04834 Pfn1 = MI_PFN_ELEMENT (Page); 04835 do { 04836 04837 if (Pfn1->u3.e1.PageLocation == StandbyPageList) { 04838 04839 if ((Pfn1->u1.Flink != 0) && 04840 (Pfn1->u2.Blink != 0) && 04841 (Pfn1->u3.e2.ReferenceCount == 0)) { 04842 04843 ASSERT (Pfn1->u3.e1.ReadInProgress == 0); 04844 04845 found += 1; 04846 04847 // 04848 // This page is in the desired range - grab it. 
04849 // 04850 04851 MiUnlinkPageFromList (Pfn1); 04852 MiRestoreTransitionPte (Page); 04853 04854 Pfn1->u3.e1.PageColor = 0; 04855 04856 Pfn1->u3.e2.ReferenceCount = 1; 04857 Pfn1->u2.ShareCount = 1; 04858 MI_SET_PFN_DELETED(Pfn1); 04859 Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE; 04860 #if DBG 04861 Pfn1->PteFrame = MI_MAGIC_AWE_PTEFRAME; 04862 #endif 04863 Pfn1->u3.e1.PageLocation = ActiveAndValid; 04864 04865 Pfn1->u3.e1.StartOfAllocation = 1; 04866 Pfn1->u3.e1.EndOfAllocation = 1; 04867 Pfn1->u3.e1.VerifierAllocation = 0; 04868 Pfn1->u3.e1.LargeSessionAllocation = 0; 04869 04870 *MdlPage = Page; 04871 MdlPage += 1; 04872 04873 if (found == SizeInPages) { 04874 04875 // 04876 // All the pages requested are available. 04877 // 04878 04879 goto pass2_done; 04880 } 04881 } 04882 } 04883 Page += 1; 04884 Pfn1 += 1; 04885 04886 } while (Page < LastPage); 04887 } 04888 } 04889 start += 1; 04890 } while (start != MmPhysicalMemoryBlock->NumberOfRuns); 04891 04892 if (SkipPages == 0) { 04893 break; 04894 } 04895 LowPage += SkipPages; 04896 HighPage += SkipPages; 04897 if (LowPage > MmHighestPhysicalPage) { 04898 break; 04899 } 04900 if (HighPage > MmHighestPhysicalPage) { 04901 HighPage = MmHighestPhysicalPage; 04902 } 04903 } while (1); 04904 04905 pass2_done: 04906 04907 MmMdlPagesAllocated += found; 04908 04909 MmResidentAvailablePages -= found; 04910 MM_BUMP_COUNTER(34, found); 04911 04912 UNLOCK_PFN (OldIrql); 04913 04914 ExReleaseFastMutex (&MmDynamicMemoryMutex); 04915 MmUnlockPagableImageSection (ExPageLockHandle); 04916 04917 ExFreePool (ColorAnchorsHead); 04918 04919 if (found != SizeInPages) { 04920 ASSERT (found < SizeInPages); 04921 MiReturnCommitment (SizeInPages - found); 04922 MM_TRACK_COMMIT (MM_DBG_COMMIT_MDL_PAGES, 0 - (SizeInPages - found)); 04923 } 04924 04925 if (found == 0) { 04926 ExFreePool (MemoryDescriptorList); 04927 return (PMDL)0; 04928 } 04929 04930 MemoryDescriptorList->ByteCount = (ULONG)(found << PAGE_SHIFT); 04931 04932 if (found != SizeInPages) { 04933 *MdlPage = MM_EMPTY_LIST; 04934 } 04935 04936 // 04937 // If the number of pages allocated was substantially less than the 04938 // initial request amount, attempt to allocate a smaller MDL to save 04939 // pool. 04940 // 04941 04942 if ((MdlPageSpan - found) > ((4 * PAGE_SIZE) / sizeof (PFN_NUMBER))) { 04943 MemoryDescriptorList2 = MmCreateMdl ((PMDL)0, 04944 (PVOID)0, 04945 found << PAGE_SHIFT); 04946 04947 if (MemoryDescriptorList2 != (PMDL)0) { 04948 RtlMoveMemory ((PVOID)(MemoryDescriptorList2 + 1), 04949 (PVOID)(MemoryDescriptorList + 1), 04950 found * sizeof (PFN_NUMBER)); 04951 FirstMdlPageToZero = (PPFN_NUMBER)(MemoryDescriptorList2 + 1) + 04952 (FirstMdlPageToZero - 04953 (PPFN_NUMBER)(MemoryDescriptorList + 1)); 04954 ExFreePool (MemoryDescriptorList); 04955 MemoryDescriptorList = MemoryDescriptorList2; 04956 } 04957 } 04958 04959 MdlPage = (PPFN_NUMBER)(MemoryDescriptorList + 1); 04960 LastMdlPage = MdlPage + found; 04961 04962 #if DBG 04963 // 04964 // Ensure all pages are within the caller's page constraints. 
04965 // 04966 04967 LowPage = (PFN_NUMBER)(LowAddress.QuadPart >> PAGE_SHIFT); 04968 HighPage = (PFN_NUMBER)(HighAddress.QuadPart >> PAGE_SHIFT); 04969 04970 while (MdlPage < FirstMdlPageToZero) { 04971 Page = *MdlPage; 04972 PagePlacementOk = FALSE; 04973 LowPage1 = LowPage; 04974 HighPage1 = HighPage; 04975 04976 do { 04977 if ((Page >= LowPage1) && (Page <= HighPage1)) { 04978 PagePlacementOk = TRUE; 04979 break; 04980 } 04981 04982 if (SkipPages == 0) { 04983 break; 04984 } 04985 04986 LowPage1 += SkipPages; 04987 HighPage1 += SkipPages; 04988 04989 if (LowPage1 > MmHighestPhysicalPage) { 04990 break; 04991 } 04992 if (HighPage1 > MmHighestPhysicalPage) { 04993 HighPage1 = MmHighestPhysicalPage; 04994 } 04995 } while (TRUE); 04996 04997 ASSERT (PagePlacementOk == TRUE); 04998 Pfn1 = MI_PFN_ELEMENT(*MdlPage); 04999 ASSERT (Pfn1->PteFrame == MI_MAGIC_AWE_PTEFRAME); 05000 MdlPage += 1; 05001 } 05002 #endif 05003 05004 while (FirstMdlPageToZero < LastMdlPage) { 05005 05006 #if DBG 05007 // 05008 // Ensure all pages are within the caller's page constraints. 05009 // 05010 05011 Page = *FirstMdlPageToZero; 05012 05013 PagePlacementOk = FALSE; 05014 LowPage1 = LowPage; 05015 HighPage1 = HighPage; 05016 05017 do { 05018 if ((Page >= LowPage1) && (Page <= HighPage1)) { 05019 PagePlacementOk = TRUE; 05020 break; 05021 } 05022 05023 if (SkipPages == 0) { 05024 break; 05025 } 05026 05027 LowPage1 += SkipPages; 05028 HighPage1 += SkipPages; 05029 05030 if (LowPage1 > MmHighestPhysicalPage) { 05031 break; 05032 } 05033 if (HighPage1 > MmHighestPhysicalPage) { 05034 HighPage1 = MmHighestPhysicalPage; 05035 } 05036 } while (TRUE); 05037 05038 ASSERT (PagePlacementOk == TRUE); 05039 Pfn1 = MI_PFN_ELEMENT(*FirstMdlPageToZero); 05040 ASSERT (Pfn1->PteFrame == MI_MAGIC_AWE_PTEFRAME); 05041 #endif 05042 MiZeroPhysicalPage (*FirstMdlPageToZero, 0); 05043 FirstMdlPageToZero += 1; 05044 } 05045 05046 return MemoryDescriptorList; 05047 }
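Because the routine may hand back fewer pages than requested, callers must size their work from the returned MDL. The sketch below assumes the standard companion calls MmMapLockedPagesSpecifyCache, MmUnmapLockedPages, MmFreePagesFromMdl and ExFreePool, none of which appear in the listing above; the 1 MB request and 4 GB ceiling are illustrative.

    #include <ntddk.h>

    VOID
    AllocateAndUseAgpPages (VOID)
    {
        PHYSICAL_ADDRESS Low;
        PHYSICAL_ADDRESS High;
        PHYSICAL_ADDRESS Skip;
        PMDL Mdl;
        PVOID SystemVa;
        SIZE_T BytesObtained;

        Low.QuadPart  = 0;
        High.QuadPart = 0xFFFFFFFF;      /* any page in the low 4 GB */
        Skip.QuadPart = 0;               /* a single range, no striding */

        Mdl = MmAllocatePagesForMdl (Low, High, Skip, 1024 * 1024);
        if (Mdl == NULL) {
            return;
        }

        /* The MDL may describe less than the 1 MB requested. */
        BytesObtained = MmGetMdlByteCount (Mdl);

        SystemVa = MmMapLockedPagesSpecifyCache (Mdl,
                                                 KernelMode,
                                                 MmCached,
                                                 NULL,
                                                 FALSE,
                                                 NormalPagePriority);
        if (SystemVa != NULL) {
            /* The pages arrive zero-filled; BytesObtained bounds any access. */
            RtlFillMemory (SystemVa, BytesObtained, 0xAA);   /* illustrative access */
            MmUnmapLockedPages (SystemVa, Mdl);
        }

        MmFreePagesFromMdl (Mdl);
        ExFreePool (Mdl);                /* the MDL structure itself must still be freed */
    }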

PVOID MmAllocateSpecialPool (IN SIZE_T NumberOfBytes, IN ULONG Tag, IN POOL_TYPE PoolType, IN ULONG SpecialPoolType)

Definition at line 3408 of file allocpag.c.

References MiAllocateSpecialPool(), MiSpecialPoolPtes, and NULL.

Referenced by ExAllocatePoolWithTag(), ExAllocatePoolWithTagPriority(), and MmSqueezeBadTags().

03417 : 03418 03419 This routine allocates virtual memory from special pool. This allocation 03420 is made from the end of a physical page with the next PTE set to no access 03421 so that any reads or writes will cause an immediate fatal system crash. 03422 03423 This lets us catch components that corrupt pool. 03424 03425 Arguments: 03426 03427 NumberOfBytes - Supplies the number of bytes to commit. 03428 03429 Tag - Supplies the tag of the requested allocation. 03430 03431 PoolType - Supplies the pool type of the requested allocation. 03432 03433 SpecialPoolType - Supplies the special pool type of the 03434 requested allocation. 03435 03436 - 0 indicates overruns. 03437 - 1 indicates underruns. 03438 - 2 indicates use the systemwide pool policy. 03439 03440 Return Value: 03441 03442 A non-NULL pointer if the requested allocation was fulfilled from special 03443 pool. NULL if the allocation was not made. 03444 03445 Environment: 03446 03447 Kernel mode, no pool locks held. 03448 03449 Note this is a nonpagable wrapper so that machines without special pool 03450 can still support drivers allocating nonpaged pool at DISPATCH_LEVEL 03451 requesting special pool. 03452 03453 --*/ 03454 03455 { 03456 if (MiSpecialPoolPtes == 0) { 03457 03458 // 03459 // The special pool allocation code was never initialized. 03460 // 03461 03462 return NULL; 03463 } 03464 03465 return MiAllocateSpecialPool (NumberOfBytes, 03466 Tag, 03467 PoolType, 03468 SpecialPoolType); 03469 }

VOID MmAllowWorkingSetExpansion (VOID)
 

Definition at line 4948 of file procsup.c.

References _MMSUPPORT::AllowWorkingSetAdjustment, _MMWORKING_SET_EXPANSION_HEAD::ListHead, LOCK_EXPANSION, MmWorkingSetExpansionHead, PsGetCurrentProcess, TRUE, UNLOCK_EXPANSION, _EPROCESS::Vm, and _MMSUPPORT::WorkingSetExpansionLinks.

Referenced by MmAdjustWorkingSetSize(), PspSystemThreadStartup(), and PspUserThreadStartup().

04954 : 04955 04956 This routine updates the working set list head FLINK field to 04957 indicate that working set adjustment is allowed. 04958 04959 NOTE: This routine may be called more than once per process. 04960 04961 Arguments: 04962 04963 None. 04964 04965 Return Value: 04966 04967 None. 04968 04969 Environment: 04970 04971 Kernel mode. 04972 04973 --*/ 04974 04975 { 04976 04977 PEPROCESS CurrentProcess; 04978 KIRQL OldIrql; 04979 04980 // 04981 // Check the current state of the working set adjustment flag 04982 // in the process header. 04983 // 04984 04985 CurrentProcess = PsGetCurrentProcess(); 04986 04987 LOCK_EXPANSION (OldIrql); 04988 04989 if (!CurrentProcess->Vm.AllowWorkingSetAdjustment) { 04990 CurrentProcess->Vm.AllowWorkingSetAdjustment = TRUE; 04991 04992 InsertTailList (&MmWorkingSetExpansionHead.ListHead, 04993 &CurrentProcess->Vm.WorkingSetExpansionLinks); 04994 } 04995 04996 UNLOCK_EXPANSION (OldIrql); 04997 return; 04998 }

LOGICAL MmAssignProcessToJob (IN PEPROCESS Process)
 

Definition at line 2386 of file wslist.c.

References FALSE, KeAttachProcess(), KeDetachProcess(), LOCK_WS_AND_ADDRESS_SPACE, PAGED_CODE, PS_JOB_STATUS_REPORT_COMMIT_CHANGES, PsChangeJobMemoryUsage(), PsGetCurrentProcess, Status, TRUE, and UNLOCK_WS_AND_ADDRESS_SPACE.

Referenced by PspAddProcessToJob().

02392 : 02393 02394 This routine acquires the working set lock so a consistent snapshot of 02395 the argument process' commit charges and working set size can be used 02396 when adding this process to a job. 02397 02398 Arguments: 02399 02400 Process - Supplies a pointer to the process to operate upon. 02401 02402 Return Value: 02403 02404 TRUE if the process is allowed to join the job, FALSE otherwise. 02405 02406 Note that FALSE cannot be returned without changing the code in ps. 02407 02408 Environment: 02409 02410 Kernel mode, IRQL APC_LEVEL or below. The caller provides protection 02411 from the target process going away. 02412 02413 --*/ 02414 02415 { 02416 LOGICAL Attached; 02417 LOGICAL Status; 02418 02419 PAGED_CODE (); 02420 02421 Attached = FALSE; 02422 02423 if (PsGetCurrentProcess() != Process) { 02424 KeAttachProcess (&Process->Pcb); 02425 Attached = TRUE; 02426 } 02427 02428 LOCK_WS_AND_ADDRESS_SPACE (Process); 02429 02430 Status = PsChangeJobMemoryUsage (Process->CommitCharge); 02431 02432 // 02433 // Join the job unconditionally. If the process is over any limits, it 02434 // will be caught on its next request. 02435 // 02436 02437 Process->JobStatus |= PS_JOB_STATUS_REPORT_COMMIT_CHANGES; 02438 02439 UNLOCK_WS_AND_ADDRESS_SPACE (Process); 02440 02441 if (Attached) { 02442 KeDetachProcess(); 02443 } 02444 02445 return TRUE; 02446 }

NTKERNELAPI VOID MmBuildMdlForNonPagedPool (IN OUT PMDL MemoryDescriptorList)
 

Definition at line 1500 of file iosup.c.

References ASSERT, MDL_MAPPED_TO_SYSTEM_VA, MDL_PAGES_LOCKED, MDL_PARTIAL, MDL_SOURCE_IS_NONPAGED_POOL, MI_CONVERT_PHYSICAL_TO_PFN, MI_GET_PAGE_FRAME_FROM_PTE, MI_IS_PHYSICAL_ADDRESS, MiGetPteAddress, MmIsNonPagedSystemAddressValid(), and NULL.

Referenced by CcZeroData(), MiCreateImageFileMap(), and UdfPrepareBuffers().

01506 : 01507 01508 This routine fills in the "pages" portion of the MDL using the PFN 01509 numbers corresponding the buffers which resides in non-paged pool. 01510 01511 Unlike MmProbeAndLockPages, there is no corresponding unlock as no 01512 reference counts are incremented as the buffers being in nonpaged 01513 pool are always resident. 01514 01515 Arguments: 01516 01517 MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List 01518 (MDL). The supplied MDL must supply a virtual 01519 address, byte offset and length field. The 01520 physical page portion of the MDL is updated when 01521 the pages are locked in memory. The virtual 01522 address must be within the non-paged portion 01523 of the system space. 01524 01525 Return Value: 01526 01527 None. 01528 01529 Environment: 01530 01531 Kernel mode, IRQL of DISPATCH_LEVEL or below. 01532 01533 --*/ 01534 01535 { 01536 PPFN_NUMBER Page; 01537 PMMPTE PointerPte; 01538 PMMPTE LastPte; 01539 PVOID EndVa; 01540 PFN_NUMBER PageFrameIndex; 01541 01542 Page = (PPFN_NUMBER)(MemoryDescriptorList + 1); 01543 01544 ASSERT (MemoryDescriptorList->ByteCount != 0); 01545 ASSERT ((MemoryDescriptorList->MdlFlags & ( 01546 MDL_PAGES_LOCKED | 01547 MDL_MAPPED_TO_SYSTEM_VA | 01548 MDL_SOURCE_IS_NONPAGED_POOL | 01549 MDL_PARTIAL)) == 0); 01550 01551 MemoryDescriptorList->Process = (PEPROCESS)NULL; 01552 01553 // 01554 // Endva is last byte of the buffer. 01555 // 01556 01557 MemoryDescriptorList->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL; 01558 01559 MemoryDescriptorList->MappedSystemVa = 01560 (PVOID)((PCHAR)MemoryDescriptorList->StartVa + 01561 MemoryDescriptorList->ByteOffset); 01562 01563 EndVa = (PVOID)(((PCHAR)MemoryDescriptorList->MappedSystemVa + 01564 MemoryDescriptorList->ByteCount - 1)); 01565 01566 LastPte = MiGetPteAddress (EndVa); 01567 01568 ASSERT (MmIsNonPagedSystemAddressValid (MemoryDescriptorList->StartVa)); 01569 01570 PointerPte = MiGetPteAddress (MemoryDescriptorList->StartVa); 01571 01572 if (MI_IS_PHYSICAL_ADDRESS(EndVa)) { 01573 PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN ( 01574 MemoryDescriptorList->StartVa); 01575 01576 do { 01577 *Page = PageFrameIndex; 01578 Page += 1; 01579 PageFrameIndex += 1; 01580 PointerPte += 1; 01581 } while (PointerPte <= LastPte); 01582 } else { 01583 do { 01584 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 01585 *Page = PageFrameIndex; 01586 Page += 1; 01587 PointerPte += 1; 01588 } while (PointerPte <= LastPte); 01589 } 01590 01591 return; 01592 }
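A typical pairing for this routine, sketched with the standard IoAllocateMdl/IoFreeMdl and pool calls (assumed from the general DDK, not from this excerpt); the buffer size and pool tag are illustrative.

    #include <ntddk.h>

    #define SCRATCH_SIZE 8192

    PMDL
    BuildMdlForPoolBuffer (PVOID *PoolBuffer)
    {
        PVOID Buffer;
        PMDL Mdl;

        Buffer = ExAllocatePoolWithTag (NonPagedPool, SCRATCH_SIZE, 'hctS');
        if (Buffer == NULL) {
            return NULL;
        }

        Mdl = IoAllocateMdl (Buffer, SCRATCH_SIZE, FALSE, FALSE, NULL);
        if (Mdl == NULL) {
            ExFreePool (Buffer);
            return NULL;
        }

        /* No MmProbeAndLockPages/MmUnlockPages pair is needed: nonpaged pool
           is always resident, so this call only fills in the PFN array. */
        MmBuildMdlForNonPagedPool (Mdl);

        *PoolBuffer = Buffer;
        return Mdl;
    }

    VOID
    ReleaseMdlForPoolBuffer (PMDL Mdl, PVOID PoolBuffer)
    {
        IoFreeMdl (Mdl);
        ExFreePool (PoolBuffer);
    }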

NTSTATUS MmCallDllInitialize (IN PLDR_DATA_TABLE_ENTRY DataTableEntry)
 

Definition at line 6717 of file sysload.c.

References CmRegistryMachineSystemCurrentControlSetServices, ExAllocatePoolWithTag, ExFreePool(), L, MiLocateExportName(), NonPagedPool, NTSTATUS(), NULL, PMM_DLL_INITIALIZE, RtlAppendUnicodeStringToString(), RtlAppendUnicodeToString(), and USHORT.

Referenced by IopInitializeBootDrivers().

06723 : 06724 06725 This function calls the DLL's initialize routine. 06726 06727 Arguments: 06728 06729 DataTableEntry - Supplies the kernel's data table entry. 06730 06731 Return Value: 06732 06733 Various NTSTATUS error codes. 06734 06735 Environment: 06736 06737 Kernel mode. 06738 06739 --*/ 06740 06741 { 06742 NTSTATUS st; 06743 PWCHAR Dot; 06744 PMM_DLL_INITIALIZE Func; 06745 UNICODE_STRING RegistryPath; 06746 UNICODE_STRING ImportName; 06747 06748 Func = MiLocateExportName (DataTableEntry->DllBase, "DllInitialize"); 06749 06750 if (!Func) { 06751 return STATUS_SUCCESS; 06752 } 06753 06754 ImportName.MaximumLength = DataTableEntry->BaseDllName.Length; 06755 ImportName.Buffer = ExAllocatePoolWithTag (NonPagedPool, 06756 ImportName.MaximumLength, 06757 'TDmM'); 06758 06759 if (ImportName.Buffer == NULL) { 06760 return STATUS_INSUFFICIENT_RESOURCES; 06761 } 06762 06763 ImportName.Length = DataTableEntry->BaseDllName.Length; 06764 RtlMoveMemory (ImportName.Buffer, 06765 DataTableEntry->BaseDllName.Buffer, 06766 ImportName.Length); 06767 06768 RegistryPath.MaximumLength = CmRegistryMachineSystemCurrentControlSetServices.Length + 06769 ImportName.Length + 06770 (USHORT)(2*sizeof(WCHAR)); 06771 06772 RegistryPath.Buffer = ExAllocatePoolWithTag (NonPagedPool, 06773 RegistryPath.MaximumLength, 06774 'TDmM'); 06775 06776 if (RegistryPath.Buffer == NULL) { 06777 ExFreePool (ImportName.Buffer); 06778 return STATUS_INSUFFICIENT_RESOURCES; 06779 } 06780 06781 RegistryPath.Length = CmRegistryMachineSystemCurrentControlSetServices.Length; 06782 RtlMoveMemory (RegistryPath.Buffer, 06783 CmRegistryMachineSystemCurrentControlSetServices.Buffer, 06784 CmRegistryMachineSystemCurrentControlSetServices.Length); 06785 06786 RtlAppendUnicodeToString (&RegistryPath, L"\\"); 06787 Dot = wcschr (ImportName.Buffer, L'.'); 06788 if (Dot) { 06789 ImportName.Length = (USHORT)((Dot - ImportName.Buffer) * sizeof(WCHAR)); 06790 } 06791 06792 RtlAppendUnicodeStringToString (&RegistryPath, &ImportName); 06793 ExFreePool (ImportName.Buffer); 06794 06795 // 06796 // Invoke the DLL's initialization routine. 06797 // 06798 06799 st = Func (&RegistryPath); 06800 06801 ExFreePool (RegistryPath.Buffer); 06802 06803 return st; 06804 }

BOOLEAN MmCanFileBeTruncated (IN PSECTION_OBJECT_POINTERS SectionPointer, IN PLARGE_INTEGER NewFileSize)

Definition at line 2552 of file sectsup.c.

References FALSE, MiCanFileBeTruncatedInternal(), TRUE, and UNLOCK_PFN.

Referenced by CcPurgeCacheSection().

02559 : 02560 02561 This routine does the following: 02562 02563 1. Checks to see if a image section is in use for the file, 02564 if so it returns FALSE. 02565 02566 2. Checks to see if a user section exists for the file, if 02567 it does, it checks to make sure the new file size is greater 02568 than the size of the file, if not it returns FALSE. 02569 02570 3. If no image section exists, and no user created data section 02571 exists or the file's size is greater, then TRUE is returned. 02572 02573 Arguments: 02574 02575 SectionPointer - Supplies a pointer to the section object pointers 02576 from the file object. 02577 02578 NewFileSize - Supplies a pointer to the size the file is getting set to. 02579 02580 Return Value: 02581 02582 TRUE if the file can be truncated, FALSE if it cannot be. 02583 02584 Environment: 02585 02586 Kernel mode. 02587 02588 --*/ 02589 02590 { 02591 LARGE_INTEGER LocalOffset; 02592 KIRQL OldIrql; 02593 02594 // 02595 // Capture caller's file size, since we may modify it. 02596 // 02597 02598 if (ARGUMENT_PRESENT(NewFileSize)) { 02599 02600 LocalOffset = *NewFileSize; 02601 NewFileSize = &LocalOffset; 02602 } 02603 02604 if (MiCanFileBeTruncatedInternal( SectionPointer, NewFileSize, FALSE, &OldIrql )) { 02605 02606 UNLOCK_PFN (OldIrql); 02607 return TRUE; 02608 } 02609 02610 return FALSE; 02611 }
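A sketch of the file-system side of this check before shrinking end-of-file; the routine name and the STATUS_USER_MAPPED_FILE failure code are illustrative choices, and only the MmCanFileBeTruncated call itself comes from the listing above.

    #include <ntddk.h>

    NTSTATUS
    MyFsCheckSetEndOfFile (PFILE_OBJECT FileObject, PLARGE_INTEGER NewFileSize)
    {
        /* Ask Mm whether an image section or a larger user data section
           forbids shrinking the file to NewFileSize. */
        if (!MmCanFileBeTruncated (FileObject->SectionObjectPointer, NewFileSize)) {
            return STATUS_USER_MAPPED_FILE;
        }

        /* ... perform the actual truncation (file-system specific) ... */
        return STATUS_SUCCESS;
    }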

BOOLEAN MmCheckCachedPageState (IN PVOID Address, IN BOOLEAN SetToZero)

Definition at line 1016 of file mapcache.c.

References ActiveAndValid, ASSERT, FALSE, KernelMode, LOCK_PFN, LOCK_SYSTEM_WS, MI_BARRIER_SYNCHRONIZE, MI_DETERMINE_OWNER, MI_GET_PAGE_COLOR_FROM_PTE, MI_GET_PAGE_FRAME_FROM_PTE, MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE, MI_MAKE_VALID_PTE, MI_PFN_ELEMENT, MI_SET_GLOBAL_STATE, MI_SET_PTE_IN_WORKING_SET, MI_WRITE_VALID_PTE, MiGetPteAddress, MiGetSubsectionAddress, MiGetVirtualAddressMappedByPte, MiInitializePfn(), MiLocateAndReserveWsle(), MiMakeSystemAddressValidPfn(), MiPteToProto, MiRemoveZeroPage(), MiUnlinkPageFromList(), MiUpdateWsle(), MM_PTE_OWNER_MASK, MmAccessFault(), MmAvailablePages, MmSystemCacheWorkingSetList, MmSystemCacheWs, MmSystemCacheWsle, NULL, _MMPFN::OriginalPte, PsGetCurrentThread, _MMPFN::PteFrame, TRUE, _MMPTE::u, _MMPFN::u1, _MMWSLE::u1, _MMPFN::u2, _MMPFN::u3, UNLOCK_PFN, UNLOCK_SYSTEM_WS, and WSLE_NUMBER.

Referenced by CcCopyRead(), CcFastCopyRead(), CcMapAndRead(), and CcPerformReadAhead().

01023 : 01024 01025 This routine checks the state of the specified page that is mapped in 01026 the system cache. If the specified virtual address can be made valid 01027 (i.e., the page is already in memory), it is made valid and the value 01028 TRUE is returned. 01029 01030 If the page is not in memory, and SetToZero is FALSE, the 01031 value FALSE is returned. However, if SetToZero is TRUE, a page of 01032 zeroes is materialized for the specified virtual address and the address 01033 is made valid and the value TRUE is returned. 01034 01035 This routine is for usage by the cache manager. 01036 01037 Arguments: 01038 01039 Address - Supplies the address of a page mapped in the system cache. 01040 01041 SetToZero - Supplies TRUE if a page of zeroes should be created in the 01042 case where no page is already mapped. 01043 01044 Return Value: 01045 01046 FALSE if there if touching this page would cause a page fault resulting 01047 in a page read. 01048 01049 TRUE if there is a physical page in memory for this address. 01050 01051 Environment: 01052 01053 Kernel mode. 01054 01055 --*/ 01056 01057 { 01058 PMMPTE PointerPte; 01059 PMMPTE PointerPde; 01060 PMMPTE ProtoPte; 01061 PFN_NUMBER PageFrameIndex; 01062 WSLE_NUMBER WorkingSetIndex; 01063 MMPTE TempPte; 01064 MMPTE ProtoPteContents; 01065 PMMPFN Pfn1; 01066 PMMPFN Pfn2; 01067 KIRQL OldIrql; 01068 LOGICAL BarrierNeeded; 01069 ULONG BarrierStamp; 01070 01071 BarrierNeeded = FALSE; 01072 01073 PointerPte = MiGetPteAddress (Address); 01074 01075 // 01076 // Make the PTE valid if possible. 01077 // 01078 01079 if (PointerPte->u.Hard.Valid == 1) { 01080 return TRUE; 01081 } 01082 01083 LOCK_PFN (OldIrql); 01084 01085 if (PointerPte->u.Hard.Valid == 1) { 01086 goto UnlockAndReturnTrue; 01087 } 01088 01089 ASSERT (PointerPte->u.Soft.Prototype == 1); 01090 01091 ProtoPte = MiPteToProto (PointerPte); 01092 01093 // 01094 // Pte is not valid, check the state of the prototype PTE. 01095 // 01096 01097 if (MiMakeSystemAddressValidPfn (ProtoPte)) { 01098 01099 // 01100 // If page fault occurred, recheck state of original PTE. 01101 // 01102 01103 if (PointerPte->u.Hard.Valid == 1) { 01104 goto UnlockAndReturnTrue; 01105 } 01106 } 01107 01108 ProtoPteContents = *ProtoPte; 01109 01110 if (ProtoPteContents.u.Hard.Valid == 1) { 01111 01112 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&ProtoPteContents); 01113 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 01114 01115 // 01116 // The prototype PTE is valid, make the cache PTE 01117 // valid and add it to the working set. 01118 // 01119 01120 TempPte = ProtoPteContents; 01121 01122 } else if ((ProtoPteContents.u.Soft.Transition == 1) && 01123 (ProtoPteContents.u.Soft.Prototype == 0)) { 01124 01125 // 01126 // Prototype PTE is in the transition state. Remove the page 01127 // from the page list and make it valid. 01128 // 01129 01130 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (&ProtoPteContents); 01131 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 01132 if ((Pfn1->u3.e1.ReadInProgress) || 01133 (Pfn1->u3.e1.InPageError)) { 01134 01135 // 01136 // Collided page fault, return. 
01137 // 01138 01139 goto UnlockAndReturnTrue; 01140 } 01141 01142 MiUnlinkPageFromList (Pfn1); 01143 01144 Pfn1->u3.e2.ReferenceCount += 1; 01145 Pfn1->u3.e1.PageLocation = ActiveAndValid; 01146 01147 MI_MAKE_VALID_PTE (TempPte, 01148 PageFrameIndex, 01149 Pfn1->OriginalPte.u.Soft.Protection, 01150 NULL ); 01151 01152 MI_WRITE_VALID_PTE (ProtoPte, TempPte); 01153 01154 // 01155 // Increment the valid PTE count for the page containing 01156 // the prototype PTE. 01157 // 01158 01159 Pfn2 = MI_PFN_ELEMENT (Pfn1->PteFrame); 01160 01161 } else { 01162 01163 // 01164 // Page is not in memory, if a page of zeroes is requested, 01165 // get a page of zeroes and make it valid. 01166 // 01167 01168 if ((SetToZero == FALSE) || (MmAvailablePages < 8)) { 01169 UNLOCK_PFN (OldIrql); 01170 01171 // 01172 // Fault the page into memory. 01173 // 01174 01175 MmAccessFault (FALSE, Address, KernelMode, (PVOID)0); 01176 return FALSE; 01177 } 01178 01179 // 01180 // Increment the count of Pfn references for the control area 01181 // corresponding to this file. 01182 // 01183 01184 MiGetSubsectionAddress ( 01185 ProtoPte)->ControlArea->NumberOfPfnReferences += 1; 01186 01187 PageFrameIndex = MiRemoveZeroPage(MI_GET_PAGE_COLOR_FROM_PTE (ProtoPte)); 01188 01189 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 01190 01191 // 01192 // This barrier check is needed after zeroing the page and 01193 // before setting the PTE (not the prototype PTE) valid. 01194 // Capture it now, check it at the last possible moment. 01195 // 01196 01197 BarrierNeeded = TRUE; 01198 BarrierStamp = (ULONG)Pfn1->PteFrame; 01199 01200 MiInitializePfn (PageFrameIndex, ProtoPte, 1); 01201 Pfn1->u2.ShareCount = 0; 01202 Pfn1->u3.e1.PrototypePte = 1; 01203 01204 MI_MAKE_VALID_PTE (TempPte, 01205 PageFrameIndex, 01206 Pfn1->OriginalPte.u.Soft.Protection, 01207 NULL ); 01208 01209 MI_WRITE_VALID_PTE (ProtoPte, TempPte); 01210 } 01211 01212 // 01213 // Increment the share count since the page is being put into a working 01214 // set. 01215 // 01216 01217 Pfn1->u2.ShareCount += 1; 01218 01219 if (Pfn1->u1.Event == NULL) { 01220 Pfn1->u1.Event = (PVOID)PsGetCurrentThread(); 01221 } 01222 01223 // 01224 // Increment the reference count of the page table 01225 // page for this PTE. 01226 // 01227 01228 PointerPde = MiGetPteAddress (PointerPte); 01229 Pfn2 = MI_PFN_ELEMENT (PointerPde->u.Hard.PageFrameNumber); 01230 01231 Pfn2->u2.ShareCount += 1; 01232 01233 MI_SET_GLOBAL_STATE (TempPte, 1); 01234 01235 #if defined (_WIN64) 01236 if (MI_DETERMINE_OWNER (PointerPte) == 0) { 01237 TempPte.u.Long &= ~MM_PTE_OWNER_MASK; 01238 } 01239 #else 01240 TempPte.u.Hard.Owner = MI_DETERMINE_OWNER (PointerPte); 01241 #endif 01242 01243 if (BarrierNeeded) { 01244 MI_BARRIER_SYNCHRONIZE (BarrierStamp); 01245 } 01246 01247 MI_WRITE_VALID_PTE (PointerPte, TempPte); 01248 01249 UNLOCK_PFN (OldIrql); 01250 01251 LOCK_SYSTEM_WS (OldIrql); 01252 01253 WorkingSetIndex = MiLocateAndReserveWsle (&MmSystemCacheWs); 01254 01255 MiUpdateWsle (&WorkingSetIndex, 01256 MiGetVirtualAddressMappedByPte (PointerPte), 01257 MmSystemCacheWorkingSetList, 01258 Pfn1); 01259 01260 MmSystemCacheWsle[WorkingSetIndex].u1.e1.SameProtectAsProto = 1; 01261 01262 MI_SET_PTE_IN_WORKING_SET (PointerPte, WorkingSetIndex); 01263 01264 UNLOCK_SYSTEM_WS (OldIrql); 01265 01266 return TRUE; 01267 01268 UnlockAndReturnTrue: 01269 UNLOCK_PFN (OldIrql); 01270 return TRUE; 01271 }
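
A FALSE return tells the caller that touching the address would incur a page read, which is how the cache manager decides whether a no-wait copy can proceed. The sketch below shows that pattern under stated assumptions; MyCcTryCopyPage and its parameters are hypothetical, and only the MmCheckCachedPageState prototype comes from this header.

    /* Sketch, not the cache manager's actual code. */
    #include <ntifs.h>

    BOOLEAN MmCheckCachedPageState (IN PVOID Address, IN BOOLEAN SetToZero);

    BOOLEAN
    MyCcTryCopyPage (PVOID CacheAddress, PVOID CallerBuffer, ULONG Bytes, BOOLEAN CanWait)
    {
        //
        // If the caller cannot block, only proceed when the cache page is
        // already resident (or can be materialized without I/O).
        //
        if (!CanWait && !MmCheckCachedPageState (CacheAddress, FALSE)) {
            return FALSE;           /* caller must retry with CanWait == TRUE */
        }

        RtlCopyMemory (CallerBuffer, CacheAddress, Bytes);
        return TRUE;
    }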

VOID MmCleanProcessAddressSpace (VOID)

VOID MmCleanUserProcessAddressSpace (VOID)

VOID MmCleanVirtualAddressDescriptor (VOID)

NTSTATUS MmCopyToCachedPage (IN PVOID Address, IN PVOID UserBuffer, IN ULONG Offset, IN SIZE_T CountInBytes, IN BOOLEAN DontZero)

Definition at line 1274 of file mapcache.c.

References ActiveAndValid, APC_LEVEL, _ETHREAD::ApcNeeded, ASSERT, Buffer, CHAR, Copy, DISPATCH_LEVEL, Event(), EXCEPTION_EXECUTE_HANDLER, FALSE, IoRetryIrpCompletions(), KeDelayExecutionThread(), KeEnterCriticalRegion, KeLeaveCriticalRegion, KeLowerIrql(), KeRaiseIrql(), KernelMode, KeSetEvent(), LOCK_PFN, LOCK_SYSTEM_WS, MI_ADD_LOCKED_PAGE_CHARGE, MI_ADD_LOCKED_PAGE_CHARGE_FOR_MODIFIED_PAGE, MI_DETERMINE_OWNER, MI_GET_PAGE_COLOR_FROM_PTE, MI_GET_PAGE_FRAME_FROM_PTE, MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE, MI_MAGIC_AWE_PTEFRAME, MI_MAKE_VALID_PTE, MI_PFN_ELEMENT, MI_REMOVE_LOCKED_PAGE_CHARGE, MI_SET_GLOBAL_STATE, MI_SET_PTE_DIRTY, MI_SET_PTE_IN_WORKING_SET, MI_WRITE_VALID_PTE, MiEnsureAvailablePageOrWait(), MiFreeInPageSupportBlock(), MiGetInPageSupportBlock(), MiGetPteAddress, MiGetSubsectionAddress, MiGetVirtualAddressMappedByPte, MiInitializeTransitionPfn(), MiLocateAndReserveWsle(), MiMakeSystemAddressValidPfn(), MiMapCacheExceptionFilter(), MiPteToProto, MiRemoveAnyPage(), MiUnlinkPageFromList(), MiUpdateWsle(), MM_PTE_OWNER_MASK, MmResetPageFaultReadAhead, MmSavePageFaultReadAhead, MmSetPageFaultReadAhead, MmShortTime, MmSystemCacheWorkingSetList, MmSystemCacheWs, MmSystemCacheWsle, _ETHREAD::NestedFaultCount, NTSTATUS(), NULL, _CONTROL_AREA::NumberOfPfnReferences, _CONTROL_AREA::NumberOfUserReferences, Offset, _MMPFN::OriginalPte, PAGE_SIZE, PsGetCurrentThread, _MMPFN::PteAddress, _MMPFN::PteFrame, TRUE, _MMPTE::u, _MMPFN::u1, _MMWSLE::u1, _MMPFN::u2, _MMPFN::u3, UNLOCK_PFN, UNLOCK_SYSTEM_WS, and WSLE_NUMBER.

Referenced by CcMapAndCopy().

01284 : 01285 01286 This routine checks the state of the specified page that is mapped in 01287 the system cache. If the specified virtual address can be made valid 01288 (i.e., the page is already in memory), it is made valid and the value 01289 TRUE is returned. 01290 01291 If the page is not in memory, and SetToZero is FALSE, the 01292 value FALSE is returned. However, if SetToZero is TRUE, a page of 01293 zeroes is materialized for the specified virtual address and the address 01294 is made valid and the value TRUE is returned. 01295 01296 This routine is for usage by the cache manager. 01297 01298 Arguments: 01299 01300 Address - Supplies the address of a page mapped in the system cache. 01301 This MUST be a page aligned address! 01302 01303 UserBuffer - Supplies the address of a user buffer to copy into the 01304 system cache at the specified address + offset. 01305 01306 Offset - Supplies the offset into the UserBuffer to copy the data. 01307 01308 CountInBytes - Supplies the byte count to copy from the user buffer. 01309 01310 DontZero - Supplies TRUE if the buffer should not be zeroed (the 01311 caller will track zeroing). FALSE if it should be zeroed. 01312 01313 Return Value: 01314 01315 Returns the status of the copy. 01316 01317 Environment: 01318 01319 Kernel mode, <= APC_LEVEL. 01320 01321 --*/ 01322 01323 { 01324 PMMPTE PointerPte; 01325 PMMPTE PointerPde; 01326 PMMPTE ProtoPte; 01327 PFN_NUMBER PageFrameIndex; 01328 WSLE_NUMBER WorkingSetIndex; 01329 MMPTE TempPte; 01330 MMPTE ProtoPteContents; 01331 PMMPFN Pfn1; 01332 PMMPFN Pfn2; 01333 KIRQL OldIrql; 01334 ULONG TransitionState; 01335 ULONG AddToWorkingSet; 01336 LOGICAL ShareCountUpped; 01337 SIZE_T EndFill; 01338 PVOID Buffer; 01339 NTSTATUS status; 01340 PMMINPAGE_SUPPORT Event; 01341 PCONTROL_AREA ControlArea; 01342 PETHREAD Thread; 01343 ULONG SavedState; 01344 LOGICAL ApcsExplicitlyBlocked; 01345 LOGICAL ApcNeeded; 01346 01347 TransitionState = FALSE; 01348 AddToWorkingSet = FALSE; 01349 ApcsExplicitlyBlocked = FALSE; 01350 ApcNeeded = FALSE; 01351 01352 ASSERT (((ULONG_PTR)Address & (PAGE_SIZE - 1)) == 0); 01353 ASSERT ((CountInBytes + Offset) <= PAGE_SIZE); 01354 ASSERT (KeGetCurrentIrql() < DISPATCH_LEVEL); 01355 01356 PointerPte = MiGetPteAddress (Address); 01357 01358 if (PointerPte->u.Hard.Valid == 1) { 01359 goto Copy; 01360 } 01361 01362 // 01363 // Touch the user's buffer to make it resident. This is required in 01364 // order to safely detect the case where both the system and user 01365 // address are pointing at the same physical page. This case causes 01366 // a deadlock during the RtlCopyBytes if the inpage support block needed 01367 // to be allocated and the PTE for the user page is not valid. This 01368 // potential deadlock is resolved because if the user page causes a 01369 // collided fault, the initiator thread is checked for. If they are 01370 // the same, then an exception is thrown by the pager. 01371 // 01372 01373 try { 01374 01375 *(volatile CHAR *)UserBuffer; 01376 01377 } except (EXCEPTION_EXECUTE_HANDLER) { 01378 return GetExceptionCode(); 01379 } 01380 01381 // 01382 // Make the PTE valid if possible. 01383 // 01384 01385 LOCK_PFN (OldIrql); 01386 01387 Recheck: 01388 01389 if (PointerPte->u.Hard.Valid == 1) { 01390 goto UnlockAndCopy; 01391 } 01392 01393 ASSERT (PointerPte->u.Soft.Prototype == 1); 01394 01395 ProtoPte = MiPteToProto (PointerPte); 01396 01397 // 01398 // Pte is not valid, check the state of the prototype PTE. 
01399 // 01400 01401 if (MiMakeSystemAddressValidPfn (ProtoPte)) { 01402 01403 // 01404 // If page fault occurred, recheck state of original PTE. 01405 // 01406 01407 if (PointerPte->u.Hard.Valid == 1) { 01408 goto UnlockAndCopy; 01409 } 01410 } 01411 01412 ShareCountUpped = FALSE; 01413 ProtoPteContents = *ProtoPte; 01414 01415 if (ProtoPteContents.u.Hard.Valid == 1) { 01416 01417 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&ProtoPteContents); 01418 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 01419 01420 // 01421 // Increment the share count so the prototype PTE will remain 01422 // valid until this can be added into the system's working set. 01423 // 01424 01425 Pfn1->u2.ShareCount += 1; 01426 ShareCountUpped = TRUE; 01427 01428 // 01429 // The prototype PTE is valid, make the cache PTE 01430 // valid and add it to the working set. 01431 // 01432 01433 TempPte = ProtoPteContents; 01434 01435 } else if ((ProtoPteContents.u.Soft.Transition == 1) && 01436 (ProtoPteContents.u.Soft.Prototype == 0)) { 01437 01438 // 01439 // Prototype PTE is in the transition state. Remove the page 01440 // from the page list and make it valid. 01441 // 01442 01443 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (&ProtoPteContents); 01444 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 01445 if ((Pfn1->u3.e1.ReadInProgress) || 01446 (Pfn1->u3.e1.InPageError)) { 01447 01448 // 01449 // Collided page fault or in page error, try the copy 01450 // operation incurring a page fault. 01451 // 01452 01453 goto UnlockAndCopy; 01454 } 01455 01456 MiUnlinkPageFromList (Pfn1); 01457 01458 Pfn1->u3.e2.ReferenceCount += 1; 01459 Pfn1->u3.e1.PageLocation = ActiveAndValid; 01460 Pfn1->u3.e1.Modified = 1; 01461 ASSERT (Pfn1->u2.ShareCount == 0); 01462 Pfn1->u2.ShareCount += 1; 01463 ShareCountUpped = TRUE; 01464 01465 MI_MAKE_VALID_PTE (TempPte, 01466 PageFrameIndex, 01467 Pfn1->OriginalPte.u.Soft.Protection, 01468 NULL ); 01469 MI_SET_PTE_DIRTY (TempPte); 01470 01471 MI_WRITE_VALID_PTE (ProtoPte, TempPte); 01472 01473 // 01474 // Increment the valid pte count for the page containing 01475 // the prototype PTE. 01476 // 01477 01478 } else { 01479 01480 // 01481 // Page is not in memory, if a page of zeroes is requested, 01482 // get a page of zeroes and make it valid. 01483 // 01484 01485 if (MiEnsureAvailablePageOrWait (NULL, NULL)) { 01486 01487 // 01488 // A wait operation occurred which could have changed the 01489 // state of the PTE. Recheck the PTE state. 01490 // 01491 01492 goto Recheck; 01493 } 01494 01495 Event = MiGetInPageSupportBlock (); 01496 if (Event == NULL) { 01497 UNLOCK_PFN (OldIrql); 01498 KeDelayExecutionThread (KernelMode, FALSE, (PLARGE_INTEGER)&MmShortTime); 01499 LOCK_PFN (OldIrql); 01500 goto Recheck; 01501 } 01502 01503 // 01504 // Increment the count of Pfn references for the control area 01505 // corresponding to this file. 01506 // 01507 01508 ControlArea = MiGetSubsectionAddress (ProtoPte)->ControlArea; 01509 ControlArea->NumberOfPfnReferences += 1; 01510 if (ControlArea->NumberOfUserReferences > 0) { 01511 01512 // 01513 // There is a user reference to this file, always zero ahead. 01514 // 01515 01516 DontZero = FALSE; 01517 } 01518 01519 // 01520 // Remove any page from the list and turn it into a transition 01521 // page in the cache with read in progress set. This causes 01522 // any other references to this page to block on the specified 01523 // event while the copy operation to the cache is on-going. 
01524 // 01525 01526 PageFrameIndex = MiRemoveAnyPage(MI_GET_PAGE_COLOR_FROM_PTE (ProtoPte)); 01527 01528 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 01529 01530 // 01531 // Increment the valid PTE count for the page containing 01532 // the prototype PTE. 01533 // 01534 01535 MiInitializeTransitionPfn (PageFrameIndex, ProtoPte, 0xFFFFFFFF); 01536 01537 Pfn1->u2.ShareCount = 0; 01538 01539 Pfn1->u3.e2.ReferenceCount = 0; // for the add_locked_page macro 01540 MI_ADD_LOCKED_PAGE_CHARGE_FOR_MODIFIED_PAGE (Pfn1, 24); 01541 01542 Pfn1->u3.e2.ReferenceCount = 1; 01543 Pfn1->u3.e1.PrototypePte = 1; 01544 Pfn1->u3.e1.Modified = 1; 01545 Pfn1->u3.e1.ReadInProgress = 1; 01546 Pfn1->u1.Event = &Event->Event; 01547 Event->Pfn = Pfn1; 01548 01549 // 01550 // This is needed in case a special kernel APC fires that ends up 01551 // referencing the same page (this may even be through a different 01552 // virtual address from the user/system one here). 01553 // 01554 01555 Thread = PsGetCurrentThread (); 01556 ASSERT (Thread->NestedFaultCount <= 1); 01557 Thread->NestedFaultCount += 1; 01558 01559 TransitionState = TRUE; 01560 01561 MI_MAKE_VALID_PTE (TempPte, 01562 PageFrameIndex, 01563 Pfn1->OriginalPte.u.Soft.Protection, 01564 NULL); 01565 MI_SET_PTE_DIRTY (TempPte); 01566 01567 // 01568 // APCs must be explicitly disabled to prevent suspend APCs from 01569 // interrupting this thread before the RtlCopyBytes completes. 01570 // Otherwise this page can remain in transition indefinitely (until 01571 // the suspend APC is released) which blocks any other threads that 01572 // may reference it. 01573 // 01574 01575 KeEnterCriticalRegion(); 01576 ApcsExplicitlyBlocked = TRUE; 01577 } 01578 01579 // 01580 // Increment the share count of the page table page for this PTE. 01581 // 01582 01583 PointerPde = MiGetPteAddress (PointerPte); 01584 Pfn2 = MI_PFN_ELEMENT (PointerPde->u.Hard.PageFrameNumber); 01585 01586 Pfn2->u2.ShareCount += 1; 01587 01588 MI_SET_GLOBAL_STATE (TempPte, 1); 01589 #if defined (_WIN64) 01590 if (MI_DETERMINE_OWNER (PointerPte) == 0) { 01591 TempPte.u.Long &= ~MM_PTE_OWNER_MASK; 01592 } 01593 #else 01594 TempPte.u.Hard.Owner = MI_DETERMINE_OWNER (PointerPte); 01595 #endif 01596 MI_WRITE_VALID_PTE (PointerPte, TempPte); 01597 01598 AddToWorkingSet = TRUE; 01599 01600 UnlockAndCopy: 01601 01602 // 01603 // Unlock the PFN database and perform the copy. 01604 // 01605 01606 UNLOCK_PFN (OldIrql); 01607 01608 Copy: 01609 01610 Thread = PsGetCurrentThread (); 01611 MmSavePageFaultReadAhead( Thread, &SavedState ); 01612 MmSetPageFaultReadAhead( Thread, 0 ); 01613 status = STATUS_SUCCESS; 01614 01615 // 01616 // Copy the user buffer into the cache under an exception handler. 01617 // 01618 01619 try { 01620 01621 Buffer = (PVOID)((PCHAR)Address + Offset); 01622 RtlCopyBytes (Buffer, UserBuffer, CountInBytes); 01623 01624 if (TransitionState) { 01625 01626 // 01627 // Only zero the memory outside the range if a page was taken 01628 // from the free list. 
01629 // 01630 01631 if (Offset != 0) { 01632 RtlZeroMemory (Address, Offset); 01633 } 01634 01635 if (DontZero == FALSE) { 01636 EndFill = PAGE_SIZE - (Offset + CountInBytes); 01637 01638 if (EndFill != 0) { 01639 Buffer = (PVOID)((PCHAR)Buffer + CountInBytes); 01640 RtlZeroMemory (Buffer, EndFill); 01641 } 01642 } 01643 } 01644 } except (MiMapCacheExceptionFilter (&status, GetExceptionInformation())) { 01645 01646 if (status == STATUS_MULTIPLE_FAULT_VIOLATION) { 01647 ASSERT (TransitionState == TRUE); 01648 } 01649 01650 // 01651 // Zero out the page if it came from the free list. 01652 // 01653 01654 if (TransitionState) { 01655 RtlZeroMemory (Address, PAGE_SIZE); 01656 } 01657 } 01658 01659 MmResetPageFaultReadAhead(Thread, SavedState); 01660 01661 if (AddToWorkingSet) { 01662 01663 LOCK_PFN (OldIrql); 01664 01665 if (ApcsExplicitlyBlocked == TRUE) { 01666 KeLeaveCriticalRegion(); 01667 } 01668 01669 ASSERT (Pfn1->u3.e2.ReferenceCount != 0); 01670 ASSERT (Pfn1->PteAddress == ProtoPte); 01671 01672 if (TransitionState) { 01673 01674 // 01675 // This is a newly allocated page. 01676 // 01677 01678 ASSERT (ShareCountUpped == FALSE); 01679 ASSERT (Pfn1->u2.ShareCount <= 1); 01680 ASSERT (Pfn1->u1.Event == &Event->Event); 01681 01682 MiMakeSystemAddressValidPfn (ProtoPte); 01683 MI_SET_GLOBAL_STATE (TempPte, 0); 01684 MI_WRITE_VALID_PTE (ProtoPte, TempPte); 01685 Pfn1->u1.Event = (PVOID)PsGetCurrentThread(); 01686 ASSERT (Pfn1->u3.e2.ReferenceCount != 0); 01687 ASSERT (Pfn1->PteFrame != MI_MAGIC_AWE_PTEFRAME); 01688 01689 ASSERT (Event->Completed == FALSE); 01690 Event->Completed = TRUE; 01691 01692 ASSERT (Pfn1->u2.ShareCount == 0); 01693 MI_REMOVE_LOCKED_PAGE_CHARGE(Pfn1, 41); 01694 Pfn1->u3.e1.PageLocation = ActiveAndValid; 01695 01696 ASSERT (Pfn1->u3.e1.ReadInProgress == 1); 01697 Pfn1->u3.e1.ReadInProgress = 0; 01698 01699 // 01700 // Increment the share count since the page is 01701 // being put into a working set. 01702 // 01703 01704 Pfn1->u2.ShareCount += 1; 01705 01706 if (Event->WaitCount != 1) { 01707 Event->IoStatus.Status = STATUS_SUCCESS; 01708 Event->IoStatus.Information = 0; 01709 KeSetEvent (&Event->Event, 0, FALSE); 01710 } 01711 01712 MiFreeInPageSupportBlock (Event); 01713 if (DontZero != FALSE) { 01714 MI_ADD_LOCKED_PAGE_CHARGE(Pfn1, 40); 01715 Pfn1->u3.e2.ReferenceCount += 1; 01716 status = STATUS_CACHE_PAGE_LOCKED; 01717 } 01718 01719 ASSERT (Thread->NestedFaultCount <= 3); 01720 ASSERT (Thread->NestedFaultCount != 0); 01721 01722 Thread->NestedFaultCount -= 1; 01723 01724 if ((Thread->ApcNeeded == 1) && (Thread->NestedFaultCount == 0)) { 01725 ApcNeeded = TRUE; 01726 Thread->ApcNeeded = 0; 01727 } 01728 01729 } else { 01730 01731 // 01732 // This is either a frame that was originally on the transition list 01733 // or was already valid when this routine began execution. Either 01734 // way, the share count (and therefore the systemwide locked pages 01735 // count) has been dealt with. 
01736 // 01737 01738 ASSERT (ShareCountUpped == TRUE); 01739 01740 if (Pfn1->u1.Event == NULL) { 01741 Pfn1->u1.Event = (PVOID)PsGetCurrentThread(); 01742 } 01743 } 01744 01745 UNLOCK_PFN (OldIrql); 01746 01747 LOCK_SYSTEM_WS (OldIrql); 01748 01749 WorkingSetIndex = MiLocateAndReserveWsle (&MmSystemCacheWs); 01750 01751 MiUpdateWsle (&WorkingSetIndex, 01752 MiGetVirtualAddressMappedByPte (PointerPte), 01753 MmSystemCacheWorkingSetList, 01754 Pfn1); 01755 01756 MmSystemCacheWsle[WorkingSetIndex].u1.e1.SameProtectAsProto = 1; 01757 01758 MI_SET_PTE_IN_WORKING_SET (PointerPte, WorkingSetIndex); 01759 01760 UNLOCK_SYSTEM_WS (OldIrql); 01761 01762 if (ApcNeeded == TRUE) { 01763 ASSERT (OldIrql < APC_LEVEL); 01764 ASSERT (Thread->NestedFaultCount == 0); 01765 ASSERT (Thread->ApcNeeded == 0); 01766 KeRaiseIrql (APC_LEVEL, &OldIrql); 01767 IoRetryIrpCompletions (); 01768 KeLowerIrql (OldIrql); 01769 } 01770 } 01771 else { 01772 ASSERT (ApcsExplicitlyBlocked == FALSE); 01773 } 01774 01775 return status; 01776 }
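
When MmCopyToCachedPage takes a page from the free list, only the [Offset, Offset + CountInBytes) range receives caller data; the leading Offset bytes and the trailing EndFill = PAGE_SIZE - (Offset + CountInBytes) bytes are zero-filled unless DontZero is set. The small user-mode sketch below reproduces that fill pattern on an ordinary 4096-byte buffer (the page size is an assumption of the sketch).

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096u   /* assumption: 4KB page, as on x86 */

    /* Mimics the fill pattern above on a buffer standing in for the cache
     * page: zero the prefix, copy the payload, zero the tail. */
    static void FillCachedPage(unsigned char *Page,
                               const unsigned char *UserBuffer,
                               unsigned Offset, unsigned CountInBytes,
                               int DontZero)
    {
        unsigned EndFill = PAGE_SIZE - (Offset + CountInBytes);

        if (Offset != 0) {
            memset(Page, 0, Offset);                          /* leading fill  */
        }
        memcpy(Page + Offset, UserBuffer, CountInBytes);      /* caller's data */
        if (!DontZero && EndFill != 0) {
            memset(Page + Offset + CountInBytes, 0, EndFill); /* trailing fill */
        }
    }

    int main(void)
    {
        static unsigned char Page[PAGE_SIZE];
        unsigned char Data[16] = "0123456789abcdef";

        FillCachedPage(Page, Data, 100, sizeof(Data), 0);
        printf("byte 99=%u byte 100=%c tail byte=%u\n",
               (unsigned)Page[99], Page[100], (unsigned)Page[PAGE_SIZE - 1]);
        return 0;
    }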

NTSTATUS MmCopyVirtualMemory (IN PEPROCESS FromProcess, IN PVOID FromAddress, IN PEPROCESS ToProcess, OUT PVOID ToAddress, IN ULONG BufferSize, IN KPROCESSOR_MODE PreviousMode, OUT PULONG NumberOfBytesCopied)

Definition at line 362 of file readwrt.c.

References _EPROCESS::AddressSpaceDeleted, ASSERT, BufferSize, FALSE, KeSetEvent(), MiDoMappedCopy(), MiDoPoolCopy(), MiLockSystemSpace, MiUnlockSystemSpace, NTSTATUS(), NULL, POOL_MOVE_THRESHOLD, PsGetCurrentProcess, Status, _EPROCESS::VmOperation, and _EPROCESS::VmOperationEvent.

Referenced by LpcpCopyRequestData(), NtReadVirtualMemory(), and NtWriteVirtualMemory().

00371 { 00372 NTSTATUS Status; 00373 KIRQL OldIrql; 00374 PEPROCESS ProcessToLock; 00375 00376 if (BufferSize == 0) { 00377 ASSERT (FALSE); // No one should call with a zero size. 00378 return STATUS_SUCCESS; 00379 } 00380 00381 ProcessToLock = FromProcess; 00382 if (FromProcess == PsGetCurrentProcess()) { 00383 ProcessToLock = ToProcess; 00384 } 00385 00386 // 00387 // Make sure the process still has an address space. 00388 // 00389 00390 MiLockSystemSpace(OldIrql); 00391 if (ProcessToLock->AddressSpaceDeleted != 0) { 00392 MiUnlockSystemSpace(OldIrql); 00393 return STATUS_PROCESS_IS_TERMINATING; 00394 } 00395 ProcessToLock->VmOperation += 1; 00396 MiUnlockSystemSpace(OldIrql); 00397 00398 00399 // 00400 // If the buffer size is greater than the pool move threshold, 00401 // then attempt to write the memory via direct mapping. 00402 // 00403 00404 if (BufferSize > POOL_MOVE_THRESHOLD) { 00405 Status = MiDoMappedCopy(FromProcess, 00406 FromAddress, 00407 ToProcess, 00408 ToAddress, 00409 BufferSize, 00410 PreviousMode, 00411 NumberOfBytesCopied); 00412 00413 // 00414 // If the completion status is not a working quota problem, 00415 // then finish the service. Otherwise, attempt to write the 00416 // memory through nonpaged pool. 00417 // 00418 00419 if (Status != STATUS_WORKING_SET_QUOTA) { 00420 goto CompleteService; 00421 } 00422 00423 *NumberOfBytesCopied = 0; 00424 } 00425 00426 // 00427 // There was not enough working set quota to write the memory via 00428 // direct mapping or the size of the write was below the pool move 00429 // threshold. Attempt to write the specified memory through nonpaged 00430 // pool. 00431 // 00432 00433 Status = MiDoPoolCopy(FromProcess, 00434 FromAddress, 00435 ToProcess, 00436 ToAddress, 00437 BufferSize, 00438 PreviousMode, 00439 NumberOfBytesCopied); 00440 00441 // 00442 // Dereference the target process. 00443 // 00444 00445 CompleteService: 00446 00447 // 00448 // Indicate that the vm operation is complete. 00449 // 00450 00451 MiLockSystemSpace(OldIrql); 00452 ProcessToLock->VmOperation -= 1; 00453 if ((ProcessToLock->VmOperation == 0) && 00454 (ProcessToLock->VmOperationEvent != NULL)) { 00455 KeSetEvent (ProcessToLock->VmOperationEvent, 0, FALSE); 00456 } 00457 MiUnlockSystemSpace(OldIrql); 00458 00459 return Status; 00460 }
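
MmCopyVirtualMemory chooses its copy strategy by size: buffers above POOL_MOVE_THRESHOLD are first attempted through a direct mapping, falling back to a nonpaged-pool copy only on STATUS_WORKING_SET_QUOTA. The sketch below reproduces only that dispatch; the threshold value and the stand-in copy routines are assumptions, not the kernel's MiDoMappedCopy and MiDoPoolCopy.

    #include <stdio.h>

    #define POOL_MOVE_THRESHOLD      511          /* assumption for illustration */
    #define STATUS_SUCCESS           0x00000000ul
    #define STATUS_WORKING_SET_QUOTA 0xC00000A1ul

    typedef unsigned long NTSTATUS_T;

    /* Stand-ins: pretend the mapped copy always runs out of quota. */
    static NTSTATUS_T DoMappedCopy(unsigned long Size) { (void)Size; return STATUS_WORKING_SET_QUOTA; }
    static NTSTATUS_T DoPoolCopy(unsigned long Size)   { (void)Size; return STATUS_SUCCESS; }

    NTSTATUS_T CopyVirtualMemorySketch(unsigned long BufferSize)
    {
        NTSTATUS_T Status;

        if (BufferSize > POOL_MOVE_THRESHOLD) {
            Status = DoMappedCopy(BufferSize);
            if (Status != STATUS_WORKING_SET_QUOTA) {
                return Status;      /* mapped copy succeeded or failed hard */
            }
            /* Not enough working-set quota: fall through to the pool copy. */
        }
        return DoPoolCopy(BufferSize);
    }

    int main(void)
    {
        printf("small copy: 0x%lx\n", CopyVirtualMemorySketch(128));
        printf("large copy: 0x%lx\n", CopyVirtualMemorySketch(65536));
        return 0;
    }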

PVOID MmCreateKernelStack (BOOLEAN LargeStack)

NTKERNELAPI PMDL MmCreateMdl (IN PMDL MemoryDescriptorList OPTIONAL, IN PVOID Base, IN SIZE_T Length)

Definition at line 5852 of file iosup.c.

References ExAllocatePoolWithTag, MmInitializeMdl, MmSizeOfMdl(), NonPagedPool, NonPagedPoolMustSucceed, and POOL_BUDDY_MAX.

Referenced by IoWriteCrashDump(), MiCheckForCrashDump(), MiCreateImageFileMap(), MmAllocatePagesForMdl(), and NtFreeUserPhysicalPages().

05860 : 05861 05862 This function optionally allocates and initializes an MDL. 05863 05864 Arguments: 05865 05866 MemoryDescriptorList - Optionally supplies the address of the MDL 05867 to initialize. If this address is supplied as NULL 05868 an MDL is allocated from non-paged pool and 05869 initialized. 05870 05871 Base - Supplies the base virtual address for the buffer. 05872 05873 Length - Supplies the size of the buffer in bytes. 05874 05875 Return Value: 05876 05877 Returns the address of the initialized MDL. 05878 05879 Environment: 05880 05881 Kernel mode, IRQL of DISPATCH_LEVEL or below. 05882 05883 --*/ 05884 05885 { 05886 SIZE_T MdlSize; 05887 05888 MdlSize = MmSizeOfMdl( Base, Length ); 05889 05890 if (!ARGUMENT_PRESENT( MemoryDescriptorList )) { 05891 05892 // 05893 // The pool manager doesn't like being called with large requests 05894 // marked MustSucceed, so try the normal nonpaged if the 05895 // request is large. 05896 // 05897 05898 if (MdlSize > POOL_BUDDY_MAX) { 05899 MemoryDescriptorList = (PMDL)ExAllocatePoolWithTag ( 05900 NonPagedPool, 05901 MdlSize, 05902 'ldmM'); 05903 if (MemoryDescriptorList == (PMDL)0) { 05904 return (PMDL)0; 05905 } 05906 } 05907 else { 05908 MemoryDescriptorList = (PMDL)ExAllocatePoolWithTag ( 05909 NonPagedPoolMustSucceed, 05910 MdlSize, 05911 'ldmM'); 05912 } 05913 } 05914 05915 MmInitializeMdl (MemoryDescriptorList, Base, Length); 05916 return MemoryDescriptorList; 05917 }
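
MmCreateMdl only sizes and initializes the MDL header; the caller must still describe the physical pages, which for nonpaged pool is typically done with MmBuildMdlForNonPagedPool. A hedged kernel-mode sketch of that pairing follows (error handling trimmed); newer drivers would ordinarily use IoAllocateMdl instead.

    /* Sketch, assuming a driver-supplied nonpaged buffer. */
    #include <ntddk.h>

    NTSTATUS
    DescribeNonPagedBuffer (PVOID Buffer, SIZE_T Length, PMDL *MdlOut)
    {
        PMDL Mdl;

        //
        // Allocate and initialize an MDL header for the buffer. Passing NULL
        // asks MmCreateMdl to allocate the MDL from nonpaged pool itself.
        //
        Mdl = MmCreateMdl (NULL, Buffer, Length);
        if (Mdl == NULL) {
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        //
        // The MDL header alone does not describe the physical pages; fill in
        // the page frame array for this nonpaged buffer.
        //
        MmBuildMdlForNonPagedPool (Mdl);

        *MdlOut = Mdl;
        return STATUS_SUCCESS;
    }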

PPEB MmCreatePeb (IN PEPROCESS TargetProcess, IN PINITIAL_PEB InitialPeb)

Definition at line 4631 of file procsup.c.

References CmNtCSDVersion, EXCEPTION_EXECUTE_HANDLER, ExRaiseStatus(), FALSE, InitAnsiCodePageDataOffset, InitNlsSectionPointer, InitOemCodePageDataOffset, InitUnicodeCaseTableDataOffset, KeActiveProcessors, KeAttachProcess(), KeDetachProcess(), KeNumberProcessors, L, MI_INIT_PEB_FROM_IMAGE, MiCreatePebOrTeb(), MmCriticalSectionTimeout, MmHeapDeCommitFreeBlockThreshold, MmHeapDeCommitTotalFreeThreshold, MmHeapSegmentCommit, MmHeapSegmentReserve, MmMapViewOfSection(), MmRotatingUniprocessorNumber, NT_SUCCESS, NtBuildNumber, NtGlobalFlag, NtMajorVersion, NtMinorVersion, NTSTATUS(), NULL, PAGE_SIZE, ProbeForRead, RtlImageDirectoryEntryToData(), RtlImageNtHeader(), Status, TRUE, and USHORT.

Referenced by PspCreateProcess().

04638 : 04639 04640 This routine creates a PEB page within the target process 04641 and copies the initial PEB values into it. 04642 04643 Arguments: 04644 04645 TargetProcess - Supplies a pointer to the process in which to create 04646 and initialize the PEB. 04647 04648 InitialPeb - Supplies a pointer to the initial PEB to copy into the 04649 newly created PEB. 04650 04651 Return Value: 04652 04653 Returns the address of the base of the newly created PEB. 04654 04655 Can raise exceptions if no address space is available for the PEB or 04656 the user has exceeded quota (non-paged, pagefile, commit). 04657 04658 Environment: 04659 04660 Kernel mode. 04661 04662 --*/ 04663 04664 { 04665 PPEB PebBase; 04666 USHORT Magic; 04667 USHORT Characteristics; 04668 NTSTATUS Status; 04669 PVOID ViewBase; 04670 LARGE_INTEGER SectionOffset; 04671 PIMAGE_NT_HEADERS NtHeaders; 04672 SIZE_T ViewSize; 04673 ULONG ReturnedSize; 04674 PIMAGE_LOAD_CONFIG_DIRECTORY ImageConfigData; 04675 ULONG ProcessAffinityMask; 04676 04677 ViewBase = NULL; 04678 SectionOffset.LowPart = 0; 04679 SectionOffset.HighPart = 0; 04680 ViewSize = 0; 04681 04682 // 04683 // If the specified process is not the current process, attach 04684 // to the specified process. 04685 // 04686 04687 KeAttachProcess (&TargetProcess->Pcb); 04688 04689 // 04690 // Map the NLS tables into the application's address space. 04691 // 04692 04693 Status = MmMapViewOfSection( 04694 InitNlsSectionPointer, 04695 TargetProcess, 04696 &ViewBase, 04697 0L, 04698 0L, 04699 &SectionOffset, 04700 &ViewSize, 04701 ViewShare, 04702 MEM_TOP_DOWN | SEC_NO_CHANGE, 04703 PAGE_READONLY 04704 ); 04705 04706 if ( !NT_SUCCESS(Status) ) { 04707 KeDetachProcess(); 04708 ExRaiseStatus(Status); 04709 } 04710 04711 PebBase = (PPEB)MiCreatePebOrTeb (TargetProcess, 04712 (ULONG)sizeof( PEB )); 04713 04714 // 04715 // Initialize the Peb. 04716 // 04717 04718 PebBase->InheritedAddressSpace = InitialPeb->InheritedAddressSpace; 04719 PebBase->Mutant = InitialPeb->Mutant; 04720 PebBase->ImageBaseAddress = TargetProcess->SectionBaseAddress; 04721 04722 PebBase->AnsiCodePageData = (PVOID)((PUCHAR)ViewBase+InitAnsiCodePageDataOffset); 04723 PebBase->OemCodePageData = (PVOID)((PUCHAR)ViewBase+InitOemCodePageDataOffset); 04724 PebBase->UnicodeCaseTableData = (PVOID)((PUCHAR)ViewBase+InitUnicodeCaseTableDataOffset); 04725 04726 PebBase->NumberOfProcessors = KeNumberProcessors; 04727 PebBase->BeingDebugged = (BOOLEAN)(TargetProcess->DebugPort != NULL ? TRUE : FALSE); 04728 PebBase->NtGlobalFlag = NtGlobalFlag; 04729 PebBase->CriticalSectionTimeout = MmCriticalSectionTimeout; 04730 PebBase->HeapSegmentReserve = MmHeapSegmentReserve; 04731 PebBase->HeapSegmentCommit = MmHeapSegmentCommit; 04732 PebBase->HeapDeCommitTotalFreeThreshold = MmHeapDeCommitTotalFreeThreshold; 04733 PebBase->HeapDeCommitFreeBlockThreshold = MmHeapDeCommitFreeBlockThreshold; 04734 PebBase->NumberOfHeaps = 0; 04735 PebBase->MaximumNumberOfHeaps = (PAGE_SIZE - sizeof( PEB )) / sizeof( PVOID ); 04736 PebBase->ProcessHeaps = (PVOID *)(PebBase+1); 04737 04738 PebBase->OSMajorVersion = NtMajorVersion; 04739 PebBase->OSMinorVersion = NtMinorVersion; 04740 PebBase->OSBuildNumber = (USHORT)(NtBuildNumber & 0x3FFF); 04741 PebBase->OSPlatformId = 2; // VER_PLATFORM_WIN32_NT from winbase.h 04742 PebBase->OSCSDVersion = (USHORT)CmNtCSDVersion; 04743 04744 // 04745 // Every reference to NtHeaders (including the call to RtlImageNtHeader) 04746 // must be wrapped in try-except in case the inpage fails. 
The inpage 04747 // can fail for any reason including network failures, low resources, etc. 04748 // 04749 04750 try { 04751 NtHeaders = RtlImageNtHeader( PebBase->ImageBaseAddress ); 04752 Magic = NtHeaders->OptionalHeader.Magic; 04753 Characteristics = NtHeaders->FileHeader.Characteristics; 04754 } except (EXCEPTION_EXECUTE_HANDLER) { 04755 KeDetachProcess(); 04756 ExRaiseStatus(STATUS_INVALID_IMAGE_PROTECT); 04757 } 04758 04759 if (NtHeaders != NULL) { 04760 04761 ProcessAffinityMask = 0; 04762 #if defined(_WIN64) 04763 if (Magic == IMAGE_NT_OPTIONAL_HDR32_MAGIC) { 04764 04765 // 04766 // If this call fails, an exception will be thrown and the 04767 // detach performed so no need to handle errors here. 04768 // 04769 04770 MiInitializeWowPeb (NtHeaders, PebBase, TargetProcess); 04771 04772 } else // a PE32+ image 04773 #endif 04774 { 04775 try { 04776 ImageConfigData = RtlImageDirectoryEntryToData ( 04777 PebBase->ImageBaseAddress, 04778 TRUE, 04779 IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG, 04780 &ReturnedSize); 04781 04782 ProbeForRead ((PVOID)ImageConfigData, 04783 sizeof (*ImageConfigData), 04784 sizeof (ULONG)); 04785 04786 MI_INIT_PEB_FROM_IMAGE(NtHeaders, ImageConfigData); 04787 04788 if (ImageConfigData != NULL && ImageConfigData->ProcessAffinityMask != 0) { 04789 ProcessAffinityMask = ImageConfigData->ProcessAffinityMask; 04790 } 04791 04792 } except (EXCEPTION_EXECUTE_HANDLER) { 04793 KeDetachProcess(); 04794 ExRaiseStatus(STATUS_INVALID_IMAGE_PROTECT); 04795 } 04796 04797 } 04798 04799 // 04800 // Note NT4 examined the NtHeaders->FileHeader.Characteristics 04801 // for the IMAGE_FILE_AGGRESIVE_WS_TRIM bit, but this is not needed 04802 // or used for NT5 and above. 04803 // 04804 04805 // 04806 // See if image wants to override the default processor affinity mask. 04807 // 04808 04809 if (Characteristics & IMAGE_FILE_UP_SYSTEM_ONLY) { 04810 04811 // 04812 // Image is NOT MP safe. Assign it a processor on a rotating 04813 // basis to spread these processes around on MP systems. 04814 // 04815 04816 do { 04817 PebBase->ImageProcessAffinityMask = (KAFFINITY)(0x1 << MmRotatingUniprocessorNumber); 04818 if (++MmRotatingUniprocessorNumber >= KeNumberProcessors) { 04819 MmRotatingUniprocessorNumber = 0; 04820 } 04821 } while ((PebBase->ImageProcessAffinityMask & KeActiveProcessors) == 0); 04822 } else { 04823 04824 if (ProcessAffinityMask != 0) { 04825 04826 // 04827 // Pass the affinity mask from the image header 04828 // to LdrpInitializeProcess via the PEB. 04829 // 04830 04831 PebBase->ImageProcessAffinityMask = ProcessAffinityMask; 04832 } 04833 } 04834 } 04835 04836 PebBase->SessionId = TargetProcess->SessionId; 04837 04838 KeDetachProcess(); 04839 return PebBase; 04840 }
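
For images marked IMAGE_FILE_UP_SYSTEM_ONLY, the PEB's ImageProcessAffinityMask is set to a single processor chosen round-robin and intersected with the active-processor mask. The loop below reproduces that selection in plain C; the processor count and active mask are illustrative assumptions, not values from a live system.

    #include <stdio.h>

    static unsigned RotatingUniprocessorNumber = 0;

    /* Round-robin single-CPU affinity selection, skipping inactive CPUs. */
    static unsigned long PickUpOnlyAffinity(unsigned NumberProcessors,
                                            unsigned long ActiveProcessors)
    {
        unsigned long Mask;

        do {
            Mask = 1ul << RotatingUniprocessorNumber;
            if (++RotatingUniprocessorNumber >= NumberProcessors) {
                RotatingUniprocessorNumber = 0;
            }
        } while ((Mask & ActiveProcessors) == 0);   /* skip inactive CPUs */

        return Mask;
    }

    int main(void)
    {
        int i;

        /* 4 logical processors, CPU 2 inactive (mask 0xB). */
        for (i = 0; i < 5; i++) {
            printf("process %d -> affinity 0x%lx\n", i, PickUpOnlyAffinity(4, 0xBul));
        }
        return 0;
    }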

BOOLEAN MmCreateProcessAddressSpace (IN ULONG MinimumWorkingSetSize, IN PEPROCESS NewProcess, OUT PULONG_PTR DirectoryTableBase)

Definition at line 190 of file procsup.c.

References ASSERT, CODE_END, CODE_START, CONSISTENCY_LOCK_PFN, CONSISTENCY_UNLOCK_PFN, FALSE, HYPER_SPACE, INITIALIZE_DIRECTORY_TABLE_BASE, KeInitializeSpinLock(), KSEG0_BASE, KSEG2_BASE, KSTACK_POOL_START, LOCK_PFN, LOCK_WS, MI_GET_PAGE_FRAME_FROM_PTE, MI_INITIALIZE_HYPERSPACE_MAP, MI_PAGE_COLOR_PTE_PROCESS, MI_PAGE_COLOR_VA_PROCESS, MI_PFN_ELEMENT, MI_SET_GLOBAL_STATE, MI_WRITE_INVALID_PTE, MI_WRITE_VALID_PTE, MiChargeCommitment(), MiEnsureAvailablePageOrWait(), MiGetPdeAddress, MiGetPdeOffset, MiGetPpeAddress, MiGetPpeOffset, MiGetPteAddress, MiHydra, MiMapPageInHyperSpace(), MiNumberOfExtraSystemPdes, MiRemoveAnyPage(), MiRemoveZeroPageIfAny, MiReturnCommitment(), MiSessionAddProcess(), MiSystemCacheEndExtra, MiSystemCacheStartExtra, MiUnmapPageInHyperSpace, MiZeroPhysicalPage(), MM_BUMP_COUNTER, MM_DBG_COMMIT_PROCESS_CREATE, MM_DBG_COMMIT_RETURN_PROCESS_CREATE_FAILURE1, MM_KSEG0_BASE, MM_PROCESS_COMMIT_CHARGE, MM_SESSION_SPACE_DEFAULT, MM_SYSTEM_CACHE_WORKING_SET, MM_SYSTEM_SPACE_END, MM_SYSTEM_SPACE_START, MM_TRACK_COMMIT, MM_VA_MAPPED_BY_PDE, MmNonPagedSystemStart, MmProcessColorSeed, MmProcessCommit, MMPTE, MmResidentAvailablePages, MmSessionSpace, MmSystemCacheEnd, MmVirtualBias, MmWorkingSetList, _EPROCESS::NextPageColor, NON_PAGED_SYSTEM_END, NULL, PAGE_SHIFT, PsGetCurrentProcess, PTE_PER_PAGE, _MMPFN::PteAddress, RtlRandom(), TRUE, _MMPTE::u, _MMSUPPORT::u, _MMPFN::u3, UNLOCK_PFN, UNLOCK_WS, USHORT, ValidPdePde, and _EPROCESS::Vm.

Referenced by PspCreateProcess().

00198 : 00199 00200 This routine creates an address space which maps the system 00201 portion and contains a hyper space entry. 00202 00203 Arguments: 00204 00205 MinimumWorkingSetSize - Supplies the minimum working set size for 00206 this address space. This value is only used 00207 to ensure that ample physical pages exist 00208 to create this process. 00209 00210 NewProcess - Supplies a pointer to the process object being created. 00211 00212 DirectoryTableBase - Returns the value of the newly created 00213 address space's Page Directory (PD) page and 00214 hyper space page. 00215 00216 Return Value: 00217 00218 Returns TRUE if an address space was successfully created, FALSE 00219 if ample physical pages do not exist. 00220 00221 Environment: 00222 00223 Kernel mode. APCs Disabled. 00224 00225 --*/ 00226 00227 { 00228 PFN_NUMBER HyperDirectoryIndex; 00229 PFN_NUMBER PageDirectoryIndex; 00230 PMMPTE PointerPte; 00231 PMMPTE PointerPde; 00232 PMMPTE PointerPpe; 00233 PFN_NUMBER HyperSpaceIndex; 00234 PFN_NUMBER PageContainingWorkingSet; 00235 MMPTE TempPte; 00236 PMMPTE LastPte; 00237 PMMPTE PointerFillPte; 00238 PMMPTE CurrentAddressSpacePde; 00239 PEPROCESS CurrentProcess; 00240 KIRQL OldIrql; 00241 PMMPFN Pfn1; 00242 ULONG Color; 00243 #if defined (_X86PAE_) 00244 ULONG TopQuad; 00245 MMPTE TopPte; 00246 PPAE_ENTRY PaeVa; 00247 PFN_NUMBER PageDirectoryIndex2; 00248 KIRQL OldIrql2; 00249 ULONG i; 00250 PFN_NUMBER HyperSpaceIndex2; 00251 PVOID PoolBlock; 00252 #endif 00253 #if defined(_IA64_) 00254 PFN_NUMBER SessionParentIndex; 00255 #endif 00256 00257 // 00258 // Get the PFN LOCK to prevent another thread in this 00259 // process from using hyper space and to get physical pages. 00260 // 00261 00262 CurrentProcess = PsGetCurrentProcess (); 00263 00264 // 00265 // Charge commitment for the page directory pages, working set page table 00266 // page, and working set list. 00267 // 00268 00269 if (MiChargeCommitment (MM_PROCESS_COMMIT_CHARGE, NULL) == FALSE) { 00270 return FALSE; 00271 } 00272 00273 MM_TRACK_COMMIT (MM_DBG_COMMIT_PROCESS_CREATE, MM_PROCESS_COMMIT_CHARGE); 00274 00275 NewProcess->NextPageColor = (USHORT)(RtlRandom(&MmProcessColorSeed)); 00276 KeInitializeSpinLock (&NewProcess->HyperSpaceLock); 00277 00278 #if defined (_X86PAE_) 00279 TopQuad = MiPaeAllocate (&PaeVa); 00280 if (TopQuad == 0) { 00281 MiReturnCommitment (MM_PROCESS_COMMIT_CHARGE); 00282 return FALSE; 00283 } 00284 00285 // 00286 // This page must be in the first 4GB of RAM. 00287 // 00288 00289 ASSERT ((TopQuad >> PAGE_SHIFT) <= MM_HIGHEST_PAE_PAGE); 00290 #endif 00291 00292 LOCK_WS (CurrentProcess); 00293 00294 LOCK_PFN (OldIrql); 00295 00296 // 00297 // Check to make sure the physical pages are available. 00298 // 00299 00300 if (MmResidentAvailablePages <= (SPFN_NUMBER)MinimumWorkingSetSize) { 00301 00302 #if defined (_X86PAE_) 00303 PoolBlock = MiPaeFree (PaeVa); 00304 #endif 00305 00306 UNLOCK_PFN (OldIrql); 00307 UNLOCK_WS (CurrentProcess); 00308 MiReturnCommitment (MM_PROCESS_COMMIT_CHARGE); 00309 MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_PROCESS_CREATE_FAILURE1, MM_PROCESS_COMMIT_CHARGE); 00310 00311 #if defined (_X86PAE_) 00312 if (PoolBlock != NULL) { 00313 MiPaeFreeEntirePage (PoolBlock); 00314 } 00315 #endif 00316 // 00317 // Indicate no directory base was allocated. 
00318 // 00319 00320 return FALSE; 00321 } 00322 00323 MmResidentAvailablePages -= MinimumWorkingSetSize; 00324 MM_BUMP_COUNTER(6, MinimumWorkingSetSize); 00325 MmProcessCommit += MM_PROCESS_COMMIT_CHARGE; 00326 00327 NewProcess->AddressSpaceInitialized = 1; 00328 NewProcess->Vm.MinimumWorkingSetSize = MinimumWorkingSetSize; 00329 00330 // 00331 // Allocate a page directory (parent for 64-bit systems) page. 00332 // 00333 00334 MiEnsureAvailablePageOrWait (CurrentProcess, NULL); 00335 00336 Color = MI_PAGE_COLOR_PTE_PROCESS (PDE_BASE, 00337 &CurrentProcess->NextPageColor); 00338 00339 PageDirectoryIndex = MiRemoveZeroPageIfAny (Color); 00340 if (PageDirectoryIndex == 0) { 00341 PageDirectoryIndex = MiRemoveAnyPage (Color); 00342 UNLOCK_PFN (OldIrql); 00343 MiZeroPhysicalPage (PageDirectoryIndex, Color); 00344 LOCK_PFN (OldIrql); 00345 } 00346 00347 #if defined (_X86PAE_) 00348 TempPte = ValidPdePde; 00349 MI_SET_GLOBAL_STATE (TempPte, 0); 00350 00351 for (i = 0; i < PD_PER_SYSTEM - 1; i += 1) { 00352 00353 MiEnsureAvailablePageOrWait (CurrentProcess, NULL); 00354 00355 Color = MI_PAGE_COLOR_PTE_PROCESS (PDE_BASE, 00356 &CurrentProcess->NextPageColor); 00357 00358 PageDirectoryIndex2 = MiRemoveZeroPageIfAny (Color); 00359 if (PageDirectoryIndex2 == 0) { 00360 PageDirectoryIndex2 = MiRemoveAnyPage (Color); 00361 UNLOCK_PFN (OldIrql); 00362 MiZeroPhysicalPage (PageDirectoryIndex2, Color); 00363 LOCK_PFN (OldIrql); 00364 } 00365 00366 // 00367 // Recursively map each page directory page so it points to itself. 00368 // 00369 00370 TempPte.u.Hard.PageFrameNumber = PageDirectoryIndex2; 00371 PointerPte = (PMMPTE)MiMapPageInHyperSpace (PageDirectoryIndex, 00372 &OldIrql2); 00373 PointerPte[i] = TempPte; 00374 MiUnmapPageInHyperSpace (OldIrql2); 00375 TopPte.u.Long = TempPte.u.Long & ~MM_PAE_PDPTE_MASK; 00376 PaeVa->PteEntry[i].u.Long = TopPte.u.Long; 00377 } 00378 00379 // 00380 // Recursively map the topmost page directory page so it points to itself. 00381 // 00382 00383 TempPte.u.Hard.PageFrameNumber = PageDirectoryIndex; 00384 PointerPte = (PMMPTE)MiMapPageInHyperSpace (PageDirectoryIndex, &OldIrql2); 00385 PointerPte[PD_PER_SYSTEM - 1] = TempPte; 00386 MiUnmapPageInHyperSpace (OldIrql2); 00387 TopPte.u.Long = TempPte.u.Long & ~MM_PAE_PDPTE_MASK; 00388 PaeVa->PteEntry[PD_PER_SYSTEM - 1].u.Long = TopPte.u.Long; 00389 NewProcess->PaePageDirectoryPage = PageDirectoryIndex; 00390 NewProcess->PaeTop = (PVOID)PaeVa; 00391 DirectoryTableBase[0] = TopQuad; 00392 #else 00393 INITIALIZE_DIRECTORY_TABLE_BASE(&DirectoryTableBase[0], PageDirectoryIndex); 00394 #endif 00395 00396 #if defined (_WIN64) 00397 00398 PointerPpe = KSEG_ADDRESS (PageDirectoryIndex); 00399 TempPte = ValidPdePde; 00400 00401 // 00402 // Map the top level page directory parent page recursively onto itself. 00403 // 00404 00405 TempPte.u.Hard.PageFrameNumber = PageDirectoryIndex; 00406 00407 #if defined (_AXP64_) 00408 ASSERT (TempPte.u.Hard.Global == 0); 00409 PointerPpe[MiGetPpeOffset(PDE_TBASE)] = TempPte; 00410 #endif 00411 00412 #if defined(_IA64_) 00413 00414 // 00415 // For IA64, the self-mapped entry is forced to be the last entry of 00416 // PPE table. 00417 // 00418 00419 PointerPpe[(PDE_SELFMAP & 00420 ((sizeof(MMPTE)*PTE_PER_PAGE) - 1))/sizeof(MMPTE)] = TempPte; 00421 00422 #endif 00423 00424 // 00425 // Allocate the page directory for hyper space and map this directory 00426 // page into the page directory parent page. 
00427 // 00428 00429 MiEnsureAvailablePageOrWait (CurrentProcess, NULL); 00430 00431 Color = MI_PAGE_COLOR_PTE_PROCESS (MiGetPpeAddress(HYPER_SPACE), 00432 &CurrentProcess->NextPageColor); 00433 00434 HyperDirectoryIndex = MiRemoveZeroPageIfAny (Color); 00435 if (HyperDirectoryIndex == 0) { 00436 HyperDirectoryIndex = MiRemoveAnyPage (Color); 00437 UNLOCK_PFN (OldIrql); 00438 MiZeroPhysicalPage (HyperDirectoryIndex, Color); 00439 LOCK_PFN (OldIrql); 00440 } 00441 00442 TempPte.u.Hard.PageFrameNumber = HyperDirectoryIndex; 00443 PointerPpe[MiGetPpeOffset(HYPER_SPACE)] = TempPte; 00444 00445 #if defined (_IA64_) 00446 00447 // 00448 // Allocate the page directory parent for the session space 00449 // 00450 00451 MiEnsureAvailablePageOrWait (CurrentProcess, NULL); 00452 00453 Color = MI_PAGE_COLOR_PTE_PROCESS (MiGetPpeAddress(SESSION_SPACE_DEFAULT), 00454 &CurrentProcess->NextPageColor); 00455 00456 SessionParentIndex = MiRemoveZeroPageIfAny (Color); 00457 if (SessionParentIndex == 0) { 00458 SessionParentIndex = MiRemoveAnyPage (Color); 00459 UNLOCK_PFN (OldIrql); 00460 MiZeroPhysicalPage (SessionParentIndex, Color); 00461 LOCK_PFN (OldIrql); 00462 } 00463 00464 INITIALIZE_DIRECTORY_TABLE_BASE(&NewProcess->Pcb.SessionParentBase, SessionParentIndex); 00465 00466 PointerPpe = KSEG_ADDRESS (SessionParentIndex); 00467 00468 TempPte.u.Hard.PageFrameNumber = SessionParentIndex; 00469 00470 PointerPpe[(PDE_SSELFMAP & 00471 ((sizeof(MMPTE)*PTE_PER_PAGE) - 1))/sizeof(MMPTE)] = TempPte; 00472 00473 #endif // _IA64_ 00474 00475 #endif 00476 00477 // 00478 // Allocate the hyper space page table page. 00479 // 00480 00481 MiEnsureAvailablePageOrWait (CurrentProcess, NULL); 00482 00483 Color = MI_PAGE_COLOR_PTE_PROCESS (MiGetPdeAddress(HYPER_SPACE), 00484 &CurrentProcess->NextPageColor); 00485 00486 HyperSpaceIndex = MiRemoveZeroPageIfAny (Color); 00487 if (HyperSpaceIndex == 0) { 00488 HyperSpaceIndex = MiRemoveAnyPage (Color); 00489 UNLOCK_PFN (OldIrql); 00490 MiZeroPhysicalPage (HyperSpaceIndex, Color); 00491 LOCK_PFN (OldIrql); 00492 } 00493 00494 #if defined (_WIN64) 00495 PointerPde = KSEG_ADDRESS (HyperDirectoryIndex); 00496 TempPte.u.Hard.PageFrameNumber = HyperSpaceIndex; 00497 PointerPde[MiGetPdeOffset(HYPER_SPACE)] = TempPte; 00498 #endif 00499 00500 #if defined (_X86PAE_) 00501 00502 // 00503 // Allocate the second hyper space page table page. 00504 // Save it in the first PTE used by the first hyperspace PDE. 00505 // 00506 00507 MiEnsureAvailablePageOrWait (CurrentProcess, NULL); 00508 00509 Color = MI_PAGE_COLOR_PTE_PROCESS (MiGetPdeAddress(HYPER_SPACE2), 00510 &CurrentProcess->NextPageColor); 00511 00512 HyperSpaceIndex2 = MiRemoveZeroPageIfAny (Color); 00513 if (HyperSpaceIndex2 == 0) { 00514 HyperSpaceIndex2 = MiRemoveAnyPage (Color); 00515 UNLOCK_PFN (OldIrql); 00516 MiZeroPhysicalPage (HyperSpaceIndex2, Color); 00517 LOCK_PFN (OldIrql); 00518 } 00519 00520 // 00521 // Unlike DirectoryTableBase[0], the HyperSpaceIndex is stored as an 00522 // absolute PFN and does not need to be below 4GB. 00523 // 00524 00525 DirectoryTableBase[1] = HyperSpaceIndex; 00526 #else 00527 INITIALIZE_DIRECTORY_TABLE_BASE(&DirectoryTableBase[1], HyperSpaceIndex); 00528 #endif 00529 00530 // 00531 // Remove page for the working set list. 
00532 // 00533 00534 MiEnsureAvailablePageOrWait (CurrentProcess, NULL); 00535 00536 Color = MI_PAGE_COLOR_VA_PROCESS (MmWorkingSetList, 00537 &CurrentProcess->NextPageColor); 00538 00539 PageContainingWorkingSet = MiRemoveZeroPageIfAny (Color); 00540 if (PageContainingWorkingSet == 0) { 00541 PageContainingWorkingSet = MiRemoveAnyPage (Color); 00542 UNLOCK_PFN (OldIrql); 00543 MiZeroPhysicalPage (PageContainingWorkingSet, Color); 00544 LOCK_PFN (OldIrql); 00545 } 00546 00547 // 00548 // Release the PFN mutex as the needed pages have been allocated. 00549 // 00550 00551 UNLOCK_PFN (OldIrql); 00552 00553 NewProcess->WorkingSetPage = PageContainingWorkingSet; 00554 00555 // 00556 // Initialize the page reserved for hyper space. 00557 // 00558 00559 MI_INITIALIZE_HYPERSPACE_MAP (HyperSpaceIndex); 00560 00561 // 00562 // Set the PTE address in the PFN for the top level page directory page. 00563 // 00564 00565 #if defined (_WIN64) 00566 00567 Pfn1 = MI_PFN_ELEMENT (PageDirectoryIndex); 00568 00569 ASSERT (Pfn1->u3.e1.PageColor == 0); 00570 00571 CONSISTENCY_LOCK_PFN (OldIrql); 00572 00573 Pfn1->PteAddress = MiGetPteAddress(PDE_TBASE); 00574 00575 CONSISTENCY_UNLOCK_PFN (OldIrql); 00576 00577 // 00578 // Set the PTE address in the PFN for the hyper space page directory page. 00579 // 00580 00581 Pfn1 = MI_PFN_ELEMENT (HyperDirectoryIndex); 00582 00583 ASSERT (Pfn1->u3.e1.PageColor == 0); 00584 00585 CONSISTENCY_LOCK_PFN (OldIrql); 00586 00587 Pfn1->PteAddress = MiGetPpeAddress(HYPER_SPACE); 00588 00589 CONSISTENCY_UNLOCK_PFN (OldIrql); 00590 00591 #if defined (_AXP64_) 00592 00593 // 00594 // All of the system mappings are global. 00595 // 00596 00597 MI_SET_GLOBAL_STATE (TempPte, 1); 00598 00599 PointerFillPte = &PointerPpe[MiGetPpeOffset(MM_SYSTEM_SPACE_START)]; 00600 CurrentAddressSpacePde = MiGetPpeAddress(MM_SYSTEM_SPACE_START); 00601 RtlCopyMemory (PointerFillPte, 00602 CurrentAddressSpacePde, 00603 ((1 + (MiGetPpeAddress(MM_SYSTEM_SPACE_END) - 00604 MiGetPpeAddress(MM_SYSTEM_SPACE_START))) * sizeof(MMPTE))); 00605 // 00606 // Session space and win32k.sys are local on Hydra configurations. 00607 // However, as an optimization, it can be made global on non-Hydra. 00608 // 00609 00610 if (MiHydra == TRUE) { 00611 MI_SET_GLOBAL_STATE (TempPte, 0); 00612 } 00613 00614 PointerFillPte = &PointerPpe[MiGetPpeOffset(MM_SESSION_SPACE_DEFAULT)]; 00615 CurrentAddressSpacePde = MiGetPpeAddress(MM_SESSION_SPACE_DEFAULT); 00616 MI_WRITE_VALID_PTE (PointerFillPte, *CurrentAddressSpacePde); 00617 00618 #endif 00619 00620 #if defined(_IA64_) 00621 if ((MiHydra == TRUE) && (CurrentProcess->Vm.u.Flags.ProcessInSession != 0)) { 00622 PointerPpe = KSEG_ADDRESS(SessionParentIndex); 00623 PointerFillPte = &PointerPpe[MiGetPpeOffset(MM_SESSION_SPACE_DEFAULT)]; 00624 CurrentAddressSpacePde = MiGetPpeAddress(MM_SESSION_SPACE_DEFAULT); 00625 MI_WRITE_VALID_PTE (PointerFillPte, *CurrentAddressSpacePde); 00626 } 00627 #endif 00628 00629 #else // the following is for !WIN64 only 00630 00631 #if defined (_X86PAE_) 00632 00633 // 00634 // Stash the second hyperspace PDE in the first PTE for the initial 00635 // hyperspace entry. 
00636 // 00637 00638 TempPte = ValidPdePde; 00639 TempPte.u.Hard.PageFrameNumber = HyperSpaceIndex2; 00640 MI_SET_GLOBAL_STATE (TempPte, 0); 00641 00642 PointerPte = (PMMPTE)MiMapPageInHyperSpace (HyperSpaceIndex, &OldIrql2); 00643 PointerPte[0] = TempPte; 00644 MiUnmapPageInHyperSpace (OldIrql2); 00645 00646 #endif 00647 00648 // 00649 // Set the PTE address in the PFN for the page directory page. 00650 // 00651 00652 Pfn1 = MI_PFN_ELEMENT (PageDirectoryIndex); 00653 00654 ASSERT (Pfn1->u3.e1.PageColor == 0); 00655 00656 CONSISTENCY_LOCK_PFN (OldIrql); 00657 00658 Pfn1->PteAddress = (PMMPTE)PDE_BASE; 00659 00660 CONSISTENCY_UNLOCK_PFN (OldIrql); 00661 00662 TempPte = ValidPdePde; 00663 TempPte.u.Hard.PageFrameNumber = HyperSpaceIndex; 00664 MI_SET_GLOBAL_STATE (TempPte, 0); 00665 00666 // 00667 // Map the page directory page in hyperspace. 00668 // Note for PAE, this is the high 1GB virtual only. 00669 // 00670 00671 PointerPte = (PMMPTE)MiMapPageInHyperSpace (PageDirectoryIndex, &OldIrql); 00672 PointerPte[MiGetPdeOffset(HYPER_SPACE)] = TempPte; 00673 00674 #if defined (_X86PAE_) 00675 00676 // 00677 // Map in the second hyperspace page directory. 00678 // The page directory page is already recursively mapped. 00679 // 00680 00681 TempPte.u.Hard.PageFrameNumber = HyperSpaceIndex2; 00682 PointerPte[MiGetPdeOffset(HYPER_SPACE2)] = TempPte; 00683 00684 #else 00685 00686 // 00687 // Recursively map the page directory page so it points to itself. 00688 // 00689 00690 TempPte.u.Hard.PageFrameNumber = PageDirectoryIndex; 00691 PointerPte[MiGetPdeOffset(PTE_BASE)] = TempPte; 00692 00693 #endif 00694 00695 // 00696 // Map in the non paged portion of the system. 00697 // 00698 00699 #if defined(_ALPHA_) 00700 00701 PointerFillPte = &PointerPte[MiGetPdeOffset(MM_SYSTEM_SPACE_START)]; 00702 CurrentAddressSpacePde = MiGetPdeAddress(MM_SYSTEM_SPACE_START); 00703 RtlCopyMemory (PointerFillPte, 00704 CurrentAddressSpacePde, 00705 ((1 + (MiGetPdeAddress(MM_SYSTEM_SPACE_END) - 00706 MiGetPdeAddress(MM_SYSTEM_SPACE_START))) * sizeof(MMPTE))); 00707 00708 // 00709 // KSEG0 is identity-mapped on the Alpha. Copy the PDEs for this region. 00710 // 00711 00712 PointerFillPte = &PointerPte[MiGetPdeOffset(MM_KSEG0_BASE)]; 00713 CurrentAddressSpacePde = MiGetPdeAddress(MM_KSEG0_BASE); 00714 RtlCopyMemory (PointerFillPte, 00715 CurrentAddressSpacePde, 00716 MiGetPdeOffset(KSEG2_BASE-KSEG0_BASE) * sizeof(MMPTE)); 00717 00718 #else // the following is for x86 only 00719 00720 // 00721 // If the system has not been loaded at a biased address, then system PDEs 00722 // exist in the 2gb->3gb range which must be copied. 00723 // 00724 00725 #if defined (_X86PAE_) 00726 00727 // 00728 // For the PAE case, only the last page directory is currently mapped, so 00729 // only copy the system PDEs for the last 1GB - any that need copying in 00730 // the 2gb->3gb range will be done a little later. 
00731 // 00732 00733 if (MmVirtualBias != 0) { 00734 PointerFillPte = &PointerPte[MiGetPdeOffset(CODE_START + MmVirtualBias)]; 00735 CurrentAddressSpacePde = MiGetPdeAddress(CODE_START + MmVirtualBias); 00736 00737 RtlCopyMemory (PointerFillPte, 00738 CurrentAddressSpacePde, 00739 (((1 + CODE_END) - CODE_START) / MM_VA_MAPPED_BY_PDE) * sizeof(MMPTE)); 00740 } 00741 #else 00742 PointerFillPte = &PointerPte[MiGetPdeOffset(CODE_START + MmVirtualBias)]; 00743 CurrentAddressSpacePde = MiGetPdeAddress(CODE_START + MmVirtualBias); 00744 00745 RtlCopyMemory (PointerFillPte, 00746 CurrentAddressSpacePde, 00747 (((1 + CODE_END) - CODE_START) / MM_VA_MAPPED_BY_PDE) * sizeof(MMPTE)); 00748 #endif 00749 00750 LastPte = &PointerPte[MiGetPdeOffset(NON_PAGED_SYSTEM_END)]; 00751 PointerFillPte = &PointerPte[MiGetPdeOffset(MmNonPagedSystemStart)]; 00752 CurrentAddressSpacePde = MiGetPdeAddress(MmNonPagedSystemStart); 00753 00754 RtlCopyMemory (PointerFillPte, 00755 CurrentAddressSpacePde, 00756 ((1 + (MiGetPdeAddress(NON_PAGED_SYSTEM_END) - 00757 CurrentAddressSpacePde))) * sizeof(MMPTE)); 00758 00759 // 00760 // Map in the system cache page table pages. 00761 // 00762 00763 LastPte = &PointerPte[MiGetPdeOffset(MmSystemCacheEnd)]; 00764 PointerFillPte = &PointerPte[MiGetPdeOffset(MM_SYSTEM_CACHE_WORKING_SET)]; 00765 CurrentAddressSpacePde = MiGetPdeAddress(MM_SYSTEM_CACHE_WORKING_SET); 00766 00767 RtlCopyMemory (PointerFillPte, 00768 CurrentAddressSpacePde, 00769 ((1 + (MiGetPdeAddress(MmSystemCacheEnd) - 00770 CurrentAddressSpacePde))) * sizeof(MMPTE)); 00771 00772 #if !defined (_X86PAE_) 00773 // 00774 // Map in any additional system cache page table pages. 00775 // 00776 00777 if (MiSystemCacheEndExtra != MmSystemCacheEnd) { 00778 LastPte = &PointerPte[MiGetPdeOffset(MiSystemCacheEndExtra)]; 00779 PointerFillPte = &PointerPte[MiGetPdeOffset(MiSystemCacheStartExtra)]; 00780 CurrentAddressSpacePde = MiGetPdeAddress(MiSystemCacheStartExtra); 00781 00782 RtlCopyMemory (PointerFillPte, 00783 CurrentAddressSpacePde, 00784 ((1 + (MiGetPdeAddress(MiSystemCacheEndExtra) - 00785 CurrentAddressSpacePde))) * sizeof(MMPTE)); 00786 } 00787 #endif 00788 00789 #endif // end of x86 specific else 00790 00791 #if !defined (_X86PAE_) 00792 if (MiHydra == TRUE) { 00793 00794 // 00795 // Copy the bootstrap entry for session space. 00796 // The rest is faulted in as needed. 00797 // 00798 00799 PointerFillPte = &PointerPte[MiGetPdeOffset(MmSessionSpace)]; 00800 CurrentAddressSpacePde = MiGetPdeAddress(MmSessionSpace); 00801 if (CurrentAddressSpacePde->u.Hard.Valid == 1) { 00802 MI_WRITE_VALID_PTE (PointerFillPte, *CurrentAddressSpacePde); 00803 } 00804 else { 00805 MI_WRITE_INVALID_PTE (PointerFillPte, *CurrentAddressSpacePde); 00806 } 00807 } 00808 #endif 00809 00810 #if defined(_X86_) 00811 00812 // 00813 // Map in the additional system PTE range if present. 00814 // 00815 00816 #if !defined (_X86PAE_) 00817 if (MiNumberOfExtraSystemPdes) { 00818 00819 PointerFillPte = &PointerPte[MiGetPdeOffset(KSTACK_POOL_START)]; 00820 CurrentAddressSpacePde = MiGetPdeAddress(KSTACK_POOL_START); 00821 00822 RtlCopyMemory (PointerFillPte, 00823 CurrentAddressSpacePde, 00824 MiNumberOfExtraSystemPdes * sizeof(MMPTE)); 00825 } 00826 #endif 00827 #endif 00828 00829 MiUnmapPageInHyperSpace (OldIrql); 00830 00831 #if defined (_X86PAE_) 00832 00833 // 00834 // Map all the virtual space in the 2GB->3GB range when it's not user space. 
00835 // 00836 00837 if (MmVirtualBias == 0) { 00838 00839 PageDirectoryIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PaeVa->PteEntry[PD_PER_SYSTEM - 2]); 00840 00841 PointerPte = (PMMPTE)MiMapPageInHyperSpace (PageDirectoryIndex, &OldIrql); 00842 00843 PointerFillPte = &PointerPte[MiGetPdeOffset(CODE_START)]; 00844 CurrentAddressSpacePde = MiGetPdeAddress(CODE_START); 00845 00846 RtlCopyMemory (PointerFillPte, 00847 CurrentAddressSpacePde, 00848 (((1 + CODE_END) - CODE_START) / MM_VA_MAPPED_BY_PDE) * sizeof(MMPTE)); 00849 00850 if (MiSystemCacheEndExtra != MmSystemCacheEnd) { 00851 LastPte = &PointerPte[MiGetPdeOffset(MiSystemCacheEndExtra)]; 00852 PointerFillPte = &PointerPte[MiGetPdeOffset(MiSystemCacheStartExtra)]; 00853 CurrentAddressSpacePde = MiGetPdeAddress(MiSystemCacheStartExtra); 00854 00855 RtlCopyMemory (PointerFillPte, 00856 CurrentAddressSpacePde, 00857 ((1 + (MiGetPdeAddress(MiSystemCacheEndExtra) - 00858 CurrentAddressSpacePde))) * sizeof(MMPTE)); 00859 } 00860 00861 if (MiHydra == TRUE) { 00862 00863 // 00864 // Copy the bootstrap entry for session space. 00865 // The rest is faulted in as needed. 00866 // 00867 00868 PointerFillPte = &PointerPte[MiGetPdeOffset(MmSessionSpace)]; 00869 CurrentAddressSpacePde = MiGetPdeAddress(MmSessionSpace); 00870 if (CurrentAddressSpacePde->u.Hard.Valid == 1) { 00871 MI_WRITE_VALID_PTE (PointerFillPte, *CurrentAddressSpacePde); 00872 } 00873 else { 00874 MI_WRITE_INVALID_PTE (PointerFillPte, *CurrentAddressSpacePde); 00875 } 00876 } 00877 00878 if (MiNumberOfExtraSystemPdes) { 00879 00880 PointerFillPte = &PointerPte[MiGetPdeOffset(KSTACK_POOL_START)]; 00881 CurrentAddressSpacePde = MiGetPdeAddress(KSTACK_POOL_START); 00882 00883 RtlCopyMemory (PointerFillPte, 00884 CurrentAddressSpacePde, 00885 MiNumberOfExtraSystemPdes * sizeof(MMPTE)); 00886 } 00887 MiUnmapPageInHyperSpace (OldIrql); 00888 } 00889 #endif 00890 00891 #endif // end of !WIN64 specific else 00892 00893 // 00894 // Up the session space reference count. 00895 // 00896 00897 if (MiHydra == TRUE) { 00898 MiSessionAddProcess (NewProcess); 00899 } 00900 00901 // 00902 // Release working set mutex and lower IRQL. 00903 // 00904 00905 UNLOCK_WS (CurrentProcess); 00906 00907 return TRUE; 00908 }
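
The RtlCopyMemory calls above copy ranges of page directory entries sized by MM_VA_MAPPED_BY_PDE. Assuming x86 without PAE (4KB pages, 4-byte PDEs, 4MB mapped per PDE, index = VA >> 22), the sketch below computes how many entries a given system range spans; those constants are assumptions of the sketch, not taken from this header.

    #include <stdio.h>

    /* x86 non-PAE assumptions: 4KB pages, 4-byte PDEs, 4MB per PDE. */
    #define MM_VA_MAPPED_BY_PDE (4ul * 1024 * 1024)
    #define PDE_OFFSET(va)      ((unsigned)((va) >> 22))

    int main(void)
    {
        unsigned long start = 0x80000000ul;   /* illustrative system range */
        unsigned long end   = 0xA0000000ul;

        unsigned first = PDE_OFFSET(start);
        unsigned last  = PDE_OFFSET(end - 1);

        printf("PDE %u through PDE %u: %u entries, %lu bytes to copy\n",
               first, last, last - first + 1,
               (unsigned long)(last - first + 1) * 4 /* PDE size, non-PAE */);
        return 0;
    }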

NTKERNELAPI NTSTATUS MmCreateSection (OUT PVOID * SectionObject,
IN ACCESS_MASK DesiredAccess,
IN POBJECT_ATTRIBUTES ObjectAttributes OPTIONAL,
IN PLARGE_INTEGER MaximumSize,
IN ULONG SectionPageProtection,
IN ULONG AllocationAttributes,
IN HANDLE FileHandle OPTIONAL,
IN PFILE_OBJECT File OPTIONAL)
 

PTEB MmCreateTeb (IN PEPROCESS TargetProcess,
IN PINITIAL_TEB InitialTeb,
IN PCLIENT_ID ClientId)
 

Definition at line 4314 of file procsup.c.

References BBTBuffer, InitialTeb, KeAttachProcess(), KeDetachProcess(), MiCreatePebOrTeb(), NULL, and USHORT.

Referenced by PspCreateThread().

04322 : 04323 04324 This routine creates a TEB page within the target process 04325 and copies the initial TEB values into it. 04326 04327 Arguments: 04328 04329 TargetProcess - Supplies a pointer to the process in which to create 04330 and initialize the TEB. 04331 04332 InitialTeb - Supplies a pointer to the initial TEB to copy into the 04333 newly created TEB. 04334 04335 Return Value: 04336 04337 Returns the address of the base of the newly created TEB. 04338 04339 Can raise exceptions if no address space is available for the TEB or 04340 the user has exceeded quota (non-paged, pagefile, commit). 04341 04342 Environment: 04343 04344 Kernel mode. 04345 04346 --*/ 04347 04348 { 04349 PTEB TebBase; 04350 04351 // 04352 // If the specified process is not the current process, attach 04353 // to the specified process. 04354 // 04355 04356 KeAttachProcess (&TargetProcess->Pcb); 04357 04358 TebBase = (PTEB)MiCreatePebOrTeb (TargetProcess, 04359 (ULONG)sizeof(TEB)); 04360 04361 // 04362 // Initialize the TEB. 04363 // 04364 04365 #if defined(_WIN64) 04366 TebBase->NtTib.ExceptionList = NULL; 04367 #else 04368 TebBase->NtTib.ExceptionList = EXCEPTION_CHAIN_END; 04369 #endif 04370 04371 TebBase->NtTib.SubSystemTib = NULL; 04372 TebBase->NtTib.Version = OS2_VERSION; 04373 TebBase->NtTib.ArbitraryUserPointer = NULL; 04374 TebBase->NtTib.Self = (PNT_TIB)TebBase; 04375 TebBase->EnvironmentPointer = NULL; 04376 TebBase->ProcessEnvironmentBlock = TargetProcess->Peb; 04377 TebBase->ClientId = *ClientId; 04378 TebBase->RealClientId = *ClientId; 04379 04380 if ((InitialTeb->OldInitialTeb.OldStackBase == NULL) && 04381 (InitialTeb->OldInitialTeb.OldStackLimit == NULL)) { 04382 04383 TebBase->NtTib.StackBase = InitialTeb->StackBase; 04384 TebBase->NtTib.StackLimit = InitialTeb->StackLimit; 04385 TebBase->DeallocationStack = InitialTeb->StackAllocationBase; 04386 04387 #if defined(_IA64_) 04388 TebBase->BStoreLimit = InitialTeb->BStoreLimit; 04389 TebBase->DeallocationBStore = (PCHAR)InitialTeb->StackBase 04390 + ((ULONG_PTR)InitialTeb->StackBase - (ULONG_PTR)InitialTeb->StackAllocationBase); 04391 #endif 04392 04393 } 04394 else { 04395 TebBase->NtTib.StackBase = InitialTeb->OldInitialTeb.OldStackBase; 04396 TebBase->NtTib.StackLimit = InitialTeb->OldInitialTeb.OldStackLimit; 04397 } 04398 04399 TebBase->StaticUnicodeString.Buffer = TebBase->StaticUnicodeBuffer; 04400 TebBase->StaticUnicodeString.MaximumLength = (USHORT)sizeof( TebBase->StaticUnicodeBuffer ); 04401 TebBase->StaticUnicodeString.Length = (USHORT)0; 04402 04403 // 04404 // Used for BBT of ntdll and kernel32.dll. 04405 // 04406 04407 TebBase->ReservedForPerf = BBTBuffer; 04408 04409 KeDetachProcess(); 04410 return TebBase; 04411 }
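A minimal usage sketch (not the actual PspCreateThread code; TargetProcess, ThreadId and the stack values are assumed locals) showing how a caller might fill in an INITIAL_TEB and CLIENT_ID before letting MmCreateTeb materialize the TEB in the target process:

    INITIAL_TEB InitialTeb;
    CLIENT_ID ClientId;
    PTEB TebBase;

    RtlZeroMemory (&InitialTeb, sizeof (InitialTeb));
    InitialTeb.StackBase = StackBase;                     // assumed: user stack values
    InitialTeb.StackLimit = StackLimit;                   // computed by the caller
    InitialTeb.StackAllocationBase = StackAllocationBase;

    ClientId.UniqueProcess = TargetProcess->UniqueProcessId;
    ClientId.UniqueThread = ThreadId;                     // assumed local

    //
    // MmCreateTeb attaches to the target process, allocates the TEB page
    // and copies the initial values in.  It can raise an exception if no
    // address space is available or quota is exceeded, so callers normally
    // wrap it in try/except.
    //

    TebBase = MmCreateTeb (TargetProcess, &InitialTeb, &ClientId);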

PVOID MmDbgReadCheck (IN PVOID VirtualAddress)
 

Definition at line 27 of file alpha/debugsup.c.

References KSEG0_BASE, KSEG2_BASE, MmIsAddressValid(), NULL, and PAGE_SIZE.

Referenced by IopWriteDriverList(), KdpMoveMemory(), KdpReadIoSpace(), KdpReadVirtualMemory(), KdpSearchMemory(), KeDumpMachineState(), KiDumpParameterImages(), KiPcToFileHeader(), KiScanBugCheckCallbackList(), and MmDbgReadCheck64().

00033 : 00034 00035 00036 ALPHA implementation specific: 00037 00038 This routine returns the virtual address which is valid (mapped) 00039 for read access. 00040 00041 Arguments: 00042 00043 VirtualAddress - Supplies the virtual address to check. 00044 00045 Return Value: 00046 00047 Returns NULL if the address is not valid or readable, otherwise 00048 returns the virtual address. 00049 00050 Environment: 00051 00052 Kernel mode IRQL at DISPATCH_LEVEL or greater. 00053 00054 --*/ 00055 00056 { 00057 if ((VirtualAddress >= (PVOID)KSEG0_BASE) && 00058 (VirtualAddress < (PVOID)KSEG2_BASE)) { 00059 return VirtualAddress; 00060 } 00061 00062 if (!MmIsAddressValid (VirtualAddress)) { 00063 return NULL; 00064 } 00065 00066 return VirtualAddress; 00067 }
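An illustrative sketch (assumed caller; this is not the real KdpMoveMemory) of the pattern the callers above use: probe each source byte's page with MmDbgReadCheck before reading it, because the debugger runs at DISPATCH_LEVEL or above and must never take a page fault:

    PUCHAR Source = (PUCHAR) SourceAddress;      // assumed locals
    PUCHAR Destination = (PUCHAR) Buffer;
    ULONG BytesLeft = Length;

    while (BytesLeft != 0) {
        if (MmDbgReadCheck (Source) == NULL) {
            break;                               // not mapped for read - stop copying
        }
        *Destination = *Source;                  // safe: page verified readable
        Destination += 1;
        Source += 1;
        BytesLeft -= 1;
    }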

PVOID64 MmDbgReadCheck64 (IN PVOID64 VirtualAddress)
 

Definition at line 246 of file alpha/debugsup.c.

References NULL.

Referenced by KdpReadVirtualMemory(), and KdpReadVirtualMemory64().

00252 : 00253 00254 00255 ALPHA implementation specific: 00256 00257 This routine returns the virtual address which is valid (mapped) 00258 for read access. 00259 00260 If the address is valid and readable then the called address is returned. 00261 00262 Arguments: 00263 00264 VirtualAddress - Supplies the virtual address to check. 00265 00266 Return Value: 00267 00268 Returns NULL if the address is not valid or readable, otherwise 00269 returns the virtual address. 00270 00271 Environment: 00272 00273 Kernel mode IRQL at DISPATCH_LEVEL or greater. 00274 00275 --*/ 00276 00277 { 00278 #ifdef VLM_SUPPORT 00279 00280 if (!MmIsAddressValid64 (VirtualAddress)) { 00281 return NULL; 00282 } 00283 00284 return VirtualAddress; 00285 #else 00286 return NULL; 00287 #endif 00288 }

VOID MmDbgReleaseAddress (IN PVOID VirtualAddress,
IN PHARDWARE_PTE Opaque)
 

Definition at line 182 of file alpha/debugsup.c.

References KiFlushSingleTb(), MiGetPteAddress, TRUE, and _MMPTE::u.

Referenced by KdpAddBreakpoint(), KdpMoveMemory(), KdpWriteIoSpace(), KdpWriteVirtualMemory(), and KdSetOwedBreakpoints().

00189 : 00190 00191 i386/486 implementation specific: 00192 00193 This routine resets the specified virtual address access permissions 00194 to its original state. 00195 00196 Arguments: 00197 00198 VirtualAddress - Supplies the virtual address to check. 00199 00200 Opaque - Supplies an opaque pointer. 00201 00202 Return Value: 00203 00204 None. 00205 00206 Environment: 00207 00208 Kernel mode IRQL at DISPATCH_LEVEL or greater. 00209 00210 --*/ 00211 00212 { 00213 MMPTE TempPte; 00214 PMMPTE PointerPte; 00215 PMMPTE InputPte; 00216 00217 InputPte = (PMMPTE)Opaque; 00218 00219 ASSERT (MmIsAddressValid (VirtualAddress)); 00220 00221 if (InputPte->u.Long != 0) { 00222 00223 PointerPte = MiGetPteAddress (VirtualAddress); 00224 00225 TempPte = *InputPte; 00226 00227 // LWFIX: Need to make the write go out to memory but can't 00228 // make it dirty here ! TempPte.u.Hard.Dirty = MM_PTE_DIRTY; 00229 00230 *PointerPte = TempPte; 00231 00232 // 00233 // BUGBUG John Vert (jvert) 3/4/1999 00234 // KeFillEntryTb is liable to IPI the other processors. This is 00235 // definitely NOT what we want as the other processors are frozen 00236 // in the debugger and we will deadlock if we try and IPI them. 00237 // Just flush the current processor instead. 00238 //KeFillEntryTb ((PHARDWARE_PTE)PointerPte, VirtualAddress, TRUE); 00239 KiFlushSingleTb(TRUE, VirtualAddress); 00240 } 00241 00242 return; 00243 }

PVOID64 MmDbgTranslatePhysicalAddress64 (IN PHYSICAL_ADDRESS PhysicalAddress)
 

Definition at line 342 of file alpha/debugsup.c.

References BYTE_OFFSET, KiFlushSingleTb(), MI_PFN_ELEMENT, MiGetVirtualAddressMappedByPte, MmDebugPte, MmHighestPhysicalPage, MmIsAddressValid(), MmLowestPhysicalPage, NULL, PAGE_SHIFT, TRUE, _MMPTE::u, and ValidKernelPte.

Referenced by KdpReadPhysicalMemory(), KdpSearchPhysicalPage(), and KdpWritePhysicalMemory().

00348 : 00349 00350 ALPHA implementation specific: 00351 00352 The Alpha processor provides a direct-mapped address space called 00353 the superpage. The entire physical address space can be 00354 addressed via the superpage. This routine translates a physical 00355 address to its corresponding superpage address. Unfortunately, 00356 the base superpage address is processor-dependent. Therefore, we 00357 have to compute it based on the processor level. As new processors are 00358 released, this routine will need to be updated. 00359 00360 This routine does not use PTEs. 00361 00362 Arguments: 00363 00364 PhysicalAddress - Supplies the physical address to translate. 00365 00366 Return Value: 00367 00368 The virtual (superpage) address which corresponds to the physical address. 00369 00370 Environment: 00371 00372 Kernel mode IRQL at DISPATCH_LEVEL or greater. 00373 00374 --*/ 00375 00376 { 00377 switch (KeProcessorLevel) { 00378 00379 case PROCESSOR_ALPHA_21064: 00380 case PROCESSOR_ALPHA_21066: 00381 case PROCESSOR_ALPHA_21068: 00382 PhysicalAddress.QuadPart &= 0x00000003ffffffff; 00383 PhysicalAddress.QuadPart |= 0xfffffc0000000000; 00384 break; 00385 00386 case PROCESSOR_ALPHA_21164: 00387 case PROCESSOR_ALPHA_21164PC: 00388 PhysicalAddress.QuadPart &= 0x000000ffffffffff; 00389 PhysicalAddress.QuadPart |= 0xfffffc0000000000; 00390 break; 00391 00392 case PROCESSOR_ALPHA_21264: 00393 PhysicalAddress.QuadPart &= 0x00000fffffffffff; 00394 PhysicalAddress.QuadPart |= 0xffff800000000000; 00395 break; 00396 00397 default: 00398 return NULL64; 00399 00400 } 00401 00402 return (PVOID64)PhysicalAddress.QuadPart; 00403 } }
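A worked example (input value chosen arbitrarily) of the superpage translation performed above. On a 21164-class processor the physical address is masked to 40 bits and OR'ed with the 0xFFFFFC0000000000 superpage base:

    PHYSICAL_ADDRESS PhysicalAddress;
    PVOID64 SuperPageVa;

    PhysicalAddress.QuadPart = 0x12345000;

    //
    // For PROCESSOR_ALPHA_21164 this computes
    // (0x12345000 & 0x000000FFFFFFFFFF) | 0xFFFFFC0000000000
    //   == 0xFFFFFC0012345000.
    //

    SuperPageVa = MmDbgTranslatePhysicalAddress64 (PhysicalAddress);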

PVOID MmDbgWriteCheck (IN PVOID VirtualAddress,
IN PHARDWARE_PTE Opaque)
 

Definition at line 70 of file alpha/debugsup.c.

00077 : 00078 00079 ALPHA implementation specific: 00080 00081 This routine returns the physical address for a virtual address 00082 which is valid (mapped) for write access. 00083 00084 If the address is valid and writable and not within KSEG0 00085 the physical address within KSEG0 is returned. If the address 00086 is within KSEG0 then the called address is returned. 00087 00088 NOTE: The physical address must only be used while the interrupt 00089 level on ALL processors is above DISPATCH_LEVEL, otherwise the 00090 binding between the virtual address and the physical address can 00091 change due to paging. 00092 00093 Arguments: 00094 00095 VirtualAddress - Supplies the virtual address to check. 00096 00097 Opaque - Supplies a pointer to fill with an opaque value. 00098 00099 Return Value: 00100 00101 Returns NULL if the address is not valid or readable, otherwise 00102 returns the physical address of the corresponding virtual address. 00103 00104 Environment: 00105 00106 Kernel mode IRQL at DISPATCH_LEVEL or greater. 00107 00108 --*/ 00109 00110 { 00111 MMPTE PteContents; 00112 PMMPTE PointerPte; 00113 PMMPTE InputPte; 00114 00115 InputPte = (PMMPTE)Opaque; 00116 00117 InputPte->u.Long = 0; 00118 00119 if ((VirtualAddress >= (PVOID)KSEG0_BASE) && 00120 (VirtualAddress < (PVOID)KSEG2_BASE)) { 00121 return VirtualAddress; 00122 } 00123 00124 if (!MmIsAddressValid (VirtualAddress)) { 00125 return NULL; 00126 } 00127 00128 PointerPte = MiGetPteAddress (VirtualAddress); 00129 if ((VirtualAddress <= MM_HIGHEST_USER_ADDRESS) && 00130 (PointerPte->u.Hard.PageFrameNumber < MM_PAGES_IN_KSEG0)) { 00131 00132 // 00133 // User mode - return the physical address. This prevents 00134 // copy on write faults for breakpoints on user-mode pages. 00135 // IGNORE write protection. 00136 // 00137 // N.B. - The physical address must be less than 1GB to allow this 00138 // short-cut mapping. 00139 // 00140 // N.B. - Any non-breakpoint modifications can get lost when the page 00141 // is paged out because the PTE is not marked modified when 00142 // the access is made through this alternate mapping. 00143 // 00144 00145 return (PVOID) 00146 ((ULONG)MmGetPhysicalAddress(VirtualAddress).LowPart + KSEG0_BASE); 00147 } 00148 00149 if (PointerPte->u.Hard.Write == 0) { 00150 00151 // 00152 // PTE is not writable, make it so. 00153 // 00154 00155 PteContents = *PointerPte; 00156 00157 *InputPte = PteContents; 00158 00159 // 00160 // Modify the PTE to ensure write permissions. 00161 // 00162 00163 PteContents.u.Hard.Write = 1; 00164 00165 *PointerPte = PteContents; 00166 00167 // 00168 // BUGBUG John Vert (jvert) 3/4/1999 00169 // KeFillEntryTb is liable to IPI the other processors. This is 00170 // definitely NOT what we want as the other processors are frozen 00171 // in the debugger and we will deadlock if we try and IPI them. 00172 // Just flush the the current processor instead. 00173 //KeFillEntryTb ((PHARDWARE_PTE)PointerPte, VirtualAddress, TRUE); 00174 KiFlushSingleTb(TRUE, VirtualAddress); 00175 00176 } 00177 00178 return VirtualAddress; 00179 }
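A sketch (assumed debugger write path such as breakpoint insertion; BreakpointOpcode is hypothetical) of how MmDbgWriteCheck pairs with MmDbgReleaseAddress: the first call makes the page writable if necessary and returns an address that is safe to write through, the second restores the saved PTE and flushes the TB entry on the current processor only:

    HARDWARE_PTE OpaquePte;
    PVOID WritableVa;

    WritableVa = MmDbgWriteCheck (VirtualAddress, &OpaquePte);

    if (WritableVa != NULL) {

        *(PUCHAR) WritableVa = BreakpointOpcode;         // assumed single-byte patch

        //
        // Put back the original protection saved in OpaquePte (a no-op if
        // the PTE was already writable) and flush the local TB entry.
        //

        MmDbgReleaseAddress (VirtualAddress, &OpaquePte);
    }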

PVOID64 MmDbgWriteCheck64 (IN PVOID64 VirtualAddress)
 

Definition at line 291 of file alpha/debugsup.c.

References NULL.

Referenced by KdpWriteVirtualMemory(), and KdpWriteVirtualMemory64().

00297 : 00298 00299 ALPHA implementation specific: 00300 00301 This routine returns the physical address for a virtual address 00302 which is valid (mapped) for write access. 00303 00304 If the address is valid and writable then the called address is returned. 00305 00306 Arguments: 00307 00308 VirtualAddress - Supplies the virtual address to check. 00309 00310 Return Value: 00311 00312 Returns NULL if the address is not valid or readable, otherwise 00313 returns the virtual address. 00314 00315 Environment: 00316 00317 Kernel mode IRQL at DISPATCH_LEVEL or greater. 00318 00319 --*/ 00320 00321 { 00322 #ifdef VLM_SUPPORT 00323 PMMPTE PointerPte; 00324 00325 if (!MmIsAddressValid64 (VirtualAddress)) { 00326 return NULL; 00327 } 00328 00329 PointerPte = MiGetPteAddress64 (VirtualAddress); 00330 00331 if (PointerPte->u.Hard.Write == 0) { 00332 return NULL; 00333 } 00334 00335 return VirtualAddress; 00336 #else 00337 return NULL; 00338 #endif 00339 }

VOID MmDeleteKernelStack (IN PVOID PointerKernelStack,
IN BOOLEAN LargeStack)
 

Definition at line 2723 of file procsup.c.

References BYTES_TO_PAGES, DbgPrint, LOCK_PFN, MI_GET_PAGE_FRAME_FROM_PTE, MI_PFN_ELEMENT, MI_SET_PFN_DELETED, MiDecrementShareAndValidCount, MiDecrementShareCountOnly, MiGetPteAddress, MiReleaseSystemPtes(), MiReturnCommitment(), MM_BUMP_COUNTER, MM_DBG_COMMIT_RETURN_KERNEL_STACK_DELETE, MM_STACK_ALIGNMENT, MM_TRACK_COMMIT, MmFirstDeadKernelStack, MmKernelStackPages, MmKernelStackResident, MmLargeStacks, MmMaximumDeadKernelStacks, MmNumberDeadKernelStacks, MmProcessCommit, MmResidentAvailablePages, MmSmallStacks, PERFINFO_DELETE_STACK, _MMPFN::PteAddress, _MMPFN::PteFrame, SystemPteSpace, _MMPTE::u, _MMPFN::u1, and UNLOCK_PFN.

Referenced by KeStartAllProcessors(), PsConvertToGuiThread(), PspCreateThread(), PspReaper(), and PspThreadDelete().

02730 : 02731 02732 This routine deletes a kernel stack and the no-access page within 02733 the non-pagable portion of the system address space. 02734 02735 Arguments: 02736 02737 PointerKernelStack - Supplies a pointer to the base of the kernel stack. 02738 02739 LargeStack - Supplies the value TRUE if a large stack is being deleted. 02740 FALSE if a small stack is to be deleted. 02741 02742 Return Value: 02743 02744 None. 02745 02746 Environment: 02747 02748 Kernel mode. APCs Disabled. 02749 02750 --*/ 02751 02752 { 02753 PMMPTE PointerPte; 02754 PMMPFN Pfn1; 02755 PFN_NUMBER NumberOfPages; 02756 ULONG NumberOfPtes; 02757 PFN_NUMBER PageFrameIndex; 02758 ULONG i; 02759 KIRQL OldIrql; 02760 MMPTE PteContents; 02761 02762 if (LargeStack) { 02763 #if defined(_IA64_) 02764 NumberOfPtes = BYTES_TO_PAGES (KERNEL_LARGE_STACK_SIZE + KERNEL_LARGE_BSTORE_SIZE); 02765 #else 02766 NumberOfPtes = BYTES_TO_PAGES (KERNEL_LARGE_STACK_SIZE); 02767 #endif 02768 } else { 02769 #if defined(_IA64_) 02770 NumberOfPtes = BYTES_TO_PAGES (KERNEL_STACK_SIZE + KERNEL_BSTORE_SIZE); 02771 #else 02772 NumberOfPtes = BYTES_TO_PAGES (KERNEL_STACK_SIZE); 02773 #endif 02774 } 02775 02776 PointerPte = MiGetPteAddress (PointerKernelStack); 02777 02778 // 02779 // PointerPte points to the guard page, point to the previous 02780 // page before removing physical pages. 02781 // 02782 02783 PointerPte -= 1; 02784 02785 LOCK_PFN (OldIrql); 02786 02787 // 02788 // Check to see if the stack page should be placed on the dead 02789 // kernel stack page list. The dead kernel stack list is a 02790 // singly linked list of kernel stacks from terminated threads. 02791 // The stacks are saved on a linked list up to a maximum number 02792 // to avoid the overhead of flushing the entire TB on all processors 02793 // everytime a thread terminates. The TB on all processors must 02794 // be flushed as kernel stacks reside in the non paged system part 02795 // of the address space. 02796 // 02797 02798 if ((!LargeStack) && 02799 (MmNumberDeadKernelStacks < MmMaximumDeadKernelStacks)) { 02800 02801 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 02802 02803 #if DBG 02804 { 02805 ULONG i = MmNumberDeadKernelStacks; 02806 PMMPFN PfnList = MmFirstDeadKernelStack; 02807 02808 while (i > 0) { 02809 i--; 02810 if ((PfnList != MmKstacks[i].Pfn) || 02811 (PfnList->PteAddress != MmKstacks[i].Pte)) { 02812 DbgPrint("MMPROCSUP: kstacks %p %ld. %p\n", 02813 PfnList, i, MmKstacks[i].Pfn); 02814 DbgBreakPoint(); 02815 } 02816 PfnList = PfnList->u1.NextStackPfn; 02817 } 02818 MmKstacks[MmNumberDeadKernelStacks].Pte = Pfn1->PteAddress; 02819 MmKstacks[MmNumberDeadKernelStacks].Pfn = Pfn1; 02820 } 02821 #endif //DBG 02822 02823 MmNumberDeadKernelStacks += 1; 02824 Pfn1->u1.NextStackPfn = MmFirstDeadKernelStack; 02825 MmFirstDeadKernelStack = Pfn1; 02826 02827 PERFINFO_DELETE_STACK(PointerPte, NumberOfPtes); 02828 02829 UNLOCK_PFN (OldIrql); 02830 02831 return; 02832 } 02833 02834 #if defined(_IA64_) 02835 02836 // 02837 // Since PointerKernelStack points to the center of the stack space, 02838 // the size of kernel backing store needs to be added to get the 02839 // top of the stack space. 02840 // 02841 02842 PointerPte = MiGetPteAddress (LargeStack ? 02843 (PCHAR)PointerKernelStack+KERNEL_LARGE_BSTORE_SIZE : 02844 (PCHAR)PointerKernelStack+KERNEL_BSTORE_SIZE); 02845 02846 // 02847 // PointerPte points to the guard page, point to the previous 02848 // page before removing physical pages. 
02849 // 02850 02851 PointerPte -= 1; 02852 02853 #endif 02854 02855 // 02856 // We have exceeded the limit of dead kernel stacks or this is a large 02857 // stack, delete this kernel stack. 02858 // 02859 02860 NumberOfPages = 0; 02861 for (i = 0; i < NumberOfPtes; i += 1) { 02862 02863 PteContents = *PointerPte; 02864 02865 if (PteContents.u.Hard.Valid == 1) { 02866 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents); 02867 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 02868 MiDecrementShareAndValidCount (Pfn1->PteFrame); 02869 02870 // 02871 // Set the pointer to PTE as empty so the page 02872 // is deleted when the reference count goes to zero. 02873 // 02874 02875 MI_SET_PFN_DELETED (Pfn1); 02876 MiDecrementShareCountOnly (MI_GET_PAGE_FRAME_FROM_PTE (&PteContents)); 02877 NumberOfPages += 1; 02878 } 02879 PointerPte -= 1; 02880 } 02881 02882 #if defined(_IA64_) 02883 MmKernelStackPages -= NumberOfPtes + 2 + (MM_STACK_ALIGNMENT?1:0); 02884 02885 MiReleaseSystemPtes (PointerPte, 02886 NumberOfPtes + 2 + (MM_STACK_ALIGNMENT?1:0), 02887 SystemPteSpace); 02888 #else 02889 MmKernelStackPages -= NumberOfPtes + 1 + (MM_STACK_ALIGNMENT?1:0); 02890 02891 MiReleaseSystemPtes (PointerPte, 02892 NumberOfPtes + 1 + (MM_STACK_ALIGNMENT?1:0), 02893 SystemPteSpace); 02894 #endif 02895 02896 // 02897 // Update the count of available resident pages. 02898 // 02899 02900 MmKernelStackResident -= NumberOfPages; 02901 MmResidentAvailablePages += NumberOfPages; 02902 MM_BUMP_COUNTER(10, NumberOfPages); 02903 MmProcessCommit -= NumberOfPtes; 02904 02905 MmLargeStacks -= LargeStack; 02906 MmSmallStacks -= !LargeStack; 02907 UNLOCK_PFN (OldIrql); 02908 02909 // 02910 // Return commitment. 02911 // 02912 02913 MiReturnCommitment (NumberOfPtes); 02914 MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_KERNEL_STACK_DELETE, NumberOfPtes); 02915 02916 return; 02917 }

VOID MmDeleteProcessAddressSpace (IN PEPROCESS Process)
 

Definition at line 1398 of file procsup.c.

References ASSERT, FreePageList, HYPER_SPACE, LOCK_PFN, MI_GET_DIRECTORY_FRAME_FROM_PROCESS, MI_GET_PAGE_FRAME_FROM_PTE, MI_PFN_ELEMENT, MI_SET_PFN_DELETED, MiContractPagingFiles(), MiDecrementShareAndValidCount, MiDecrementShareCountOnly, MiGetPpeOffset, MiInsertPageInList(), MiMapPageInHyperSpace(), MiReturnCommitment(), MiUnmapPageInHyperSpace, MM_BUMP_COUNTER, MM_DBG_COMMIT_RETURN_PROCESS_DELETE, MM_PROCESS_COMMIT_CHARGE, MM_PROCESS_CREATE_CHARGE, MM_TRACK_COMMIT, MmPageLocationList, MmProcessCommit, MmResidentAvailablePages, NULL, _MMPFN::PteFrame, _MMPFN::u3, and UNLOCK_PFN.

Referenced by PspProcessDelete().

01404 : 01405 01406 This routine deletes a process's Page Directory and working set page. 01407 01408 Arguments: 01409 01410 Process - Supplies a pointer to the deleted process. 01411 01412 Return Value: 01413 01414 None. 01415 01416 Environment: 01417 01418 Kernel mode. APCs Disabled. 01419 01420 --*/ 01421 01422 { 01423 PMMPFN Pfn1; 01424 KIRQL OldIrql; 01425 PFN_NUMBER PageFrameIndex; 01426 PFN_NUMBER PageFrameIndex2; 01427 #if defined (_WIN64) 01428 PMMPTE PageDirectoryParent; 01429 PMMPTE Ppe; 01430 #endif 01431 #if defined (_X86PAE_) 01432 ULONG i; 01433 KIRQL OldIrql2; 01434 PMMPTE PointerPte; 01435 PVOID PoolBlock; 01436 PFN_NUMBER PageDirectories[PD_PER_SYSTEM]; 01437 01438 PoolBlock = NULL; 01439 #endif 01440 01441 // 01442 // Return commitment. 01443 // 01444 01445 MiReturnCommitment (MM_PROCESS_COMMIT_CHARGE); 01446 MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_PROCESS_DELETE, MM_PROCESS_COMMIT_CHARGE); 01447 ASSERT (Process->CommitCharge == 0); 01448 01449 // 01450 // Remove the working set list page from the deleted process. 01451 // 01452 01453 Pfn1 = MI_PFN_ELEMENT (Process->WorkingSetPage); 01454 01455 LOCK_PFN (OldIrql); 01456 MmProcessCommit -= MM_PROCESS_COMMIT_CHARGE; 01457 01458 if (Process->AddressSpaceInitialized == 2) { 01459 01460 MI_SET_PFN_DELETED (Pfn1); 01461 01462 MiDecrementShareAndValidCount (Pfn1->PteFrame); 01463 MiDecrementShareCountOnly (Process->WorkingSetPage); 01464 01465 ASSERT ((Pfn1->u3.e2.ReferenceCount == 0) || (Pfn1->u3.e1.WriteInProgress)); 01466 01467 // 01468 // Remove the hyper space page table page from the deleted process. 01469 // 01470 01471 #if defined (_X86PAE_) 01472 01473 PageFrameIndex = (PFN_NUMBER)Process->Pcb.DirectoryTableBase[1]; 01474 // 01475 // Remove the second hyper space page table page. 01476 // 01477 01478 PointerPte = (PMMPTE)MiMapPageInHyperSpace (PageFrameIndex, &OldIrql2); 01479 PageFrameIndex2 = MI_GET_PAGE_FRAME_FROM_PTE(PointerPte); 01480 MiUnmapPageInHyperSpace (OldIrql2); 01481 01482 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex2); 01483 01484 MI_SET_PFN_DELETED (Pfn1); 01485 01486 MiDecrementShareAndValidCount (Pfn1->PteFrame); 01487 MiDecrementShareCountOnly (PageFrameIndex2); 01488 01489 ASSERT ((Pfn1->u3.e2.ReferenceCount == 0) || (Pfn1->u3.e1.WriteInProgress)); 01490 #else 01491 PageFrameIndex = 01492 MI_GET_PAGE_FRAME_FROM_PTE((PMMPTE)(&(Process->Pcb.DirectoryTableBase[1]))); 01493 #endif 01494 01495 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 01496 01497 MI_SET_PFN_DELETED (Pfn1); 01498 01499 MiDecrementShareAndValidCount (Pfn1->PteFrame); 01500 MiDecrementShareCountOnly (PageFrameIndex); 01501 ASSERT ((Pfn1->u3.e2.ReferenceCount == 0) || (Pfn1->u3.e1.WriteInProgress)); 01502 01503 // 01504 // Remove the page directory page. 
01505 // 01506 01507 PageFrameIndex = MI_GET_DIRECTORY_FRAME_FROM_PROCESS(Process); 01508 01509 #if defined (_X86PAE_) 01510 01511 PointerPte = (PMMPTE)MiMapPageInHyperSpace (PageFrameIndex, &OldIrql2); 01512 for (i = 0; i < PD_PER_SYSTEM - 1; i += 1) { 01513 PageDirectories[i] = MI_GET_PAGE_FRAME_FROM_PTE(&PointerPte[i]); 01514 } 01515 MiUnmapPageInHyperSpace (OldIrql2); 01516 01517 for (i = 0; i < PD_PER_SYSTEM - 1; i += 1) { 01518 Pfn1 = MI_PFN_ELEMENT (PageDirectories[i]); 01519 01520 MI_SET_PFN_DELETED (Pfn1); 01521 01522 MiDecrementShareAndValidCount (PageDirectories[i]); 01523 MiDecrementShareAndValidCount (Pfn1->PteFrame); 01524 01525 ASSERT ((Pfn1->u3.e2.ReferenceCount == 0) || (Pfn1->u3.e1.WriteInProgress)); 01526 } 01527 #endif 01528 01529 #if defined (_WIN64) 01530 01531 // 01532 // Get a pointer to the top-level page directory parent page via 01533 // its KSEG0 address. 01534 // 01535 01536 PageDirectoryParent = KSEG_ADDRESS (PageFrameIndex); 01537 01538 // 01539 // Remove the hyper space page directory page from the deleted process. 01540 // 01541 01542 Ppe = &PageDirectoryParent[MiGetPpeOffset(HYPER_SPACE)]; 01543 PageFrameIndex2 = MI_GET_PAGE_FRAME_FROM_PTE(Ppe); 01544 Pfn1 = MI_PFN_ELEMENT(PageFrameIndex2); 01545 01546 MI_SET_PFN_DELETED (Pfn1); 01547 MiDecrementShareAndValidCount (Pfn1->PteFrame); 01548 MiDecrementShareCountOnly (PageFrameIndex2); 01549 ASSERT ((Pfn1->u3.e2.ReferenceCount == 0) || (Pfn1->u3.e1.WriteInProgress)); 01550 #endif 01551 01552 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 01553 01554 MI_SET_PFN_DELETED (Pfn1); 01555 01556 MiDecrementShareAndValidCount (PageFrameIndex); 01557 01558 MiDecrementShareCountOnly (PageFrameIndex); 01559 01560 ASSERT ((Pfn1->u3.e2.ReferenceCount == 0) || (Pfn1->u3.e1.WriteInProgress)); 01561 01562 #if defined (_X86PAE_) 01563 01564 // 01565 // Free the page directory page pointers. 01566 // 01567 01568 PoolBlock = MiPaeFree ((PPAE_ENTRY)Process->PaeTop); 01569 #endif 01570 01571 #if defined(_IA64_) 01572 01573 // 01574 // Free the session space page directory parent page 01575 // 01576 01577 PageFrameIndex = 01578 MI_GET_PAGE_FRAME_FROM_PTE((PMMPTE)(&(Process->Pcb.SessionParentBase))); 01579 01580 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 01581 01582 MI_SET_PFN_DELETED (Pfn1); 01583 01584 MiDecrementShareAndValidCount (Pfn1->PteFrame); 01585 01586 MiDecrementShareCountOnly (PageFrameIndex); 01587 01588 ASSERT ((Pfn1->u3.e2.ReferenceCount == 0) || (Pfn1->u3.e1.WriteInProgress)); 01589 01590 #endif 01591 01592 } else { 01593 01594 // 01595 // Process initialization never completed, just return the pages 01596 // to the free list. 01597 // 01598 01599 MiInsertPageInList (MmPageLocationList[FreePageList], 01600 Process->WorkingSetPage); 01601 01602 #if defined (_WIN64) 01603 01604 // 01605 // Get a pointer to the top-level page directory parent page via 01606 // its KSEG0 address. 
01607 // 01608 01609 PageFrameIndex = 01610 MI_GET_PAGE_FRAME_FROM_PTE((PMMPTE)(&(Process->Pcb.DirectoryTableBase[0]))); 01611 01612 PageDirectoryParent = KSEG_ADDRESS (PageFrameIndex); 01613 01614 Ppe = &PageDirectoryParent[MiGetPpeOffset(HYPER_SPACE)]; 01615 PageFrameIndex2 = MI_GET_PAGE_FRAME_FROM_PTE(Ppe); 01616 01617 MiInsertPageInList (MmPageLocationList[FreePageList], 01618 PageFrameIndex2); 01619 #endif 01620 01621 #if defined (_X86PAE_) 01622 PageFrameIndex = MI_GET_DIRECTORY_FRAME_FROM_PROCESS(Process); 01623 01624 PointerPte = (PMMPTE)MiMapPageInHyperSpace (PageFrameIndex, &OldIrql2); 01625 for (i = 0; i < PD_PER_SYSTEM - 1; i += 1) { 01626 PageDirectories[i] = MI_GET_PAGE_FRAME_FROM_PTE(&PointerPte[i]); 01627 } 01628 MiUnmapPageInHyperSpace (OldIrql2); 01629 01630 for (i = 0; i < PD_PER_SYSTEM - 1; i += 1) { 01631 MiInsertPageInList (MmPageLocationList[FreePageList], 01632 PageDirectories[i]); 01633 } 01634 01635 // 01636 // Free the second hyper space page table page. 01637 // 01638 01639 PageFrameIndex = (PFN_NUMBER)Process->Pcb.DirectoryTableBase[1]; 01640 01641 PointerPte = (PMMPTE)MiMapPageInHyperSpace (PageFrameIndex, &OldIrql2); 01642 PageFrameIndex2 = MI_GET_PAGE_FRAME_FROM_PTE(PointerPte); 01643 MiUnmapPageInHyperSpace (OldIrql2); 01644 MiInsertPageInList (MmPageLocationList[FreePageList], PageFrameIndex2); 01645 01646 // 01647 // Free the first hyper space page table page. 01648 // 01649 01650 MiInsertPageInList (MmPageLocationList[FreePageList], 01651 (PFN_NUMBER)Process->Pcb.DirectoryTableBase[1]); 01652 01653 MiInsertPageInList (MmPageLocationList[FreePageList], 01654 MI_GET_DIRECTORY_FRAME_FROM_PROCESS(Process)); 01655 01656 // 01657 // Free the page directory page pointers. 01658 // 01659 01660 PoolBlock = MiPaeFree ((PPAE_ENTRY)Process->PaeTop); 01661 #else 01662 01663 MiInsertPageInList (MmPageLocationList[FreePageList], 01664 MI_GET_PAGE_FRAME_FROM_PTE((PMMPTE)(&(Process->Pcb.DirectoryTableBase[1])))); 01665 01666 MiInsertPageInList (MmPageLocationList[FreePageList], 01667 MI_GET_PAGE_FRAME_FROM_PTE((PMMPTE)(&(Process->Pcb.DirectoryTableBase[0])))); 01668 #endif 01669 #if defined(_IA64_) 01670 MiInsertPageInList (MmPageLocationList[FreePageList], Process->Pcb.SessionParentBase); 01671 #endif 01672 } 01673 01674 MmResidentAvailablePages += MM_PROCESS_CREATE_CHARGE; 01675 MM_BUMP_COUNTER(7, MM_PROCESS_CREATE_CHARGE); 01676 01677 UNLOCK_PFN (OldIrql); 01678 01679 #if defined (_X86PAE_) 01680 if (PoolBlock != NULL) { 01681 MiPaeFreeEntirePage (PoolBlock); 01682 } 01683 #endif 01684 01685 // 01686 // Check to see if the paging files should be contracted. 01687 // 01688 01689 MiContractPagingFiles (); 01690 01691 return; 01692 }

VOID MmDeleteTeb (IN PEPROCESS TargetProcess,
IN PVOID TebBase)
 

Definition at line 4843 of file procsup.c.

References ASSERT, _MMVAD::EndingVpn, ExFreePool(), KeAttachProcess(), KeDetachProcess(), List, _MMSECURE_ENTRY::List, LOCK_WS_AND_ADDRESS_SPACE, MI_VA_TO_VPN, MiDeleteFreeVm(), MiLocateAddress(), MiRemoveVad(), NT_SUCCESS, NTSTATUS(), NULL, ROUND_TO_PAGES, _MMVAD::StartingVpn, Status, _MMVAD::u, _MMVAD::u2, _MMVAD::u3, and UNLOCK_WS_AND_ADDRESS_SPACE.

Referenced by PspCreateThread(), and PspExitThread().

04850 : 04851 04852 This routine deletes a TEB page within the target process. 04853 04854 Arguments: 04855 04856 TargetProcess - Supplies a pointer to the process in which to delete 04857 the TEB. 04858 04859 TebBase - Supplies the base address of the TEB to delete. 04860 04861 Return Value: 04862 04863 None. 04864 04865 Environment: 04866 04867 Kernel mode. 04868 04869 --*/ 04870 04871 { 04872 PVOID EndingAddress; 04873 PMMVAD Vad; 04874 NTSTATUS Status; 04875 PMMSECURE_ENTRY Secure; 04876 04877 EndingAddress = ((PCHAR)TebBase + 04878 ROUND_TO_PAGES (sizeof(TEB)) - 1); 04879 04880 // 04881 // Attach to the specified process. 04882 // 04883 04884 KeAttachProcess (&TargetProcess->Pcb); 04885 04886 // 04887 // Get the address creation mutex to block multiple threads from 04888 // creating or deleting address space at the same time and 04889 // get the working set mutex so virtual address descriptors can 04890 // be inserted and walked. 04891 // 04892 04893 LOCK_WS_AND_ADDRESS_SPACE (TargetProcess); 04894 04895 Vad = MiLocateAddress (TebBase); 04896 04897 ASSERT (Vad != (PMMVAD)NULL); 04898 04899 ASSERT ((Vad->StartingVpn == MI_VA_TO_VPN (TebBase)) && 04900 (Vad->EndingVpn == MI_VA_TO_VPN (EndingAddress))); 04901 04902 // 04903 // If someone has secured the TEB (in addition to the standard securing 04904 // that was done by memory management on creation, then don't delete it 04905 // now - just leave it around until the entire process is deleted. 04906 // 04907 04908 ASSERT (Vad->u.VadFlags.NoChange == 1); 04909 if (Vad->u2.VadFlags2.OneSecured) { 04910 Status = STATUS_SUCCESS; 04911 } 04912 else { 04913 ASSERT (Vad->u2.VadFlags2.MultipleSecured); 04914 ASSERT (IsListEmpty (&Vad->u3.List) == 0); 04915 04916 // 04917 // If there's only one entry, then that's the one we defined when we 04918 // initially created the TEB. So TEB deletion can take place right 04919 // now. If there's more than one entry, let the TEB sit around until 04920 // the process goes away. 04921 // 04922 04923 Secure = CONTAINING_RECORD (Vad->u3.List.Flink, 04924 MMSECURE_ENTRY, 04925 List); 04926 04927 if (Secure->List.Flink == &Vad->u3.List) { 04928 Status = STATUS_SUCCESS; 04929 } 04930 else { 04931 Status = STATUS_NOT_FOUND; 04932 } 04933 } 04934 04935 if (NT_SUCCESS(Status)) { 04936 04937 MiRemoveVad (Vad); 04938 ExFreePool (Vad); 04939 04940 MiDeleteFreeVm (TebBase, EndingAddress); 04941 } 04942 04943 UNLOCK_WS_AND_ADDRESS_SPACE (TargetProcess); 04944 KeDetachProcess(); 04945 }

POOL_TYPE MmDeterminePoolType (IN PVOID VirtualAddress)
 

Definition at line 437 of file allocpag.c.

References MI_IS_SESSION_POOL_ADDRESS, MmPagedPoolEnd, MmPagedPoolStart, NonPagedPool, PagedPool, PagedPoolSession, and TRUE.

Referenced by CheckPool(), ExFreePool(), ExFreePoolSanityChecks(), ExFreePoolWithTag(), ExInitializeResource(), ExInitializeResourceLite(), and ExReinitializeResourceLite().

00443 : 00444 00445 This function determines which pool a virtual address resides within. 00446 00447 Arguments: 00448 00449 VirtualAddress - Supplies the virtual address to determine which pool 00450 it resides within. 00451 00452 Return Value: 00453 00454 Returns the POOL_TYPE (PagedPool, NonPagedPool, PagedPoolSession or 00455 NonPagedPoolSession), it never returns any information about 00456 MustSucceed pool types. 00457 00458 Environment: 00459 00460 Kernel Mode Only. 00461 00462 --*/ 00463 00464 { 00465 if ((VirtualAddress >= MmPagedPoolStart) && 00466 (VirtualAddress <= MmPagedPoolEnd)) { 00467 return PagedPool; 00468 } 00469 00470 if (MI_IS_SESSION_POOL_ADDRESS (VirtualAddress) == TRUE) { 00471 return PagedPoolSession; 00472 } 00473 00474 return NonPagedPool; 00475 }
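A small sketch (assumed caller, loosely modeled on the pool-free paths listed above; P is the block being freed) of acting on the returned POOL_TYPE:

    POOL_TYPE PoolType;

    PoolType = MmDeterminePoolType (P);

    if ((PoolType == PagedPool) || (PoolType == PagedPoolSession)) {

        //
        // Pageable pool may only be touched below DISPATCH_LEVEL.
        //

        ASSERT (KeGetCurrentIrql () <= APC_LEVEL);
    }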

BOOLEAN MmDisableModifiedWriteOfSection (IN PSECTION_OBJECT_POINTERS SectionObjectPointer)
 

Definition at line 4397 of file modwrite.c.

References LOCK_PFN, NULL, _CONTROL_AREA::NumberOfMappedViews, _CONTROL_AREA::u, and UNLOCK_PFN.

Referenced by CcInitializeCacheMap().

04403 : 04404 04405 This function disables page writing by the modified page writer for 04406 the section which is mapped by the specified file object pointer. 04407 04408 This should only be used for files which CANNOT be mapped by user 04409 programs, e.g., volume files, directory files, etc. 04410 04411 Arguments: 04412 04413 SectionObjectPointer - Supplies a pointer to the section objects 04414 04415 04416 Return Value: 04417 04418 Returns TRUE if the operation was a success, FALSE if either 04419 the there is no section or the section already has a view. 04420 04421 --*/ 04422 04423 { 04424 PCONTROL_AREA ControlArea; 04425 KIRQL OldIrql; 04426 BOOLEAN state = 1; 04427 04428 LOCK_PFN (OldIrql); 04429 04430 ControlArea = ((PCONTROL_AREA)(SectionObjectPointer->DataSectionObject)); 04431 04432 if (ControlArea != NULL) { 04433 if (ControlArea->NumberOfMappedViews == 0) { 04434 04435 // 04436 // There are no views to this section, indicate no modified 04437 // page writing is allowed. 04438 // 04439 04440 ControlArea->u.Flags.NoModifiedWriting = 1; 04441 } else { 04442 04443 // 04444 // Return the current modified page writing state. 04445 // 04446 04447 state = (BOOLEAN)ControlArea->u.Flags.NoModifiedWriting; 04448 } 04449 } else { 04450 04451 // 04452 // This file no longer has an associated segment. 04453 // 04454 04455 state = 0; 04456 } 04457 04458 UNLOCK_PFN (OldIrql); 04459 return state; 04460 }
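A sketch of the intended calling pattern (assumed file-system caller; Fcb is hypothetical): a stream that user programs cannot map, such as a volume or directory file, disables modified-page writing before the cache manager begins using its section:

    BOOLEAN Disabled;

    Disabled = MmDisableModifiedWriteOfSection (&Fcb->SectionObjectPointers);

    //
    // FALSE means either no data section exists for the stream yet, or
    // views are already mapped and the section's existing NoModifiedWriting
    // state (still clear) was returned instead.
    //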

NTSTATUS MmDispatchWin32Callout (IN PKWIN32_CALLOUT Function,
IN PKWIN32_CALLOUT WorkerCallback OPTIONAL,
IN PVOID Arg,
IN PULONG SessionId OPTIONAL)
 

Referenced by IopNotifyDeviceClassChange(), IopNotifyHwProfileChange(), IopNotifySetupDeviceArrival(), IopNotifyTargetDeviceChange(), IoRegisterPlugPlayNotification(), NtAssignProcessToJobObject(), NtSetInformationJobObject(), and PspJobDelete().

VOID MmEnablePAT (VOID)
 

Referenced by KiInitializePAT().

LOGICAL MmEnforceWorkingSetLimit (IN PMMSUPPORT WsInfo,
IN LOGICAL Enable)
 

Definition at line 543 of file wslist.c.

References LOCK_EXPANSION, LOCK_SYSTEM_WS, LOCK_WS, MmSystemCacheWs, PsGetCurrentProcess, UNLOCK_EXPANSION, UNLOCK_SYSTEM_WS, and UNLOCK_WS.

Referenced by NtSetInformationJobObject(), PspAddProcessToJob(), and PspApplyJobLimitsToProcess().

00550 : 00551 00552 This function enables hard enforcement of the working set maximum for 00553 the specified WsInfo. 00554 00555 Arguments: 00556 00557 WsInfo - Supplies the working set info pointer. 00558 00559 Enable - Supplies TRUE if enabling hard enforcement, FALSE if not. 00560 00561 Return Value: 00562 00563 The previous state of the working set enforcement. 00564 00565 Environment: 00566 00567 Kernel mode, APCs disabled. The working set lock must NOT be held. 00568 The caller guarantees that the target WsInfo cannot go away. 00569 00570 --*/ 00571 00572 { 00573 KIRQL OldIrql; 00574 00575 LOGICAL PreviousWorkingSetEnforcement; 00576 00577 LOCK_EXPANSION (OldIrql); 00578 00579 PreviousWorkingSetEnforcement = WsInfo->u.Flags.WorkingSetHard; 00580 00581 WsInfo->u.Flags.WorkingSetHard = Enable; 00582 00583 UNLOCK_EXPANSION (OldIrql); 00584 00585 #if 0 00586 00587 PEPROCESS CurrentProcess; 00588 00589 // 00590 // Get the working set lock and disable APCs. 00591 // The working set could be trimmed at this point if it is excessive. 00592 // 00593 // The working set lock cannot be acquired at this point without updating 00594 // ps in order to avoid deadlock. 00595 // 00596 00597 if (WsInfo == &MmSystemCacheWs) { 00598 LOCK_SYSTEM_WS (OldIrql2); 00599 UNLOCK_SYSTEM_WS (OldIrql2); 00600 } 00601 else if (WsInfo->u.Flags.SessionSpace == 0) { 00602 CurrentProcess = PsGetCurrentProcess (); 00603 LOCK_WS (CurrentProcess); 00604 00605 UNLOCK_WS (CurrentProcess); 00606 } 00607 #endif 00608 00609 return PreviousWorkingSetEnforcement; 00610 }
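A minimal sketch (assumed caller in the job-object paths listed above; the Vm working-set field name on the process object is an assumption here) of enabling hard enforcement and later restoring the previous state:

    LOGICAL WasHardEnforced;

    WasHardEnforced = MmEnforceWorkingSetLimit (&Process->Vm, TRUE);

    //
    // When the job limit is later removed, put the old state back.
    //

    MmEnforceWorkingSetLimit (&Process->Vm, WasHardEnforced);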

NTSTATUS MmExtendSection (IN PVOID SectionToExtend,
IN OUT PLARGE_INTEGER NewSectionSize,
IN ULONG IgnoreFileSizeChecking)
 

Definition at line 201 of file extsect.c.

References ASSERT, _MMEXTEND_INFO::CommittedSize, _SEGMENT::ControlArea, _SUBSECTION::ControlArea, DbgPrint, _CONTROL_AREA::DereferenceList, EX_REAL_POOL_USAGE, ExAcquireResourceExclusive, ExAllocatePoolWithTag, ExFreePool(), ExReleaseResource, _SEGMENT::ExtendInfo, FALSE, _CONTROL_AREA::FilePointer, FsRtlGetFileSize(), FsRtlSetFileSize(), KeEnterCriticalRegion, KeLeaveCriticalRegion, Mi4KStartForSubsection, Mi4KStartFromSubsection, MI_GET_PROTECTION_FROM_SOFT_PTE, MI_MAXIMUM_SECTION_SIZE, MI_WRITE_INVALID_PTE, MiGetSubsectionAddressForPte, MM4K_MASK, MM4K_SHIFT, MMPTE, MmSectionBasedMutex, MmSectionExtendResource, MmSectionExtendSetResource, _SUBSECTION::NextSubsection, NonPagedPool, _CONTROL_AREA::NonPagedPoolUsage, NT_SUCCESS, NTSTATUS(), NULL, _SUBSECTION::NumberOfFullSectors, PAGE_SHIFT, PAGE_SIZE, PAGED_CODE, PagedPool, _CONTROL_AREA::PagedPoolUsage, PSECTION, _SUBSECTION::PtesInSubsection, ROUND_TO_PAGES, _CONTROL_AREA::Segment, _SEGMENT::SegmentPteTemplate, _SEGMENT::SizeOfSegment, Status, SUBSECTION, _SUBSECTION::SubsectionBase, _SEGMENT::TotalNumberOfPtes, TRUE, _CONTROL_AREA::u, _SUBSECTION::u, _MMPTE::u, and _SUBSECTION::UnusedPtes.

Referenced by CcInitializeCacheMap(), CcSetFileSizes(), MmCreateSection(), NtAllocateVirtualMemory(), and NtExtendSection().

00209 : 00210 00211 This function extends the size of the specified section. If 00212 the current size of the section is greater than or equal to the 00213 specified section size, the size is not updated. 00214 00215 Arguments: 00216 00217 Section - Supplies a pointer to a referenced section object. 00218 00219 NewSectionSize - Supplies the new size for the section object. 00220 00221 IgnoreFileSizeChecking - Supplies the value TRUE is file size 00222 checking should be ignored (i.e., it 00223 is being called from a file system which 00224 has already done the checks). FALSE 00225 if the checks still need to be made. 00226 00227 Return Value: 00228 00229 Returns the status 00230 00231 TBS 00232 00233 00234 --*/ 00235 00236 { 00237 PMMPTE PointerPte; 00238 PMMPTE LastPte; 00239 PMMPTE ExtendedPtes; 00240 MMPTE TempPte; 00241 PCONTROL_AREA ControlArea; 00242 PSECTION Section; 00243 PSUBSECTION LastSubsection; 00244 PSUBSECTION ExtendedSubsection; 00245 ULONG RequiredPtes; 00246 ULONG NumberOfPtes; 00247 ULONG PtesUsed; 00248 ULONG AllocationSize; 00249 UINT64 EndOfFile; 00250 UINT64 NumberOfPtesForEntireFile; 00251 NTSTATUS Status; 00252 LARGE_INTEGER NumberOf4KsForEntireFile; 00253 LARGE_INTEGER Starting4K; 00254 LARGE_INTEGER Last4KChunk; 00255 00256 PAGED_CODE(); 00257 00258 Section = (PSECTION)SectionToExtend; 00259 00260 // 00261 // Make sure the section is really extendable - physical and 00262 // image sections are not. 00263 // 00264 00265 ControlArea = Section->Segment->ControlArea; 00266 00267 if ((ControlArea->u.Flags.PhysicalMemory || ControlArea->u.Flags.Image) || 00268 (ControlArea->FilePointer == NULL)) { 00269 return STATUS_SECTION_NOT_EXTENDED; 00270 } 00271 00272 // 00273 // Acquire the section extension mutex, this blocks other threads from 00274 // updating the size at the same time. 00275 // 00276 00277 KeEnterCriticalRegion (); 00278 ExAcquireResourceExclusive (&MmSectionExtendResource, TRUE); 00279 00280 // 00281 // Each subsection is limited to 16TB - 64K because the NumberOfFullSectors 00282 // and various other fields in the subsection are ULONGs. For NT64, the 00283 // allocation could be split into multiple subsections as needed to 00284 // conform to this limit - this is not worth doing for NT32 unless 00285 // sparse prototype PTE allocations are supported. 00286 // 00287 00288 // This must be a multiple of the size of prototype pte allocation so any 00289 // given prototype pte allocation will have the same subsection for all 00290 // PTEs. 00291 // 00292 // The total section size is limited to 16PB - 4K because of the 00293 // StartingSector4132 field in each subsection. 
00294 // 00295 00296 NumberOfPtesForEntireFile = (NewSectionSize->QuadPart + PAGE_SIZE - 1) >> PAGE_SHIFT; 00297 00298 NumberOfPtes = (ULONG)NumberOfPtesForEntireFile; 00299 00300 if (NewSectionSize->QuadPart > MI_MAXIMUM_SECTION_SIZE) { 00301 Status = STATUS_SECTION_TOO_BIG; 00302 goto ReleaseAndReturn; 00303 } 00304 00305 if (NumberOfPtesForEntireFile > (UINT64)((MAXULONG_PTR / sizeof(MMPTE)) - sizeof (SEGMENT))) { 00306 Status = STATUS_SECTION_TOO_BIG; 00307 goto ReleaseAndReturn; 00308 } 00309 00310 if (NumberOfPtesForEntireFile > (UINT64)NewSectionSize->QuadPart) { 00311 Status = STATUS_SECTION_TOO_BIG; 00312 goto ReleaseAndReturn; 00313 } 00314 00315 if (ControlArea->u.Flags.WasPurged == 0) { 00316 00317 if ((UINT64)NewSectionSize->QuadPart <= (UINT64)Section->SizeOfSection.QuadPart) { 00318 *NewSectionSize = Section->SizeOfSection; 00319 goto ReleaseAndReturnSuccess; 00320 } 00321 } 00322 00323 // 00324 // If a file handle was specified, set the allocation size of the file. 00325 // 00326 00327 if (IgnoreFileSizeChecking == FALSE) { 00328 00329 // 00330 // Release the resource so we don't deadlock with the file 00331 // system trying to extend this section at the same time. 00332 // 00333 00334 ExReleaseResource (&MmSectionExtendResource); 00335 00336 // 00337 // Get a different resource to single thread query/set operations. 00338 // 00339 00340 ExAcquireResourceExclusive (&MmSectionExtendSetResource, TRUE); 00341 00342 // 00343 // Query the file size to see if this file really needs extending. 00344 // 00345 // If the specified size is less than the current size, return 00346 // the current size. 00347 // 00348 00349 Status = FsRtlGetFileSize (ControlArea->FilePointer, 00350 (PLARGE_INTEGER)&EndOfFile); 00351 00352 if (!NT_SUCCESS (Status)) { 00353 ExReleaseResource (&MmSectionExtendSetResource); 00354 KeLeaveCriticalRegion (); 00355 return Status; 00356 } 00357 00358 if ((UINT64)NewSectionSize->QuadPart > EndOfFile) { 00359 00360 // 00361 // Don't allow section extension unless the section was originally 00362 // created with write access. The check couldn't be done at create 00363 // time without breaking existing binaries, so the caller gets the 00364 // error at this point instead. 00365 // 00366 00367 if (((Section->InitialPageProtection & PAGE_READWRITE) | 00368 (Section->InitialPageProtection & PAGE_EXECUTE_READWRITE)) == 0) { 00369 #if DBG 00370 DbgPrint("Section extension failed %x\n", Section); 00371 #endif 00372 ExReleaseResource (&MmSectionExtendSetResource); 00373 KeLeaveCriticalRegion (); 00374 return STATUS_SECTION_NOT_EXTENDED; 00375 } 00376 00377 // 00378 // Current file is smaller, attempt to set a new end of file. 00379 // 00380 00381 EndOfFile = *(PUINT64)NewSectionSize; 00382 00383 Status = FsRtlSetFileSize (ControlArea->FilePointer, 00384 (PLARGE_INTEGER)&EndOfFile); 00385 00386 if (!NT_SUCCESS (Status)) { 00387 ExReleaseResource (&MmSectionExtendSetResource); 00388 KeLeaveCriticalRegion (); 00389 return Status; 00390 } 00391 } 00392 00393 if (ControlArea->Segment->ExtendInfo) { 00394 ExAcquireFastMutex (&MmSectionBasedMutex); 00395 if (ControlArea->Segment->ExtendInfo) { 00396 ControlArea->Segment->ExtendInfo->CommittedSize = EndOfFile; 00397 } 00398 ExReleaseFastMutex (&MmSectionBasedMutex); 00399 } 00400 00401 // 00402 // Release the query/set resource and reacquire the extend section 00403 // resource. 
00404 // 00405 00406 ExReleaseResource (&MmSectionExtendSetResource); 00407 ExAcquireResourceExclusive (&MmSectionExtendResource, TRUE); 00408 } 00409 00410 // 00411 // Find the last subsection. 00412 // 00413 00414 ASSERT (ControlArea->u.Flags.GlobalOnlyPerSession == 0); 00415 00416 LastSubsection = (PSUBSECTION)(ControlArea + 1); 00417 00418 while (LastSubsection->NextSubsection != NULL ) { 00419 ASSERT (LastSubsection->UnusedPtes == 0); 00420 LastSubsection = LastSubsection->NextSubsection; 00421 } 00422 00423 #if DBG 00424 MiSubsectionConsistent(LastSubsection); 00425 #endif 00426 00427 // 00428 // Does the structure need extending? 00429 // 00430 00431 if (NumberOfPtes <= Section->Segment->TotalNumberOfPtes) { 00432 00433 // 00434 // The segment is already large enough, just update 00435 // the section size and return. 00436 // 00437 00438 Section->SizeOfSection = *NewSectionSize; 00439 if (Section->Segment->SizeOfSegment < (UINT64)NewSectionSize->QuadPart) { 00440 // 00441 // Only update if it is really bigger. 00442 // 00443 00444 Section->Segment->SizeOfSegment = *(PUINT64)NewSectionSize; 00445 00446 Mi4KStartFromSubsection(&Starting4K, LastSubsection); 00447 00448 Last4KChunk.QuadPart = (NewSectionSize->QuadPart >> MM4K_SHIFT) - Starting4K.QuadPart; 00449 00450 ASSERT (Last4KChunk.HighPart == 0); 00451 00452 LastSubsection->NumberOfFullSectors = Last4KChunk.LowPart; 00453 LastSubsection->u.SubsectionFlags.SectorEndOffset = 00454 NewSectionSize->LowPart & MM4K_MASK; 00455 #if DBG 00456 MiSubsectionConsistent(LastSubsection); 00457 #endif 00458 } 00459 goto ReleaseAndReturnSuccess; 00460 } 00461 00462 // 00463 // Add new structures to the section - locate the last subsection 00464 // and add there. 00465 // 00466 00467 RequiredPtes = NumberOfPtes - Section->Segment->TotalNumberOfPtes; 00468 PtesUsed = 0; 00469 00470 if (RequiredPtes < LastSubsection->UnusedPtes) { 00471 00472 // 00473 // There are ample PTEs to extend the section already allocated. 00474 // 00475 00476 PtesUsed = RequiredPtes; 00477 RequiredPtes = 0; 00478 00479 } else { 00480 PtesUsed = LastSubsection->UnusedPtes; 00481 RequiredPtes -= PtesUsed; 00482 } 00483 00484 LastSubsection->PtesInSubsection += PtesUsed; 00485 LastSubsection->UnusedPtes -= PtesUsed; 00486 ControlArea->Segment->SizeOfSegment += (ULONG_PTR)PtesUsed * PAGE_SIZE; 00487 ControlArea->Segment->TotalNumberOfPtes += PtesUsed; 00488 00489 if (RequiredPtes == 0) { 00490 00491 // 00492 // There is no extension necessary, update the high VBN. 00493 // 00494 00495 Mi4KStartFromSubsection(&Starting4K, LastSubsection); 00496 00497 Last4KChunk.QuadPart = (NewSectionSize->QuadPart >> MM4K_SHIFT) - Starting4K.QuadPart; 00498 00499 ASSERT (Last4KChunk.HighPart == 0); 00500 00501 LastSubsection->NumberOfFullSectors = Last4KChunk.LowPart; 00502 00503 LastSubsection->u.SubsectionFlags.SectorEndOffset = 00504 NewSectionSize->LowPart & MM4K_MASK; 00505 #if DBG 00506 MiSubsectionConsistent(LastSubsection); 00507 #endif 00508 } else { 00509 00510 // 00511 // An extension is required. Allocate paged pool 00512 // and populate it with prototype PTEs. 00513 // 00514 00515 AllocationSize = (ULONG) ROUND_TO_PAGES (RequiredPtes * sizeof(MMPTE)); 00516 00517 ExtendedPtes = (PMMPTE)ExAllocatePoolWithTag (PagedPool, 00518 AllocationSize, 00519 'ppmM'); 00520 00521 if (ExtendedPtes == NULL) { 00522 00523 // 00524 // The required pool could not be allocated. Reset 00525 // the subsection and control area fields to their 00526 // original values. 
00527 // 00528 00529 LastSubsection->PtesInSubsection -= PtesUsed; 00530 LastSubsection->UnusedPtes += PtesUsed; 00531 ControlArea->Segment->TotalNumberOfPtes -= PtesUsed; 00532 ControlArea->Segment->SizeOfSegment -= ((ULONG_PTR)PtesUsed * PAGE_SIZE); 00533 Status = STATUS_INSUFFICIENT_RESOURCES; 00534 goto ReleaseAndReturn; 00535 } 00536 00537 // 00538 // Allocate an extended subsection descriptor. 00539 // 00540 00541 ExtendedSubsection = (PSUBSECTION)ExAllocatePoolWithTag (NonPagedPool, 00542 sizeof(SUBSECTION), 00543 'bSmM' 00544 ); 00545 if (ExtendedSubsection == NULL) { 00546 00547 // 00548 // The required pool could not be allocated. Reset 00549 // the subsection and control area fields to their 00550 // original values. 00551 // 00552 00553 LastSubsection->PtesInSubsection -= PtesUsed; 00554 LastSubsection->UnusedPtes += PtesUsed; 00555 ControlArea->Segment->TotalNumberOfPtes -= PtesUsed; 00556 ControlArea->Segment->SizeOfSegment -= ((ULONG_PTR)PtesUsed * PAGE_SIZE); 00557 ExFreePool (ExtendedPtes); 00558 Status = STATUS_INSUFFICIENT_RESOURCES; 00559 goto ReleaseAndReturn; 00560 } 00561 00562 ControlArea->NonPagedPoolUsage += EX_REAL_POOL_USAGE(sizeof(SUBSECTION)); 00563 ControlArea->PagedPoolUsage += AllocationSize; 00564 00565 ASSERT (ControlArea->DereferenceList.Flink == NULL); 00566 00567 NumberOf4KsForEntireFile.QuadPart = 00568 ControlArea->Segment->SizeOfSegment >> MM4K_SHIFT; 00569 00570 Mi4KStartFromSubsection(&Starting4K, LastSubsection); 00571 00572 Last4KChunk.QuadPart = NumberOf4KsForEntireFile.QuadPart - 00573 Starting4K.QuadPart; 00574 00575 if (LastSubsection->u.SubsectionFlags.SectorEndOffset) { 00576 Last4KChunk.QuadPart += 1; 00577 } 00578 00579 ASSERT(Last4KChunk.HighPart == 0); 00580 00581 LastSubsection->NumberOfFullSectors = Last4KChunk.LowPart; 00582 LastSubsection->u.SubsectionFlags.SectorEndOffset = 0; 00583 00584 // 00585 // If the number of sectors doesn't completely fill the PTEs (this can 00586 // only happen when the page size is not MM4K), then fill it now. 
00587 // 00588 00589 if (LastSubsection->NumberOfFullSectors & ((1 << (PAGE_SHIFT - MM4K_SHIFT)) - 1)) { 00590 LastSubsection->NumberOfFullSectors += 1; 00591 } 00592 00593 #if DBG 00594 MiSubsectionConsistent(LastSubsection); 00595 #endif 00596 00597 ExtendedSubsection->u.LongFlags = 0; 00598 ExtendedSubsection->NextSubsection = NULL; 00599 ExtendedSubsection->UnusedPtes = (AllocationSize / sizeof(MMPTE)) - 00600 RequiredPtes; 00601 00602 ExtendedSubsection->ControlArea = ControlArea; 00603 ExtendedSubsection->PtesInSubsection = RequiredPtes; 00604 00605 Starting4K.QuadPart += LastSubsection->NumberOfFullSectors; 00606 00607 Mi4KStartForSubsection(&Starting4K, ExtendedSubsection); 00608 00609 Last4KChunk.QuadPart = 00610 (NewSectionSize->QuadPart >> MM4K_SHIFT) - Starting4K.QuadPart; 00611 00612 ASSERT(Last4KChunk.HighPart == 0); 00613 00614 ExtendedSubsection->NumberOfFullSectors = Last4KChunk.LowPart; 00615 ExtendedSubsection->u.SubsectionFlags.SectorEndOffset = 00616 NewSectionSize->LowPart & MM4K_MASK; 00617 00618 #if DBG 00619 MiSubsectionConsistent(ExtendedSubsection); 00620 #endif 00621 00622 ExtendedSubsection->SubsectionBase = ExtendedPtes; 00623 00624 PointerPte = ExtendedPtes; 00625 LastPte = ExtendedPtes + (AllocationSize / sizeof(MMPTE)); 00626 00627 if (ControlArea->FilePointer != NULL) { 00628 TempPte.u.Long = MiGetSubsectionAddressForPte(ExtendedSubsection); 00629 } 00630 #if DBG 00631 else { 00632 DbgPrint("MM: Extend with no control area file pointer %x %x\n", 00633 ExtendedSubsection, ControlArea); 00634 DbgBreakPoint(); 00635 } 00636 #endif 00637 00638 TempPte.u.Soft.Protection = ControlArea->Segment->SegmentPteTemplate.u.Soft.Protection; 00639 TempPte.u.Soft.Prototype = 1; 00640 ExtendedSubsection->u.SubsectionFlags.Protection = 00641 MI_GET_PROTECTION_FROM_SOFT_PTE(&TempPte); 00642 00643 while (PointerPte < LastPte) { 00644 MI_WRITE_INVALID_PTE (PointerPte, TempPte); 00645 PointerPte += 1; 00646 } 00647 00648 // 00649 // Link this into the list. 00650 // 00651 00652 LastSubsection->NextSubsection = ExtendedSubsection; 00653 ControlArea->Segment->TotalNumberOfPtes += RequiredPtes; 00654 00655 #if defined(_ALPHA_) && !defined(NT_UP) 00656 // 00657 // A memory barrier is required here to synchronize with 00658 // NtMapViewOfSection, which validates the specified offset against 00659 // the section object without holding lock synchronization. 00660 // This memory barrier forces the subsection chaining to be correct 00661 // before increasing the size in the section object. 00662 // 00663 __MB(); 00664 #endif 00665 00666 } 00667 00668 ControlArea->Segment->SizeOfSegment = *(PUINT64)NewSectionSize; 00669 Section->SizeOfSection = *NewSectionSize; 00670 00671 ReleaseAndReturnSuccess: 00672 00673 Status = STATUS_SUCCESS; 00674 00675 ReleaseAndReturn: 00676 00677 ExReleaseResource (&MmSectionExtendResource); 00678 KeLeaveCriticalRegion (); 00679 00680 return Status; 00681 }
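A sketch of a typical call (assumed caller along the lines of the cache-manager paths above; Section and NewFileSize are assumptions): extend the section to cover a new end of file, letting the routine return immediately if the segment is already large enough:

    LARGE_INTEGER NewSectionSize;
    NTSTATUS Status;

    NewSectionSize.QuadPart = NewFileSize;       // assumed: validated new EOF

    //
    // Pass TRUE for IgnoreFileSizeChecking when the file system has already
    // done the FsRtlGetFileSize/FsRtlSetFileSize work itself; pass FALSE to
    // have MmExtendSection query and, if needed, set the file size here.
    //

    Status = MmExtendSection (Section, &NewSectionSize, TRUE);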

BOOLEAN MmFlushImageSection (IN PSECTION_OBJECT_POINTERS SectionObjectPointer,
IN MMFLUSH_TYPE FlushType)
 

Definition at line 2094 of file flushsec.c.

References ASSERT, CheckImageSection, FALSE, LOCK_PFN, MiCheckControlArea(), MiCheckControlAreaStatus(), MiCleanSection(), MiHydra, MMFLUSH_TYPE, MmFlushForDelete, NULL, _CONTROL_AREA::NumberOfMappedViews, _LARGE_CONTROL_AREA::NumberOfSectionReferences, _CONTROL_AREA::NumberOfUserReferences, TRUE, _CONTROL_AREA::u, _LARGE_CONTROL_AREA::u, and UNLOCK_PFN.

Referenced by MiCanFileBeTruncatedInternal(), and UdfPurgeVolume().

02101 : 02102 02103 This function determines if any views of the specified image section 02104 are mapped, and if not, flushes valid pages (even modified ones) 02105 from the specified section and returns any used pages to the free 02106 list. This is accomplished by examining the prototype PTEs 02107 from the specified offset to the end of the section, and if 02108 any prototype PTEs are in the transition state, putting the 02109 prototype PTE back into its original state and putting the 02110 physical page on the free list. 02111 02112 Arguments: 02113 02114 SectionPointer - Supplies a pointer to a section object pointers 02115 within the FCB. 02116 02117 FlushType - Supplies the type of flush to check for. One of 02118 MmFlushForDelete or MmFlushForWrite. 02119 02120 Return Value: 02121 02122 Returns TRUE if either no section exists for the file object or 02123 the section is not mapped and the purge was done, FALSE otherwise. 02124 02125 --*/ 02126 02127 { 02128 PLIST_ENTRY Next; 02129 PCONTROL_AREA ControlArea; 02130 PLARGE_CONTROL_AREA LargeControlArea; 02131 KIRQL OldIrql; 02132 BOOLEAN state; 02133 BOOLEAN FinalControlArea; 02134 02135 02136 if (FlushType == MmFlushForDelete) { 02137 02138 // 02139 // Do a quick check to see if there are any mapped views for 02140 // the data section. If so, just return FALSE. 02141 // 02142 02143 LOCK_PFN (OldIrql); 02144 ControlArea = (PCONTROL_AREA)(SectionPointer->DataSectionObject); 02145 if (ControlArea != NULL) { 02146 if ((ControlArea->NumberOfUserReferences != 0) || 02147 (ControlArea->u.Flags.BeingCreated)) { 02148 UNLOCK_PFN (OldIrql); 02149 return FALSE; 02150 } 02151 } 02152 UNLOCK_PFN (OldIrql); 02153 } 02154 02155 // 02156 // Check the status of the control area. If the control area is in use 02157 // or the control area is being deleted, this operation cannot continue. 02158 // 02159 02160 state = MiCheckControlAreaStatus (CheckImageSection, 02161 SectionPointer, 02162 FALSE, 02163 &ControlArea, 02164 &OldIrql); 02165 02166 if (ControlArea == NULL) { 02167 return state; 02168 } 02169 02170 // 02171 // PFN LOCK IS NOW HELD! 02172 // 02173 02174 // 02175 // Repeat until there are no more control areas - multiple control areas 02176 // for the same image section occur to support user global DLLs - these DLLs 02177 // require data that is shared within a session but not across sessions. 02178 // Note this can only happen for Hydra. 02179 // 02180 02181 do { 02182 02183 // 02184 // Set the being deleted flag and up the number of mapped views 02185 // for the segment. Upping the number of mapped views prevents 02186 // the segment from being deleted and passed to the deletion thread 02187 // while we are forcing a delete. 02188 // 02189 02190 ControlArea->u.Flags.BeingDeleted = 1; 02191 ControlArea->NumberOfMappedViews = 1; 02192 FinalControlArea = TRUE; 02193 02194 if (MiHydra == FALSE) { 02195 ASSERT (ControlArea == 02196 (PCONTROL_AREA)SectionPointer->ImageSectionObject); 02197 } 02198 02199 else if (ControlArea->u.Flags.GlobalOnlyPerSession == 0) { 02200 } 02201 else if (IsListEmpty(&((PLARGE_CONTROL_AREA)ControlArea)->UserGlobalList)) { 02202 ASSERT (ControlArea == 02203 (PCONTROL_AREA)SectionPointer->ImageSectionObject); 02204 } 02205 else { 02206 02207 // 02208 // Check if there's only one image section in this control area, so 02209 // we don't reference the section object pointers as the 02210 // MiCleanSection call may result in its deletion. 
02211 // 02212 02213 FinalControlArea = FALSE; 02214 02215 // 02216 // There are multiple control areas, bump the reference count 02217 // on one of them (doesn't matter which one) so that it can't 02218 // go away. This ensures the section object pointers will stick 02219 // around even after the calls below so we can safely reloop to 02220 // flush any other remaining control areas. 02221 // 02222 02223 ASSERT (ControlArea->u.Flags.GlobalOnlyPerSession == 1); 02224 02225 Next = ((PLARGE_CONTROL_AREA)ControlArea)->UserGlobalList.Flink; 02226 02227 LargeControlArea = CONTAINING_RECORD (Next, 02228 LARGE_CONTROL_AREA, 02229 UserGlobalList); 02230 02231 ASSERT (LargeControlArea->u.Flags.GlobalOnlyPerSession == 1); 02232 02233 LargeControlArea->NumberOfSectionReferences += 1; 02234 } 02235 02236 // 02237 // This is a page file backed or image segment. The segment is being 02238 // deleted, remove all references to the paging file and physical 02239 // memory. 02240 // 02241 02242 UNLOCK_PFN (OldIrql); 02243 02244 MiCleanSection (ControlArea, TRUE); 02245 02246 // 02247 // Get the next Hydra control area. 02248 // 02249 02250 if (FinalControlArea == FALSE) { 02251 state = MiCheckControlAreaStatus (CheckImageSection, 02252 SectionPointer, 02253 FALSE, 02254 &ControlArea, 02255 &OldIrql); 02256 if (!ControlArea) { 02257 LOCK_PFN (OldIrql); 02258 LargeControlArea->NumberOfSectionReferences -= 1; 02259 MiCheckControlArea ((PCONTROL_AREA)LargeControlArea, 02260 NULL, 02261 OldIrql); 02262 } 02263 else { 02264 LargeControlArea->NumberOfSectionReferences -= 1; 02265 MiCheckControlArea ((PCONTROL_AREA)LargeControlArea, 02266 NULL, 02267 OldIrql); 02268 LOCK_PFN (OldIrql); 02269 } 02270 } else { 02271 state = TRUE; 02272 break; 02273 } 02274 02275 } while (ControlArea); 02276 02277 return state; 02278 }
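
Judging by its arguments and callers (MiCanFileBeTruncatedInternal and UdfPurgeVolume above), the routine listed here is MmFlushImageSection, which file systems consult before allowing an operation that conflicts with mapped image views. A minimal sketch of the usual delete-path check follows; the FCB layout and routine name are assumptions, and only the MmFlushImageSection call itself reflects the routine documented above.

#include <ntifs.h>

typedef struct _MY_FCB {
    SECTION_OBJECT_POINTERS SectionObjectPointers;   /* hypothetical FCB field */
} MY_FCB, *PMY_FCB;

NTSTATUS
MySetDispositionDelete (IN PMY_FCB Fcb)
{
    //
    // If the file is mapped as an image and the views cannot be purged,
    // the delete must be refused.
    //
    if (!MmFlushImageSection (&Fcb->SectionObjectPointers, MmFlushForDelete)) {
        return STATUS_CANNOT_DELETE;
    }

    return STATUS_SUCCESS;
}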

NTSTATUS MmFlushSection (IN PSECTION_OBJECT_POINTERS SectionObjectPointer,
                         IN PLARGE_INTEGER Offset OPTIONAL,
                         IN SIZE_T RegionSize,
                         OUT PIO_STATUS_BLOCK IoStatus,
                         IN ULONG AcquireFile)
 

Definition at line 668 of file flushsec.c.

References ASSERT, BYTE_OFFSET, FALSE, _CONTROL_AREA::FilePointer, _ETHREAD::ForwardClusterOnly, FsRtlAcquireFileForCcFlush(), FsRtlReleaseFileForCcFlush(), KeDelayExecutionThread(), KernelMode, LOCK_PFN, MiCheckControlArea(), MiFlushSectionInternal(), MmShortTime, _SUBSECTION::NextSubsection, NTSTATUS(), NULL, _CONTROL_AREA::NumberOfMappedViews, _CONTROL_AREA::NumberOfPfnReferences, Offset, PAGE_SHIFT, PsGetCurrentThread, _SUBSECTION::PtesInSubsection, _SUBSECTION::SubsectionBase, TRUE, _CONTROL_AREA::u, and UNLOCK_PFN.

Referenced by CcFlushCache(), CcMapAndCopy(), CcMdlWriteComplete2(), CcPurgeAndClearCacheSection(), CcSetFileSizes(), CcUnpinRepinnedBcb(), and MiFlushDataSection().

00678 : 00679 00680 This function flushes to the backing file any modified pages within 00681 the specified range of the section. 00682 00683 Arguments: 00684 00685 SectionObjectPointer - Supplies a pointer to the section objects. 00686 00687 Offset - Supplies the offset into the section in which to begin 00688 flushing pages. If this argument is not present, then the 00689 whole section is flushed without regard to the region size 00690 argument. 00691 00692 RegionSize - Supplies the size in bytes to flush. This is rounded 00693 to a page multiple. 00694 00695 IoStatus - Returns the value of the IoStatus for the last attempted 00696 I/O operation. 00697 00698 AcquireFile - Nonzero if the callback should be used to acquire the file 00699 00700 Return Value: 00701 00702 Returns status of the operation. 00703 00704 --*/ 00705 00706 { 00707 PCONTROL_AREA ControlArea; 00708 PMMPTE PointerPte; 00709 PMMPTE LastPte; 00710 KIRQL OldIrql; 00711 ULONG PteOffset; 00712 PSUBSECTION Subsection; 00713 PSUBSECTION LastSubsection; 00714 BOOLEAN DeleteSegment = FALSE; 00715 PETHREAD CurrentThread; 00716 NTSTATUS status; 00717 BOOLEAN OldClusterState; 00718 ULONG ConsecutiveFileLockFailures; 00719 00720 // 00721 // Initialize IoStatus for success, in case we take an early exit. 00722 // 00723 00724 IoStatus->Status = STATUS_SUCCESS; 00725 IoStatus->Information = RegionSize; 00726 00727 LOCK_PFN (OldIrql); 00728 00729 ControlArea = ((PCONTROL_AREA)(SectionObjectPointer->DataSectionObject)); 00730 00731 ASSERT ((ControlArea == NULL) || (ControlArea->u.Flags.Image == 0)); 00732 00733 if ((ControlArea == NULL) || 00734 (ControlArea->u.Flags.BeingDeleted) || 00735 (ControlArea->u.Flags.BeingCreated) || 00736 (ControlArea->NumberOfPfnReferences == 0)) { 00737 00738 // 00739 // This file no longer has an associated segment or is in the 00740 // process of coming or going. 00741 // If the number of PFN references is zero, then this control 00742 // area does not have any valid or transition pages that need 00743 // to be flushed. 00744 // 00745 00746 UNLOCK_PFN (OldIrql); 00747 return STATUS_SUCCESS; 00748 } 00749 00750 // 00751 // Locate the subsection. 00752 // 00753 00754 ASSERT (ControlArea->u.Flags.GlobalOnlyPerSession == 0); 00755 00756 Subsection = (PSUBSECTION)(ControlArea + 1); 00757 00758 if (!ARGUMENT_PRESENT (Offset)) { 00759 00760 // 00761 // If the offset is not specified, flush the complete file ignoring 00762 // the region size. 00763 // 00764 00765 PointerPte = &Subsection->SubsectionBase[0]; 00766 LastSubsection = Subsection; 00767 while (LastSubsection->NextSubsection != NULL) { 00768 LastSubsection = LastSubsection->NextSubsection; 00769 } 00770 LastPte = &LastSubsection->SubsectionBase 00771 [LastSubsection->PtesInSubsection - 1]; 00772 } else { 00773 00774 PteOffset = (ULONG)(Offset->QuadPart >> PAGE_SHIFT); 00775 00776 // 00777 // Make sure the PTEs are not in the extended part of the 00778 // segment. 00779 // 00780 00781 while (PteOffset >= Subsection->PtesInSubsection) { 00782 PteOffset -= Subsection->PtesInSubsection; 00783 if (Subsection->NextSubsection == NULL) { 00784 00785 // 00786 // Past end of mapping, just return success. 00787 // 00788 00789 UNLOCK_PFN (OldIrql); 00790 return STATUS_SUCCESS; 00791 } 00792 Subsection = Subsection->NextSubsection; 00793 } 00794 00795 ASSERT (PteOffset < Subsection->PtesInSubsection); 00796 PointerPte = &Subsection->SubsectionBase[PteOffset]; 00797 00798 // 00799 // Locate the address of the last prototype PTE to be flushed. 
00800 // 00801 00802 PteOffset += (ULONG)(((RegionSize + BYTE_OFFSET(Offset->LowPart)) - 1) >> PAGE_SHIFT); 00803 00804 LastSubsection = Subsection; 00805 00806 while (PteOffset >= LastSubsection->PtesInSubsection) { 00807 PteOffset -= LastSubsection->PtesInSubsection; 00808 if (LastSubsection->NextSubsection == NULL) { 00809 PteOffset = LastSubsection->PtesInSubsection - 1; 00810 break; 00811 } 00812 LastSubsection = LastSubsection->NextSubsection; 00813 } 00814 00815 ASSERT (PteOffset < LastSubsection->PtesInSubsection); 00816 LastPte = &LastSubsection->SubsectionBase[PteOffset]; 00817 } 00818 00819 // 00820 // Up the map view count so the control area cannot be deleted 00821 // out from under the call. 00822 // 00823 00824 ControlArea->NumberOfMappedViews += 1; 00825 00826 UNLOCK_PFN (OldIrql); 00827 00828 CurrentThread = PsGetCurrentThread(); 00829 00830 // 00831 // Indicate that disk verify errors should be returned as exceptions. 00832 // 00833 00834 OldClusterState = CurrentThread->ForwardClusterOnly; 00835 CurrentThread->ForwardClusterOnly = TRUE; 00836 00837 // 00838 // Preacquire the file if we are going to synchronize the flush. 00839 // 00840 00841 if (AcquireFile == 0) { 00842 00843 // 00844 // Flush the PTEs from the specified section. 00845 // 00846 00847 status = MiFlushSectionInternal (PointerPte, 00848 LastPte, 00849 Subsection, 00850 LastSubsection, 00851 TRUE, 00852 TRUE, 00853 IoStatus); 00854 } 00855 else { 00856 00857 ConsecutiveFileLockFailures = 0; 00858 00859 do { 00860 00861 FsRtlAcquireFileForCcFlush (ControlArea->FilePointer); 00862 00863 // 00864 // Flush the PTEs from the specified section. 00865 // 00866 00867 status = MiFlushSectionInternal (PointerPte, 00868 LastPte, 00869 Subsection, 00870 LastSubsection, 00871 TRUE, 00872 TRUE, 00873 IoStatus); 00874 00875 // 00876 // Release the file we acquired. 00877 // 00878 00879 FsRtlReleaseFileForCcFlush (ControlArea->FilePointer); 00880 00881 // 00882 // Only try the request more than once if the filesystem told us 00883 // it had a deadlock. 00884 // 00885 00886 if (status != STATUS_FILE_LOCK_CONFLICT) { 00887 break; 00888 } 00889 00890 ConsecutiveFileLockFailures += 1; 00891 KeDelayExecutionThread (KernelMode, FALSE, &MmShortTime); 00892 00893 } while (ConsecutiveFileLockFailures < 5); 00894 } 00895 00896 CurrentThread->ForwardClusterOnly = OldClusterState; 00897 00898 LOCK_PFN (OldIrql); 00899 00900 ASSERT ((LONG)ControlArea->NumberOfMappedViews >= 1); 00901 ControlArea->NumberOfMappedViews -= 1; 00902 00903 // 00904 // Check to see if the control area should be deleted. This 00905 // will release the PFN lock. 00906 // 00907 00908 MiCheckControlArea (ControlArea, NULL, OldIrql); 00909 00910 return status; 00911 00912 }
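
MmFlushSection is normally reached through the cache manager; the Referenced-by list above shows CcFlushCache among its callers. Below is a minimal sketch of a file system flushing a byte range of a cached stream. The wrapper name and error handling are illustrative, not taken from this source.

#include <ntifs.h>

VOID
MyFlushRange (IN PSECTION_OBJECT_POINTERS SectionObjectPointers,
              IN LONGLONG FileOffset,
              IN ULONG Length)
{
    IO_STATUS_BLOCK IoStatus;
    LARGE_INTEGER Offset;

    Offset.QuadPart = FileOffset;

    //
    // Write any modified pages in [Offset, Offset + Length) back to the
    // backing file. Passing a NULL file offset instead would flush the whole
    // section, matching the "Offset not present" case described above.
    //
    CcFlushCache (SectionObjectPointers, &Offset, Length, &IoStatus);

    if (!NT_SUCCESS (IoStatus.Status)) {
        /* propagate or log the flush error */
    }
}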

NTSTATUS MmFlushVirtualMemory (IN PEPROCESS Process,
                               IN OUT PVOID *BaseAddress,
                               IN OUT PSIZE_T RegionSize,
                               OUT PIO_STATUS_BLOCK IoStatus)
 

Definition at line 297 of file flushsec.c.

References ASSERT, _MMVAD::ControlArea, _MMVAD::EndingVpn, FALSE, _CONTROL_AREA::FilePointer, FsRtlAcquireFileForCcFlush(), FsRtlReleaseFileForCcFlush(), KeAttachProcess(), KeDelayExecutionThread(), KeDetachProcess(), KernelMode, LOCK_WS, LOCK_WS_AND_ADDRESS_SPACE, MI_IS_SESSION_ADDRESS, MI_IS_SYSTEM_CACHE_ADDRESS, MI_VA_TO_VPN, MI_VPN_TO_VA_ENDING, MiDoesPdeExistAndMakeValid(), MiDoesPpeExistAndMakeValid, MiFlushAcquire(), MiFlushDirtyBitsToPfn(), MiFlushRelease(), MiFlushSectionInternal(), MiGetPdeAddress, MiGetPpeAddress, MiGetProtoPteAddress, MiGetPteAddress, MiGetSystemCacheSubsection(), MiGetVirtualAddressMappedByPte, MiIsPteOnPdeBoundary, MiLocateAddress(), MiLocateSubsection(), MmShortTime, _SUBSECTION::NextSubsection, NTSTATUS(), NULL, PAGE_ALIGN, PAGE_SIZE, PAGED_CODE, PsGetCurrentProcess, _SUBSECTION::PtesInSubsection, Status, _SUBSECTION::SubsectionBase, TRUE, _MMVAD::u, UNLOCK_WS, and UNLOCK_WS_AND_ADDRESS_SPACE.

Referenced by NtFlushVirtualMemory().

00306 : 00307 00308 This function flushes a range of virtual address which map 00309 a data file back into the data file if they have been modified. 00310 00311 Note that the modification is this process's view of the pages, 00312 on certain implementations (like the Intel 386), the modify 00313 bit is captured in the PTE and not forced to the PFN database 00314 until the page is removed from the working set. This means 00315 that pages which have been modified by another process will 00316 not be flushed to the data file. 00317 00318 Arguments: 00319 00320 Process - Supplies a pointer to a process object. 00321 00322 BaseAddress - Supplies a pointer to a variable that will receive 00323 the base address of the flushed region. The initial value 00324 of this argument is the base address of the region of the 00325 pages to flush. 00326 00327 RegionSize - Supplies a pointer to a variable that will receive 00328 the actual size in bytes of the flushed region of pages. 00329 The initial value of this argument is rounded up to the 00330 next host-page-size boundary. 00331 00332 If this value is specified as zero, the mapped range from 00333 the base address to the end of the range is flushed. 00334 00335 IoStatus - Returns the value of the IoStatus for the last attempted 00336 I/O operation. 00337 00338 Return Value: 00339 00340 Returns the NT status 00341 00342 --*/ 00343 00344 { 00345 PMMVAD Vad; 00346 PVOID EndingAddress; 00347 PVOID Va; 00348 PEPROCESS CurrentProcess; 00349 BOOLEAN SystemCache; 00350 PCONTROL_AREA ControlArea; 00351 PMMPTE PointerPte; 00352 PMMPTE PointerPde; 00353 PMMPTE PointerPpe; 00354 PMMPTE LastPte; 00355 PMMPTE FinalPte; 00356 PSUBSECTION Subsection; 00357 PSUBSECTION LastSubsection; 00358 NTSTATUS Status; 00359 ULONG ConsecutiveFileLockFailures; 00360 ULONG Waited; 00361 LOGICAL EntireRestOfVad; 00362 LOGICAL Attached; 00363 00364 PAGED_CODE(); 00365 00366 Attached = FALSE; 00367 00368 // 00369 // Determine if the specified base address is within the system 00370 // cache and if so, don't attach, the working set mutex is still 00371 // required to "lock" paged pool pages (proto PTEs) into the 00372 // working set. 00373 // 00374 00375 EndingAddress = (PVOID)(((ULONG_PTR)*BaseAddress + *RegionSize - 1) | 00376 (PAGE_SIZE - 1)); 00377 *BaseAddress = PAGE_ALIGN (*BaseAddress); 00378 00379 if (MI_IS_SESSION_ADDRESS (*BaseAddress)) { 00380 00381 // 00382 // Nothing in session space needs flushing. 00383 // 00384 00385 return STATUS_NOT_MAPPED_VIEW; 00386 } 00387 00388 CurrentProcess = PsGetCurrentProcess (); 00389 00390 if (!MI_IS_SYSTEM_CACHE_ADDRESS(*BaseAddress)) { 00391 00392 SystemCache = FALSE; 00393 00394 // 00395 // Attach to the specified process. 00396 // 00397 00398 if (PsGetCurrentProcess() != Process) { 00399 KeAttachProcess (&Process->Pcb); 00400 Attached = TRUE; 00401 } 00402 00403 LOCK_WS_AND_ADDRESS_SPACE (Process); 00404 00405 // 00406 // Make sure the address space was not deleted, if so, return an error. 00407 // 00408 00409 if (Process->AddressSpaceDeleted != 0) { 00410 Status = STATUS_PROCESS_IS_TERMINATING; 00411 goto ErrorReturn; 00412 } 00413 00414 Vad = MiLocateAddress (*BaseAddress); 00415 00416 if (Vad == (PMMVAD)NULL) { 00417 00418 // 00419 // No Virtual Address Descriptor located for Base Address. 
00420 // 00421 00422 Status = STATUS_NOT_MAPPED_VIEW; 00423 goto ErrorReturn; 00424 } 00425 00426 if (*RegionSize == 0) { 00427 EndingAddress = MI_VPN_TO_VA_ENDING (Vad->EndingVpn); 00428 EntireRestOfVad = TRUE; 00429 } 00430 else { 00431 EntireRestOfVad = FALSE; 00432 } 00433 00434 if ((Vad->u.VadFlags.PrivateMemory == 1) || 00435 (MI_VA_TO_VPN (EndingAddress) > Vad->EndingVpn)) { 00436 00437 // 00438 // This virtual address descriptor does not refer to a Segment 00439 // object. 00440 // 00441 00442 Status = STATUS_NOT_MAPPED_VIEW; 00443 goto ErrorReturn; 00444 } 00445 00446 // 00447 // Make sure this VAD maps a data file (not an image file). 00448 // 00449 00450 ControlArea = Vad->ControlArea; 00451 00452 if ((ControlArea->FilePointer == NULL) || 00453 (Vad->u.VadFlags.ImageMap == 1)) { 00454 00455 // 00456 // This virtual address descriptor does not refer to a Segment 00457 // object. 00458 // 00459 00460 Status = STATUS_NOT_MAPPED_DATA; 00461 goto ErrorReturn; 00462 } 00463 00464 } else { 00465 00466 SystemCache = TRUE; 00467 Process = CurrentProcess; 00468 LOCK_WS (Process); 00469 } 00470 00471 PointerPpe = MiGetPpeAddress (*BaseAddress); 00472 PointerPde = MiGetPdeAddress (*BaseAddress); 00473 PointerPte = MiGetPteAddress (*BaseAddress); 00474 LastPte = MiGetPteAddress (EndingAddress); 00475 *RegionSize = (PCHAR)EndingAddress - (PCHAR)*BaseAddress + 1; 00476 00477 retry: 00478 00479 while (!MiDoesPpeExistAndMakeValid (PointerPpe, Process, FALSE, &Waited)) { 00480 00481 // 00482 // This page directory parent entry is empty, go to the next one. 00483 // 00484 00485 PointerPpe += 1; 00486 PointerPde = MiGetVirtualAddressMappedByPte (PointerPpe); 00487 PointerPte = MiGetVirtualAddressMappedByPte (PointerPde); 00488 Va = MiGetVirtualAddressMappedByPte (PointerPte); 00489 00490 if (PointerPte > LastPte) { 00491 break; 00492 } 00493 } 00494 00495 Waited = 0; 00496 00497 if (PointerPte <= LastPte) { 00498 while (!MiDoesPdeExistAndMakeValid(PointerPde, Process, FALSE, &Waited)) { 00499 00500 // 00501 // No page table page exists for this address. 00502 // 00503 00504 PointerPde += 1; 00505 00506 PointerPte = MiGetVirtualAddressMappedByPte (PointerPde); 00507 00508 if (PointerPte > LastPte) { 00509 break; 00510 } 00511 00512 #if defined (_WIN64) 00513 if (MiIsPteOnPdeBoundary (PointerPde)) { 00514 PointerPpe = MiGetPteAddress (PointerPde); 00515 goto retry; 00516 } 00517 #endif 00518 00519 Va = MiGetVirtualAddressMappedByPte (PointerPte); 00520 } 00521 00522 // 00523 // If the PFN lock (and accordingly the WS mutex) was 00524 // released and reacquired we must retry the operation. 00525 // 00526 00527 if (PointerPte <= LastPte && Waited != 0) { 00528 goto retry; 00529 } 00530 } 00531 00532 MiFlushDirtyBitsToPfn (PointerPte, LastPte, Process, SystemCache); 00533 00534 if (SystemCache) { 00535 00536 // 00537 // No VADs exist for the system cache. 00538 // 00539 00540 Subsection = MiGetSystemCacheSubsection (*BaseAddress, 00541 Process, 00542 &PointerPte); 00543 LastSubsection = MiGetSystemCacheSubsection (EndingAddress, 00544 Process, 00545 &FinalPte); 00546 UNLOCK_WS (Process); 00547 00548 // 00549 // Flush the PTEs from the specified section. 00550 // 00551 00552 Status = MiFlushSectionInternal (PointerPte, 00553 FinalPte, 00554 Subsection, 00555 LastSubsection, 00556 FALSE, 00557 TRUE, 00558 IoStatus); 00559 } 00560 else { 00561 00562 // 00563 // Protect against the section being prematurely deleted. 
00564 // 00565 00566 MiFlushAcquire (ControlArea); 00567 00568 PointerPte = MiGetProtoPteAddress (Vad, MI_VA_TO_VPN (*BaseAddress)); 00569 Subsection = MiLocateSubsection (Vad, MI_VA_TO_VPN(*BaseAddress)); 00570 LastSubsection = MiLocateSubsection (Vad, MI_VA_TO_VPN(EndingAddress)); 00571 00572 // 00573 // The last subsection is NULL if the section is not fully 00574 // committed. Only allow the flush if the caller said do the whole 00575 // thing, otherwise it's an error. 00576 // 00577 00578 if (LastSubsection == NULL) { 00579 00580 if (EntireRestOfVad == FALSE) { 00581 00582 // 00583 // Caller can only specify the range that is committed or zero 00584 // to indicate the entire range. 00585 // 00586 00587 UNLOCK_WS_AND_ADDRESS_SPACE (Process); 00588 if (Attached == TRUE) { 00589 KeDetachProcess(); 00590 } 00591 MiFlushRelease (ControlArea); 00592 return STATUS_NOT_MAPPED_VIEW; 00593 } 00594 00595 LastSubsection = Subsection; 00596 while (LastSubsection->NextSubsection) { 00597 LastSubsection = LastSubsection->NextSubsection; 00598 } 00599 FinalPte = LastSubsection->SubsectionBase + LastSubsection->PtesInSubsection - 1; 00600 } 00601 else { 00602 FinalPte = MiGetProtoPteAddress (Vad, MI_VA_TO_VPN (EndingAddress)); 00603 } 00604 00605 UNLOCK_WS_AND_ADDRESS_SPACE (Process); 00606 if (Attached == TRUE) { 00607 KeDetachProcess(); 00608 } 00609 00610 // 00611 // Preacquire the file to synchronize the flush. 00612 // 00613 00614 ConsecutiveFileLockFailures = 0; 00615 00616 do { 00617 00618 FsRtlAcquireFileForCcFlush (ControlArea->FilePointer); 00619 00620 // 00621 // Flush the PTEs from the specified section. 00622 // 00623 00624 Status = MiFlushSectionInternal (PointerPte, 00625 FinalPte, 00626 Subsection, 00627 LastSubsection, 00628 TRUE, 00629 TRUE, 00630 IoStatus); 00631 00632 // 00633 // Release the file we acquired. 00634 // 00635 00636 FsRtlReleaseFileForCcFlush (ControlArea->FilePointer); 00637 00638 // 00639 // Only try the request more than once if the filesystem told us 00640 // it had a deadlock. 00641 // 00642 00643 if (Status != STATUS_FILE_LOCK_CONFLICT) { 00644 break; 00645 } 00646 00647 ConsecutiveFileLockFailures += 1; 00648 KeDelayExecutionThread (KernelMode, FALSE, &MmShortTime); 00649 00650 } while (ConsecutiveFileLockFailures < 5); 00651 00652 MiFlushRelease (ControlArea); 00653 } 00654 00655 return Status; 00656 00657 ErrorReturn: 00658 ASSERT (SystemCache == FALSE); 00659 UNLOCK_WS_AND_ADDRESS_SPACE (Process); 00660 if (Attached == TRUE) { 00661 KeDetachProcess(); 00662 } 00663 return Status; 00664 00665 }
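
The only caller listed above is NtFlushVirtualMemory; from user mode the usual route into this path is FlushViewOfFile. A short user-mode sketch follows (the wrapper name is illustrative).

#include <windows.h>

/* viewBase must come from a prior MapViewOfFile call on a file-backed mapping */
BOOL FlushWholeView (void *viewBase)
{
    /*
     * A byte count of zero flushes from the base address to the end of the
     * mapping, mirroring the RegionSize == 0 behavior documented above.
     */
    return FlushViewOfFile (viewBase, 0);
}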

BOOLEAN MmForceSectionClosed (IN PSECTION_OBJECT_POINTERS SectionObjectPointer,
                              IN BOOLEAN DelayClose)
 

Definition at line 957 of file sectsup.c.

References ASSERT, CheckBothSection, MiCheckControlAreaStatus(), MiCleanSection(), MiHydra, NULL, _CONTROL_AREA::NumberOfMappedViews, TRUE, _CONTROL_AREA::u, and UNLOCK_PFN.

00964 : 00965 00966 This function examines the Section object pointers. If they are NULL, 00967 no further action is taken and the value TRUE is returned. 00968 00969 If the Section object pointer is not NULL, the section reference count 00970 and the map view count are checked. If both counts are zero, the 00971 segment associated with the file is deleted and the file closed. 00972 If one of the counts is non-zero, no action is taken and the 00973 value FALSE is returned. 00974 00975 Arguments: 00976 00977 SectionObjectPointer - Supplies a pointer to a section object. 00978 00979 DelayClose - Supplies the value TRUE if the close operation should 00980 occur as soon as possible in the event this section 00981 cannot be closed now due to outstanding references. 00982 00983 Return Value: 00984 00985 TRUE - The segment was deleted and the file closed or no segment was 00986 located. 00987 00988 FALSE - The segment was not deleted and no action was performed OR 00989 an I/O error occurred trying to write the pages. 00990 00991 --*/ 00992 00993 { 00994 PCONTROL_AREA ControlArea; 00995 KIRQL OldIrql; 00996 BOOLEAN state; 00997 00998 // 00999 // Check the status of the control area, if the control area is in use 01000 // or the control area is being deleted, this operation cannot continue. 01001 // 01002 01003 state = MiCheckControlAreaStatus (CheckBothSection, 01004 SectionObjectPointer, 01005 DelayClose, 01006 &ControlArea, 01007 &OldIrql); 01008 01009 if (ControlArea == NULL) { 01010 return state; 01011 } 01012 01013 // 01014 // PFN LOCK IS NOW HELD! 01015 // 01016 01017 // 01018 // Repeat until there are no more control areas - multiple control areas 01019 // for the same image section occur to support user global DLLs - these DLLs 01020 // require data that is shared within a session but not across sessions. 01021 // Note this can only happen for Hydra. 01022 // 01023 01024 do { 01025 01026 // 01027 // Set the being deleted flag and up the number of mapped views 01028 // for the segment. Upping the number of mapped views prevents 01029 // the segment from being deleted and passed to the deletion thread 01030 // while we are forcing a delete. 01031 // 01032 01033 ControlArea->u.Flags.BeingDeleted = 1; 01034 ASSERT (ControlArea->NumberOfMappedViews == 0); 01035 ControlArea->NumberOfMappedViews = 1; 01036 01037 // 01038 // This is a page file backed or image Segment. The Segment is being 01039 // deleted, remove all references to the paging file and physical memory. 01040 // 01041 01042 UNLOCK_PFN (OldIrql); 01043 01044 // 01045 // Delete the section by flushing all modified pages back to the section 01046 // if it is a file and freeing up the pages such that the 01047 // PfnReferenceCount goes to zero. 01048 // 01049 01050 MiCleanSection (ControlArea, TRUE); 01051 01052 // 01053 // Get the next Hydra control area. 01054 // 01055 01056 if (MiHydra == TRUE) { 01057 state = MiCheckControlAreaStatus (CheckBothSection, 01058 SectionObjectPointer, 01059 DelayClose, 01060 &ControlArea, 01061 &OldIrql); 01062 } 01063 else { 01064 state = TRUE; 01065 break; 01066 } 01067 01068 } while (ControlArea); 01069 01070 return state; 01071 }

NTKERNELAPI VOID MmFreeContiguousMemory (IN PVOID BaseAddress)
 

Definition at line 5372 of file iosup.c.

References ExFreePool(), MiFreeLowMemory(), MiNoLowMemory, PAGED_CODE, and TRUE.

Referenced by IoFreeDumpStack(), Ki386ClearIdentityMap(), and MmAllocateContiguousMemorySpecifyCache().

05378 :
05379 
05380     This function deallocates a range of physically contiguous non-paged
05381     pool which was allocated with the MmAllocateContiguousMemory function.
05382 
05383 Arguments:
05384 
05385     BaseAddress - Supplies the base virtual address where the physical
05386                   address was previously mapped.
05387 
05388 Return Value:
05389 
05390     None.
05391 
05392 Environment:
05393 
05394     Kernel mode, IRQL of APC_LEVEL or below.
05395 
05396 --*/
05397 
05398 {
05399     PAGED_CODE();
05400 
05401 #if defined (_X86PAE_)
05402     if (MiNoLowMemory == TRUE) {
05403         if (MiFreeLowMemory (BaseAddress, 'tnoC') == TRUE) {
05404             return;
05405         }
05406     }
05407 #endif
05408 
05409     ExFreePool (BaseAddress);
05410 }
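
A minimal pairing sketch (the buffer size and usage are assumptions): allocations made with MmAllocateContiguousMemory are returned through the routine above.

#include <ntddk.h>

VOID ContiguousBufferExample (VOID)
{
    PHYSICAL_ADDRESS HighestAcceptable;
    PVOID Buffer;

    HighestAcceptable.QuadPart = 0xFFFFFFFF;     /* accept any page below 4GB */

    Buffer = MmAllocateContiguousMemory (64 * 1024, HighestAcceptable);
    if (Buffer == NULL) {
        return;                                  /* allocation failed */
    }

    /* ... use the physically contiguous buffer (e.g. a DMA descriptor ring) ... */

    MmFreeContiguousMemory (Buffer);             /* must balance the allocation */
}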

NTKERNELAPI VOID MmFreeContiguousMemorySpecifyCache (IN PVOID BaseAddress,
                                                     IN SIZE_T NumberOfBytes,
                                                     IN MEMORY_CACHING_TYPE CacheType)
 

Definition at line 5414 of file iosup.c.

References ASSERT, ExFreePool(), MiFreeLowMemory(), MiGetPteAddress, MiNoLowMemory, MmCached, MmNonPagedSystemStart, MmNumberOfSystemPtes, MmUnmapIoSpace(), PAGE_SHIFT, PAGE_SIZE, PAGED_CODE, TRUE, and _MMPTE::u.

05422 : 05423 05424 This function deallocates a range of noncached memory in 05425 the non-paged portion of the system address space. 05426 05427 Arguments: 05428 05429 BaseAddress - Supplies the base virtual address where the noncached 05430 05431 NumberOfBytes - Supplies the number of bytes allocated to the request. 05432 This must be the same number that was obtained with 05433 the MmAllocateContiguousMemorySpecifyCache call. 05434 05435 CacheType - Supplies the cachetype used when the caller made the 05436 MmAllocateContiguousMemorySpecifyCache call. 05437 05438 Return Value: 05439 05440 None. 05441 05442 Environment: 05443 05444 Kernel mode, IRQL of APC_LEVEL or below. 05445 05446 --*/ 05447 05448 { 05449 PVOID PoolAddress; 05450 PMMPTE PointerPte; 05451 05452 PAGED_CODE(); 05453 05454 if (CacheType != MmCached) { 05455 05456 // 05457 // The caller was using an alternate mapping - free these PTEs too. 05458 // 05459 05460 PointerPte = MiGetPteAddress(BaseAddress); 05461 05462 PointerPte += ((NumberOfBytes + PAGE_SIZE - 1) >> PAGE_SHIFT); 05463 PoolAddress = (PVOID)(ULONG_PTR)PointerPte->u.Long; 05464 05465 PointerPte += 1; 05466 ASSERT (NumberOfBytes == PointerPte->u.Long); 05467 05468 NumberOfBytes += (2 * PAGE_SIZE); 05469 MmUnmapIoSpace (BaseAddress, NumberOfBytes); 05470 BaseAddress = PoolAddress; 05471 } 05472 else { 05473 ASSERT (BaseAddress < MmNonPagedSystemStart || 05474 BaseAddress >= (PVOID)((PCHAR)MmNonPagedSystemStart + (MmNumberOfSystemPtes << PAGE_SHIFT))); 05475 } 05476 05477 #if defined (_X86PAE_) 05478 if (MiNoLowMemory == TRUE) { 05479 if (MiFreeLowMemory (BaseAddress, 'tnoC') == TRUE) { 05480 return; 05481 } 05482 } 05483 #endif 05484 05485 ExFreePool (BaseAddress); 05486 }
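
For the SpecifyCache variant, the NumberOfBytes and CacheType passed to the free must match the original allocation, since the routine above uses them to locate and unmap the alternate (non-cached) mapping. A sketch with illustrative bounds and size:

#include <ntddk.h>

VOID NonCachedContiguousExample (VOID)
{
    PHYSICAL_ADDRESS Lowest, Highest, Boundary;
    SIZE_T Bytes = 16 * 1024;
    PVOID Buffer;

    Lowest.QuadPart   = 0;
    Highest.QuadPart  = 0xFFFFFFFF;
    Boundary.QuadPart = 0;                       /* no boundary restriction */

    Buffer = MmAllocateContiguousMemorySpecifyCache (Bytes,
                                                     Lowest,
                                                     Highest,
                                                     Boundary,
                                                     MmNonCached);
    if (Buffer != NULL) {
        /* ... device-visible, non-cached buffer ... */
        MmFreeContiguousMemorySpecifyCache (Buffer, Bytes, MmNonCached);
    }
}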

VOID MmFreeDriverInitialization (IN PVOID Section)
 

Definition at line 2106 of file sysload.c.

References ASSERT, ExPageLockHandle, FALSE, MI_IS_SESSION_ADDRESS, MiDeleteSystemPagableVm(), MiGetPteAddress, MiReturnCommitment(), MM_BUMP_COUNTER, MM_DBG_COMMIT_RETURN_DRIVER_INIT_CODE, MM_TRACK_COMMIT, MmDriverCommit, MmLockPagableSectionByHandle(), MmResidentAvailablePages, MmUnlockPagableImageSection(), NULL, PAGE_SHIFT, ROUND_TO_PAGES, RtlImageNtHeader(), and ZeroKernelPte.

Referenced by IopLoadDriver().

02112 : 02113 02114 This routine removes the pages that relocate and debug information from 02115 the address space of the driver. 02116 02117 NOTE: This routine looks at the last sections defined in the image 02118 header and if that section is marked as DISCARDABLE in the 02119 characteristics, it is removed from the image. This means 02120 that all discardable sections at the end of the driver are 02121 deleted. 02122 02123 Arguments: 02124 02125 SectionObject - Supplies the section object for the image. 02126 02127 Return Value: 02128 02129 None. 02130 02131 --*/ 02132 02133 { 02134 PLDR_DATA_TABLE_ENTRY DataTableEntry; 02135 PMMPTE PointerPte; 02136 PMMPTE LastPte; 02137 PFN_NUMBER NumberOfPtes; 02138 PVOID Base; 02139 ULONG i; 02140 PIMAGE_NT_HEADERS NtHeaders; 02141 PIMAGE_SECTION_HEADER NtSection; 02142 PIMAGE_SECTION_HEADER FoundSection; 02143 PFN_NUMBER PagesDeleted; 02144 02145 MmLockPagableSectionByHandle(ExPageLockHandle); 02146 DataTableEntry = (PLDR_DATA_TABLE_ENTRY)ImageHandle; 02147 Base = DataTableEntry->DllBase; 02148 02149 ASSERT (MI_IS_SESSION_ADDRESS (Base) == FALSE); 02150 02151 NumberOfPtes = DataTableEntry->SizeOfImage >> PAGE_SHIFT; 02152 LastPte = MiGetPteAddress (Base) + NumberOfPtes; 02153 02154 NtHeaders = (PIMAGE_NT_HEADERS)RtlImageNtHeader(Base); 02155 02156 NtSection = (PIMAGE_SECTION_HEADER)((PCHAR)NtHeaders + 02157 sizeof(ULONG) + 02158 sizeof(IMAGE_FILE_HEADER) + 02159 NtHeaders->FileHeader.SizeOfOptionalHeader 02160 ); 02161 02162 NtSection += NtHeaders->FileHeader.NumberOfSections; 02163 02164 FoundSection = NULL; 02165 for (i = 0; i < NtHeaders->FileHeader.NumberOfSections; i += 1) { 02166 NtSection -= 1; 02167 if ((NtSection->Characteristics & IMAGE_SCN_MEM_DISCARDABLE) != 0) { 02168 FoundSection = NtSection; 02169 } else { 02170 02171 // 02172 // There was a non discardable section between the this 02173 // section and the last non discardable section, don't 02174 // discard this section and don't look any more. 02175 // 02176 02177 break; 02178 } 02179 } 02180 02181 if (FoundSection != NULL) { 02182 02183 PointerPte = MiGetPteAddress (ROUND_TO_PAGES ( 02184 (PCHAR)Base + FoundSection->VirtualAddress)); 02185 NumberOfPtes = (PFN_NUMBER)(LastPte - PointerPte); 02186 02187 PagesDeleted = MiDeleteSystemPagableVm (PointerPte, 02188 NumberOfPtes, 02189 ZeroKernelPte, 02190 FALSE, 02191 NULL); 02192 02193 MmResidentAvailablePages += PagesDeleted; 02194 MM_BUMP_COUNTER(18, PagesDeleted); 02195 MiReturnCommitment (PagesDeleted); 02196 MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_DRIVER_INIT_CODE, PagesDeleted); 02197 MmDriverCommit -= (ULONG)PagesDeleted; 02198 #if DBG 02199 MiPagesConsumed -= PagesDeleted; 02200 #endif 02201 } 02202 02203 MmUnlockPagableImageSection(ExPageLockHandle); 02204 return; 02205 }
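
This routine is called from IopLoadDriver (per the Referenced-by list) and strips discardable sections at the tail of a driver image once initialization has completed. The conventional way a driver marks run-once code as discardable is the alloc_text pragma; a minimal sketch follows, with an empty DriverEntry purely for illustration.

#include <ntddk.h>

NTSTATUS DriverEntry (IN PDRIVER_OBJECT DriverObject, IN PUNICODE_STRING RegistryPath);

#ifdef ALLOC_PRAGMA
#pragma alloc_text (INIT, DriverEntry)           /* run-once code, discardable */
#endif

NTSTATUS
DriverEntry (IN PDRIVER_OBJECT DriverObject, IN PUNICODE_STRING RegistryPath)
{
    UNREFERENCED_PARAMETER (DriverObject);
    UNREFERENCED_PARAMETER (RegistryPath);

    /* one-time setup goes here; these pages can be discarded afterwards */

    return STATUS_SUCCESS;
}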

VOID MmFreeLoaderBlock (IN PLOADER_PARAMETER_BLOCK LoaderBlock)
 

Definition at line 2005 of file mminit.c.

References _MEMORY_ALLOCATION_DESCRIPTOR::BasePage, FreePageList, KeBugCheckEx(), KeFlushEntireTb(), KSEG0_BASE, _MEMORY_ALLOCATION_DESCRIPTOR::ListEntry, LoaderNlsData, LoaderOsloaderHeap, LoaderRegistryData, LOCK_PFN, _MEMORY_ALLOCATION_DESCRIPTOR::MemoryType, MI_IS_PHYSICAL_ADDRESS, MI_PFN_ELEMENT, MI_SET_PFN_DELETED, MI_WRITE_INVALID_PTE, MiDecrementShareCountOnly, MiGetPdeAddress, MiGetVirtualAddressMappedByPte, MiInsertPageInList(), MM_MAX_LOADER_BLOCKS, MmPageLocationList, MmVirtualBias, _MEMORY_ALLOCATION_DESCRIPTOR::PageCount, PTE_SHIFT, _MMPFN::PteAddress, TRUE, _MMPFN::u1, _MMPFN::u3, UNLOCK_PFN, ZeroKernelPte, and ZeroPte.

02011 : 02012 02013 This function is called as the last routine in phase 1 initialization. 02014 It frees memory used by the OsLoader. 02015 02016 Arguments: 02017 02018 LoadBlock - Supplies a pointer the system loader block. 02019 02020 Return Value: 02021 02022 None. 02023 02024 Environment: 02025 02026 Kernel Mode Only. System initialization. 02027 02028 --*/ 02029 02030 { 02031 02032 PLIST_ENTRY NextMd; 02033 PMMPTE Pde; 02034 PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor; 02035 MEMORY_ALLOCATION_DESCRIPTOR SavedDescriptor[MM_MAX_LOADER_BLOCKS]; 02036 PFN_NUMBER i; 02037 PFN_NUMBER NextPhysicalPage; 02038 PMMPFN Pfn1; 02039 LONG BlockNumber = -1; 02040 KIRQL OldIrql; 02041 02042 // 02043 // 02044 // Walk through the memory descriptors and add pages to the 02045 // free list in the PFN database. 02046 // 02047 02048 NextMd = LoaderBlock->MemoryDescriptorListHead.Flink; 02049 02050 while (NextMd != &LoaderBlock->MemoryDescriptorListHead) { 02051 02052 MemoryDescriptor = CONTAINING_RECORD(NextMd, 02053 MEMORY_ALLOCATION_DESCRIPTOR, 02054 ListEntry); 02055 02056 02057 switch (MemoryDescriptor->MemoryType) { 02058 case LoaderOsloaderHeap: 02059 case LoaderRegistryData: 02060 case LoaderNlsData: 02061 //case LoaderMemoryData: //this has page table and other stuff. 02062 02063 // 02064 // Capture the data to temporary storage so we won't 02065 // free memory we are referencing. Coalesce it if 02066 // the blocks are adjacent and of the same type. 02067 // 02068 02069 if (BlockNumber != -1 && 02070 MemoryDescriptor->MemoryType == SavedDescriptor[BlockNumber].MemoryType && 02071 MemoryDescriptor->BasePage == SavedDescriptor[BlockNumber].BasePage + SavedDescriptor[BlockNumber].PageCount) { 02072 02073 // 02074 // these blocks are adjacent - merge them 02075 // 02076 02077 SavedDescriptor[BlockNumber].PageCount += MemoryDescriptor->PageCount; 02078 } 02079 else { 02080 BlockNumber += 1; 02081 if (BlockNumber >= MM_MAX_LOADER_BLOCKS) { 02082 KeBugCheckEx (MEMORY_MANAGEMENT, 0, 0, 0, 0); 02083 } 02084 SavedDescriptor[BlockNumber] = *MemoryDescriptor; 02085 } 02086 02087 break; 02088 02089 default: 02090 02091 break; 02092 } 02093 02094 NextMd = MemoryDescriptor->ListEntry.Flink; 02095 } 02096 02097 LOCK_PFN (OldIrql); 02098 02099 while (BlockNumber >= 0) { 02100 02101 i = SavedDescriptor[BlockNumber].PageCount; 02102 NextPhysicalPage = SavedDescriptor[BlockNumber].BasePage; 02103 02104 Pfn1 = MI_PFN_ELEMENT (NextPhysicalPage); 02105 while (i != 0) { 02106 02107 if (Pfn1->u3.e2.ReferenceCount == 0) { 02108 if (Pfn1->u1.Flink == 0) { 02109 02110 // 02111 // Set the PTE address to the physical page for 02112 // virtual address alignment checking. 02113 // 02114 02115 Pfn1->PteAddress = 02116 (PMMPTE)(NextPhysicalPage << PTE_SHIFT); 02117 MiInsertPageInList (MmPageLocationList[FreePageList], 02118 NextPhysicalPage); 02119 } 02120 } else { 02121 02122 if (NextPhysicalPage != 0) { 02123 // 02124 // Remove PTE and insert into the free list. If it is 02125 // a physical address within the PFN database, the PTE 02126 // element does not exist and therefore cannot be updated. 02127 // 02128 02129 if (!MI_IS_PHYSICAL_ADDRESS ( 02130 MiGetVirtualAddressMappedByPte (Pfn1->PteAddress))) { 02131 02132 // 02133 // Not a physical address. 
02134 // 02135 02136 *(Pfn1->PteAddress) = ZeroPte; 02137 } 02138 02139 MI_SET_PFN_DELETED (Pfn1); 02140 MiDecrementShareCountOnly (NextPhysicalPage); 02141 } 02142 } 02143 02144 Pfn1++; 02145 i -= 1; 02146 NextPhysicalPage += 1; 02147 } 02148 02149 BlockNumber -= 1; 02150 } 02151 02152 // 02153 // If the kernel has been biased to allow for 3gb of user address space, 02154 // then the first 16mb of memory is doubly mapped to KSEG0_BASE and to 02155 // ALTERNATE_BASE. Therefore, the KSEG0_BASE entries must be unmapped. 02156 // 02157 02158 #if defined(_X86_) 02159 02160 if (MmVirtualBias != 0) { 02161 Pde = MiGetPdeAddress(KSEG0_BASE); 02162 MI_WRITE_INVALID_PTE (Pde, ZeroKernelPte); 02163 MI_WRITE_INVALID_PTE (Pde + 1, ZeroKernelPte); 02164 MI_WRITE_INVALID_PTE (Pde + 2, ZeroKernelPte); 02165 MI_WRITE_INVALID_PTE (Pde + 3, ZeroKernelPte); 02166 02167 #if defined(_X86PAE_) 02168 MI_WRITE_INVALID_PTE (Pde + 4, ZeroKernelPte); 02169 MI_WRITE_INVALID_PTE (Pde + 5, ZeroKernelPte); 02170 MI_WRITE_INVALID_PTE (Pde + 6, ZeroKernelPte); 02171 MI_WRITE_INVALID_PTE (Pde + 7, ZeroKernelPte); 02172 #endif 02173 02174 } 02175 02176 #endif 02177 02178 KeFlushEntireTb (TRUE, TRUE); 02179 UNLOCK_PFN (OldIrql); 02180 return; 02181 }

NTKERNELAPI VOID MmFreeNonCachedMemory (IN PVOID BaseAddress,
                                        IN SIZE_T NumberOfBytes)
 

Definition at line 5721 of file iosup.c.

References ASSERT, BYTES_TO_PAGES, ExPageLockHandle, LOCK_PFN, MI_GET_PAGE_FRAME_FROM_PTE, MI_MAKING_MULTIPLE_PTES_INVALID, MI_PFN_ELEMENT, MI_SET_PFN_DELETED, MiDecrementShareAndValidCount, MiDecrementShareCountOnly, MiGetPteAddress, MiReleaseSystemPtes(), MiReturnCommitment(), MM_BUMP_COUNTER, MM_DBG_COMMIT_RETURN_NONCACHED_PAGES, MM_TRACK_COMMIT, MmLockPagableSectionByHandle(), MmResidentAvailablePages, MmUnlockPagableImageSection(), PAGE_ALIGN, _MMPFN::PteFrame, SystemPteSpace, TRUE, _MMPFN::u2, and UNLOCK_PFN.

05728 : 05729 05730 This function deallocates a range of noncached memory in 05731 the non-paged portion of the system address space. 05732 05733 Arguments: 05734 05735 BaseAddress - Supplies the base virtual address where the noncached 05736 memory resides. 05737 05738 NumberOfBytes - Supplies the number of bytes allocated to the request. 05739 This must be the same number that was obtained with 05740 the MmAllocateNonCachedMemory call. 05741 05742 Return Value: 05743 05744 None. 05745 05746 Environment: 05747 05748 Kernel mode, IRQL of APC_LEVEL or below. 05749 05750 --*/ 05751 05752 { 05753 05754 PMMPTE PointerPte; 05755 PMMPFN Pfn1; 05756 PFN_NUMBER NumberOfPages; 05757 PFN_NUMBER i; 05758 PFN_NUMBER PageFrameIndex; 05759 KIRQL OldIrql; 05760 05761 ASSERT (NumberOfBytes != 0); 05762 ASSERT (PAGE_ALIGN (BaseAddress) == BaseAddress); 05763 05764 MI_MAKING_MULTIPLE_PTES_INVALID (TRUE); 05765 05766 NumberOfPages = BYTES_TO_PAGES(NumberOfBytes); 05767 05768 PointerPte = MiGetPteAddress (BaseAddress); 05769 05770 i = NumberOfPages; 05771 05772 MmLockPagableSectionByHandle (ExPageLockHandle); 05773 05774 LOCK_PFN (OldIrql); 05775 05776 do { 05777 05778 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 05779 05780 // 05781 // Mark the page for deletion when the reference count goes to zero. 05782 // 05783 05784 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 05785 ASSERT (Pfn1->u2.ShareCount == 1); 05786 MiDecrementShareAndValidCount (Pfn1->PteFrame); 05787 MI_SET_PFN_DELETED (Pfn1); 05788 MiDecrementShareCountOnly (PageFrameIndex); 05789 PointerPte += 1; 05790 i -= 1; 05791 } while (i != 0); 05792 05793 PointerPte -= NumberOfPages; 05794 05795 // 05796 // Update the count of available resident pages. 05797 // 05798 05799 MmResidentAvailablePages += NumberOfPages; 05800 MM_BUMP_COUNTER(5, NumberOfPages); 05801 05802 UNLOCK_PFN (OldIrql); 05803 05804 MmUnlockPagableImageSection (ExPageLockHandle); 05805 05806 MiReleaseSystemPtes (PointerPte, (ULONG)NumberOfPages, SystemPteSpace); 05807 05808 MiReturnCommitment (NumberOfPages); 05809 MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_NONCACHED_PAGES, NumberOfPages); 05810 05811 return; 05812 }
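
A pairing sketch (the size is an assumption): memory obtained with MmAllocateNonCachedMemory is released through the routine above, and the byte count passed to the free must be the one used at allocation time.

#include <ntddk.h>

VOID NonCachedExample (VOID)
{
    SIZE_T Bytes = 8 * 1024;
    PVOID Buffer;

    Buffer = MmAllocateNonCachedMemory (Bytes);
    if (Buffer != NULL) {
        /* ... data shared with a device that must not be cached ... */
        MmFreeNonCachedMemory (Buffer, Bytes);
    }
}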

NTKERNELAPI VOID MmFreePagesFromMdl (IN PMDL MemoryDescriptorList)
 

Definition at line 5051 of file iosup.c.

References ADDRESS_AND_SIZE_TO_SPAN_PAGES, APC_LEVEL, ASSERT, ExPageLockHandle, LOCK_PFN, MDL_IO_SPACE, MDL_PHYSICAL_VIEW, MI_IS_PFN_DELETED, MI_MAGIC_AWE_PTEFRAME, MI_MAKING_MULTIPLE_PTES_INVALID, MI_PFN_ELEMENT, MI_PFN_IS_AWE, MiDecrementReferenceCount(), MiReturnCommitment(), MM_BUMP_COUNTER, MM_DBG_COMMIT_RETURN_MDL_PAGES, MM_EMPTY_LIST, MM_TRACK_COMMIT, MmHighestPhysicalPage, MmLockPagableSectionByHandle(), MmMdlPagesAllocated, MmResidentAvailablePages, MmUnlockPagableImageSection(), PAGE_SIZE, StandbyPageList, TRUE, _MMPFN::u2, and UNLOCK_PFN.

Referenced by MiCleanPhysicalProcessPages(), NtAllocateUserPhysicalPages(), and NtFreeUserPhysicalPages().

05057 : 05058 05059 This routine walks the argument MDL freeing each physical page back to 05060 the PFN database. This is designed to free pages acquired via 05061 MmAllocatePagesForMdl only. 05062 05063 Arguments: 05064 05065 MemoryDescriptorList - Supplies an MDL which contains the pages to be freed. 05066 05067 Return Value: 05068 05069 None. 05070 05071 Environment: 05072 05073 Kernel mode, IRQL of APC_LEVEL or below. 05074 05075 --*/ 05076 { 05077 PMMPFN Pfn1; 05078 KIRQL OldIrql; 05079 PVOID StartingAddress; 05080 PVOID AlignedVa; 05081 PPFN_NUMBER Page; 05082 PFN_NUMBER NumberOfPages; 05083 PFN_NUMBER PagesFreed; 05084 05085 ASSERT (KeGetCurrentIrql() <= APC_LEVEL); 05086 05087 PagesFreed = 0; 05088 05089 MmLockPagableSectionByHandle (ExPageLockHandle); 05090 05091 Page = (PPFN_NUMBER)(MemoryDescriptorList + 1); 05092 05093 ASSERT ((MemoryDescriptorList->MdlFlags & (MDL_IO_SPACE | MDL_PHYSICAL_VIEW)) == 0); 05094 05095 ASSERT (((ULONG_PTR)MemoryDescriptorList->StartVa & (PAGE_SIZE - 1)) == 0); 05096 AlignedVa = (PVOID)MemoryDescriptorList->StartVa; 05097 05098 StartingAddress = (PVOID)((PCHAR)AlignedVa + 05099 MemoryDescriptorList->ByteOffset); 05100 05101 NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartingAddress, 05102 MemoryDescriptorList->ByteCount); 05103 05104 MI_MAKING_MULTIPLE_PTES_INVALID (TRUE); 05105 05106 LOCK_PFN (OldIrql); 05107 05108 do { 05109 05110 if (*Page == MM_EMPTY_LIST) { 05111 05112 // 05113 // There are no more locked pages. 05114 // 05115 05116 break; 05117 } 05118 05119 ASSERT (*Page <= MmHighestPhysicalPage); 05120 05121 Pfn1 = MI_PFN_ELEMENT (*Page); 05122 ASSERT (Pfn1->u2.ShareCount == 1); 05123 ASSERT (MI_IS_PFN_DELETED (Pfn1) == TRUE); 05124 ASSERT (MI_PFN_IS_AWE (Pfn1) == TRUE); 05125 ASSERT (Pfn1->PteFrame == MI_MAGIC_AWE_PTEFRAME); 05126 05127 Pfn1->u3.e1.StartOfAllocation = 0; 05128 Pfn1->u3.e1.EndOfAllocation = 0; 05129 Pfn1->u2.ShareCount = 0; 05130 #if DBG 05131 Pfn1->PteFrame -= 1; 05132 Pfn1->u3.e1.PageLocation = StandbyPageList; 05133 #endif 05134 05135 MiDecrementReferenceCount (*Page); 05136 05137 PagesFreed += 1; 05138 05139 StartingAddress = (PVOID)((PCHAR)StartingAddress + PAGE_SIZE); 05140 05141 *Page++ = MM_EMPTY_LIST; 05142 NumberOfPages -= 1; 05143 05144 } while (NumberOfPages != 0); 05145 05146 MmMdlPagesAllocated -= PagesFreed; 05147 05148 MmResidentAvailablePages += PagesFreed; 05149 MM_BUMP_COUNTER(35, PagesFreed); 05150 05151 UNLOCK_PFN (OldIrql); 05152 05153 MmUnlockPagableImageSection (ExPageLockHandle); 05154 05155 MiReturnCommitment (PagesFreed); 05156 MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_MDL_PAGES, PagesFreed); 05157 }
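
A pairing sketch for this routine (the address bounds and size are assumptions): pages come from MmAllocatePagesForMdl and go back through MmFreePagesFromMdl, after which the MDL structure itself still has to be released with ExFreePool.

#include <ntddk.h>

VOID MdlPagesExample (VOID)
{
    PHYSICAL_ADDRESS Low, High, Skip;
    PMDL Mdl;

    Low.QuadPart  = 0;
    High.QuadPart = 0xFFFFFFFF;
    Skip.QuadPart = 0;

    Mdl = MmAllocatePagesForMdl (Low, High, Skip, 16 * PAGE_SIZE);
    if (Mdl == NULL) {
        return;
    }

    /* ... map with MmMapLockedPagesSpecifyCache (and unmap before freeing),
       or hand the MDL to a device ... */

    MmFreePagesFromMdl (Mdl);   /* give the physical pages back to the PFN database */
    ExFreePool (Mdl);           /* the MDL structure itself is not freed by Mm */
}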

VOID MmFreeSpecialPool (IN PVOID P)
 

Definition at line 3742 of file allocpag.c.

References APC_LEVEL, ASSERT, BYTE_OFFSET, DISPATCH_LEVEL, FALSE, Header, KeBugCheckEx(), KeFlushSingleTb(), KeQueryTickCount(), LOCK_PFN2, MI_FREED_SPECIAL_POOL_SIGNATURE, MI_GET_PAGE_FRAME_FROM_PTE, MI_PFN_ELEMENT, MI_SET_PFN_DELETED, MI_SPECIAL_POOL_PAGABLE, MI_SPECIAL_POOL_PTE_NONPAGABLE, MI_SPECIAL_POOL_PTE_PAGABLE, MI_SPECIAL_POOL_VERIFIER, MI_STACK_BYTES, MI_VERIFIER_POOL_HEADER, MiDecrementShareCount(), MiDeleteSystemPagableVm(), MiGetPteAddress, MiReturnCommitment(), MiSpecialPagesNonPaged, MiSpecialPagesPagable, MiSpecialPoolLastPte, MM_BUMP_COUNTER, MM_EMPTY_PTE_LIST, MM_KERNEL_NOACCESS_PTE, MM_NOACCESS, MmResidentAvailablePages, MmSpecialPagesInUse, MmSystemPteBase, NoAccessPte, NonPagedPool, NULL, _MI_FREED_SPECIAL_POOL::NumberOfBytesRequested, _MI_FREED_SPECIAL_POOL::Pagable, PAGE_ALIGN, PAGE_SIZE, PagedPool, PMI_FREED_SPECIAL_POOL, POOL_OVERHEAD, POOL_TYPE, PsGetCurrentThread, _MI_FREED_SPECIAL_POOL::Signature, _MI_FREED_SPECIAL_POOL::StackBytes, _MI_FREED_SPECIAL_POOL::StackData, _MI_FREED_SPECIAL_POOL::StackPointer, _MI_FREED_SPECIAL_POOL::Thread, _MI_FREED_SPECIAL_POOL::TickCount, TRUE, _MMPTE::u, UNLOCK_PFN2, USHORT, VerifierFreeTrackedPool(), _MI_FREED_SPECIAL_POOL::VirtualAddress, and ZeroKernelPte.

03748 : 03749 03750 This routine frees a special pool allocation. The backing page is freed 03751 and the mapping virtual address is made no access (the next virtual 03752 address is already no access). 03753 03754 The virtual address PTE pair is then placed into an LRU queue to provide 03755 maximum no-access (protection) life to catch components that access 03756 deallocated pool. 03757 03758 Arguments: 03759 03760 VirtualAddress - Supplies the special pool virtual address to free. 03761 03762 Return Value: 03763 03764 None. 03765 03766 Environment: 03767 03768 Kernel mode, no locks (not even pool locks) held. 03769 03770 --*/ 03771 03772 { 03773 MMPTE PteContents; 03774 PMMPTE PointerPte; 03775 PMMPFN Pfn1; 03776 KIRQL OldIrql; 03777 ULONG SlopBytes; 03778 ULONG NumberOfBytesCalculated; 03779 ULONG NumberOfBytesRequested; 03780 POOL_TYPE PoolType; 03781 MMPTE NoAccessPte; 03782 PPOOL_HEADER Header; 03783 PUCHAR Slop; 03784 ULONG i; 03785 LOGICAL BufferAtPageEnd; 03786 PMI_FREED_SPECIAL_POOL AllocationBase; 03787 LARGE_INTEGER CurrentTime; 03788 PULONG_PTR StackPointer; 03789 03790 PointerPte = MiGetPteAddress (P); 03791 PteContents = *PointerPte; 03792 03793 // 03794 // Check the PTE now so we can give a more friendly bugcheck rather than 03795 // crashing below on a bad reference. 03796 // 03797 03798 if (PteContents.u.Hard.Valid == 0) { 03799 if ((PteContents.u.Soft.Protection == 0) || 03800 (PteContents.u.Soft.Protection == MM_NOACCESS)) { 03801 KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION, 03802 (ULONG_PTR)P, 03803 (ULONG_PTR)PteContents.u.Long, 03804 0, 03805 0x20); 03806 } 03807 } 03808 03809 if (((ULONG_PTR)P & (PAGE_SIZE - 1))) { 03810 Header = PAGE_ALIGN (P); 03811 BufferAtPageEnd = TRUE; 03812 } 03813 else { 03814 Header = (PPOOL_HEADER)((PCHAR)PAGE_ALIGN (P) + PAGE_SIZE - POOL_OVERHEAD); 03815 BufferAtPageEnd = FALSE; 03816 } 03817 03818 if (Header->Ulong1 & MI_SPECIAL_POOL_PAGABLE) { 03819 ASSERT ((PointerPte + 1)->u.Soft.PageFileHigh == MI_SPECIAL_POOL_PTE_PAGABLE); 03820 if (KeGetCurrentIrql() > APC_LEVEL) { 03821 KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION, 03822 KeGetCurrentIrql(), 03823 PagedPool, 03824 (ULONG_PTR)P, 03825 0x31); 03826 } 03827 PoolType = PagedPool; 03828 } 03829 else { 03830 ASSERT ((PointerPte + 1)->u.Soft.PageFileHigh == MI_SPECIAL_POOL_PTE_NONPAGABLE); 03831 if (KeGetCurrentIrql() > DISPATCH_LEVEL) { 03832 KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION, 03833 KeGetCurrentIrql(), 03834 NonPagedPool, 03835 (ULONG_PTR)P, 03836 0x31); 03837 } 03838 PoolType = NonPagedPool; 03839 } 03840 03841 NumberOfBytesRequested = (ULONG)(USHORT)(Header->Ulong1 & ~(MI_SPECIAL_POOL_PAGABLE | MI_SPECIAL_POOL_VERIFIER)); 03842 03843 // 03844 // We gave the caller pool-header aligned data, so account for 03845 // that when checking here. 03846 // 03847 03848 if (BufferAtPageEnd == TRUE) { 03849 03850 NumberOfBytesCalculated = PAGE_SIZE - BYTE_OFFSET(P); 03851 03852 if (NumberOfBytesRequested > NumberOfBytesCalculated) { 03853 03854 // 03855 // Seems like we didn't give the caller enough - this is an error. 03856 // 03857 03858 KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION, 03859 (ULONG_PTR)P, 03860 NumberOfBytesRequested, 03861 NumberOfBytesCalculated, 03862 0x21); 03863 } 03864 03865 if (NumberOfBytesRequested + POOL_OVERHEAD < NumberOfBytesCalculated) { 03866 03867 // 03868 // Seems like we gave the caller too much - also an error. 
03869 // 03870 03871 KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION, 03872 (ULONG_PTR)P, 03873 NumberOfBytesRequested, 03874 NumberOfBytesCalculated, 03875 0x22); 03876 } 03877 03878 // 03879 // Check the memory before the start of the caller's allocation. 03880 // 03881 03882 Slop = (PUCHAR)(Header + 1); 03883 if (Header->Ulong1 & MI_SPECIAL_POOL_VERIFIER) { 03884 Slop += sizeof(MI_VERIFIER_POOL_HEADER); 03885 } 03886 03887 for ( ; Slop < (PUCHAR)P; Slop += 1) { 03888 03889 if (*Slop != Header->BlockSize) { 03890 03891 KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION, 03892 (ULONG_PTR)P, 03893 (ULONG_PTR)Slop, 03894 Header->Ulong1, 03895 0x23); 03896 } 03897 } 03898 } 03899 else { 03900 NumberOfBytesCalculated = 0; 03901 } 03902 03903 // 03904 // Check the memory after the end of the caller's allocation. 03905 // 03906 03907 Slop = (PUCHAR)P + NumberOfBytesRequested; 03908 03909 SlopBytes = (ULONG)((PUCHAR)(PAGE_ALIGN(P)) + PAGE_SIZE - Slop); 03910 03911 if (BufferAtPageEnd == FALSE) { 03912 SlopBytes -= POOL_OVERHEAD; 03913 if (Header->Ulong1 & MI_SPECIAL_POOL_VERIFIER) { 03914 SlopBytes -= sizeof(MI_VERIFIER_POOL_HEADER); 03915 } 03916 } 03917 03918 for (i = 0; i < SlopBytes; i += 1) { 03919 03920 if (*Slop != Header->BlockSize) { 03921 03922 // 03923 // The caller wrote slop between the free alignment we gave and the 03924 // end of the page (this is not detectable from page protection). 03925 // 03926 03927 KeBugCheckEx (SPECIAL_POOL_DETECTED_MEMORY_CORRUPTION, 03928 (ULONG_PTR)P, 03929 (ULONG_PTR)Slop, 03930 Header->Ulong1, 03931 0x24); 03932 } 03933 Slop += 1; 03934 } 03935 03936 if (Header->Ulong1 & MI_SPECIAL_POOL_VERIFIER) { 03937 VerifierFreeTrackedPool (P, 03938 NumberOfBytesRequested, 03939 PoolType, 03940 TRUE); 03941 } 03942 03943 AllocationBase = (PMI_FREED_SPECIAL_POOL)(PAGE_ALIGN (P)); 03944 03945 AllocationBase->Signature = MI_FREED_SPECIAL_POOL_SIGNATURE; 03946 03947 KeQueryTickCount(&CurrentTime); 03948 AllocationBase->TickCount = CurrentTime.LowPart; 03949 03950 AllocationBase->NumberOfBytesRequested = NumberOfBytesRequested; 03951 AllocationBase->Pagable = (ULONG)PoolType; 03952 AllocationBase->VirtualAddress = P; 03953 AllocationBase->Thread = PsGetCurrentThread (); 03954 03955 #if defined (_X86_) 03956 _asm { 03957 mov StackPointer, esp 03958 } 03959 03960 AllocationBase->StackPointer = StackPointer; 03961 03962 // 03963 // For now, don't get fancy with copying more than what's in the current 03964 // stack page. To do so would require checking the thread stack limits, 03965 // DPC stack limits, etc. 
03966 // 03967 03968 AllocationBase->StackBytes = PAGE_SIZE - BYTE_OFFSET(StackPointer); 03969 03970 if (AllocationBase->StackBytes != 0) { 03971 03972 if (AllocationBase->StackBytes > MI_STACK_BYTES) { 03973 AllocationBase->StackBytes = MI_STACK_BYTES; 03974 } 03975 03976 RtlCopyMemory (AllocationBase->StackData, 03977 StackPointer, 03978 AllocationBase->StackBytes); 03979 } 03980 #else 03981 AllocationBase->StackPointer = NULL; 03982 AllocationBase->StackBytes = 0; 03983 #endif 03984 03985 if (PoolType == PagedPool) { 03986 NoAccessPte.u.Long = MM_KERNEL_NOACCESS_PTE; 03987 MiDeleteSystemPagableVm (PointerPte, 03988 1, 03989 NoAccessPte, 03990 FALSE, 03991 NULL); 03992 LOCK_PFN2 (OldIrql); 03993 MiSpecialPagesPagable -= 1; 03994 } 03995 else { 03996 03997 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 03998 LOCK_PFN2 (OldIrql); 03999 MiSpecialPagesNonPaged -= 1; 04000 MI_SET_PFN_DELETED (Pfn1); 04001 MiDecrementShareCount (MI_GET_PAGE_FRAME_FROM_PTE (PointerPte)); 04002 KeFlushSingleTb (PAGE_ALIGN(P), 04003 TRUE, 04004 TRUE, 04005 (PHARDWARE_PTE)PointerPte, 04006 ZeroKernelPte.u.Flush); 04007 MmResidentAvailablePages += 1; 04008 MM_BUMP_COUNTER(37, 1); 04009 } 04010 04011 // 04012 // Clear the adjacent PTE to support MmIsSpecialPoolAddressFree(). 04013 // 04014 04015 (PointerPte + 1)->u.Long = 0; 04016 04017 ASSERT (MiSpecialPoolLastPte->u.List.NextEntry == MM_EMPTY_PTE_LIST); 04018 MiSpecialPoolLastPte->u.List.NextEntry = PointerPte - MmSystemPteBase; 04019 04020 MiSpecialPoolLastPte = PointerPte; 04021 MiSpecialPoolLastPte->u.List.NextEntry = MM_EMPTY_PTE_LIST; 04022 04023 MmSpecialPagesInUse -= 1; 04024 04025 UNLOCK_PFN2 (OldIrql); 04026 04027 MiReturnCommitment (1); 04028 04029 return; 04030 }
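
The routine above relies on two properties of special pool: each allocation occupies its own page with the adjacent page left no-access, and the unused "slop" bytes in the allocation's page are pattern-checked when the block is freed. The following illustration (arbitrary tag and size) shows the kind of driver bug this is designed to catch when special pool is enabled for the driver, e.g. via Driver Verifier.

#include <ntddk.h>

VOID OverrunExample (VOID)
{
    PUCHAR p = (PUCHAR) ExAllocatePoolWithTag (NonPagedPool, 64, 'tseT');

    if (p == NULL) {
        return;
    }

    p[64] = 0;          /* one byte past the buffer: with special pool the
                           allocation ends at a no-access page, so this faults
                           immediately; in underrun-detection mode the damage is
                           caught by the slop-byte check at free time instead */

    ExFreePool (p);
}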

NTKERNELAPI ULONG MmGatherMemoryForHibernate (IN PMDL Mdl,
                                              IN BOOLEAN Wait)
 

Definition at line 7930 of file iosup.c.

References APC_LEVEL, FALSE, KeDelayExecutionThread(), KernelMode, LOCK_PFN2, MDL_PAGES_LOCKED, MI_GET_PAGE_COLOR_FROM_PTE, MI_PFN_ELEMENT, MI_SET_PFN_DELETED, MiDelayPageFaults, MiEmptyAllWorkingSets(), MiFlushAllPages(), MiRemoveAnyPage(), Mm30Milliseconds, MM_DEMAND_ZERO_WRITE_PTE, MmAvailablePages, NULL, _MMPFN::OriginalPte, PAGE_SHIFT, TRUE, _MMPTE::u, _MMPFN::u3, and UNLOCK_PFN2.

07937 : 07938 07939 Finds enough memory to fill in the pages of the MDL for power management 07940 hibernate function. 07941 07942 Arguments: 07943 07944 Mdl - Supplies an MDL, the start VA field should be NULL. The length 07945 field indicates how many pages to obtain. 07946 07947 Wait - FALSE to fail immediately if the pages aren't available. 07948 07949 Return Value: 07950 07951 TRUE if the MDL could be filled in, FALSE otherwise. 07952 07953 Environment: 07954 07955 Kernel mode, IRQL of APC_LEVEL or below. 07956 07957 --*/ 07958 07959 { 07960 KIRQL OldIrql; 07961 PFN_NUMBER PagesNeeded; 07962 PPFN_NUMBER Pages; 07963 PFN_NUMBER i; 07964 PFN_NUMBER PageFrameIndex; 07965 PMMPFN Pfn1; 07966 ULONG status; 07967 07968 status = FALSE; 07969 07970 PagesNeeded = Mdl->ByteCount >> PAGE_SHIFT; 07971 Pages = (PPFN_NUMBER)(Mdl + 1); 07972 07973 i = Wait ? 100 : 1; 07974 07975 InterlockedIncrement (&MiDelayPageFaults); 07976 07977 do { 07978 07979 LOCK_PFN2 (OldIrql); 07980 if (MmAvailablePages > PagesNeeded) { 07981 07982 // 07983 // Fill in the MDL. 07984 // 07985 07986 do { 07987 PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE (NULL)); 07988 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 07989 MI_SET_PFN_DELETED (Pfn1); 07990 Pfn1->u3.e2.ReferenceCount += 1; 07991 Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE; 07992 *Pages = PageFrameIndex; 07993 Pages += 1; 07994 PagesNeeded -= 1; 07995 } while (PagesNeeded); 07996 UNLOCK_PFN2 (OldIrql); 07997 Mdl->MdlFlags |= MDL_PAGES_LOCKED; 07998 status = TRUE; 07999 break; 08000 } 08001 08002 UNLOCK_PFN2 (OldIrql); 08003 08004 // 08005 // If we're being called at DISPATCH_LEVEL we cannot move pages to 08006 // the standby list because mutexes must be acquired to do so. 08007 // 08008 08009 if (OldIrql > APC_LEVEL) { 08010 break; 08011 } 08012 08013 if (!i) { 08014 break; 08015 } 08016 08017 // 08018 // Attempt to move pages to the standby list. 08019 // 08020 08021 MiEmptyAllWorkingSets (); 08022 MiFlushAllPages(); 08023 08024 KeDelayExecutionThread (KernelMode, 08025 FALSE, 08026 (PLARGE_INTEGER)&Mm30Milliseconds); 08027 i -= 1; 08028 08029 } while (TRUE); 08030 08031 InterlockedDecrement (&MiDelayPageFaults); 08032 08033 return status; 08034 }

NTSTATUS MmGetCrashDumpInformation (IN PSYSTEM_CRASH_DUMP_INFORMATION CrashInfo)
 

Definition at line 1392 of file modwrite.c.

References Handle, MmCrashDumpSection, NT_SUCCESS, NTSTATUS(), NULL, ObInsertObject(), PAGED_CODE, and Status.

Referenced by NtQuerySystemInformation().

01398 : 01399 01400 This function checks to see if a crash dump section exists and 01401 if so creates a handle to the section and returns that value 01402 in the CrashDumpInformation structure. Once the handle to the 01403 section has been created, no other references can be made 01404 to the crash dump section, and when that handle is closed, the 01405 crash dump section is deleted and the paging file space is 01406 available for reuse. 01407 01408 Arguments: 01409 01410 CrashInfo - Supplies a pointer to the crash dump information 01411 structure. 01412 01413 Return Value: 01414 01415 Status of the operation. A handle value of zero indicates no 01416 crash dump was located. 01417 01418 --*/ 01419 01420 { 01421 NTSTATUS Status; 01422 HANDLE Handle; 01423 01424 PAGED_CODE(); 01425 01426 if (MmCrashDumpSection == NULL) { 01427 Handle = 0; 01428 Status = STATUS_SUCCESS; 01429 } else { 01430 Status = ObInsertObject (MmCrashDumpSection, 01431 NULL, 01432 SECTION_MAP_READ, 01433 0, 01434 (PVOID *)NULL, 01435 &Handle); 01436 if (NT_SUCCESS(Status)) { 01437 01438 // 01439 // One shot operation. 01440 // 01441 01442 MmCrashDumpSection = NULL; 01443 } 01444 } 01445 01446 CrashInfo->CrashDumpSection = Handle; 01447 return Status; 01448 }

NTSTATUS MmGetCrashDumpStateInformation (IN PSYSTEM_CRASH_STATE_INFORMATION CrashInfo)
 

Definition at line 1452 of file modwrite.c.

References MmCrashDumpSection, NULL, and PAGED_CODE.

Referenced by NtQuerySystemInformation().

01458 :
01459 
01460     This function checks to see if a crash dump section exists and
01461     returns a BOOLEAN value in the CrashStateInformation structure
01462     based on the outcome.
01463 
01464 Arguments:
01465 
01466     CrashInfo - Supplies a pointer to the crash dump state information
01467                 structure.
01468 
01469 Return Value:
01470 
01471     Status of the operation. A BOOLEAN value of FALSE indicates no
01472     crash dump was located.
01473 
01474 --*/
01475 
01476 {
01477     PAGED_CODE();
01478 
01479     CrashInfo->ValidCrashDump = (MmCrashDumpSection != NULL);
01480     return STATUS_SUCCESS;
01481 }

NTSTATUS MmGetFileNameForSection (IN HANDLE Section,
                                  OUT PSTRING FileName)
 

Definition at line 1721 of file sectsup.c.

References ExAllocatePoolWithTag, ExFreePool(), FALSE, FileName, KernelMode, MmSectionObjectType, NT_SUCCESS, NTSTATUS(), NULL, ObDereferenceObject, ObQueryNameString(), ObReferenceObjectByHandle(), PagedPool, PSECTION, RtlUnicodeStringToAnsiString(), Status, TRUE, and xMAX_NAME.

Referenced by DbgkCreateThread(), and DbgkpSectionHandleToFileHandle().

01728 : 01729 01730 This function returns the file name for the corresponding section. 01731 01732 Arguments: 01733 01734 Section - Supplies the handle of the section to get the name of. 01735 01736 FileName - Returns the name of the corresponding section. 01737 01738 Return Value: 01739 01740 TBS 01741 01742 Environment: 01743 01744 Kernel mode, APC_LEVEL or below, no mutexes held. 01745 01746 --*/ 01747 01748 { 01749 01750 PSECTION SectionObject; 01751 POBJECT_NAME_INFORMATION FileNameInfo; 01752 ULONG whocares; 01753 NTSTATUS Status; 01754 ULONG Dereference; 01755 01756 Dereference = TRUE; 01757 01758 #define xMAX_NAME 1024 01759 01760 if ( (ULONG_PTR)Section & 1 ) { 01761 SectionObject = (PSECTION)((ULONG_PTR)Section & ~1); 01762 Dereference = FALSE; 01763 } else { 01764 Status = ObReferenceObjectByHandle ( Section, 01765 0, 01766 MmSectionObjectType, 01767 KernelMode, 01768 (PVOID *)&SectionObject, 01769 NULL ); 01770 01771 if (!NT_SUCCESS(Status)) { 01772 return Status; 01773 } 01774 } 01775 01776 if (SectionObject->u.Flags.Image == 0) { 01777 if ( Dereference ) 01778 ObDereferenceObject (SectionObject); 01779 return STATUS_SECTION_NOT_IMAGE; 01780 } 01781 01782 FileNameInfo = ExAllocatePoolWithTag (PagedPool, xMAX_NAME, ' mM'); 01783 01784 if ( !FileNameInfo ) { 01785 if ( Dereference ) 01786 ObDereferenceObject (SectionObject); 01787 return STATUS_NO_MEMORY; 01788 } 01789 01790 Status = ObQueryNameString( 01791 SectionObject->Segment->ControlArea->FilePointer, 01792 FileNameInfo, 01793 xMAX_NAME, 01794 &whocares 01795 ); 01796 01797 if ( Dereference ) 01798 ObDereferenceObject (SectionObject); 01799 01800 if ( !NT_SUCCESS(Status) ) { 01801 ExFreePool(FileNameInfo); 01802 return Status; 01803 } 01804 01805 FileName->Length = 0; 01806 FileName->MaximumLength = (FileNameInfo->Name.Length/sizeof(WCHAR)) + 1; 01807 FileName->Buffer = ExAllocatePoolWithTag (PagedPool, 01808 FileName->MaximumLength, 01809 ' mM'); 01810 if ( !FileName->Buffer ) { 01811 ExFreePool(FileNameInfo); 01812 return STATUS_NO_MEMORY; 01813 } 01814 RtlUnicodeStringToAnsiString((PANSI_STRING)FileName,&FileNameInfo->Name,FALSE); 01815 FileName->Buffer[FileName->Length] = '\0'; 01816 ExFreePool(FileNameInfo); 01817 01818 return STATUS_SUCCESS; 01819 }

NTSTATUS MmGetPageFileInformation ( OUT PVOID SystemInformation,
                                    IN ULONG SystemInformationLength,
                                    OUT PULONG Length )
 

Definition at line 4466 of file modwrite.c.

References _MMPAGING_FILE::CurrentUsage, MmNumberOfPagingFiles, MmPagingFile, PAGED_CODE, _MMPAGING_FILE::PageFileName, _MMPAGING_FILE::PeakUsage, ROUND_UP, and _MMPAGING_FILE::Size.

Referenced by NtQuerySystemInformation().

04474 : 04475 04476 This routine returns information about the currently active paging 04477 files. 04478 04479 Arguments: 04480 04481 SystemInformation - Returns the paging file information. 04482 04483 SystemInformationLength - Supplies the length of the SystemInformation 04484 buffer. 04485 04486 Length - Returns the length of the paging file information placed in the 04487 buffer. 04488 04489 Return Value: 04490 04491 Returns the status of the operation. 04492 04493 --*/ 04494 04495 { 04496 PSYSTEM_PAGEFILE_INFORMATION PageFileInfo; 04497 ULONG NextEntryOffset = 0; 04498 ULONG TotalSize = 0; 04499 ULONG i; 04500 UNICODE_STRING UserBufferPageFileName; 04501 04502 PAGED_CODE(); 04503 04504 *Length = 0; 04505 PageFileInfo = (PSYSTEM_PAGEFILE_INFORMATION)SystemInformation; 04506 04507 PageFileInfo->TotalSize = 0; 04508 04509 for (i = 0; i < MmNumberOfPagingFiles; i += 1) { 04510 PageFileInfo = (PSYSTEM_PAGEFILE_INFORMATION)( 04511 (PUCHAR)PageFileInfo + NextEntryOffset); 04512 NextEntryOffset = sizeof(SYSTEM_PAGEFILE_INFORMATION); 04513 TotalSize += sizeof(SYSTEM_PAGEFILE_INFORMATION); 04514 04515 if (TotalSize > SystemInformationLength) { 04516 return STATUS_INFO_LENGTH_MISMATCH; 04517 } 04518 04519 PageFileInfo->TotalSize = (ULONG)MmPagingFile[i]->Size; 04520 PageFileInfo->TotalInUse = (ULONG)MmPagingFile[i]->CurrentUsage; 04521 PageFileInfo->PeakUsage = (ULONG)MmPagingFile[i]->PeakUsage; 04522 04523 // 04524 // The PageFileName portion of the UserBuffer must be saved locally 04525 // to protect against a malicious thread changing the contents. This 04526 // is because we will reference the contents ourselves when the actual 04527 // string is copied out carefully below. 04528 // 04529 04530 UserBufferPageFileName.Length = MmPagingFile[i]->PageFileName.Length; 04531 UserBufferPageFileName.MaximumLength = MmPagingFile[i]->PageFileName.Length + sizeof(WCHAR); 04532 UserBufferPageFileName.Buffer = (PWCHAR)(PageFileInfo + 1); 04533 04534 PageFileInfo->PageFileName = UserBufferPageFileName; 04535 04536 TotalSize += ROUND_UP (UserBufferPageFileName.MaximumLength, 04537 sizeof(ULONG)); 04538 NextEntryOffset += ROUND_UP (UserBufferPageFileName.MaximumLength, 04539 sizeof(ULONG)); 04540 04541 if (TotalSize > SystemInformationLength) { 04542 return STATUS_INFO_LENGTH_MISMATCH; 04543 } 04544 04545 // 04546 // Carefully reference the user buffer here. 04547 // 04548 04549 RtlMoveMemory(UserBufferPageFileName.Buffer, 04550 MmPagingFile[i]->PageFileName.Buffer, 04551 MmPagingFile[i]->PageFileName.Length); 04552 UserBufferPageFileName.Buffer[ 04553 MmPagingFile[i]->PageFileName.Length/sizeof(WCHAR)] = UNICODE_NULL; 04554 PageFileInfo->NextEntryOffset = NextEntryOffset; 04555 } 04556 PageFileInfo->NextEntryOffset = 0; 04557 *Length = TotalSize; 04558 return STATUS_SUCCESS; 04559 }
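The records emitted by this routine are chained by NextEntryOffset, with the final record carrying an offset of zero. A hedged sketch of the conventional walk over the filled buffer, assuming at least one paging file is configured (SystemInformation is the buffer passed to the routine; ProcessOnePageFile is a hypothetical helper):

    PSYSTEM_PAGEFILE_INFORMATION Info;

    Info = (PSYSTEM_PAGEFILE_INFORMATION) SystemInformation;

    for (;;) {

        //
        // TotalSize, TotalInUse and PeakUsage are page counts;
        // PageFileName points just past the fixed-size record.
        //

        ProcessOnePageFile (Info);

        if (Info->NextEntryOffset == 0) {
            break;
        }
        Info = (PSYSTEM_PAGEFILE_INFORMATION)((PUCHAR)Info + Info->NextEntryOffset);
    }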

NTKERNELAPI PHYSICAL_ADDRESS MmGetPhysicalAddress ( IN PVOID BaseAddress )
 

Definition at line 5490 of file iosup.c.

References BYTE_OFFSET, MI_CONVERT_PHYSICAL_TO_PFN, MI_GET_PAGE_FRAME_FROM_PTE, MI_IS_PHYSICAL_ADDRESS, MiGetPteAddress, PAGE_SHIFT, _MMPTE::u, and ZERO_LARGE.

Referenced by IoFreeDumpRange(), IopGetDumpStack(), IopInitializeDCB(), IopMapVirtualToPhysicalMdl(), IoSetDumpRange(), KdpStub(), KeStartAllProcessors(), Ki386BuildIdentityBuffer(), Ki386ConvertPte(), Ki386CreateIdentityMap(), KiGetPhysicalAddress(), MmAllocateContiguousMemorySpecifyCache(), MmDbgWriteCheck(), MmHibernateInformation(), and MmMapUserAddressesToPage().

    This function returns the corresponding physical address for a
    valid virtual address.

Arguments:

    BaseAddress - Supplies the virtual address for which to return the
                  physical address.

Return Value:

    Returns the corresponding physical address.

Environment:

    Kernel mode.  Any IRQL level.

--*/

{
    PMMPTE PointerPte;
    PHYSICAL_ADDRESS PhysicalAddress;

    if (MI_IS_PHYSICAL_ADDRESS(BaseAddress)) {
        PhysicalAddress.QuadPart = MI_CONVERT_PHYSICAL_TO_PFN (BaseAddress);
    } else {

        PointerPte = MiGetPteAddress(BaseAddress);

        if (PointerPte->u.Hard.Valid == 0) {
            KdPrint(("MM:MmGetPhysicalAddressFailed base address was %lx",
                     BaseAddress));
            ZERO_LARGE (PhysicalAddress);
            return PhysicalAddress;
        }
        PhysicalAddress.QuadPart = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte);
    }

    PhysicalAddress.QuadPart = PhysicalAddress.QuadPart << PAGE_SHIFT;
    PhysicalAddress.LowPart += BYTE_OFFSET(BaseAddress);

    return PhysicalAddress;
}
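A short usage sketch (Buffer is a hypothetical, currently valid or identity-mapped virtual address). As the code above shows, an invalid virtual address yields a zero physical address:

    PHYSICAL_ADDRESS PhysicalAddress;

    PhysicalAddress = MmGetPhysicalAddress (Buffer);

    if (PhysicalAddress.QuadPart == 0) {
        //
        // Buffer was not a valid (or identity-mapped) virtual address.
        //
    }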

NTKERNELAPI PPHYSICAL_MEMORY_RANGE MmGetPhysicalMemoryRanges ( VOID )
 

Definition at line 1032 of file dynmem.c.

References ASSERT, _PHYSICAL_MEMORY_RANGE::BaseAddress, _PHYSICAL_MEMORY_RUN::BasePage, ExAllocatePoolWithTag, LOCK_PFN, MmDynamicMemoryMutex, MmPhysicalMemoryBlock, NonPagedPool, NULL, _PHYSICAL_MEMORY_RANGE::NumberOfBytes, _PHYSICAL_MEMORY_DESCRIPTOR::NumberOfRuns, PAGE_SIZE, _PHYSICAL_MEMORY_RUN::PageCount, PASSIVE_LEVEL, PHYSICAL_MEMORY_RANGE, _PHYSICAL_MEMORY_DESCRIPTOR::Run, and UNLOCK_PFN.

    This routine returns the virtual address of a nonpaged pool block which
    contains the physical memory ranges in the system.

    The returned block contains physical address and page count pairs.
    The last entry contains zero for both.

    The caller must understand that this block can change at any point before
    or after this snapshot.

    It is the caller's responsibility to free this block.

Arguments:

    None.

Return Value:

    NULL on failure.

Environment:

    Kernel mode.  PASSIVE level.  No locks held.

--*/

{
    ULONG i;
    KIRQL OldIrql;
    PPHYSICAL_MEMORY_RANGE p;
    PPHYSICAL_MEMORY_RANGE PhysicalMemoryBlock;

    ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL);

    ExAcquireFastMutex (&MmDynamicMemoryMutex);

    i = sizeof(PHYSICAL_MEMORY_RANGE) * (MmPhysicalMemoryBlock->NumberOfRuns + 1);

    PhysicalMemoryBlock = ExAllocatePoolWithTag (NonPagedPool,
                                                 i,
                                                 'hPmM');

    if (PhysicalMemoryBlock == NULL) {
        ExReleaseFastMutex (&MmDynamicMemoryMutex);
        return NULL;
    }

    p = PhysicalMemoryBlock;

    LOCK_PFN (OldIrql);

    ASSERT (i == (sizeof(PHYSICAL_MEMORY_RANGE) * (MmPhysicalMemoryBlock->NumberOfRuns + 1)));

    for (i = 0; i < MmPhysicalMemoryBlock->NumberOfRuns; i += 1) {
        p->BaseAddress.QuadPart = (LONGLONG)MmPhysicalMemoryBlock->Run[i].BasePage * PAGE_SIZE;
        p->NumberOfBytes.QuadPart = (LONGLONG)MmPhysicalMemoryBlock->Run[i].PageCount * PAGE_SIZE;
        p += 1;
    }

    p->BaseAddress.QuadPart = 0;
    p->NumberOfBytes.QuadPart = 0;

    UNLOCK_PFN (OldIrql);

    ExReleaseFastMutex (&MmDynamicMemoryMutex);

    return PhysicalMemoryBlock;
}
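Based on the contract described above (zero-terminated array, caller frees), a typical consumer might look like this sketch:

    PPHYSICAL_MEMORY_RANGE Ranges;
    PPHYSICAL_MEMORY_RANGE p;

    Ranges = MmGetPhysicalMemoryRanges ();

    if (Ranges != NULL) {

        for (p = Ranges;
             p->BaseAddress.QuadPart != 0 || p->NumberOfBytes.QuadPart != 0;
             p += 1) {

            //
            // Each entry describes the run
            // [BaseAddress, BaseAddress + NumberOfBytes).
            //
        }

        ExFreePool (Ranges);
    }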

NTSTATUS MmGetSectionRange ( IN PVOID AddressWithinSection,
                             OUT PVOID *StartingSectionAddress,
                             OUT PULONG SizeofSection )
 

Definition at line 6797 of file iosup.c.

References ExAcquireResourceShared, ExReleaseResource, KeEnterCriticalRegion, KeLeaveCriticalRegion, MiLookupDataTableEntry(), NTSTATUS(), PAGED_CODE, PsLoadedModuleResource, RtlImageNtHeader(), Status, and TRUE.

{
    PLDR_DATA_TABLE_ENTRY DataTableEntry;
    ULONG i;
    PIMAGE_NT_HEADERS NtHeaders;
    PIMAGE_SECTION_HEADER NtSection;
    NTSTATUS Status;
    ULONG_PTR Rva;

    PAGED_CODE();

    //
    // Search the loaded module list for the data table entry that describes
    // the DLL that was just unloaded.  It is possible that an entry is not in
    // the list if a failure occurred at a point in loading the DLL just before
    // the data table entry was generated.
    //

    Status = STATUS_NOT_FOUND;

    KeEnterCriticalRegion();
    ExAcquireResourceShared (&PsLoadedModuleResource, TRUE);

    DataTableEntry = MiLookupDataTableEntry (AddressWithinSection, TRUE);
    if (DataTableEntry) {

        Rva = (ULONG_PTR)((PUCHAR)AddressWithinSection - (ULONG_PTR)DataTableEntry->DllBase);

        NtHeaders = (PIMAGE_NT_HEADERS)RtlImageNtHeader(DataTableEntry->DllBase);

        NtSection = (PIMAGE_SECTION_HEADER)((PCHAR)NtHeaders +
                        sizeof(ULONG) +
                        sizeof(IMAGE_FILE_HEADER) +
                        NtHeaders->FileHeader.SizeOfOptionalHeader
                        );

        for (i = 0; i < NtHeaders->FileHeader.NumberOfSections; i += 1) {

            if ( Rva >= NtSection->VirtualAddress &&
                 Rva < NtSection->VirtualAddress + NtSection->SizeOfRawData ) {

                //
                // Found it
                //

                *StartingSectionAddress = (PVOID)
                    ((PCHAR) DataTableEntry->DllBase + NtSection->VirtualAddress);
                *SizeofSection = NtSection->SizeOfRawData;
                Status = STATUS_SUCCESS;
                break;
            }

            NtSection += 1;
        }
    }

    ExReleaseResource (&PsLoadedModuleResource);
    KeLeaveCriticalRegion();
    return Status;
}
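A hedged caller sketch (AddressInDriver is a hypothetical address known to lie inside a loaded system image):

    PVOID SectionBase;
    ULONG SectionSize;
    NTSTATUS Status;

    Status = MmGetSectionRange (AddressInDriver, &SectionBase, &SectionSize);

    if (NT_SUCCESS (Status)) {
        //
        // The PE section containing AddressInDriver spans
        // [SectionBase, (PCHAR)SectionBase + SectionSize).
        //
    }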

NTKERNELAPI PVOID MmGetSystemRoutineAddress ( IN PUNICODE_STRING SystemRoutineName )
 

Definition at line 6808 of file sysload.c.

References ASSERT, ExAcquireResourceShared, ExReleaseResource, FALSE, KeDelayExecutionThread(), KeEnterCriticalRegion, KeLeaveCriticalRegion, KernelMode, L, MiFindExportedRoutineByName(), MmShortTime, NT_SUCCESS, NTSTATUS(), NULL, PASSIVE_LEVEL, PsLoadedModuleList, PsLoadedModuleResource, RtlEqualUnicodeString(), RtlFreeAnsiString(), RtlInitUnicodeString(), RtlUnicodeStringToAnsiString(), Status, and TRUE.

06814 : 06815 06816 This function returns the address of the argument function pointer if 06817 it is in the kernel or HAL, NULL if it is not. 06818 06819 Arguments: 06820 06821 SystemRoutineName - Supplies the name of the desired routine. 06822 06823 Return Value: 06824 06825 Non-NULL function pointer if successful. NULL if not. 06826 06827 Environment: 06828 06829 Kernel mode. 06830 06831 --*/ 06832 06833 { 06834 ULONG AnsiLength; 06835 NTSTATUS Status; 06836 PLDR_DATA_TABLE_ENTRY DataTableEntry; 06837 ANSI_STRING AnsiString; 06838 PLIST_ENTRY NextEntry; 06839 UNICODE_STRING KernelString; 06840 UNICODE_STRING HalString; 06841 PVOID FunctionAddress; 06842 LOGICAL Found; 06843 ULONG EntriesChecked; 06844 06845 ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL); 06846 06847 EntriesChecked = 0; 06848 FunctionAddress = NULL; 06849 06850 RtlInitUnicodeString (&KernelString, L"ntoskrnl.exe"); 06851 RtlInitUnicodeString (&HalString, L"hal.dll"); 06852 06853 do { 06854 Status = RtlUnicodeStringToAnsiString( &AnsiString, 06855 SystemRoutineName, 06856 TRUE ); 06857 06858 if (NT_SUCCESS( Status)) { 06859 break; 06860 } 06861 06862 KeDelayExecutionThread (KernelMode, FALSE, &MmShortTime); 06863 06864 } while (TRUE); 06865 06866 // 06867 // Arbitrary process context so prevent suspend APCs now. 06868 // 06869 06870 KeEnterCriticalRegion(); 06871 ExAcquireResourceShared (&PsLoadedModuleResource, TRUE); 06872 06873 // 06874 // Check only the kernel and the HAL for exports. 06875 // 06876 06877 NextEntry = PsLoadedModuleList.Flink; 06878 while (NextEntry != &PsLoadedModuleList) { 06879 06880 Found = FALSE; 06881 06882 DataTableEntry = CONTAINING_RECORD(NextEntry, 06883 LDR_DATA_TABLE_ENTRY, 06884 InLoadOrderLinks); 06885 06886 if (RtlEqualUnicodeString (&KernelString, 06887 &DataTableEntry->BaseDllName, 06888 TRUE)) { 06889 06890 Found = TRUE; 06891 EntriesChecked += 1; 06892 06893 } 06894 else if (RtlEqualUnicodeString (&HalString, 06895 &DataTableEntry->BaseDllName, 06896 TRUE)) { 06897 06898 Found = TRUE; 06899 EntriesChecked += 1; 06900 } 06901 06902 if (Found == TRUE) { 06903 06904 FunctionAddress = MiFindExportedRoutineByName (DataTableEntry, 06905 &AnsiString); 06906 06907 if (FunctionAddress != NULL) { 06908 break; 06909 } 06910 06911 if (EntriesChecked == 2) { 06912 break; 06913 } 06914 } 06915 06916 NextEntry = NextEntry->Flink; 06917 } 06918 06919 ExReleaseResource (&PsLoadedModuleResource); 06920 KeLeaveCriticalRegion(); 06921 06922 RtlFreeAnsiString (&AnsiString); 06923 06924 return FunctionAddress; 06925 }
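A minimal usage sketch; the routine name is illustrative and the returned pointer must be cast to the correct function type before it is called:

    UNICODE_STRING RoutineName;
    PVOID Routine;

    RtlInitUnicodeString (&RoutineName, L"KeBugCheckEx");

    Routine = MmGetSystemRoutineAddress (&RoutineName);

    if (Routine != NULL) {
        //
        // The export was found in the kernel or the HAL.
        //
    }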

NTSTATUS MmGetVerifierInformation ( OUT PVOID SystemInformation,
                                    IN ULONG SystemInformationLength,
                                    OUT PULONG Length )
 

Definition at line 4046 of file verifier.c.

References _MI_VERIFIER_DRIVER_ENTRY::BaseName, _MI_VERIFIER_DRIVER_ENTRY::CurrentNonPagedPoolAllocations, _MI_VERIFIER_DRIVER_ENTRY::CurrentPagedPoolAllocations, EXCEPTION_EXECUTE_HANDLER, ExRaiseStatus(), FALSE, _MI_VERIFIER_DRIVER_ENTRY::Flags, KeEnterCriticalRegion, KeLeaveCriticalRegion, KeReleaseMutant(), KernelMode, KeWaitForSingleObject(), _MI_VERIFIER_DRIVER_ENTRY::Loads, MiSuspectDriverList, MmSystemLoadLock, MmVerifierData, _MI_VERIFIER_DRIVER_ENTRY::NonPagedBytes, NTSTATUS(), NULL, PAGED_CODE, _MI_VERIFIER_DRIVER_ENTRY::PagedBytes, _MI_VERIFIER_DRIVER_ENTRY::PeakNonPagedBytes, _MI_VERIFIER_DRIVER_ENTRY::PeakNonPagedPoolAllocations, _MI_VERIFIER_DRIVER_ENTRY::PeakPagedBytes, _MI_VERIFIER_DRIVER_ENTRY::PeakPagedPoolAllocations, ROUND_UP, Status, _MI_VERIFIER_DRIVER_ENTRY::Unloads, VI_VERIFYING_DIRECTLY, and WrVirtualMemory.

Referenced by NtQuerySystemInformation().

04054 : 04055 04056 This routine returns information about drivers undergoing verification. 04057 04058 Arguments: 04059 04060 SystemInformation - Returns the driver verification information. 04061 04062 SystemInformationLength - Supplies the length of the SystemInformation 04063 buffer. 04064 04065 Length - Returns the length of the driver verification file information 04066 placed in the buffer. 04067 04068 Return Value: 04069 04070 Returns the status of the operation. 04071 04072 Environment: 04073 04074 The SystemInformation buffer is in user space and our caller has wrapped 04075 a try-except around this entire routine. Capture any exceptions here and 04076 release resources accordingly. 04077 04078 --*/ 04079 04080 { 04081 PSYSTEM_VERIFIER_INFORMATION UserVerifyBuffer; 04082 ULONG NextEntryOffset; 04083 ULONG TotalSize; 04084 NTSTATUS Status; 04085 PLIST_ENTRY NextEntry; 04086 PMI_VERIFIER_DRIVER_ENTRY Verifier; 04087 UNICODE_STRING UserBufferDriverName; 04088 04089 PAGED_CODE(); 04090 04091 NextEntryOffset = 0; 04092 TotalSize = 0; 04093 04094 *Length = 0; 04095 UserVerifyBuffer = (PSYSTEM_VERIFIER_INFORMATION)SystemInformation; 04096 04097 // 04098 // Capture the number of verifying drivers and the relevant data while 04099 // synchronized. Then return it to our caller. 04100 // 04101 04102 Status = STATUS_SUCCESS; 04103 04104 KeEnterCriticalRegion(); 04105 04106 KeWaitForSingleObject (&MmSystemLoadLock, 04107 WrVirtualMemory, 04108 KernelMode, 04109 FALSE, 04110 (PLARGE_INTEGER)NULL); 04111 04112 try { 04113 04114 NextEntry = MiSuspectDriverList.Flink; 04115 while (NextEntry != &MiSuspectDriverList) { 04116 04117 Verifier = CONTAINING_RECORD(NextEntry, 04118 MI_VERIFIER_DRIVER_ENTRY, 04119 Links); 04120 04121 if ((Verifier->Flags & VI_VERIFYING_DIRECTLY) == 0) { 04122 NextEntry = NextEntry->Flink; 04123 continue; 04124 } 04125 04126 UserVerifyBuffer = (PSYSTEM_VERIFIER_INFORMATION)( 04127 (PUCHAR)UserVerifyBuffer + NextEntryOffset); 04128 NextEntryOffset = sizeof(SYSTEM_VERIFIER_INFORMATION); 04129 TotalSize += sizeof(SYSTEM_VERIFIER_INFORMATION); 04130 04131 if (TotalSize > SystemInformationLength) { 04132 ExRaiseStatus (STATUS_INFO_LENGTH_MISMATCH); 04133 } 04134 04135 // 04136 // This data is cumulative for all drivers. 04137 // 04138 04139 UserVerifyBuffer->Level = MmVerifierData.Level; 04140 UserVerifyBuffer->RaiseIrqls = MmVerifierData.RaiseIrqls; 04141 UserVerifyBuffer->AcquireSpinLocks = MmVerifierData.AcquireSpinLocks; 04142 04143 UserVerifyBuffer->UnTrackedPool = MmVerifierData.UnTrackedPool; 04144 UserVerifyBuffer->SynchronizeExecutions = MmVerifierData.SynchronizeExecutions; 04145 04146 UserVerifyBuffer->AllocationsAttempted = MmVerifierData.AllocationsAttempted; 04147 UserVerifyBuffer->AllocationsSucceeded = MmVerifierData.AllocationsSucceeded; 04148 UserVerifyBuffer->AllocationsSucceededSpecialPool = MmVerifierData.AllocationsSucceededSpecialPool; 04149 UserVerifyBuffer->AllocationsWithNoTag = MmVerifierData.AllocationsWithNoTag; 04150 04151 UserVerifyBuffer->TrimRequests = MmVerifierData.TrimRequests; 04152 UserVerifyBuffer->Trims = MmVerifierData.Trims; 04153 UserVerifyBuffer->AllocationsFailed = MmVerifierData.AllocationsFailed; 04154 UserVerifyBuffer->AllocationsFailedDeliberately = MmVerifierData.AllocationsFailedDeliberately; 04155 04156 // 04157 // This data is kept on a per-driver basis. 
04158 // 04159 04160 UserVerifyBuffer->CurrentPagedPoolAllocations = Verifier->CurrentPagedPoolAllocations; 04161 UserVerifyBuffer->CurrentNonPagedPoolAllocations = Verifier->CurrentNonPagedPoolAllocations; 04162 UserVerifyBuffer->PeakPagedPoolAllocations = Verifier->PeakPagedPoolAllocations; 04163 UserVerifyBuffer->PeakNonPagedPoolAllocations = Verifier->PeakNonPagedPoolAllocations; 04164 04165 UserVerifyBuffer->PagedPoolUsageInBytes = Verifier->PagedBytes; 04166 UserVerifyBuffer->NonPagedPoolUsageInBytes = Verifier->NonPagedBytes; 04167 UserVerifyBuffer->PeakPagedPoolUsageInBytes = Verifier->PeakPagedBytes; 04168 UserVerifyBuffer->PeakNonPagedPoolUsageInBytes = Verifier->PeakNonPagedBytes; 04169 04170 UserVerifyBuffer->Loads = Verifier->Loads; 04171 UserVerifyBuffer->Unloads = Verifier->Unloads; 04172 04173 // 04174 // The DriverName portion of the UserVerifyBuffer must be saved 04175 // locally to protect against a malicious thread changing the 04176 // contents. This is because we will reference the contents 04177 // ourselves when the actual string is copied out carefully below. 04178 // 04179 04180 UserBufferDriverName.Length = Verifier->BaseName.Length; 04181 UserBufferDriverName.MaximumLength = Verifier->BaseName.Length + sizeof (WCHAR); 04182 UserBufferDriverName.Buffer = (PWCHAR)(UserVerifyBuffer + 1); 04183 04184 UserVerifyBuffer->DriverName = UserBufferDriverName; 04185 04186 TotalSize += ROUND_UP (UserBufferDriverName.MaximumLength, 04187 sizeof(ULONG)); 04188 NextEntryOffset += ROUND_UP (UserBufferDriverName.MaximumLength, 04189 sizeof(ULONG)); 04190 04191 if (TotalSize > SystemInformationLength) { 04192 ExRaiseStatus (STATUS_INFO_LENGTH_MISMATCH); 04193 } 04194 04195 // 04196 // Carefully reference the UserVerifyBuffer here. 04197 // 04198 04199 RtlMoveMemory(UserBufferDriverName.Buffer, 04200 Verifier->BaseName.Buffer, 04201 Verifier->BaseName.Length); 04202 04203 UserBufferDriverName.Buffer[ 04204 Verifier->BaseName.Length/sizeof(WCHAR)] = UNICODE_NULL; 04205 UserVerifyBuffer->NextEntryOffset = NextEntryOffset; 04206 04207 NextEntry = NextEntry->Flink; 04208 } 04209 } except (EXCEPTION_EXECUTE_HANDLER) { 04210 Status = GetExceptionCode(); 04211 } 04212 04213 KeReleaseMutant (&MmSystemLoadLock, 1, FALSE, FALSE); 04214 04215 KeLeaveCriticalRegion(); 04216 04217 if (Status != STATUS_INFO_LENGTH_MISMATCH) { 04218 UserVerifyBuffer->NextEntryOffset = 0; 04219 *Length = TotalSize; 04220 } 04221 04222 return Status; 04223 }

NTKERNELAPI PVOID MmGetVirtualForPhysical ( IN PHYSICAL_ADDRESS PhysicalAddress )
 

Definition at line 5542 of file iosup.c.

References BYTE_OFFSET, MI_PFN_ELEMENT, MiGetVirtualAddressMappedByPte, PAGE_SHIFT, and _MMPFN::PteAddress.

Referenced by MmSetKernelDumpRange().

    This function returns the corresponding virtual address for a physical
    address whose primary virtual address is in system space.

Arguments:

    PhysicalAddress - Supplies the physical address for which to return the
                      virtual address.

Return Value:

    Returns the corresponding virtual address.

Environment:

    Kernel mode.  Any IRQL level.

--*/

{
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn;

    PageFrameIndex = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT);

    Pfn = MI_PFN_ELEMENT (PageFrameIndex);

    return (PVOID)((PCHAR)MiGetVirtualAddressMappedByPte (Pfn->PteAddress) +
                   BYTE_OFFSET (PhysicalAddress.LowPart));
}
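A brief sketch, assuming the physical page already has a primary system-space mapping (as the routine description requires); the address value is hypothetical:

    PHYSICAL_ADDRESS PhysicalAddress;
    PVOID VirtualAddress;

    PhysicalAddress.QuadPart = 0x103000;    // hypothetical physical address

    VirtualAddress = MmGetVirtualForPhysical (PhysicalAddress);

    //
    // VirtualAddress is the system-space mapping of that page plus the
    // byte offset within the page.
    //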

NTKERNELAPI NTSTATUS MmGrowKernelStack ( IN PVOID CurrentStack )
 

Definition at line 2921 of file procsup.c.

References ASSERT, LOCK_PFN, MI_GET_PAGE_COLOR_FROM_PTE, MI_MAKE_VALID_PTE, MI_SET_PTE_DIRTY, MiEnsureAvailablePageOrWait(), MiGetPteAddress, MiGetVirtualAddressMappedByPte, MiInitializePfn(), MiRemoveAnyPage(), MM_BUMP_COUNTER, MM_KERNEL_DEMAND_ZERO_PTE, MM_KSTACK_OUTSWAPPED, MM_READWRITE, MmKernelStackResident, MmResidentAvailablePages, NULL, PAGE_SIZE, PERFINFO_GROW_STACK, PsGetCurrentThread, _KTHREAD::StackBase, _KTHREAD::StackLimit, _ETHREAD::Tcb, _MMPTE::u, and UNLOCK_PFN.

02927 : 02928 02929 This function attempts to grows the current thread's kernel stack 02930 such that there is always KERNEL_LARGE_STACK_COMMIT bytes below 02931 the current stack pointer. 02932 02933 Arguments: 02934 02935 CurrentStack - Supplies a pointer to the current stack pointer. 02936 02937 Return Value: 02938 02939 STATUS_SUCCESS is returned if the stack was grown. 02940 02941 STATUS_STACK_OVERFLOW is returned if there was not enough space reserved 02942 for the commitment. 02943 02944 STATUS_NO_MEMORY is returned if there was not enough physical memory 02945 in the system. 02946 02947 --*/ 02948 02949 { 02950 PMMPTE NewLimit; 02951 PMMPTE StackLimit; 02952 PMMPTE EndStack; 02953 PETHREAD Thread; 02954 PFN_NUMBER NumberOfPages; 02955 KIRQL OldIrql; 02956 PFN_NUMBER PageFrameIndex; 02957 MMPTE TempPte; 02958 02959 Thread = PsGetCurrentThread (); 02960 ASSERT (((PCHAR)Thread->Tcb.StackBase - (PCHAR)Thread->Tcb.StackLimit) <= 02961 (KERNEL_LARGE_STACK_SIZE + PAGE_SIZE)); 02962 NewLimit = MiGetPteAddress ((PVOID)((PUCHAR)CurrentStack - 02963 KERNEL_LARGE_STACK_COMMIT)); 02964 02965 StackLimit = MiGetPteAddress (Thread->Tcb.StackLimit); 02966 02967 // 02968 // If the new stack limit exceeds the reserved region for the kernel 02969 // stack, then return an error. 02970 // 02971 02972 EndStack = MiGetPteAddress ((PVOID)((PUCHAR)Thread->Tcb.StackBase - 02973 KERNEL_LARGE_STACK_SIZE)); 02974 02975 if (NewLimit < EndStack) { 02976 02977 // 02978 // Don't go into guard page. 02979 // 02980 02981 return STATUS_STACK_OVERFLOW; 02982 02983 } 02984 02985 ASSERT (StackLimit->u.Hard.Valid == 1); 02986 02987 // 02988 // Lock the PFN database and attempt to expand the kernel stack. 02989 // 02990 02991 StackLimit -= 1; 02992 02993 NumberOfPages = (PFN_NUMBER) (StackLimit - NewLimit + 1); 02994 02995 LOCK_PFN (OldIrql); 02996 02997 if (MmResidentAvailablePages <= (SPFN_NUMBER)NumberOfPages) { 02998 UNLOCK_PFN (OldIrql); 02999 return STATUS_NO_MEMORY; 03000 } 03001 03002 // 03003 // Note MmResidentAvailablePages must be charged before calling 03004 // MiEnsureAvailablePageOrWait as it may let go of the PFN lock. 03005 // 03006 03007 MmResidentAvailablePages -= NumberOfPages; 03008 MM_BUMP_COUNTER(11, NumberOfPages); 03009 03010 while (StackLimit >= NewLimit) { 03011 03012 ASSERT (StackLimit->u.Hard.Valid == 0); 03013 03014 MiEnsureAvailablePageOrWait (NULL, NULL); 03015 PageFrameIndex = MiRemoveAnyPage (MI_GET_PAGE_COLOR_FROM_PTE (StackLimit)); 03016 StackLimit->u.Long = MM_KERNEL_DEMAND_ZERO_PTE; 03017 03018 #ifdef PROTECT_KSTACKS 03019 StackLimit->u.Soft.Protection = MM_KSTACK_OUTSWAPPED; 03020 #endif 03021 03022 MiInitializePfn (PageFrameIndex, StackLimit, 1); 03023 03024 MI_MAKE_VALID_PTE (TempPte, 03025 PageFrameIndex, 03026 MM_READWRITE, 03027 StackLimit ); 03028 03029 MI_SET_PTE_DIRTY (TempPte); 03030 *StackLimit = TempPte; 03031 StackLimit -= 1; 03032 } 03033 03034 MmKernelStackResident += NumberOfPages; 03035 UNLOCK_PFN (OldIrql); 03036 03037 #if DBG 03038 ASSERT (NewLimit->u.Hard.Valid == 1); 03039 if (NewLimit != EndStack) { 03040 ASSERT ((NewLimit - 1)->u.Hard.Valid == 0); 03041 } 03042 #endif 03043 03044 Thread->Tcb.StackLimit = MiGetVirtualAddressMappedByPte (NewLimit); 03045 03046 PERFINFO_GROW_STACK(Thread); 03047 03048 return STATUS_SUCCESS; 03049 }
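A hedged sketch of the calling pattern (the address of a local variable stands in for the current stack pointer; real callers pass their actual stack location):

    ULONG StackLocal;
    NTSTATUS Status;

    //
    // Ensure KERNEL_LARGE_STACK_COMMIT bytes are committed below the
    // current stack position before entering a deep call chain.
    //

    Status = MmGrowKernelStack (&StackLocal);

    if (!NT_SUCCESS (Status)) {
        //
        // STATUS_STACK_OVERFLOW - the stack reserve would be exceeded.
        // STATUS_NO_MEMORY      - not enough resident pages available.
        //
    }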

VOID MmHibernateInformation ( IN PVOID MemoryMap,
                              OUT PULONG_PTR HiberVa,
                              OUT PPHYSICAL_ADDRESS HiberPte )
 

Definition at line 1503 of file mmsup.c.

References MiGetVirtualAddressMappedByPte, MmCrashDumpPte, MmGetPhysicalAddress(), PO_MEM_CLONE, and PoSetHiberRange().

{
    //
    // Mark the PTE page where the 16 dump PTEs reside as needing to be cloned.
    //

    PoSetHiberRange (
        MemoryMap,
        PO_MEM_CLONE,
        MmCrashDumpPte,
        1,
        ' etP'
        );

    //
    // Return the dump PTEs to the loader (as it needs to use them
    // to map its relocation code into the kernel space on the
    // final bit of restoring memory).
    //

    *HiberVa = (ULONG_PTR) MiGetVirtualAddressMappedByPte(MmCrashDumpPte);
    *HiberPte = MmGetPhysicalAddress(MmCrashDumpPte);
}

VOID MmInitializeMemoryLimits ( IN PLOADER_PARAMETER_BLOCK LoaderBlock,
                                IN PBOOLEAN IncludedType,
                                OUT PPHYSICAL_MEMORY_DESCRIPTOR Memory )
 

Definition at line 1582 of file mminit.c.

References ASSERT, _MEMORY_ALLOCATION_DESCRIPTOR::BasePage, FALSE, _MEMORY_ALLOCATION_DESCRIPTOR::ListEntry, LoaderMaximum, _MEMORY_ALLOCATION_DESCRIPTOR::MemoryType, _MEMORY_ALLOCATION_DESCRIPTOR::PageCount, TotalPages, and TRUE.

Referenced by IopInitializeResourceMap(), and MmInitSystem().

01590 : 01591 01592 This function walks through the loader block's memory 01593 descriptor list and builds a list of contiguous physical 01594 memory blocks of the desired types. 01595 01596 Arguments: 01597 01598 LoaderBlock - Supplies a pointer the system loader block. 01599 01600 IncludeType - Array of BOOLEANS of size LoaderMaximum. 01601 TRUE means include this type of memory in return. 01602 01603 Memory - Returns the physical memory blocks. 01604 01605 Return Value: 01606 01607 None. 01608 01609 Environment: 01610 01611 Kernel Mode Only. System initialization. 01612 01613 --*/ 01614 { 01615 01616 PMEMORY_ALLOCATION_DESCRIPTOR MemoryDescriptor; 01617 PLIST_ENTRY NextMd; 01618 PFN_NUMBER i; 01619 PFN_NUMBER LowestFound; 01620 PFN_NUMBER Found; 01621 PFN_NUMBER Merged; 01622 PFN_NUMBER NextPage; 01623 PFN_NUMBER TotalPages; 01624 01625 TotalPages = 0; 01626 01627 // 01628 // Walk through the memory descriptors and build the physical memory list. 01629 // 01630 01631 LowestFound = 0; 01632 Memory->Run[0].BasePage = 0xffffffff; 01633 NextPage = 0xffffffff; 01634 Memory->Run[0].PageCount = 0; 01635 i = 0; 01636 01637 do { 01638 Merged = FALSE; 01639 Found = FALSE; 01640 NextMd = LoaderBlock->MemoryDescriptorListHead.Flink; 01641 01642 while (NextMd != &LoaderBlock->MemoryDescriptorListHead) { 01643 01644 MemoryDescriptor = CONTAINING_RECORD(NextMd, 01645 MEMORY_ALLOCATION_DESCRIPTOR, 01646 ListEntry); 01647 01648 if (MemoryDescriptor->MemoryType < LoaderMaximum && 01649 IncludeType [MemoryDescriptor->MemoryType] ) { 01650 01651 // 01652 // Try to merge runs. 01653 // 01654 01655 if (MemoryDescriptor->BasePage == NextPage) { 01656 ASSERT (MemoryDescriptor->PageCount != 0); 01657 Memory->Run[i - 1].PageCount += MemoryDescriptor->PageCount; 01658 NextPage += MemoryDescriptor->PageCount; 01659 TotalPages += MemoryDescriptor->PageCount; 01660 Merged = TRUE; 01661 Found = TRUE; 01662 break; 01663 } 01664 01665 if (MemoryDescriptor->BasePage >= LowestFound) { 01666 if (Memory->Run[i].BasePage > MemoryDescriptor->BasePage) { 01667 Memory->Run[i].BasePage = MemoryDescriptor->BasePage; 01668 Memory->Run[i].PageCount = MemoryDescriptor->PageCount; 01669 } 01670 Found = TRUE; 01671 } 01672 } 01673 NextMd = MemoryDescriptor->ListEntry.Flink; 01674 } 01675 01676 if (!Merged && Found) { 01677 NextPage = Memory->Run[i].BasePage + Memory->Run[i].PageCount; 01678 TotalPages += Memory->Run[i].PageCount; 01679 i += 1; 01680 } 01681 Memory->Run[i].BasePage = 0xffffffff; 01682 LowestFound = NextPage; 01683 01684 } while (Found); 01685 ASSERT (i <= Memory->NumberOfRuns); 01686 Memory->NumberOfRuns = (ULONG)i; 01687 Memory->NumberOfPages = TotalPages; 01688 01689 return; 01690 }
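The caller supplies both the include-type filter and a descriptor large enough for the resulting runs; NumberOfRuns must be primed with the capacity. This sketch mirrors the setup performed by MmInitSystem (shown below) and is illustrative only:

    ULONG MemoryAlloc[(sizeof(PHYSICAL_MEMORY_DESCRIPTOR) +
                       sizeof(PHYSICAL_MEMORY_RUN) * MAX_PHYSICAL_MEMORY_FRAGMENTS) /
                      sizeof(ULONG)];
    PPHYSICAL_MEMORY_DESCRIPTOR Memory;
    BOOLEAN IncludeType[LoaderMaximum];
    ULONG i;

    Memory = (PPHYSICAL_MEMORY_DESCRIPTOR) &MemoryAlloc;
    Memory->NumberOfRuns = MAX_PHYSICAL_MEMORY_FRAGMENTS;   // capacity for the output runs

    for (i = 0; i < LoaderMaximum; i += 1) {
        IncludeType[i] = TRUE;
    }

    IncludeType[LoaderBad] = FALSE;
    IncludeType[LoaderFirmwarePermanent] = FALSE;
    IncludeType[LoaderSpecialMemory] = FALSE;

    MmInitializeMemoryLimits (LoaderBlock, IncludeType, Memory);

    //
    // Memory->Run[0 .. NumberOfRuns - 1] now holds merged, ascending page
    // runs and Memory->NumberOfPages the total page count.
    //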

NTSTATUS MmInitializeProcessAddressSpace ( IN PEPROCESS ProcessToInitialize,
                                           IN PEPROCESS ProcessToClone OPTIONAL,
                                           IN PVOID SectionToMap OPTIONAL,
                                           OUT PUNICODE_STRING *AuditName OPTIONAL )
 

Definition at line 911 of file procsup.c.

References _2gb, ASSERT, DbgPrint, ExAllocatePoolWithTag, ExFreePool(), ExInitializeFastMutex, ExVerifySuite(), FALSE, HYPER_SPACE, KeAttachProcess(), KeDetachProcess(), KeInitializeSpinLock(), KeQuerySystemTime(), _LOCK_HEADER::ListHead, LOCK_HEADER, LOCK_PFN, MI_GET_PAGE_FRAME_FROM_PTE, MI_MAKE_VALID_PTE, MI_SET_PTE_DIRTY, MI_WRITE_VALID_PTE, MiAllocateVad(), MiCheckForConflictingVad, MiCloneProcessAddressSpace(), MiGetPdeAddress, MiGetPpeAddress, MiGetPteAddress, MiInitializeAlternateTable(), MiInitializePfn(), MiInitializeWorkingSetList(), MiInsertVad(), MM_DBG_PTE_UPDATE, MM_DEMAND_ZERO_WRITE_PTE, MM_READWRITE, MmMapViewOfSection(), MmTrackLockedPages, MmVirtualBias, MmWorkingSetList, n, NonPagedPool, NTSTATUS(), NULL, PAGE_SHIFT, PSECTION, PWOW64_PROCESS, Status, TRUE, UNLOCK_PFN, VadTreeWalk(), WOW64_PROCESS, and ZERO_LARGE.

Referenced by MiInitMachineDependent(), and PspCreateProcess().

00920 : 00921 00922 This routine initializes the working set and mutexes within an 00923 newly created address space to support paging. 00924 00925 No page faults may occur in a new process until this routine is 00926 completed. 00927 00928 Arguments: 00929 00930 ProcessToInitialize - Supplies a pointer to the process to initialize. 00931 00932 ProcessToClone - Optionally supplies a pointer to the process whose 00933 address space should be copied into the 00934 ProcessToInitialize address space. 00935 00936 SectionToMap - Optionally supplies a section to map into the newly 00937 initialized address space. 00938 00939 Only one of ProcessToClone and SectionToMap may be specified. 00940 00941 00942 Return Value: 00943 00944 None. 00945 00946 00947 Environment: 00948 00949 Kernel mode. APCs Disabled. 00950 00951 --*/ 00952 00953 00954 { 00955 PMMPTE PointerPte; 00956 MMPTE TempPte; 00957 PVOID BaseAddress; 00958 SIZE_T ViewSize; 00959 KIRQL OldIrql; 00960 NTSTATUS Status; 00961 PFN_NUMBER PpePhysicalPage; 00962 PFN_NUMBER PdePhysicalPage; 00963 PFN_NUMBER PageContainingWorkingSet; 00964 LARGE_INTEGER SectionOffset; 00965 PSECTION_IMAGE_INFORMATION ImageInfo; 00966 PMMVAD VadShare; 00967 PMMVAD VadReserve; 00968 PLOCK_HEADER LockedPagesHeader; 00969 #if defined (_X86PAE_) 00970 ULONG i; 00971 PFN_NUMBER PdePhysicalPage2; 00972 #endif 00973 #if defined (_WIN64) 00974 PWOW64_PROCESS Wow64Process; 00975 #endif 00976 00977 // 00978 // Initialize Working Set Mutex in process header. 00979 // 00980 00981 KeAttachProcess (&ProcessToInitialize->Pcb); 00982 ProcessToInitialize->AddressSpaceInitialized = 2; 00983 00984 ExInitializeFastMutex(&ProcessToInitialize->AddressCreationLock); 00985 00986 ExInitializeFastMutex(&ProcessToInitialize->WorkingSetLock); 00987 00988 // 00989 // NOTE: The process block has been zeroed when allocated, so 00990 // there is no need to zero fields and set pointers to NULL. 00991 // 00992 00993 ASSERT (ProcessToInitialize->VadRoot == NULL); 00994 00995 KeQuerySystemTime(&ProcessToInitialize->Vm.LastTrimTime); 00996 ProcessToInitialize->Vm.VmWorkingSetList = MmWorkingSetList; 00997 00998 // 00999 // Obtain a page to map the working set and initialize the 01000 // working set. Get PFN mutex to allocate physical pages. 01001 // 01002 01003 LOCK_PFN (OldIrql); 01004 01005 // 01006 // Initialize the PFN database for the Page Directory and the 01007 // PDE which maps hyper space. 
01008 // 01009 01010 #if defined (_WIN64) 01011 01012 PointerPte = MiGetPteAddress (PDE_TBASE); 01013 PpePhysicalPage = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 01014 01015 MiInitializePfn (PpePhysicalPage, PointerPte, 1); 01016 01017 PointerPte = MiGetPpeAddress (HYPER_SPACE); 01018 MiInitializePfn (MI_GET_PAGE_FRAME_FROM_PTE (PointerPte), PointerPte, 1); 01019 01020 #if defined(_IA64_) 01021 PointerPte = MiGetPteAddress (PDE_STBASE); 01022 MiInitializePfn (MI_GET_PAGE_FRAME_FROM_PTE (PointerPte), PointerPte, 1); 01023 #endif 01024 01025 #else 01026 01027 #if defined (_X86PAE_) 01028 PointerPte = MiGetPdeAddress (PDE_BASE); 01029 #else 01030 PointerPte = MiGetPteAddress (PDE_BASE); 01031 #endif 01032 PdePhysicalPage = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 01033 01034 MiInitializePfn (PdePhysicalPage, PointerPte, 1); 01035 01036 #endif 01037 01038 PointerPte = MiGetPdeAddress (HYPER_SPACE); 01039 MiInitializePfn (MI_GET_PAGE_FRAME_FROM_PTE (PointerPte), PointerPte, 1); 01040 01041 #if defined (_X86PAE_) 01042 01043 for (i = 0; i < PD_PER_SYSTEM - 1; i += 1) { 01044 PointerPte = MiGetPteAddress (PDE_BASE + (i << PAGE_SHIFT)); 01045 PdePhysicalPage2 = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 01046 MiInitializePfn (PdePhysicalPage2, PointerPte, 1); 01047 } 01048 01049 PointerPte = MiGetPdeAddress (HYPER_SPACE2); 01050 MiInitializePfn (MI_GET_PAGE_FRAME_FROM_PTE (PointerPte), PointerPte, 1); 01051 #endif 01052 01053 PageContainingWorkingSet = ProcessToInitialize->WorkingSetPage; 01054 01055 PointerPte = MiGetPteAddress (MmWorkingSetList); 01056 PointerPte->u.Long = MM_DEMAND_ZERO_WRITE_PTE; 01057 01058 MiInitializePfn (PageContainingWorkingSet, PointerPte, 1); 01059 01060 UNLOCK_PFN (OldIrql); 01061 01062 MI_MAKE_VALID_PTE (TempPte, 01063 PageContainingWorkingSet, 01064 MM_READWRITE, 01065 PointerPte ); 01066 01067 MI_SET_PTE_DIRTY (TempPte); 01068 MI_WRITE_VALID_PTE (PointerPte, TempPte); 01069 01070 ASSERT (ProcessToInitialize->LockedPagesList == NULL); 01071 01072 if (MmTrackLockedPages == TRUE) { 01073 LockedPagesHeader = ExAllocatePoolWithTag (NonPagedPool, 01074 sizeof(LOCK_HEADER), 01075 'xTmM'); 01076 01077 if (LockedPagesHeader) { 01078 RtlZeroMemory (LockedPagesHeader, sizeof(LOCK_HEADER)); 01079 ProcessToInitialize->LockedPagesList = (PVOID)LockedPagesHeader; 01080 InitializeListHead (&LockedPagesHeader->ListHead); 01081 } 01082 } 01083 01084 MiInitializeWorkingSetList (ProcessToInitialize); 01085 01086 KeInitializeSpinLock (&ProcessToInitialize->AweLock); 01087 InitializeListHead (&ProcessToInitialize->PhysicalVadList); 01088 01089 // 01090 // Page faults may be taken now. 01091 // 01092 // If the system has been biased to an alternate base address to allow 01093 // 3gb of user address space and a process is not being cloned, then 01094 // create a VAD for the shared memory page. 01095 // 01096 01097 #if defined(_X86_) && defined(MM_SHARED_USER_DATA_VA) 01098 01099 if ((MmVirtualBias != 0) && (ProcessToClone == NULL)) { 01100 01101 // 01102 // Allocate a VAD to map the shared memory page. If a VAD cannot be 01103 // allocated, then detach from the target process and return a failure 01104 // status. This VAD is marked as not deletable. 
01105 // 01106 01107 VadShare = MiAllocateVad (MM_SHARED_USER_DATA_VA, 01108 MM_SHARED_USER_DATA_VA, 01109 FALSE); 01110 01111 if (VadShare == NULL) { 01112 KeDetachProcess (); 01113 return STATUS_NO_MEMORY; 01114 } 01115 01116 // 01117 // If a section is being mapped and the executable is not large 01118 // address space aware, then create a VAD that reserves the address 01119 // space between 2gb and the highest user address. 01120 // 01121 01122 if (SectionToMap != NULL) { 01123 if (!((PSECTION)SectionToMap)->u.Flags.Image) { 01124 KeDetachProcess (); 01125 ExFreePool (VadShare); 01126 return STATUS_SECTION_NOT_IMAGE; 01127 } 01128 ImageInfo = ((PSECTION)SectionToMap)->Segment->ImageInformation; 01129 if ((ExVerifySuite(Enterprise) == FALSE) || 01130 ((ImageInfo->ImageCharacteristics & IMAGE_FILE_LARGE_ADDRESS_AWARE) == 0)) { 01131 01132 // 01133 // Allocate a VAD to map the address space between 2gb and 01134 // the highest user address. If a VAD can not be allocated, 01135 // then deallocate the shared address space VAD, detach from 01136 // the target process, and return a failure status. 01137 // This VAD is marked as not deletable. 01138 // 01139 01140 VadReserve = MiAllocateVad (_2gb, 01141 (ULONG_PTR)MM_HIGHEST_USER_ADDRESS, 01142 FALSE); 01143 01144 if (VadReserve == NULL) { 01145 KeDetachProcess (); 01146 ExFreePool (VadShare); 01147 return STATUS_NO_MEMORY; 01148 } 01149 01150 // 01151 // Insert the VAD. 01152 // 01153 // N.B. No exception can occur since there is no commit charge. 01154 // 01155 01156 MiInsertVad (VadReserve); 01157 } 01158 } 01159 01160 // 01161 // Insert the VAD. 01162 // 01163 // N.B. No exception can occur since there is no commit charge. 01164 // 01165 01166 MiInsertVad (VadShare); 01167 } 01168 01169 #endif 01170 01171 #if defined(_WIN64) 01172 01173 if (ProcessToClone == NULL) { 01174 01175 // 01176 // Reserve the address space just below KUSER_SHARED_DATA as the 01177 // compatibility area. This range can be unreserved by user mode 01178 // code such as WOW64 or csrss. 01179 // 01180 01181 ASSERT(MiCheckForConflictingVad(WOW64_COMPATIBILITY_AREA_ADDRESS, MM_SHARED_USER_DATA_VA) == NULL); 01182 01183 VadShare = MiAllocateVad (WOW64_COMPATIBILITY_AREA_ADDRESS, 01184 MM_SHARED_USER_DATA_VA, 01185 TRUE); 01186 01187 if (VadShare == NULL) { 01188 KeDetachProcess (); 01189 return STATUS_NO_MEMORY; 01190 } 01191 01192 // 01193 // Reserve the memory above 2GB to prevent 32 bit (WOW64) process 01194 // access. 01195 // 01196 01197 if (SectionToMap != NULL) { 01198 if (!((PSECTION)SectionToMap)->u.Flags.Image) { 01199 KeDetachProcess (); 01200 ExFreePool (VadShare); 01201 return STATUS_SECTION_NOT_IMAGE; 01202 } 01203 ImageInfo = ((PSECTION)SectionToMap)->Segment->ImageInformation; 01204 01205 if ((ImageInfo->ImageCharacteristics & IMAGE_FILE_LARGE_ADDRESS_AWARE) == 0 || 01206 #if defined(_AXP64_) 01207 ImageInfo->Machine == IMAGE_FILE_MACHINE_ALPHA || 01208 #endif 01209 ImageInfo->Machine == IMAGE_FILE_MACHINE_I386) { 01210 01211 // 01212 // Allocate a VAD to reserve the address space between 2gb and 01213 // the highest user address. If a VAD cannot be allocated, 01214 // then deallocate the compatibility VAD, detach from the target 01215 // process and return a failure status. 01216 // 01217 01218 VadReserve = MiAllocateVad (_2gb, 01219 (ULONG_PTR)MM_HIGHEST_USER_ADDRESS, 01220 TRUE); 01221 01222 if (VadReserve == NULL) { 01223 KeDetachProcess (); 01224 ExFreePool (VadShare); 01225 return STATUS_NO_MEMORY; 01226 } 01227 01228 // 01229 // Insert the VAD. 
01230 // 01231 // N.B. No exception can occur since there is no commit charge. 01232 // 01233 01234 MiInsertVad (VadReserve); 01235 01236 // 01237 // Initialize Wow64 Process structure 01238 // 01239 01240 Wow64Process = 01241 (PWOW64_PROCESS) ExAllocatePoolWithTag (NonPagedPool, 01242 sizeof(WOW64_PROCESS), 01243 'WowM'); 01244 01245 if (Wow64Process == (PWOW64_PROCESS) NULL) { 01246 KeDetachProcess (); 01247 return STATUS_NO_MEMORY; 01248 } 01249 01250 RtlZeroMemory(Wow64Process, sizeof(WOW64_PROCESS)); 01251 01252 ProcessToInitialize->Wow64Process = Wow64Process; 01253 01254 #if defined(_MIALT4K_) 01255 01256 // 01257 // Initialize the alternate page table for the 4kb page function 01258 // 01259 01260 Status = MiInitializeAlternateTable (ProcessToInitialize); 01261 if (Status != STATUS_SUCCESS) { 01262 KeDetachProcess (); 01263 return Status; 01264 } 01265 01266 #endif 01267 } 01268 } 01269 01270 // 01271 // Insert the VAD. 01272 // 01273 // N.B. No exception can occur since there is no commit charge. 01274 // 01275 01276 MiInsertVad (VadShare); 01277 } 01278 01279 #endif 01280 01281 if (SectionToMap != (PSECTION)NULL) { 01282 01283 // 01284 // Map the specified section into the address space of the 01285 // process but only if it is an image section. 01286 // 01287 01288 if (!((PSECTION)SectionToMap)->u.Flags.Image) { 01289 Status = STATUS_SECTION_NOT_IMAGE; 01290 } else { 01291 UNICODE_STRING UnicodeString; 01292 ULONG n; 01293 PWSTR Src; 01294 PCHAR Dst; 01295 01296 UnicodeString = ((PSECTION)SectionToMap)->Segment->ControlArea->FilePointer->FileName; 01297 Src = (PWSTR)((PCHAR)UnicodeString.Buffer + UnicodeString.Length); 01298 n = 0; 01299 if (UnicodeString.Buffer != NULL) { 01300 while (Src > UnicodeString.Buffer) { 01301 if (*--Src == OBJ_NAME_PATH_SEPARATOR) { 01302 Src += 1; 01303 break; 01304 } 01305 else { 01306 n += 1; 01307 } 01308 } 01309 } 01310 Dst = ProcessToInitialize->ImageFileName; 01311 if (n >= sizeof( ProcessToInitialize->ImageFileName )) { 01312 n = sizeof( ProcessToInitialize->ImageFileName ) - 1; 01313 } 01314 01315 while (n--) { 01316 *Dst++ = (UCHAR)*Src++; 01317 } 01318 *Dst = '\0'; 01319 01320 if (AuditName) { 01321 *AuditName = &((PSECTION)SectionToMap)->Segment->ControlArea->FilePointer->FileName ; 01322 } 01323 01324 ProcessToInitialize->SubSystemMajorVersion = 01325 (UCHAR)((PSECTION)SectionToMap)->Segment->ImageInformation->SubSystemMajorVersion; 01326 ProcessToInitialize->SubSystemMinorVersion = 01327 (UCHAR)((PSECTION)SectionToMap)->Segment->ImageInformation->SubSystemMinorVersion; 01328 01329 BaseAddress = NULL; 01330 ViewSize = 0; 01331 ZERO_LARGE (SectionOffset); 01332 01333 Status = MmMapViewOfSection ( (PSECTION)SectionToMap, 01334 ProcessToInitialize, 01335 &BaseAddress, 01336 0, // ZeroBits, 01337 0, // CommitSize, 01338 &SectionOffset, //SectionOffset, 01339 &ViewSize, 01340 ViewShare, //InheritDisposition, 01341 0, //allocation type 01342 PAGE_READWRITE // Protect 01343 ); 01344 01345 ProcessToInitialize->SectionBaseAddress = BaseAddress; 01346 01347 #if DBG 01348 if (MmDebug & MM_DBG_PTE_UPDATE) { 01349 DbgPrint("mapped image section vads\n"); 01350 VadTreeWalk(ProcessToInitialize->VadRoot); 01351 } 01352 #endif //DBG 01353 } 01354 01355 KeDetachProcess (); 01356 return Status; 01357 } 01358 01359 if (ProcessToClone != (PEPROCESS)NULL) { 01360 #if DEVL 01361 strcpy( ProcessToInitialize->ImageFileName, ProcessToClone->ImageFileName ); 01362 #endif // DEVL 01363 01364 // 01365 // Clone the address space of the specified process. 
01366 // 01367 01368 // 01369 // As the page directory and page tables are private to each 01370 // process, the physical pages which map the directory page 01371 // and the page table usage must be mapped into system space 01372 // so they can be updated while in the context of the process 01373 // we are cloning. 01374 // 01375 01376 KeDetachProcess (); 01377 return MiCloneProcessAddressSpace (ProcessToClone, 01378 ProcessToInitialize, 01379 #if defined (_WIN64) 01380 PpePhysicalPage, 01381 #else 01382 PdePhysicalPage, 01383 #endif 01384 PageContainingWorkingSet 01385 ); 01386 01387 } 01388 01389 // 01390 // System Process. 01391 // 01392 01393 KeDetachProcess (); 01394 return STATUS_SUCCESS; 01395 }

BOOLEAN MmInitSystem ( IN ULONG Phase,
                       IN PLOADER_PARAMETER_BLOCK LoaderBlock,
                       IN PPHYSICAL_MEMORY_DESCRIPTOR PhysicalMemoryBlock )
 

Definition at line 184 of file mminit.c.

References ActiveAndValid, _MMPAGE_FILE_EXPANSION::ActualExpansion, ASSERT, _EPROCESS::AweLock, _PHYSICAL_MEMORY_RUN::BasePage, BBTPagesToReserve, CHAR, DbgPrint, _MMPAGE_FILE_EXPANSION::Event, ExAcquireResourceExclusive, ExAllocatePoolWithTag, ExInitializeFastMutex, ExInitializeResource, ExpMultiUserTS, ExReleaseResource, ExVerifySuite(), FALSE, _MMPAGE_FILE_EXPANSION::InProgress, KeBalanceSetManager(), KeInitializeDpc(), KeInitializeEvent, KeInitializeMutant(), KeInitializeSpinLock(), KeInitializeTimerEx(), KeSwapProcessOrStack(), KSEG0_BASE, L, _MMWORKING_SET_EXPANSION_HEAD::ListHead, _MMINPAGE_SUPPORT_LIST::ListHead, _MMEVENT_COUNT_LIST::ListHead, LoaderBad, LoaderBBTMemory, LoaderFirmwarePermanent, LoaderMaximum, LoaderSpecialMemory, LOCK_PFN, MAX_PHYSICAL_MEMORY_FRAGMENTS, MI_EXTEND_ANY_PAGEFILE, MI_GET_PAGE_COLOR_FROM_PTE, MI_GET_PAGE_FRAME_FROM_PTE, MI_MAKE_VALID_PTE, MI_PFN_ELEMENT, MI_SESSION_IMAGE_SIZE, MI_SESSION_SPACE_END, MI_SESSION_SPACE_TOTAL_SIZE, MI_WRITE_VALID_PTE, MiAddSystemPtes(), MiAdjustWorkingSetManagerParameters(), MiBuildPagedPool(), MiChargeCommitmentCantExpand(), MiDumpPfn(), MiDumpValidAddresses(), MiEnableKernelVerifier(), MiEnablePagingTheExecutive(), MiFillMemoryPte, MiFormatPte(), MiGetPdeAddress, MiGetPpeAddress, MiGetPteAddress, MiGetSubsectionAddress, MiGetSubsectionAddressForPte, MiGetVirtualAddressMappedByPte, MiHighestUserPde, MiHighestUserPte, MiHydra, MiInitializeDriverVerifierList(), MiInitializeIoTrackers(), MiInitializeLoadedModuleList(), MiInitializePfn(), MiInitializeSessionIds(), MiInitializeSessionWsSupport(), MiInitializeSpecialPoolCriteria(), MiInitializeSystemCache(), MiInitMachineDependent(), MiIsPteOnPdeBoundary, MiMapBBTMemory(), MiMappedPagesTooOldEvent, MiMaximumSystemCacheSizeExtra, MiMaximumWorkingSet, MiMergeMemoryLimit(), MiModifiedPageLife, MiModifiedPageWriter(), MiModifiedPageWriterTimer, MiModifiedPageWriterTimerDispatch(), MiModifiedPageWriterTimerDpc, MiProtoAddressForPte, MiPteStr, MiPteToProto, MiReloadBootLoadedDrivers(), MiRemoveAnyPage(), MiRemoveZeroPage(), MiRequestedSystemPtes, MiSectionInitialization(), MiSessionBasePte, MiSessionLastPte, MiSessionWideInitializeAddresses(), MiSystemCacheEndExtra, MiSystemCacheStartExtra, MiSystemViewStart, MiTriageSystem(), MiTrimInProgressCount, MiWriteProtectSystemImage(), Mm64BitPhysicalAddress, MM_BOOT_IMAGE_SIZE, MM_DBG_CHECK_PFN_LOCK, MM_DBG_COMMIT_EXTRA_SYSTEM_PTES, MM_DBG_DUMP_BOOT_PTES, MM_DEFAULT_SYSTEM_PTES, MM_DEMAND_ZERO_WRITE_PTE, MM_FLUID_PHYSICAL_PAGES, MM_MAXIMUM_SYSTEM_PTES, MM_MAXIMUM_WORKING_SET, MM_MEDIUM_SYSTEM, MM_MINIMUM_SYSTEM_PTES, MM_NONPAGED_POOL_END, MM_READONLY, MM_SESSION_SPACE_DEFAULT, MM_SMALL_SYSTEM, MM_SPECIAL_POOL_PTES, MM_SYSTEM_CACHE_END, MM_SYSTEM_CACHE_START, MM_SYSTEM_CACHE_START_EXTRA, MM_SYSTEM_VIEW_SIZE, MM_SYSTEM_VIEW_SIZE_IF_HYDRA, MM_SYSTEM_VIEW_START, MM_SYSTEM_VIEW_START_IF_HYDRA, MM_TRACK_COMMIT, MmAttemptForCantExtend, MmAvailablePages, MmAvailablePagesEvent, MmAvailablePagesEventHigh, MmChargeCommitmentLock, MmCodeClusterSize, MmCollidedFlushEvent, MmCollidedLockEvent, MmCriticalSectionTimeout, MmCritsectTimeoutSeconds, MmDataClusterSize, MmDontVerifyRandomDrivers, MmDynamicMemoryMutex, MmEnforceWriteProtection, MmEventCountList, MmExpansionLock, MmHardFaultNotifyRoutine, MmHeapDeCommitFreeBlockThreshold, MmHeapDeCommitTotalFreeThreshold, MmHeapSegmentCommit, MmHeapSegmentReserve, MmHighestUserAddress, MmHighSectionBase, MmImageMappingPteEvent, MmInitializeMemoryLimits(), MmInPageSupportList, MmLargeSystem, MmLargeSystemCache, 
MmLoadedUserImageList, MmLockConflictList, MmLockPagesLimit, MmLockPagesPercentage, MmMappedFileIoComplete, MmMaximumDeadKernelStacks, MmMaximumNonPagedPoolInBytes, MmMaximumWorkingSetSize, MmMaxUnusedSegmentNonPagedPoolUsage, MmMaxUnusedSegmentPagedPoolUsage, MmMediumSystem, MmMinimumFreePages, MmModifiedPageLifeInSeconds, MmModifiedPageMaximum, MmModifiedPageMinimum, MmModifiedPageWriterEvent, MmMoreThanEnoughFreePages, MmNonPagedSystemStart, MmNumberOfPhysicalPages, MmNumberOfSystemPtes, MmOverCommit, MmPagedPoolEnd, MmPagedPoolStart, MmPageFaultNotifyRoutine, MmPageFileCreationLock, MmPhysicalMemoryBlock, MmProductType, MmProtectFreedNonPagedPool, MmReadClusterSize, MmResidentAvailableAtInit, MmResidentAvailablePages, MmSectionBasedMutex, MmSectionCommitMutex, MmSectionExtendResource, MmSectionExtendSetResource, MmSessionBase, MmSessionSpace, MmSharedUserDataPte, MmSizeOfPagedPoolInBytes, MmSizeOfSystemCacheInPages, MmSmallSystem, MmSnapUnloads, MmSpecialPoolTag, MmSystemCacheEnd, MmSystemCacheStart, MmSystemCacheWorkingSetList, MmSystemCacheWsMaximum, MmSystemCacheWsMinimum, MmSystemLoadLock, MmSystemRangeStart, MmSystemSize, MmSystemWsLock, MmThrottleBottom, MmThrottleTop, MmTotalCommitLimit, MmTotalCommitLimitMaximum, MmTrackLockedPages, MmTrackPtes, MmUnusedSegmentNonPagedPoolReduction, MmUnusedSegmentPagedPoolReduction, MmUnusedSegmentTrimLevel, MmUserProbeAddress, MmVerifyDriverBufferLength, MmVirtualBias, MmWorkingSetExpansionHead, MmWorkingSetManagerEvent, MmZeroingPageEvent, MmZeroingPageThreadActive, NON_PAGED_SYSTEM_END, NonPagedPoolMustSucceed, NT_SUCCESS, NULL, _PHYSICAL_MEMORY_DESCRIPTOR::NumberOfPages, _PHYSICAL_MEMORY_DESCRIPTOR::NumberOfRuns, ObjectAttributes, _MMPFN::OriginalPte, PAGE_SHIFT, PAGE_SIZE, _PHYSICAL_MEMORY_RUN::PageCount, _MMPAGE_FILE_EXPANSION::PageFileNumber, PDE_KTBASE, _EPROCESS::PhysicalVadList, PsCreateSystemThread(), PsGetCurrentProcess, PsLoadedModuleList, PsLoadedModuleResource, PTE_PER_PAGE, PTE_SHIFT, _MMPFN::PteAddress, _MMPFN::PteFrame, _MMPAGE_FILE_EXPANSION::RequestedExpansionSize, RtlImageNtHeader(), _PHYSICAL_MEMORY_DESCRIPTOR::Run, _MMPAGE_FILE_EXPANSION::Segment, SystemPteSpace, ThreadHandle, TRUE, _MMPTE::u, _MMPFN::u2, _MMPFN::u3, UNLOCK_PFN, ValidKernelPde, ValidKernelPte, and ZeroKernelPte.

00192 : 00193 00194 This function is called during Phase 0, phase 1 and at the end 00195 of phase 1 ("phase 2") initialization. 00196 00197 Phase 0 initializes the memory management paging functions, 00198 nonpaged and paged pool, the PFN database, etc. 00199 00200 Phase 1 initializes the section objects, the physical memory 00201 object, and starts the memory management system threads. 00202 00203 Phase 2 frees memory used by the OsLoader. 00204 00205 Arguments: 00206 00207 Phase - System initialization phase. 00208 00209 LoaderBlock - Supplies a pointer to the system loader block. 00210 00211 Return Value: 00212 00213 Returns TRUE if the initialization was successful. 00214 00215 Environment: 00216 00217 Kernel Mode Only. System initialization. 00218 00219 --*/ 00220 00221 { 00222 HANDLE ThreadHandle; 00223 OBJECT_ATTRIBUTES ObjectAttributes; 00224 PMMPTE PointerPte; 00225 PMMPTE PointerPde; 00226 PMMPTE StartPde; 00227 PMMPTE StartPpe; 00228 PMMPTE StartingPte; 00229 PMMPTE EndPde; 00230 PMMPFN Pfn1; 00231 MMPTE Pointer; 00232 PFN_NUMBER i, j; 00233 PFN_NUMBER PageFrameIndex; 00234 PFN_NUMBER DirectoryFrameIndex; 00235 MMPTE TempPte; 00236 KIRQL OldIrql; 00237 LOGICAL First; 00238 PLIST_ENTRY NextEntry; 00239 PLDR_DATA_TABLE_ENTRY DataTableEntry; 00240 ULONG MaximumSystemCacheSize; 00241 ULONG MaximumSystemCacheSizeTotal; 00242 PEPROCESS Process; 00243 PIMAGE_NT_HEADERS NtHeaders; 00244 ULONG_PTR SystemPteMultiplier; 00245 00246 BOOLEAN IncludeType[LoaderMaximum]; 00247 ULONG MemoryAlloc[(sizeof(PHYSICAL_MEMORY_DESCRIPTOR) + 00248 sizeof(PHYSICAL_MEMORY_RUN)*MAX_PHYSICAL_MEMORY_FRAGMENTS) / 00249 sizeof(ULONG)]; 00250 00251 PPHYSICAL_MEMORY_DESCRIPTOR Memory; 00252 00253 // 00254 // Make sure structure alignment is okay. 00255 // 00256 00257 if (Phase == 0) { 00258 MmThrottleTop = 450; 00259 MmThrottleBottom = 127; 00260 00261 // 00262 // Set the highest user address, the system range start address, the 00263 // user probe address, and the virtual bias. 00264 // 00265 00266 #if defined(_AXP64_) || defined(_IA64_) 00267 00268 MmHighestUserAddress = MM_HIGHEST_USER_ADDRESS; 00269 MmUserProbeAddress = MM_USER_PROBE_ADDRESS; 00270 MmSystemRangeStart = MM_SYSTEM_RANGE_START; 00271 00272 #else 00273 00274 MmHighestUserAddress = (PVOID)(KSEG0_BASE - 0x10000 - 1); 00275 MmUserProbeAddress = KSEG0_BASE - 0x10000; 00276 MmSystemRangeStart = (PVOID)KSEG0_BASE; 00277 00278 #endif 00279 00280 MiHighestUserPte = MiGetPteAddress (MmHighestUserAddress); 00281 MiHighestUserPde = MiGetPdeAddress (MmHighestUserAddress); 00282 00283 MmVirtualBias = 0; 00284 00285 // 00286 // Set the highest section base address. 00287 // 00288 // N.B. In 32-bit systems this address must be 2gb or less even for 00289 // systems that run with 3gb enabled. Otherwise, it would not 00290 // be possible to map based sections identically in all processes. 00291 // 00292 00293 MmHighSectionBase = ((PCHAR)MmHighestUserAddress - 0x800000); 00294 00295 if (ExVerifySuite(TerminalServer) == TRUE) { 00296 MiHydra = TRUE; 00297 MiSystemViewStart = MM_SYSTEM_VIEW_START_IF_HYDRA; 00298 MmSessionBase = (ULONG_PTR)MM_SESSION_SPACE_DEFAULT; 00299 00300 } else { 00301 MiSystemViewStart = MM_SYSTEM_VIEW_START; 00302 MiHydra = FALSE; 00303 } 00304 00305 MaximumSystemCacheSize = (MM_SYSTEM_CACHE_END - MM_SYSTEM_CACHE_START) >> PAGE_SHIFT; 00306 00307 // 00308 // If the system has been biased to an alternate base address to 00309 // allow 3gb of user address space, then set the user probe address 00310 // and the maximum system cache size. 
00311 // 00312 00313 #if defined(_X86_) 00314 00315 MmVirtualBias = LoaderBlock->u.I386.VirtualBias; 00316 00317 if (MmVirtualBias != 0) { 00318 MmHighestUserAddress = ((PCHAR)MmHighestUserAddress + 0x40000000); 00319 MmSystemRangeStart = ((PCHAR)MmSystemRangeStart + 0x40000000); 00320 MmUserProbeAddress += 0x40000000; 00321 MiMaximumWorkingSet += 0x40000000 >> PAGE_SHIFT; 00322 00323 MiHighestUserPte = MiGetPteAddress (MmHighestUserAddress); 00324 MiHighestUserPde = MiGetPdeAddress (MmHighestUserAddress); 00325 00326 MaximumSystemCacheSize -= MM_BOOT_IMAGE_SIZE >> PAGE_SHIFT; 00327 00328 if (MiHydra == TRUE) { 00329 00330 // 00331 // Moving to 3GB means moving session space to just above 00332 // the system cache (and lowering the system cache max size 00333 // accordingly). 00334 // 00335 00336 MaximumSystemCacheSize -= (MI_SESSION_SPACE_TOTAL_SIZE + MM_SYSTEM_VIEW_SIZE_IF_HYDRA) >> PAGE_SHIFT; 00337 00338 MiSystemViewStart = (ULONG_PTR)(MM_SYSTEM_CACHE_START + 00339 (MaximumSystemCacheSize << PAGE_SHIFT)); 00340 00341 MmSessionBase = MiSystemViewStart + MM_SYSTEM_VIEW_SIZE_IF_HYDRA + MM_BOOT_IMAGE_SIZE; 00342 00343 } else { 00344 MaximumSystemCacheSize -= MM_SYSTEM_VIEW_SIZE >> PAGE_SHIFT; 00345 MiSystemViewStart = (ULONG_PTR)(MM_SYSTEM_CACHE_START + 00346 (MaximumSystemCacheSize << PAGE_SHIFT) + 00347 MM_BOOT_IMAGE_SIZE); 00348 } 00349 } 00350 00351 #else 00352 00353 if (MiHydra == TRUE) { 00354 MaximumSystemCacheSize -= MM_SYSTEM_VIEW_SIZE_IF_HYDRA >> PAGE_SHIFT; 00355 MiSystemViewStart = MM_SYSTEM_VIEW_START_IF_HYDRA; 00356 } 00357 00358 #endif 00359 00360 if (MiHydra == TRUE) { 00361 MmSessionSpace = (PMM_SESSION_SPACE)((ULONG_PTR)MmSessionBase + MI_SESSION_IMAGE_SIZE); 00362 00363 MiSessionBasePte = MiGetPteAddress (MmSessionBase); 00364 MiSessionLastPte = MiGetPteAddress (MI_SESSION_SPACE_END); 00365 } 00366 00367 // 00368 // A few sanity checks to ensure things are as they should be. 00369 // 00370 00371 #if DBG 00372 if ((sizeof(MMWSL) % 8) != 0) { 00373 DbgPrint("working set list is not a quadword sized structure\n"); 00374 } 00375 00376 if ((sizeof(CONTROL_AREA) % 8) != 0) { 00377 DbgPrint("control area list is not a quadword sized structure\n"); 00378 } 00379 00380 if ((sizeof(SUBSECTION) % 8) != 0) { 00381 DbgPrint("subsection list is not a quadword sized structure\n"); 00382 } 00383 00384 // 00385 // Some checks to make sure prototype PTEs can be placed in 00386 // either paged or nonpaged (prototype PTEs for paged pool are here) 00387 // can be put into pte format. 
00388 // 00389 00390 PointerPte = (PMMPTE)MmPagedPoolStart; 00391 Pointer.u.Long = MiProtoAddressForPte (PointerPte); 00392 TempPte = Pointer; 00393 PointerPde = MiPteToProto(&TempPte); 00394 if (PointerPte != PointerPde) { 00395 DbgPrint("unable to map start of paged pool as prototype pte %p %p\n", 00396 PointerPde, 00397 PointerPte); 00398 } 00399 00400 PointerPte = 00401 (PMMPTE)((ULONG_PTR)MM_NONPAGED_POOL_END & ~((1 << PTE_SHIFT) - 1)); 00402 00403 Pointer.u.Long = MiProtoAddressForPte (PointerPte); 00404 TempPte = Pointer; 00405 PointerPde = MiPteToProto(&TempPte); 00406 if (PointerPte != PointerPde) { 00407 DbgPrint("unable to map end of nonpaged pool as prototype pte %p %p\n", 00408 PointerPde, 00409 PointerPte); 00410 } 00411 00412 PointerPte = (PMMPTE)(((ULONG_PTR)NON_PAGED_SYSTEM_END - 00413 0x37000 + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)); 00414 00415 for (j = 0; j < 20; j++) { 00416 Pointer.u.Long = MiProtoAddressForPte (PointerPte); 00417 TempPte = Pointer; 00418 PointerPde = MiPteToProto(&TempPte); 00419 if (PointerPte != PointerPde) { 00420 DbgPrint("unable to map end of nonpaged pool as prototype pte %p %p\n", 00421 PointerPde, 00422 PointerPte); 00423 } 00424 00425 PointerPte++; 00426 } 00427 00428 PointerPte = (PMMPTE)(((ULONG_PTR)MM_NONPAGED_POOL_END - 0x133448) & ~(ULONG_PTR)7); 00429 Pointer.u.Long = MiGetSubsectionAddressForPte (PointerPte); 00430 TempPte = Pointer; 00431 PointerPde = (PMMPTE)MiGetSubsectionAddress(&TempPte); 00432 if (PointerPte != PointerPde) { 00433 DbgPrint("unable to map end of nonpaged pool as section pte %p %p\n", 00434 PointerPde, 00435 PointerPte); 00436 00437 MiFormatPte(&TempPte); 00438 } 00439 00440 // 00441 // End of sanity checks. 00442 // 00443 00444 #endif //dbg 00445 00446 if (MmEnforceWriteProtection) { 00447 MiPteStr[0] = (CHAR)1; 00448 } 00449 00450 InitializeListHead( &MmLoadedUserImageList ); 00451 InitializeListHead( &MmLockConflictList ); 00452 00453 MmCriticalSectionTimeout.QuadPart = Int32x32To64( 00454 MmCritsectTimeoutSeconds, 00455 -10000000); 00456 00457 00458 // 00459 // Initialize PFN database mutex and System Address Space creation 00460 // mutex. 
00461 // 00462 00463 ExInitializeFastMutex (&MmSectionCommitMutex); 00464 ExInitializeFastMutex (&MmSectionBasedMutex); 00465 ExInitializeFastMutex (&MmDynamicMemoryMutex); 00466 00467 KeInitializeMutant (&MmSystemLoadLock, FALSE); 00468 00469 KeInitializeEvent (&MmAvailablePagesEvent, NotificationEvent, TRUE); 00470 KeInitializeEvent (&MmAvailablePagesEventHigh, NotificationEvent, TRUE); 00471 KeInitializeEvent (&MmMappedFileIoComplete, NotificationEvent, FALSE); 00472 KeInitializeEvent (&MmImageMappingPteEvent, NotificationEvent, FALSE); 00473 KeInitializeEvent (&MmZeroingPageEvent, SynchronizationEvent, FALSE); 00474 KeInitializeEvent (&MmCollidedFlushEvent, NotificationEvent, FALSE); 00475 KeInitializeEvent (&MmCollidedLockEvent, NotificationEvent, FALSE); 00476 KeInitializeEvent (&MiMappedPagesTooOldEvent, NotificationEvent, FALSE); 00477 00478 KeInitializeDpc( &MiModifiedPageWriterTimerDpc, MiModifiedPageWriterTimerDispatch, NULL ); 00479 KeInitializeTimerEx( &MiModifiedPageWriterTimer, SynchronizationTimer ); 00480 00481 MiModifiedPageLife.QuadPart = Int32x32To64( 00482 MmModifiedPageLifeInSeconds, 00483 -10000000); 00484 00485 InitializeListHead (&MmWorkingSetExpansionHead.ListHead); 00486 InitializeListHead (&MmInPageSupportList.ListHead); 00487 InitializeListHead (&MmEventCountList.ListHead); 00488 00489 MmZeroingPageThreadActive = FALSE; 00490 00491 // 00492 // Compute physical memory blocks yet again 00493 // 00494 00495 Memory = (PPHYSICAL_MEMORY_DESCRIPTOR)&MemoryAlloc; 00496 Memory->NumberOfRuns = MAX_PHYSICAL_MEMORY_FRAGMENTS; 00497 00498 // include all memory types ... 00499 for (i=0; i < LoaderMaximum; i++) { 00500 IncludeType[i] = TRUE; 00501 } 00502 00503 // ... expect these.. 00504 IncludeType[LoaderBad] = FALSE; 00505 IncludeType[LoaderFirmwarePermanent] = FALSE; 00506 IncludeType[LoaderSpecialMemory] = FALSE; 00507 IncludeType[LoaderBBTMemory] = FALSE; 00508 00509 MmInitializeMemoryLimits(LoaderBlock, IncludeType, Memory); 00510 00511 #if defined (_X86PAE_) 00512 MiCheckPaeLicense (LoaderBlock, IncludeType, Memory); 00513 #endif 00514 00515 #if defined (_X86PAE_) || defined (_WIN64) 00516 Mm64BitPhysicalAddress = TRUE; 00517 #endif 00518 00519 // 00520 // Add all memory runs in PhysicalMemoryBlock to Memory 00521 // 00522 00523 for (i = 0; i < PhysicalMemoryBlock->NumberOfRuns; i += 1) { 00524 MiMergeMemoryLimit (Memory, 00525 PhysicalMemoryBlock->Run[i].BasePage, 00526 PhysicalMemoryBlock->Run[i].PageCount 00527 ); 00528 } 00529 00530 // 00531 // Sort and merge adjacent runs. 00532 // 00533 00534 for (i=0; i < Memory->NumberOfRuns; i++) { 00535 for (j=i+1; j < Memory->NumberOfRuns; j++) { 00536 if (Memory->Run[j].BasePage < Memory->Run[i].BasePage) { 00537 // swap runs 00538 PhysicalMemoryBlock->Run[0] = Memory->Run[j]; 00539 Memory->Run[j] = Memory->Run[i]; 00540 Memory->Run[i] = PhysicalMemoryBlock->Run[0]; 00541 } 00542 00543 if (Memory->Run[i].BasePage + Memory->Run[i].PageCount == 00544 Memory->Run[j].BasePage) { 00545 // merge runs 00546 Memory->NumberOfRuns -= 1; 00547 Memory->Run[i].PageCount += Memory->Run[j].PageCount; 00548 Memory->Run[j] = Memory->Run[Memory->NumberOfRuns]; 00549 i -= 1; 00550 break; 00551 } 00552 } 00553 } 00554 00555 // 00556 // When safebooting, don't enable special pool, the verifier or any 00557 // other options that track corruption regardless of registry settings. 
00558 // 00559 00560 if (strstr(LoaderBlock->LoadOptions, SAFEBOOT_LOAD_OPTION_A)) { 00561 MmVerifyDriverBufferLength = (ULONG)-1; 00562 MmDontVerifyRandomDrivers = TRUE; 00563 MmSpecialPoolTag = (ULONG)-1; 00564 MmSnapUnloads = FALSE; 00565 MmProtectFreedNonPagedPool = FALSE; 00566 MmEnforceWriteProtection = 0; 00567 MmTrackLockedPages = FALSE; 00568 MmTrackPtes = FALSE; 00569 } 00570 else { 00571 MiTriageSystem (LoaderBlock); 00572 } 00573 00574 SystemPteMultiplier = 0; 00575 00576 if (MmNumberOfSystemPtes == 0) { 00577 #if defined (_WIN64) 00578 00579 // 00580 // 64-bit NT is not contrained by virtual address space. No 00581 // tradeoffs between nonpaged pool, paged pool and system PTEs 00582 // need to be made. So just allocate PTEs on a linear scale as 00583 // a function of the amount of RAM. 00584 // 00585 // For example on Alpha64, 4gb of RAM gets 128gb of PTEs by default. 00586 // The page table cost is the inversion of the multiplier based 00587 // on the PTE_PER_PAGE. 00588 // 00589 00590 if ((MiHydra == TRUE) && (ExpMultiUserTS == TRUE)) { 00591 SystemPteMultiplier = 128; 00592 } 00593 else { 00594 SystemPteMultiplier = 64; 00595 } 00596 if (Memory->NumberOfPages < 0x8000) { 00597 SystemPteMultiplier >>= 1; 00598 } 00599 #else 00600 if (Memory->NumberOfPages < MM_MEDIUM_SYSTEM) { 00601 MmNumberOfSystemPtes = MM_MINIMUM_SYSTEM_PTES; 00602 } else { 00603 MmNumberOfSystemPtes = MM_DEFAULT_SYSTEM_PTES; 00604 if (Memory->NumberOfPages > 8192) { 00605 MmNumberOfSystemPtes += MmNumberOfSystemPtes; 00606 00607 // 00608 // Any reasonable Hydra machine gets the maximum. 00609 // 00610 00611 if ((MiHydra == TRUE) && (ExpMultiUserTS == TRUE)) { 00612 MmNumberOfSystemPtes = MM_MAXIMUM_SYSTEM_PTES; 00613 } 00614 } 00615 } 00616 #endif 00617 } 00618 else if (MmNumberOfSystemPtes == (ULONG)-1) { 00619 00620 // 00621 // This registry setting indicates the maximum number of 00622 // system PTEs possible for this machine must be allocated. 00623 // Snap this for later reference. 00624 // 00625 00626 MiRequestedSystemPtes = MmNumberOfSystemPtes; 00627 00628 #if defined (_WIN64) 00629 SystemPteMultiplier = 256; 00630 #else 00631 MmNumberOfSystemPtes = MM_MAXIMUM_SYSTEM_PTES; 00632 #endif 00633 } 00634 00635 if (SystemPteMultiplier != 0) { 00636 if (Memory->NumberOfPages * SystemPteMultiplier > MM_MAXIMUM_SYSTEM_PTES) { 00637 MmNumberOfSystemPtes = MM_MAXIMUM_SYSTEM_PTES; 00638 } 00639 else { 00640 MmNumberOfSystemPtes = (ULONG)(Memory->NumberOfPages * SystemPteMultiplier); 00641 } 00642 } 00643 00644 if (MmNumberOfSystemPtes > MM_MAXIMUM_SYSTEM_PTES) { 00645 MmNumberOfSystemPtes = MM_MAXIMUM_SYSTEM_PTES; 00646 } 00647 00648 if (MmNumberOfSystemPtes < MM_MINIMUM_SYSTEM_PTES) { 00649 MmNumberOfSystemPtes = MM_MINIMUM_SYSTEM_PTES; 00650 } 00651 00652 if (MmHeapSegmentReserve == 0) { 00653 MmHeapSegmentReserve = 1024 * 1024; 00654 } 00655 00656 if (MmHeapSegmentCommit == 0) { 00657 MmHeapSegmentCommit = PAGE_SIZE * 2; 00658 } 00659 00660 if (MmHeapDeCommitTotalFreeThreshold == 0) { 00661 MmHeapDeCommitTotalFreeThreshold = 64 * 1024; 00662 } 00663 00664 if (MmHeapDeCommitFreeBlockThreshold == 0) { 00665 MmHeapDeCommitFreeBlockThreshold = PAGE_SIZE; 00666 } 00667 00668 #ifndef NO_POOL_CHECKS 00669 MiInitializeSpecialPoolCriteria (); 00670 #endif 00671 00672 // 00673 // If the registry indicates drivers are in the suspect list, 00674 // extra system PTEs need to be allocated to support special pool 00675 // for their allocations. 
00676 // 00677 00678 if ((MmVerifyDriverBufferLength != (ULONG)-1) || 00679 ((MmSpecialPoolTag != 0) && (MmSpecialPoolTag != (ULONG)-1))) { 00680 MmNumberOfSystemPtes += MM_SPECIAL_POOL_PTES; 00681 } 00682 00683 MmNumberOfSystemPtes += BBTPagesToReserve; 00684 00685 // 00686 // Initialize the machine dependent portion of the hardware. 00687 // 00688 00689 ExInitializeResource (&MmSystemWsLock); 00690 00691 MiInitMachineDependent (LoaderBlock); 00692 00693 #if PFN_CONSISTENCY 00694 MiPfnProtectionEnabled = TRUE; 00695 #endif 00696 00697 MiReloadBootLoadedDrivers (LoaderBlock); 00698 00699 MiInitializeDriverVerifierList (LoaderBlock); 00700 00701 j = (sizeof(PHYSICAL_MEMORY_DESCRIPTOR) + 00702 (sizeof(PHYSICAL_MEMORY_RUN) * 00703 (Memory->NumberOfRuns - 1))); 00704 00705 MmPhysicalMemoryBlock = ExAllocatePoolWithTag (NonPagedPoolMustSucceed, 00706 j, 00707 ' mM'); 00708 00709 RtlCopyMemory (MmPhysicalMemoryBlock, Memory, j); 00710 00711 // 00712 // Setup the system size as small, medium, or large depending 00713 // on memory available. 00714 // 00715 // For internal MM tuning, the following applies 00716 // 00717 // 12Mb is small 00718 // 12-19 is medium 00719 // > 19 is large 00720 // 00721 // 00722 // For all other external tuning, 00723 // < 19 is small 00724 // 19 - 31 is medium for workstation 00725 // 19 - 63 is medium for server 00726 // >= 32 is large for workstation 00727 // >= 64 is large for server 00728 // 00729 00730 MmReadClusterSize = 7; 00731 if (MmNumberOfPhysicalPages <= MM_SMALL_SYSTEM ) { 00732 MmSystemSize = MmSmallSystem; 00733 MmMaximumDeadKernelStacks = 0; 00734 MmModifiedPageMinimum = 40; 00735 MmModifiedPageMaximum = 100; 00736 MmDataClusterSize = 0; 00737 MmCodeClusterSize = 1; 00738 MmReadClusterSize = 2; 00739 00740 } else if (MmNumberOfPhysicalPages <= MM_MEDIUM_SYSTEM ) { 00741 MmSystemSize = MmSmallSystem; 00742 MmMaximumDeadKernelStacks = 2; 00743 MmModifiedPageMinimum = 80; 00744 MmModifiedPageMaximum = 150; 00745 MmSystemCacheWsMinimum += 100; 00746 MmSystemCacheWsMaximum += 150; 00747 MmDataClusterSize = 1; 00748 MmCodeClusterSize = 2; 00749 MmReadClusterSize = 4; 00750 00751 } else { 00752 MmSystemSize = MmMediumSystem; 00753 MmMaximumDeadKernelStacks = 5; 00754 MmModifiedPageMinimum = 150; 00755 MmModifiedPageMaximum = 300; 00756 MmSystemCacheWsMinimum += 400; 00757 MmSystemCacheWsMaximum += 800; 00758 MmDataClusterSize = 3; 00759 MmCodeClusterSize = 7; 00760 } 00761 00762 if (MmNumberOfPhysicalPages < ((24*1024*1024)/PAGE_SIZE)) { 00763 MmSystemCacheWsMinimum = 32; 00764 } 00765 00766 if (MmNumberOfPhysicalPages >= ((32*1024*1024)/PAGE_SIZE)) { 00767 00768 // 00769 // If we are on a workstation, 32Mb and above are considered large systems 00770 // 00771 if ( MmProductType == 0x00690057 ) { 00772 MmSystemSize = MmLargeSystem; 00773 00774 } else { 00775 00776 // 00777 // For servers, 64Mb and greater is a large system 00778 // 00779 00780 if (MmNumberOfPhysicalPages >= ((64*1024*1024)/PAGE_SIZE)) { 00781 MmSystemSize = MmLargeSystem; 00782 } 00783 } 00784 } 00785 00786 if (MmNumberOfPhysicalPages > ((33*1024*1024)/PAGE_SIZE)) { 00787 MmModifiedPageMinimum = 400; 00788 MmModifiedPageMaximum = 800; 00789 MmSystemCacheWsMinimum += 500; 00790 MmSystemCacheWsMaximum += 900; 00791 } 00792 00793 // 00794 // determine if we are on an AS system ( Winnt is not AS) 00795 // 00796 00797 if (MmProductType == 0x00690057) { 00798 SharedUserData->NtProductType = NtProductWinNt; 00799 MmProductType = 0; 00800 MmThrottleTop = 250; 00801 MmThrottleBottom = 30; 00802 00803 } 
else { 00804 if ( MmProductType == 0x0061004c ) { 00805 SharedUserData->NtProductType = NtProductLanManNt; 00806 00807 } else { 00808 SharedUserData->NtProductType = NtProductServer; 00809 } 00810 00811 MmProductType = 1; 00812 MmThrottleTop = 450; 00813 MmThrottleBottom = 80; 00814 MmMinimumFreePages = 81; 00815 } 00816 00817 MiAdjustWorkingSetManagerParameters((BOOLEAN)(MmProductType == 0 ? TRUE : FALSE)); 00818 00819 // 00820 // Set the ResidentAvailablePages to the number of available 00821 // pages minus the fluid value. 00822 // 00823 00824 MmResidentAvailablePages = MmAvailablePages - MM_FLUID_PHYSICAL_PAGES; 00825 00826 // 00827 // Subtract off the size of the system cache working set. 00828 // 00829 00830 MmResidentAvailablePages -= MmSystemCacheWsMinimum; 00831 MmResidentAvailableAtInit = MmResidentAvailablePages; 00832 00833 00834 if (MmResidentAvailablePages < 0) { 00835 #if DBG 00836 DbgPrint("system cache working set too big\n"); 00837 #endif 00838 return FALSE; 00839 } 00840 00841 // 00842 // Initialize spin lock for charging and releasing page file 00843 // commitment. 00844 // 00845 00846 KeInitializeSpinLock (&MmChargeCommitmentLock); 00847 00848 MiInitializeIoTrackers (); 00849 00850 // 00851 // Initialize spin lock for allowing working set expansion. 00852 // 00853 00854 KeInitializeSpinLock (&MmExpansionLock); 00855 00856 ExInitializeFastMutex (&MmPageFileCreationLock); 00857 00858 // 00859 // Initialize resource for extending sections. 00860 // 00861 00862 ExInitializeResource (&MmSectionExtendResource); 00863 ExInitializeResource (&MmSectionExtendSetResource); 00864 00865 // 00866 // Build the system cache structures. 00867 // 00868 00869 StartPde = MiGetPdeAddress (MmSystemCacheWorkingSetList); 00870 PointerPte = MiGetPteAddress (MmSystemCacheWorkingSetList); 00871 00872 #if defined (_WIN64) 00873 00874 StartPpe = MiGetPteAddress(StartPde); 00875 00876 TempPte = ValidKernelPte; 00877 00878 if (StartPpe->u.Hard.Valid == 0) { 00879 00880 // 00881 // Map in a page directory page for the system cache working set. 00882 // Note that we only populate one page table for this. 00883 // 00884 00885 DirectoryFrameIndex = MiRemoveAnyPage( 00886 MI_GET_PAGE_COLOR_FROM_PTE (StartPpe)); 00887 TempPte.u.Hard.PageFrameNumber = DirectoryFrameIndex; 00888 *StartPpe = TempPte; 00889 00890 00891 Pfn1 = MI_PFN_ELEMENT(DirectoryFrameIndex); 00892 Pfn1->PteFrame = MI_GET_PAGE_FRAME_FROM_PTE ( 00893 MiGetPteAddress(PDE_KTBASE)); 00894 Pfn1->PteAddress = StartPpe; 00895 Pfn1->u2.ShareCount += 1; 00896 Pfn1->u3.e2.ReferenceCount = 1; 00897 Pfn1->u3.e1.PageLocation = ActiveAndValid; 00898 Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE; 00899 00900 MiFillMemoryPte (StartPde, 00901 PAGE_SIZE, 00902 ZeroKernelPte.u.Long); 00903 } 00904 00905 // 00906 // Map in a page table page. 
00907 // 00908 00909 ASSERT (StartPde->u.Hard.Valid == 0); 00910 00911 PageFrameIndex = MiRemoveAnyPage( 00912 MI_GET_PAGE_COLOR_FROM_PTE (StartPde)); 00913 TempPte.u.Hard.PageFrameNumber = PageFrameIndex; 00914 MI_WRITE_VALID_PTE (StartPde, TempPte); 00915 00916 Pfn1 = MI_PFN_ELEMENT(PageFrameIndex); 00917 Pfn1->PteFrame = DirectoryFrameIndex; 00918 Pfn1->PteAddress = StartPde; 00919 Pfn1->u2.ShareCount += 1; 00920 Pfn1->u3.e2.ReferenceCount = 1; 00921 Pfn1->u3.e1.PageLocation = ActiveAndValid; 00922 Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE; 00923 00924 MiFillMemoryPte (MiGetVirtualAddressMappedByPte (StartPde), 00925 PAGE_SIZE, 00926 ZeroKernelPte.u.Long); 00927 00928 StartPpe = MiGetPpeAddress(MmSystemCacheStart); 00929 StartPde = MiGetPdeAddress(MmSystemCacheStart); 00930 PointerPte = MiGetVirtualAddressMappedByPte (StartPde); 00931 00932 #else 00933 #if !defined(_X86PAE_) 00934 ASSERT ((StartPde + 1) == MiGetPdeAddress (MmSystemCacheStart)); 00935 #endif 00936 #endif 00937 00938 MaximumSystemCacheSizeTotal = MaximumSystemCacheSize; 00939 00940 #if defined(_X86_) 00941 MaximumSystemCacheSizeTotal += MiMaximumSystemCacheSizeExtra; 00942 #endif 00943 00944 // 00945 // Size the system cache based on the amount of physical memory. 00946 // 00947 00948 i = (MmNumberOfPhysicalPages + 65) / 1024; 00949 00950 if (i >= 4) { 00951 00952 // 00953 // System has at least 4032 pages. Make the system 00954 // cache 128mb + 64mb for each additional 1024 pages. 00955 // 00956 00957 MmSizeOfSystemCacheInPages = (PFN_COUNT)( 00958 ((128*1024*1024) >> PAGE_SHIFT) + 00959 ((i - 4) * ((64*1024*1024) >> PAGE_SHIFT))); 00960 if (MmSizeOfSystemCacheInPages > MaximumSystemCacheSizeTotal) { 00961 MmSizeOfSystemCacheInPages = MaximumSystemCacheSizeTotal; 00962 } 00963 } 00964 00965 MmSystemCacheEnd = (PVOID)(((PCHAR)MmSystemCacheStart + 00966 MmSizeOfSystemCacheInPages * PAGE_SIZE) - 1); 00967 00968 #if defined(_X86_) 00969 if (MmSizeOfSystemCacheInPages > MaximumSystemCacheSize) { 00970 ASSERT (MiMaximumSystemCacheSizeExtra != 0); 00971 MmSystemCacheEnd = (PVOID)(((PCHAR)MmSystemCacheStart + 00972 MaximumSystemCacheSize * PAGE_SIZE) - 1); 00973 00974 MiSystemCacheStartExtra = (PVOID)MM_SYSTEM_CACHE_START_EXTRA; 00975 MiSystemCacheEndExtra = (PVOID)(((PCHAR)MiSystemCacheStartExtra + 00976 (MmSizeOfSystemCacheInPages - MaximumSystemCacheSize) * PAGE_SIZE) - 1); 00977 } 00978 else { 00979 MiSystemCacheStartExtra = MmSystemCacheStart; 00980 MiSystemCacheEndExtra = MmSystemCacheEnd; 00981 } 00982 #endif 00983 00984 EndPde = MiGetPdeAddress(MmSystemCacheEnd); 00985 00986 TempPte = ValidKernelPte; 00987 00988 #if defined(_WIN64) 00989 First = (StartPpe->u.Hard.Valid == 0) ? TRUE : FALSE; 00990 #endif 00991 00992 #if !defined (_WIN64) 00993 DirectoryFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (MiGetPteAddress(PDE_BASE)); 00994 #endif 00995 00996 LOCK_PFN (OldIrql); 00997 while (StartPde <= EndPde) { 00998 00999 #if defined (_WIN64) 01000 if (First == TRUE || MiIsPteOnPdeBoundary(StartPde)) { 01001 First = FALSE; 01002 StartPpe = MiGetPteAddress(StartPde); 01003 01004 // 01005 // Map in a page directory page. 
01006 // 01007 01008 DirectoryFrameIndex = MiRemoveAnyPage( 01009 MI_GET_PAGE_COLOR_FROM_PTE (StartPpe)); 01010 TempPte.u.Hard.PageFrameNumber = DirectoryFrameIndex; 01011 *StartPpe = TempPte; 01012 01013 Pfn1 = MI_PFN_ELEMENT(DirectoryFrameIndex); 01014 Pfn1->PteFrame = MI_GET_PAGE_FRAME_FROM_PTE ( 01015 MiGetPteAddress(PDE_KTBASE)); 01016 Pfn1->PteAddress = StartPpe; 01017 Pfn1->u2.ShareCount += 1; 01018 Pfn1->u3.e2.ReferenceCount = 1; 01019 Pfn1->u3.e1.PageLocation = ActiveAndValid; 01020 Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE; 01021 01022 MiFillMemoryPte (StartPde, 01023 PAGE_SIZE, 01024 ZeroKernelPte.u.Long); 01025 } 01026 #endif 01027 01028 ASSERT (StartPde->u.Hard.Valid == 0); 01029 01030 // 01031 // Map in a page table page. 01032 // 01033 01034 PageFrameIndex = MiRemoveAnyPage( 01035 MI_GET_PAGE_COLOR_FROM_PTE (StartPde)); 01036 TempPte.u.Hard.PageFrameNumber = PageFrameIndex; 01037 MI_WRITE_VALID_PTE (StartPde, TempPte); 01038 01039 Pfn1 = MI_PFN_ELEMENT(PageFrameIndex); 01040 Pfn1->PteFrame = DirectoryFrameIndex; 01041 Pfn1->PteAddress = StartPde; 01042 Pfn1->u2.ShareCount += 1; 01043 Pfn1->u3.e2.ReferenceCount = 1; 01044 Pfn1->u3.e1.PageLocation = ActiveAndValid; 01045 Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE; 01046 01047 MiFillMemoryPte (PointerPte, 01048 PAGE_SIZE, 01049 ZeroKernelPte.u.Long); 01050 01051 StartPde += 1; 01052 PointerPte += PTE_PER_PAGE; 01053 } 01054 01055 #if defined(_X86_) 01056 if (MiSystemCacheEndExtra != MmSystemCacheEnd) { 01057 01058 StartPde = MiGetPdeAddress (MiSystemCacheStartExtra); 01059 EndPde = MiGetPdeAddress(MiSystemCacheEndExtra); 01060 01061 PointerPte = MiGetPteAddress (MiSystemCacheStartExtra); 01062 01063 while (StartPde <= EndPde) { 01064 01065 ASSERT (StartPde->u.Hard.Valid == 0); 01066 01067 // 01068 // Map in a page directory page. 01069 // 01070 01071 PageFrameIndex = MiRemoveAnyPage( 01072 MI_GET_PAGE_COLOR_FROM_PTE (StartPde)); 01073 TempPte.u.Hard.PageFrameNumber = PageFrameIndex; 01074 MI_WRITE_VALID_PTE (StartPde, TempPte); 01075 01076 Pfn1 = MI_PFN_ELEMENT(PageFrameIndex); 01077 Pfn1->PteFrame = MI_GET_PAGE_FRAME_FROM_PTE ( 01078 MiGetPdeAddress(PDE_BASE)); 01079 Pfn1->PteAddress = StartPde; 01080 Pfn1->u2.ShareCount += 1; 01081 Pfn1->u3.e2.ReferenceCount = 1; 01082 Pfn1->u3.e1.PageLocation = ActiveAndValid; 01083 Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE; 01084 01085 MiFillMemoryPte (PointerPte, 01086 PAGE_SIZE, 01087 ZeroKernelPte.u.Long); 01088 01089 StartPde += 1; 01090 PointerPte += PTE_PER_PAGE; 01091 } 01092 } 01093 #endif 01094 01095 UNLOCK_PFN (OldIrql); 01096 01097 // 01098 // Initialize the system cache. Only set the large system cache if 01099 // we have a large amount of physical memory. 
01100 // 01101 01102 if (MmLargeSystemCache != 0 && MmNumberOfPhysicalPages > 0x7FF0) { 01103 if ((MmAvailablePages > 01104 MmSystemCacheWsMaximum + ((64*1024*1024) >> PAGE_SHIFT))) { 01105 MmSystemCacheWsMaximum = 01106 MmAvailablePages - ((32*1024*1024) >> PAGE_SHIFT); 01107 ASSERT ((LONG)MmSystemCacheWsMaximum > (LONG)MmSystemCacheWsMinimum); 01108 MmMoreThanEnoughFreePages = 256; 01109 } 01110 } 01111 01112 if (MmSystemCacheWsMaximum > (MM_MAXIMUM_WORKING_SET - 5)) { 01113 MmSystemCacheWsMaximum = MM_MAXIMUM_WORKING_SET - 5; 01114 } 01115 01116 if (MmSystemCacheWsMaximum > MmSizeOfSystemCacheInPages) { 01117 MmSystemCacheWsMaximum = MmSizeOfSystemCacheInPages; 01118 if ((MmSystemCacheWsMinimum + 500) > MmSystemCacheWsMaximum) { 01119 MmSystemCacheWsMinimum = MmSystemCacheWsMaximum - 500; 01120 } 01121 } 01122 01123 MiInitializeSystemCache ((ULONG)MmSystemCacheWsMinimum, 01124 (ULONG)MmSystemCacheWsMaximum); 01125 01126 // 01127 // Set the commit page limit to four times the number of available 01128 // pages. This value is updated as paging files are created. 01129 // 01130 01131 MmTotalCommitLimit = MmAvailablePages << 2; 01132 MmTotalCommitLimitMaximum = MmTotalCommitLimit; 01133 01134 MmAttemptForCantExtend.Segment = NULL; 01135 MmAttemptForCantExtend.RequestedExpansionSize = 1; 01136 MmAttemptForCantExtend.ActualExpansion = 1; 01137 MmAttemptForCantExtend.InProgress = FALSE; 01138 MmAttemptForCantExtend.PageFileNumber = MI_EXTEND_ANY_PAGEFILE; 01139 01140 KeInitializeEvent (&MmAttemptForCantExtend.Event, 01141 NotificationEvent, 01142 FALSE); 01143 01144 if (MmOverCommit == 0) { 01145 01146 // If this value was not set via the registry, set the 01147 // over commit value to the number of available pages 01148 // minus 1024 pages (4mb with 4k pages). 01149 // 01150 01151 if (MmAvailablePages > 1024) { 01152 MmOverCommit = MmAvailablePages - 1024; 01153 } 01154 } 01155 01156 // 01157 // Set maximum working set size to 512 pages less total available 01158 // memory. 2mb on machine with 4k pages. 01159 // 01160 01161 MmMaximumWorkingSetSize = (ULONG)(MmAvailablePages - 512); 01162 01163 if (MmMaximumWorkingSetSize > (MM_MAXIMUM_WORKING_SET - 5)) { 01164 MmMaximumWorkingSetSize = MM_MAXIMUM_WORKING_SET - 5; 01165 } 01166 01167 // 01168 // Create the modified page writer event. 01169 // 01170 01171 KeInitializeEvent (&MmModifiedPageWriterEvent, NotificationEvent, FALSE); 01172 01173 // 01174 // Build paged pool. 01175 // 01176 01177 MiBuildPagedPool (); 01178 01179 // 01180 // Initialize the loaded module list. This cannot be done until 01181 // paged pool has been built. 01182 // 01183 01184 if (MiInitializeLoadedModuleList (LoaderBlock) == FALSE) { 01185 #if DBG 01186 DbgPrint("Loaded module list initialization failed\n"); 01187 #endif 01188 return FALSE; 01189 } 01190 01191 // 01192 // Initialize the unused segment thresholds. The assumption is made 01193 // that the filesystem will tack on approximately a 1024-byte paged 01194 // pool charge (regardless of file size) for each file in the cache. 
01195 // 01196 01197 if (MmUnusedSegmentTrimLevel < 5) { 01198 MmUnusedSegmentTrimLevel = 5; 01199 } 01200 else if (MmUnusedSegmentTrimLevel > 40) { 01201 MmUnusedSegmentTrimLevel = 40; 01202 } 01203 01204 MmMaxUnusedSegmentPagedPoolUsage = (MmSizeOfPagedPoolInBytes / 100) * (MmUnusedSegmentTrimLevel << 1); 01205 MmUnusedSegmentPagedPoolReduction = MmMaxUnusedSegmentPagedPoolUsage >> 2; 01206 01207 MmMaxUnusedSegmentNonPagedPoolUsage = (MmMaximumNonPagedPoolInBytes / 100) * (MmUnusedSegmentTrimLevel << 1); 01208 MmUnusedSegmentNonPagedPoolReduction = MmMaxUnusedSegmentNonPagedPoolUsage >> 2; 01209 01210 // 01211 // Add more system PTEs if this is a large memory system. 01212 // Note that 64 bit systems can determine the right value at the 01213 // beginning since there is no virtual address space crunch. 01214 // 01215 01216 #if !defined (_WIN64) 01217 if (MmNumberOfPhysicalPages > ((127*1024*1024) >> PAGE_SHIFT)) { 01218 01219 PointerPde = MiGetPdeAddress ((PCHAR)MmPagedPoolEnd + 1); 01220 StartingPte = MiGetPteAddress ((PCHAR)MmPagedPoolEnd + 1); 01221 j = 0; 01222 01223 TempPte = ValidKernelPde; 01224 LOCK_PFN (OldIrql); 01225 while (PointerPde->u.Hard.Valid == 0) { 01226 01227 MiChargeCommitmentCantExpand (1, TRUE); 01228 MM_TRACK_COMMIT (MM_DBG_COMMIT_EXTRA_SYSTEM_PTES, 1); 01229 01230 PageFrameIndex = MiRemoveZeroPage ( 01231 MI_GET_PAGE_COLOR_FROM_PTE (PointerPde)); 01232 TempPte.u.Hard.PageFrameNumber = PageFrameIndex; 01233 MI_WRITE_VALID_PTE (PointerPde, TempPte); 01234 MiInitializePfn (PageFrameIndex, PointerPde, 1); 01235 PointerPde += 1; 01236 StartingPte += PAGE_SIZE / sizeof(MMPTE); 01237 j += PAGE_SIZE / sizeof(MMPTE); 01238 } 01239 01240 UNLOCK_PFN (OldIrql); 01241 01242 if (j != 0) { 01243 StartingPte = MiGetPteAddress ((PCHAR)MmPagedPoolEnd + 1); 01244 MmNonPagedSystemStart = MiGetVirtualAddressMappedByPte (StartingPte); 01245 MmNumberOfSystemPtes += j; 01246 MiAddSystemPtes (StartingPte, j, SystemPteSpace); 01247 } 01248 } 01249 #endif 01250 01251 01252 #if DBG 01253 if (MmDebug & MM_DBG_DUMP_BOOT_PTES) { 01254 MiDumpValidAddresses (); 01255 MiDumpPfn (); 01256 } 01257 #endif 01258 01259 MmPageFaultNotifyRoutine = NULL; 01260 MmHardFaultNotifyRoutine = NULL; 01261 01262 return TRUE; 01263 } 01264 01265 if (Phase == 1) { 01266 01267 #if DBG 01268 MmDebug |= MM_DBG_CHECK_PFN_LOCK; 01269 #endif 01270 01271 #ifdef _X86_ 01272 MiInitMachineDependent (LoaderBlock); 01273 #endif 01274 MiMapBBTMemory(LoaderBlock); 01275 01276 if (!MiSectionInitialization ()) { 01277 return FALSE; 01278 } 01279 01280 Process = PsGetCurrentProcess (); 01281 if (Process->PhysicalVadList.Flink == NULL) { 01282 KeInitializeSpinLock (&Process->AweLock); 01283 InitializeListHead (&Process->PhysicalVadList); 01284 } 01285 01286 #if defined(MM_SHARED_USER_DATA_VA) 01287 01288 // 01289 // Create double mapped page between kernel and user mode. 
01290 // 01291 01292 PointerPte = MiGetPteAddress(KI_USER_SHARED_DATA); 01293 ASSERT (PointerPte->u.Hard.Valid == 1); 01294 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 01295 01296 MI_MAKE_VALID_PTE (MmSharedUserDataPte, 01297 PageFrameIndex, 01298 MM_READONLY, 01299 PointerPte); 01300 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 01301 01302 LOCK_PFN (OldIrql); 01303 01304 Pfn1->OriginalPte.u.Long = MM_DEMAND_ZERO_WRITE_PTE; 01305 01306 UNLOCK_PFN (OldIrql); 01307 #endif 01308 01309 if (MiHydra == TRUE) { 01310 MiSessionWideInitializeAddresses (); 01311 MiInitializeSessionWsSupport (); 01312 MiInitializeSessionIds (); 01313 } 01314 01315 // 01316 // Set up system wide lock pages limit. 01317 // 01318 01319 if ((MmLockPagesPercentage < 5) || (MmLockPagesPercentage >= 100)) { 01320 01321 // 01322 // No (reasonable or max) registry override from the user 01323 // so default to allowing all available memory. 01324 // 01325 01326 MmLockPagesLimit = (PFN_NUMBER)-1; 01327 } 01328 else { 01329 01330 // 01331 // Use the registry value - note it is expressed as a percentage. 01332 // 01333 01334 MmLockPagesLimit = (PFN_NUMBER)((MmAvailablePages * MmLockPagesPercentage) / 100); 01335 } 01336 01337 // 01338 // Start the modified page writer. 01339 // 01340 01341 InitializeObjectAttributes( &ObjectAttributes, NULL, 0, NULL, NULL ); 01342 01343 if (!NT_SUCCESS(PsCreateSystemThread( 01344 &ThreadHandle, 01345 THREAD_ALL_ACCESS, 01346 &ObjectAttributes, 01347 0L, 01348 NULL, 01349 MiModifiedPageWriter, 01350 NULL 01351 ))) { 01352 return FALSE; 01353 } 01354 ZwClose (ThreadHandle); 01355 01356 // 01357 // Start the balance set manager. 01358 // 01359 // The balance set manager performs stack swapping and working 01360 // set management and requires two threads. 
01361 // 01362 01363 KeInitializeEvent (&MmWorkingSetManagerEvent, 01364 SynchronizationEvent, 01365 FALSE); 01366 01367 InitializeObjectAttributes( &ObjectAttributes, NULL, 0, NULL, NULL ); 01368 01369 if (!NT_SUCCESS(PsCreateSystemThread( 01370 &ThreadHandle, 01371 THREAD_ALL_ACCESS, 01372 &ObjectAttributes, 01373 0L, 01374 NULL, 01375 KeBalanceSetManager, 01376 NULL 01377 ))) { 01378 01379 return FALSE; 01380 } 01381 ZwClose (ThreadHandle); 01382 01383 if (!NT_SUCCESS(PsCreateSystemThread( 01384 &ThreadHandle, 01385 THREAD_ALL_ACCESS, 01386 &ObjectAttributes, 01387 0L, 01388 NULL, 01389 KeSwapProcessOrStack, 01390 NULL 01391 ))) { 01392 01393 return FALSE; 01394 } 01395 ZwClose (ThreadHandle); 01396 01397 #ifndef NO_POOL_CHECKS 01398 MiInitializeSpecialPoolCriteria (); 01399 #endif 01400 01401 #if defined(_X86_) 01402 MiEnableKernelVerifier (); 01403 #endif 01404 01405 ExAcquireResourceExclusive (&PsLoadedModuleResource, TRUE); 01406 01407 NextEntry = PsLoadedModuleList.Flink; 01408 01409 for ( ; NextEntry != &PsLoadedModuleList; NextEntry = NextEntry->Flink) { 01410 01411 DataTableEntry = CONTAINING_RECORD(NextEntry, 01412 LDR_DATA_TABLE_ENTRY, 01413 InLoadOrderLinks); 01414 01415 NtHeaders = RtlImageNtHeader(DataTableEntry->DllBase); 01416 01417 if ((NtHeaders->OptionalHeader.MajorOperatingSystemVersion >= 5) && 01418 (NtHeaders->OptionalHeader.MajorImageVersion >= 5)) { 01419 DataTableEntry->Flags |= LDRP_ENTRY_NATIVE; 01420 } 01421 01422 MiWriteProtectSystemImage (DataTableEntry->DllBase); 01423 } 01424 ExReleaseResource (&PsLoadedModuleResource); 01425 01426 InterlockedDecrement (&MiTrimInProgressCount); 01427 01428 return TRUE; 01429 } 01430 01431 if (Phase == 2) { 01432 MiEnablePagingTheExecutive(); 01433 return TRUE; 01434 } 01435 01436 return FALSE; 01437 }

VOID MmInPageKernelStack (IN PKTHREAD Thread)
 

Definition at line 3445 of file procsup.c.

References ASSERT, KeBugCheckEx(), KernelDemandZeroPte, LOCK_PFN, MI_GET_PAGE_FRAME_FROM_PTE, MiGetPteAddress, MiGetVirtualAddressMappedByPte, MiMakeOutswappedPageResident(), MM_KERNEL_DEMAND_ZERO_PTE, MM_KSTACK_OUTSWAPPED, MmKernelStackResident, NtGlobalFlag, PAGE_SIZE, _MMPTE::u, and UNLOCK_PFN.

Referenced by KiInSwapKernelStacks().

03451 : 03452 03453 This routine makes the specified kernel stack resident. 03454 03455 Arguments: 03456 03457 Supplies a pointer to the base of the kernel stack. 03458 03459 Return Value: 03460 03461 Thread - Supplies a pointer to the thread whose stack should be 03462 made resident. 03463 03464 Environment: 03465 03466 Kernel mode. 03467 03468 --*/ 03469 03470 { 03471 PVOID BaseOfKernelStack; 03472 PMMPTE PointerPte; 03473 PMMPTE EndOfStackPte; 03474 PMMPTE SignaturePte; 03475 ULONG DiskRead; 03476 PFN_NUMBER ContainingPage; 03477 KIRQL OldIrql; 03478 03479 ASSERT (((PCHAR)Thread->StackBase - (PCHAR)Thread->StackLimit) <= 03480 (KERNEL_LARGE_STACK_SIZE + PAGE_SIZE)); 03481 03482 if (NtGlobalFlag & FLG_DISABLE_PAGE_KERNEL_STACKS) { 03483 return; 03484 } 03485 03486 // 03487 // The first page of the stack is the page before the base 03488 // of the stack. 03489 // 03490 03491 if (Thread->LargeStack) { 03492 PointerPte = MiGetPteAddress ((PVOID)((PUCHAR)Thread->StackLimit)); 03493 03494 EndOfStackPte = MiGetPteAddress ((PVOID)((PUCHAR)Thread->InitialStack - 03495 KERNEL_LARGE_STACK_COMMIT)); 03496 // 03497 // Trim back the stack. Make sure that the stack does not grow, i.e. 03498 // StackLimit remains the limit. 03499 // 03500 03501 if (EndOfStackPte < PointerPte) { 03502 EndOfStackPte = PointerPte; 03503 } 03504 Thread->StackLimit = MiGetVirtualAddressMappedByPte (EndOfStackPte); 03505 } else { 03506 EndOfStackPte = MiGetPteAddress (Thread->StackLimit); 03507 } 03508 03509 #if defined(_IA64_) 03510 03511 if (Thread->LargeStack) { 03512 03513 PVOID TempAddress = (PVOID)((PUCHAR)Thread->BStoreLimit); 03514 03515 BaseOfKernelStack = (PVOID)(((ULONG_PTR)Thread->InitialBStore + 03516 KERNEL_LARGE_BSTORE_COMMIT) & 03517 ~(ULONG_PTR)(PAGE_SIZE - 1)); 03518 03519 // 03520 // Make sure the guard page is not set to valid. 03521 // 03522 03523 if (BaseOfKernelStack > TempAddress) { 03524 BaseOfKernelStack = TempAddress; 03525 } 03526 Thread->BStoreLimit = BaseOfKernelStack; 03527 } 03528 BaseOfKernelStack = ((PCHAR)Thread->BStoreLimit - PAGE_SIZE); 03529 #else 03530 BaseOfKernelStack = ((PCHAR)Thread->StackBase - PAGE_SIZE); 03531 #endif // _IA64_ 03532 03533 PointerPte = MiGetPteAddress (BaseOfKernelStack); 03534 03535 DiskRead = 0; 03536 SignaturePte = MiGetPteAddress ((PULONG_PTR)Thread->KernelStack - 1); 03537 ASSERT (SignaturePte->u.Hard.Valid == 0); 03538 if ((SignaturePte->u.Long != MM_KERNEL_DEMAND_ZERO_PTE) && 03539 (SignaturePte->u.Soft.Transition == 0)) { 03540 DiskRead = 1; 03541 } 03542 03543 LOCK_PFN (OldIrql); 03544 03545 while (PointerPte >= EndOfStackPte) { 03546 03547 #ifdef PROTECT_KSTACKS 03548 if (!((PointerPte->u.Long == KernelDemandZeroPte.u.Long) || 03549 (PointerPte->u.Soft.Protection == MM_KSTACK_OUTSWAPPED))) { 03550 KeBugCheckEx (MEMORY_MANAGEMENT, 03551 0x3451, 03552 (ULONG_PTR)PointerPte, 03553 (ULONG_PTR)Thread, 03554 0); 03555 } 03556 ASSERT (PointerPte->u.Hard.Valid == 0); 03557 if (PointerPte->u.Soft.Protection == MM_KSTACK_OUTSWAPPED) { 03558 PointerPte->u.Soft.Protection = PAGE_READWRITE; 03559 } 03560 #endif 03561 03562 ContainingPage = MI_GET_PAGE_FRAME_FROM_PTE (MiGetPteAddress (PointerPte)); 03563 MiMakeOutswappedPageResident (PointerPte, 03564 PointerPte, 03565 1, 03566 ContainingPage); 03567 03568 PointerPte -= 1; 03569 MmKernelStackResident += 1; 03570 } 03571 03572 // 03573 // Check the signature at the current stack location - 4. 
03574 // 03575 03576 if (*((PULONG_PTR)Thread->KernelStack - 1) != (ULONG_PTR)Thread) { 03577 KeBugCheckEx (KERNEL_STACK_INPAGE_ERROR, 03578 DiskRead, 03579 *((PULONG_PTR)Thread->KernelStack - 1), 03580 0, 03581 (ULONG_PTR)Thread->KernelStack); 03582 } 03583 03584 UNLOCK_PFN (OldIrql); 03585 return; 03586 }

VOID MmInSwapProcess (IN PKPROCESS Process)
 

Definition at line 3910 of file procsup.c.

References _MMSUPPORT::AllowWorkingSetAdjustment, ASSERT, FALSE, HYPER_SPACE, INITIALIZE_DIRECTORY_TABLE_BASE, _MMWORKING_SET_EXPANSION_HEAD::ListHead, LOCK_EXPANSION, LOCK_PFN, MI_CONVERT_PHYSICAL_TO_PFN, MI_GET_PAGE_FRAME_FROM_PTE, MI_IS_PHYSICAL_ADDRESS, MI_PFN_ELEMENT, MiGetPdeAddress, MiGetPdeOffset, MiGetPpeAddress, MiGetPpeOffset, MiGetPteAddress, MiGetPteOffset, MiHydra, MiMakeOutswappedPageResident(), MiMapPageInHyperSpace(), MiSessionInSwapProcess(), MiUnmapPageInHyperSpace, MM_WS_SWAPPED_OUT, MmWorkingSetExpansionHead, MmWorkingSetList, _EPROCESS::PaePageDirectoryPage, _EPROCESS::PaeTop, PAGE_SHIFT, _EPROCESS::PageDirectoryPte, _EPROCESS::ProcessOutswapEnabled, _EPROCESS::ProcessOutswapped, _MMPFN::PteAddress, _MMPFN::PteFrame, TRUE, _MMPTE::u, _MMSUPPORT::u, _MMPFN::u1, _MMPFN::u2, UNLOCK_EXPANSION, UNLOCK_PFN, _EPROCESS::Vm, _MMSUPPORT::WorkingSetExpansionLinks, _EPROCESS::WorkingSetPage, and _MMSUPPORT::WorkingSetSize.

Referenced by KiInSwapProcesses().

03916 : 03917 03918 This routine in swaps the specified process. 03919 03920 Arguments: 03921 03922 Process - Supplies a pointer to the process that is to be swapped 03923 into memory. 03924 03925 Return Value: 03926 03927 None. 03928 03929 --*/ 03930 03931 { 03932 KIRQL OldIrql; 03933 KIRQL OldIrql2; 03934 PEPROCESS OutProcess; 03935 PFN_NUMBER PdePage; 03936 PFN_NUMBER PageDirectoryPage; 03937 PMMPTE PageDirectoryMap; 03938 PMMPTE PageDirectoryParentMap; 03939 MMPTE TempPte; 03940 MMPTE TempPte2; 03941 PFN_NUMBER HyperSpacePageTable; 03942 PMMPTE HyperSpacePageTableMap; 03943 PFN_NUMBER WorkingSetPage; 03944 PMMPFN Pfn1; 03945 PMMPTE PointerPte; 03946 PFN_NUMBER ProcessPage; 03947 #if defined (_X86PAE_) 03948 ULONG i; 03949 PPAE_ENTRY PaeVa; 03950 PFN_NUMBER PdePage2; 03951 #endif 03952 03953 OutProcess = CONTAINING_RECORD (Process, EPROCESS, Pcb); 03954 03955 if (OutProcess->ProcessOutswapped == TRUE) { 03956 03957 // 03958 // The process is out of memory, rebuild the initialized page 03959 // structure. 03960 // 03961 03962 if (MI_IS_PHYSICAL_ADDRESS(OutProcess)) { 03963 ProcessPage = MI_CONVERT_PHYSICAL_TO_PFN (OutProcess); 03964 } else { 03965 PointerPte = MiGetPteAddress (OutProcess); 03966 ProcessPage = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 03967 } 03968 03969 LOCK_PFN (OldIrql); 03970 03971 #if defined (_WIN64) 03972 PdePage = MiMakeOutswappedPageResident (MiGetPteAddress (PDE_TBASE), 03973 (PMMPTE)&OutProcess->PageDirectoryPte, 03974 0, 03975 ProcessPage); 03976 #else 03977 PdePage = MiMakeOutswappedPageResident (MiGetPteAddress (PDE_BASE), 03978 (PMMPTE)&OutProcess->PageDirectoryPte, 03979 0, 03980 ProcessPage); 03981 #endif 03982 03983 // 03984 // Adjust the counts for the process page. 03985 // 03986 03987 Pfn1 = MI_PFN_ELEMENT (ProcessPage); 03988 Pfn1->u2.ShareCount -= 1; 03989 03990 ASSERT ((LONG)Pfn1->u2.ShareCount >= 1); 03991 03992 // 03993 // Adjust the counts properly for the page directory page. 03994 // 03995 03996 Pfn1 = MI_PFN_ELEMENT (PdePage); 03997 Pfn1->u2.ShareCount += 1; 03998 #if !defined (_WIN64) 03999 Pfn1->u1.Event = (PVOID)OutProcess; 04000 #endif 04001 Pfn1->PteFrame = PdePage; 04002 Pfn1->PteAddress = MiGetPteAddress (PDE_BASE); 04003 04004 #if defined (_WIN64) 04005 04006 // 04007 // Only the page directory parent page has really been read in above. 04008 // Get the page directory page now also. 04009 // 04010 04011 PageDirectoryParentMap = MiMapPageInHyperSpace (PdePage, &OldIrql2); 04012 04013 TempPte = PageDirectoryParentMap[MiGetPpeOffset(MmWorkingSetList)]; 04014 04015 MiUnmapPageInHyperSpace (OldIrql2); 04016 04017 PageDirectoryPage = MiMakeOutswappedPageResident ( 04018 MiGetPpeAddress (MmWorkingSetList), 04019 &TempPte, 04020 0, 04021 PdePage); 04022 04023 ASSERT (PageDirectoryPage == TempPte.u.Hard.PageFrameNumber); 04024 ASSERT (Pfn1->u2.ShareCount >= 3); 04025 04026 PageDirectoryParentMap = MiMapPageInHyperSpace (PdePage, &OldIrql2); 04027 04028 PageDirectoryParentMap[MiGetPpeOffset(PDE_TBASE)].u.Flush = 04029 OutProcess->PageDirectoryPte; 04030 PageDirectoryParentMap[MiGetPpeOffset(MmWorkingSetList)] = TempPte; 04031 04032 MiUnmapPageInHyperSpace (OldIrql2); 04033 04034 PdePage = PageDirectoryPage; 04035 #endif 04036 04037 #if defined (_X86PAE_) 04038 04039 OutProcess->PaePageDirectoryPage = PdePage; 04040 04041 // 04042 // Locate the additional page directory pages and make them resident. 
04043 // 04044 04045 PaeVa = (PPAE_ENTRY)OutProcess->PaeTop; 04046 for (i = 0; i < PD_PER_SYSTEM - 1; i += 1) { 04047 PageDirectoryMap = MiMapPageInHyperSpace (PdePage, &OldIrql2); 04048 04049 TempPte = PageDirectoryMap[i]; 04050 04051 MiUnmapPageInHyperSpace (OldIrql2); 04052 04053 PdePage2 = MiMakeOutswappedPageResident ( 04054 MiGetPteAddress (PDE_BASE + (i << PAGE_SHIFT)), 04055 &TempPte, 04056 0, 04057 PdePage); 04058 04059 ASSERT (Pfn1->u2.ShareCount >= 1); 04060 04061 PageDirectoryMap = MiMapPageInHyperSpace (PdePage, &OldIrql2); 04062 PageDirectoryMap[i] = TempPte; 04063 MiUnmapPageInHyperSpace (OldIrql2); 04064 PaeVa->PteEntry[i].u.Long = (TempPte.u.Long & ~MM_PAE_PDPTE_MASK); 04065 } 04066 04067 TempPte.u.Flush = OutProcess->PageDirectoryPte; 04068 TempPte.u.Long &= ~MM_PAE_PDPTE_MASK; 04069 PaeVa->PteEntry[i].u.Flush = TempPte.u.Flush; 04070 04071 // 04072 // Locate the second page table page for hyperspace & make it resident. 04073 // 04074 04075 PageDirectoryMap = MiMapPageInHyperSpace (PdePage, &OldIrql2); 04076 04077 TempPte = PageDirectoryMap[MiGetPdeOffset(HYPER_SPACE2)]; 04078 04079 MiUnmapPageInHyperSpace (OldIrql2); 04080 04081 HyperSpacePageTable = MiMakeOutswappedPageResident ( 04082 MiGetPdeAddress (HYPER_SPACE2), 04083 &TempPte, 04084 0, 04085 PdePage); 04086 04087 ASSERT (Pfn1->u2.ShareCount >= 1); 04088 04089 PageDirectoryMap = MiMapPageInHyperSpace (PdePage, &OldIrql2); 04090 PageDirectoryMap[MiGetPdeOffset(HYPER_SPACE2)] = TempPte; 04091 MiUnmapPageInHyperSpace (OldIrql2); 04092 TempPte2 = TempPte; 04093 #endif 04094 04095 // 04096 // Locate the page table page for hyperspace and make it resident. 04097 // 04098 04099 PageDirectoryMap = MiMapPageInHyperSpace (PdePage, &OldIrql2); 04100 04101 TempPte = PageDirectoryMap[MiGetPdeOffset(MmWorkingSetList)]; 04102 04103 MiUnmapPageInHyperSpace (OldIrql2); 04104 04105 HyperSpacePageTable = MiMakeOutswappedPageResident ( 04106 MiGetPdeAddress (HYPER_SPACE), 04107 &TempPte, 04108 0, 04109 PdePage); 04110 04111 ASSERT (Pfn1->u2.ShareCount >= 3); 04112 04113 PageDirectoryMap = MiMapPageInHyperSpace (PdePage, &OldIrql2); 04114 04115 #if !defined (_WIN64) 04116 PageDirectoryMap[MiGetPdeOffset(PDE_BASE)].u.Flush = 04117 OutProcess->PageDirectoryPte; 04118 #endif 04119 04120 PageDirectoryMap[MiGetPdeOffset(MmWorkingSetList)] = TempPte; 04121 04122 MiUnmapPageInHyperSpace (OldIrql2); 04123 04124 // 04125 // Map in the hyper space page table page and retrieve the 04126 // PTE that maps the working set list. 04127 // 04128 04129 HyperSpacePageTableMap = MiMapPageInHyperSpace (HyperSpacePageTable, &OldIrql2); 04130 TempPte = HyperSpacePageTableMap[MiGetPteOffset(MmWorkingSetList)]; 04131 MiUnmapPageInHyperSpace (OldIrql2); 04132 Pfn1 = MI_PFN_ELEMENT (HyperSpacePageTable); 04133 04134 Pfn1->u1.WsIndex = 1; 04135 04136 WorkingSetPage = MiMakeOutswappedPageResident ( 04137 MiGetPteAddress (MmWorkingSetList), 04138 &TempPte, 04139 0, 04140 HyperSpacePageTable); 04141 04142 HyperSpacePageTableMap = MiMapPageInHyperSpace (HyperSpacePageTable, &OldIrql2); 04143 HyperSpacePageTableMap[MiGetPteOffset(MmWorkingSetList)] = TempPte; 04144 #if defined (_X86PAE_) 04145 HyperSpacePageTableMap[0] = TempPte2; 04146 #endif 04147 MiUnmapPageInHyperSpace (OldIrql2); 04148 04149 Pfn1 = MI_PFN_ELEMENT (WorkingSetPage); 04150 04151 Pfn1->u1.WsIndex = 2; 04152 04153 UNLOCK_PFN (OldIrql); 04154 04155 LOCK_EXPANSION (OldIrql); 04156 04157 // 04158 // Allow working set trimming on this process. 
04159 // 04160 04161 OutProcess->Vm.AllowWorkingSetAdjustment = TRUE; 04162 if (OutProcess->Vm.WorkingSetExpansionLinks.Flink == MM_WS_SWAPPED_OUT) { 04163 InsertTailList (&MmWorkingSetExpansionHead.ListHead, 04164 &OutProcess->Vm.WorkingSetExpansionLinks); 04165 } 04166 UNLOCK_EXPANSION (OldIrql); 04167 04168 // 04169 // Set up process structures. 04170 // 04171 04172 OutProcess->WorkingSetPage = WorkingSetPage; 04173 04174 #if !defined (_X86PAE_) 04175 OutProcess->Vm.WorkingSetSize = 3; 04176 04177 INITIALIZE_DIRECTORY_TABLE_BASE (&Process->DirectoryTableBase[0], 04178 PdePage); 04179 INITIALIZE_DIRECTORY_TABLE_BASE (&Process->DirectoryTableBase[1], 04180 HyperSpacePageTable); 04181 #else 04182 // 04183 // The DirectoryTableBase[0] never changes for PAE processes. 04184 // 04185 04186 OutProcess->Vm.WorkingSetSize = 7; 04187 Process->DirectoryTableBase[1] = HyperSpacePageTable; 04188 #endif 04189 04190 OutProcess->ProcessOutswapped = FALSE; 04191 } 04192 04193 if (MiHydra == TRUE && OutProcess->Vm.u.Flags.ProcessInSession == 1) { 04194 MiSessionInSwapProcess (OutProcess); 04195 } 04196 04197 OutProcess->ProcessOutswapEnabled = FALSE; 04198 return; 04199 }

NTKERNELAPI BOOLEAN MmIsAddressValid (IN PVOID VirtualAddress)
 

Definition at line 3883 of file pagfault.c.

References FALSE, MI_IS_PHYSICAL_ADDRESS, MiGetPdeAddress, MiGetPpeAddress, MiGetPteAddress, MiGetVirtualAddressMappedByPde, TRUE, and _MMPTE::u.

Referenced by IoFreeDumpRange(), IopCreateSummaryDump(), IopDriverCorrectnessProcessParams(), IopIsAddressRangeValid(), IopIsMemoryRangeReadable(), IoSetDumpRange(), IovpSeedStack(), MiDecrementShareCount(), MiDetachSession(), MiGatherMappedPages(), MiInitMachineDependent(), MiInsertPageInList(), MiInsertStandbyListAtFront(), MiLoadSystemImage(), MiMakeSystemAddressValid(), MiMakeSystemAddressValidPfn(), MiMakeSystemAddressValidPfnSystemWs(), MiMakeSystemAddressValidPfnWs(), MiRemoveImageSessionWide(), MiRestoreTransitionPte(), MiSegmentDelete(), MiSessionAddProcess(), MiSessionCommitImagePages(), MiSessionCommitPageTables(), MiSessionCreateInternal(), MiSessionRemoveProcess(), MiSessionWideReserveImageAddress(), MiSetPagingOfDriver(), MiShareSessionImage(), MiWaitForInPageComplete(), MmDbgReadCheck(), MmDbgReleaseAddress(), MmDbgTranslatePhysicalAddress(), MmDbgTranslatePhysicalAddress64(), MmDbgWriteCheck(), MmMapViewInSessionSpace(), MmSessionCreate(), MmSessionDelete(), MmSessionSetUnloadAddress(), MmSetKernelDumpRange(), MmUnmapViewInSessionSpace(), MmWorkingSetManager(), NtLockVirtualMemory(), NtUnlockVirtualMemory(), RtlWalkFrameChain(), and VerifierFreeTrackedPool().

03889 : 03890 03891 For a given virtual address this function returns TRUE if no page fault 03892 will occur for a read operation on the address, FALSE otherwise. 03893 03894 Note that after this routine was called, if appropriate locks are not 03895 held, a non-faulting address could fault. 03896 03897 Arguments: 03898 03899 VirtualAddress - Supplies the virtual address to check. 03900 03901 Return Value: 03902 03903 TRUE if a no page fault would be generated reading the virtual address, 03904 FALSE otherwise. 03905 03906 Environment: 03907 03908 Kernel mode. 03909 03910 --*/ 03911 03912 { 03913 PMMPTE PointerPte; 03914 03915 #if defined(_ALPHA_) || defined(_IA64_) 03916 03917 // 03918 // If this is within the physical addressing range, just return TRUE. 03919 // 03920 03921 if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) { 03922 return TRUE; 03923 } 03924 03925 #endif // _ALPHA_ || _IA64_ 03926 03927 #if defined (_WIN64) 03928 PointerPte = MiGetPpeAddress (VirtualAddress); 03929 if (PointerPte->u.Hard.Valid == 0) { 03930 return FALSE; 03931 } 03932 #endif 03933 03934 PointerPte = MiGetPdeAddress (VirtualAddress); 03935 if (PointerPte->u.Hard.Valid == 0) { 03936 return FALSE; 03937 } 03938 #ifdef _X86_ 03939 if (PointerPte->u.Hard.LargePage == 1) { 03940 return TRUE; 03941 } 03942 #endif //_X86_ 03943 03944 PointerPte = MiGetPteAddress (VirtualAddress); 03945 if (PointerPte->u.Hard.Valid == 0) { 03946 return FALSE; 03947 } 03948 03949 #ifdef _X86_ 03950 // 03951 // Make sure we're not treating a page directory as a page table here for 03952 // the case where the page directory is mapping a large page. This is 03953 // because the large page bit is valid in PDE formats, but reserved in 03954 // PTE formats and will cause a trap. A virtual address like c0200000 03955 // triggers this case. It's not enough to just check the large page bit 03956 // in the PTE below because of course that bit's been reused by other 03957 // steppings of the processor so we have to look at the address too. 03958 // 03959 if (PointerPte->u.Hard.LargePage == 1) { 03960 PVOID Va; 03961 03962 Va = MiGetVirtualAddressMappedByPde (PointerPte); 03963 if (MI_IS_PHYSICAL_ADDRESS(Va)) { 03964 return FALSE; 03965 } 03966 } 03967 #endif 03968 03969 return TRUE; 03970 }
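As the routine's own comment warns, the answer is only a snapshot: without the appropriate locks, a non-faulting address can begin faulting immediately after the check. A hedged usage sketch for a debug-style helper follows; the wrapper and its name are assumptions of this sketch, not part of the kernel.

/*
 * Hedged sketch: a debug-only peek that consults MmIsAddressValid before
 * touching an arbitrary system address. Treat the result as advisory.
 */
BOOLEAN
MyDbgTryReadUlongPtr (
    IN PVOID VirtualAddress,
    OUT PULONG_PTR Value
    )
{
    if (!MmIsAddressValid (VirtualAddress)) {
        return FALSE;                   /* would fault, at least at this instant */
    }

    *Value = *(PULONG_PTR)VirtualAddress;
    return TRUE;
}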

NTKERNELAPI LOGICAL MmIsDriverVerifying (IN struct _DRIVER_OBJECT *DriverObject)
 

LOGICAL MmIsHydraAddress (IN PVOID VirtualAddress)
 

Definition at line 5146 of file allocpag.c.

References MI_IS_SESSION_ADDRESS.

Referenced by KeBugCheckEx().

05152 : 05153 05154 This function returns TRUE if a Hydra address is specified. 05155 FALSE is returned if not. 05156 05157 Arguments: 05158 05159 VirtualAddress - Supplies the address in question. 05160 05161 Return Value: 05162 05163 See above. 05164 05165 Environment: 05166 05167 Kernel mode. Note this routine is present and nonpaged for both Hydra 05168 and non-Hydra systems. 05169 05170 --*/ 05171 05172 { 05173 return MI_IS_SESSION_ADDRESS (VirtualAddress); 05174 }

NTKERNELAPI BOOLEAN MmIsNonPagedSystemAddressValid (IN PVOID VirtualAddress)
 

Definition at line 1460 of file mmsup.c.

References FALSE, MmPagedPoolEnd, MmPagedPoolStart, and TRUE.

Referenced by MmBuildMdlForNonPagedPool().

01466 : 01467 01468 For a given virtual address this function returns TRUE if the address 01469 is within the nonpagable portion of the system's address space, 01470 FALSE otherwise. 01471 01472 Arguments: 01473 01474 VirtualAddress - Supplies the virtual address to check. 01475 01476 Return Value: 01477 01478 TRUE if the address is within the nonpagable portion of the system 01479 address space, FALSE otherwise. 01480 01481 Environment: 01482 01483 Kernel mode. 01484 01485 --*/ 01486 01487 { 01488 // 01489 // Return TRUE if address is within the nonpagable portion 01490 // of the system. Check limits for paged pool and if not within 01491 // those limits, return TRUE. 01492 // 01493 01494 if ((VirtualAddress >= MmPagedPoolStart) && 01495 (VirtualAddress <= MmPagedPoolEnd)) { 01496 return FALSE; 01497 } 01498 01499 return TRUE; 01500 }
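Since the test is simply "not inside paged pool", its main use is as a sanity check. A hedged sketch of the kind of checked-build assertion a caller of MmBuildMdlForNonPagedPool might make; the buffer and MDL names here are assumptions, and the MDL is assumed to already describe NonPagedBuffer.

/*
 * Hedged sketch: assert the buffer really is nonpaged before describing
 * it with MmBuildMdlForNonPagedPool, which is only safe for such buffers.
 */
ASSERT (MmIsNonPagedSystemAddressValid (NonPagedBuffer));

MmBuildMdlForNonPagedPool (Mdl);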

LOGICAL MmIsSpecialPoolAddressFree (IN PVOID VirtualAddress)
 

Definition at line 5177 of file allocpag.c.

References ASSERT, FALSE, MI_SPECIAL_POOL_PTE_NONPAGABLE, MI_SPECIAL_POOL_PTE_PAGABLE, MiGetPteAddress, MmSpecialPoolEnd, MmSpecialPoolStart, TRUE, and _MMPTE::u.

Referenced by KeBugCheckEx().

05183 : 05184 05185 This function returns TRUE if a special pool address has been freed. 05186 FALSE is returned if it is inuse (ie: the caller overran). 05187 05188 Arguments: 05189 05190 VirtualAddress - Supplies the special pool address in question. 05191 05192 Return Value: 05193 05194 See above. 05195 05196 Environment: 05197 05198 Kernel mode. 05199 05200 --*/ 05201 05202 { 05203 PMMPTE PointerPte; 05204 05205 // 05206 // Caller must check that the address in in special pool. 05207 // 05208 05209 ASSERT (VirtualAddress >= MmSpecialPoolStart && VirtualAddress < MmSpecialPoolEnd); 05210 05211 PointerPte = MiGetPteAddress(VirtualAddress); 05212 05213 // 05214 // Take advantage of the fact that adjacent PTEs have the paged/nonpaged 05215 // bits set when in use and these bits are cleared on free. Note also 05216 // that freed pages get their PTEs chained together through PageFileHigh. 05217 // 05218 05219 if ((PointerPte->u.Soft.PageFileHigh == MI_SPECIAL_POOL_PTE_PAGABLE) || 05220 (PointerPte->u.Soft.PageFileHigh == MI_SPECIAL_POOL_PTE_NONPAGABLE)) { 05221 return FALSE; 05222 } 05223 05224 return TRUE; 05225 }
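At bugcheck time this lets a fault in special pool be classified as a reference to an already-freed block versus an overrun of a live one. A hedged sketch of such a classifier; the helper and its strings are illustrative only.

/*
 * Hedged sketch: classify a faulting special pool address the way a
 * bugcheck-time consumer could, using MmIsSpecialPoolAddressFree.
 */
PCSTR
MyClassifySpecialPoolFault (
    IN PVOID FaultingAddress
    )
{
    if ((FaultingAddress < MmSpecialPoolStart) ||
        (FaultingAddress >= MmSpecialPoolEnd)) {
        return "not a special pool address";
    }

    if (MmIsSpecialPoolAddressFree (FaultingAddress)) {
        /* Pagable/nonpagable PTE bits are clear: the block was freed. */
        return "referenced special pool after freeing it";
    }

    /* The block is still live: the access ran past its allocation. */
    return "overran a live special pool allocation";
}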

NTKERNELAPI BOOLEAN MmIsSystemAddressAccessable (IN PVOID VirtualAddress)
 

Referenced by KdpPageInData().

LOGICAL MmIsSystemAddressLocked (IN PVOID VirtualAddress)
 

Referenced by IovpCompleteRequest3(), and IovpInternalDeferredCompletion().

NTKERNELAPI BOOLEAN MmIsThisAnNtAsSystem (VOID)
 

Definition at line 3087 of file mminit.c.

References MmProductType.

Referenced by CcInitializeCacheManager(), ExpWorkerInitialization(), ExpWorkerThread(), FsRtlInitializeTunnels(), IoInitSystem(), MiBuildPagedPool(), ObInitSystem(), and PsChangeQuantumTable().

03090 { 03091 return (BOOLEAN)MmProductType; 03092 }
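Callers such as PsChangeQuantumTable and ObInitSystem use the answer to scale policy for server versus workstation installations. A hedged sketch of that pattern; the limit values and variable name are illustrative, not taken from any caller.

/*
 * Hedged sketch: size an internal structure by product type, in the
 * spirit of the callers listed above.
 */
ULONG InitialTableSize;

if (MmIsThisAnNtAsSystem ()) {
    InitialTableSize = 256;     /* server: favor capacity and throughput */
} else {
    InitialTableSize = 64;      /* workstation: favor a smaller footprint */
}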

NTSTATUS MmLoadAndLockSystemImage (IN PUNICODE_STRING ImageFileName,
                                   IN PUNICODE_STRING NamePrefix OPTIONAL,
                                   IN PUNICODE_STRING LoadedBaseName OPTIONAL,
                                   OUT PVOID *Section,
                                   OUT PVOID *ImageBaseAddress)
 

Definition at line 379 of file sysload.c.

References FALSE, MiLoadSystemImage(), PAGED_CODE, and TRUE.

Referenced by IopLoadDumpDriver().

00389 : 00390 00391 This routine reads the image pages from the specified section into 00392 the system and returns the address of the DLL's header. Very similar 00393 to MmLoadSystemImage, except that it also locks down the driver pages. 00394 This is needed for things like the dump driver because we cannot page it 00395 back in after the system has crashed (when we want to write the system 00396 dump to the pagefile). 00397 00398 At successful completion, the Section is referenced so it remains 00399 until the system image is unloaded. 00400 00401 Arguments: 00402 00403 ImageName - Supplies the Unicode name of the image to load. 00404 00405 ImageFileName - Supplies the full path name (including the image name) 00406 of the image to load. 00407 00408 NamePrefix - Supplies the prefix to use with the image name on load 00409 operations. This is used to load the same image multiple 00410 times, by using different prefixes 00411 00412 LoadedBaseName - If present, supplies the base name to use on the 00413 loaded image instead of the base name found on the 00414 image name. 00415 00416 ImageHandle - Returns an opaque pointer to the referenced section object 00417 of the image that was loaded. 00418 00419 ImageBaseAddress - Returns the image base within the system. 00420 00421 Return Value: 00422 00423 Status of the load operation. 00424 00425 --*/ 00426 00427 { 00428 PAGED_CODE(); 00429 00430 return MiLoadSystemImage ( 00431 ImageFileName, 00432 NamePrefix, 00433 LoadedBaseName, 00434 FALSE, 00435 ImageHandle, 00436 ImageBaseAddress, 00437 TRUE 00438 ); 00439 }
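A hedged sketch of a call in the spirit of IopLoadDumpDriver, which uses this routine so the dump path stays resident after a crash. The image path, helper name and handling of the returned section are assumptions of this sketch rather than the I/O manager's actual code.

/*
 * Hedged sketch: load an image and pin its pages so they never need to be
 * paged back in. A real caller keeps Section referenced until unload.
 */
NTSTATUS
MyLoadDumpStack (VOID)
{
    UNICODE_STRING ImagePath;
    PVOID Section;
    PVOID ImageBase;

    /* Illustrative path; the real caller builds this from configuration. */
    RtlInitUnicodeString (&ImagePath,
                          L"\\SystemRoot\\System32\\Drivers\\diskdump.sys");

    return MmLoadAndLockSystemImage (&ImagePath,
                                     NULL,      /* no name prefix */
                                     NULL,      /* keep the on-disk base name */
                                     &Section,
                                     &ImageBase);
}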

NTSTATUS MmLoadSystemImage (IN PUNICODE_STRING ImageFileName,
                            IN PUNICODE_STRING NamePrefix OPTIONAL,
                            IN PUNICODE_STRING LoadedBaseName OPTIONAL,
                            IN BOOLEAN LoadInSessionSpace,
                            OUT PVOID *Section,
                            OUT PVOID *ImageBaseAddress)
 

Definition at line 315 of file sysload.c.

References FALSE, MiLoadSystemImage(), and PAGED_CODE.

Referenced by IopLoadDriver(), MiResolveImageReferences(), and NtSetSystemInformation().

00326 : 00327 00328 This routine reads the image pages from the specified section into 00329 the system and returns the address of the DLL's header. 00330 00331 At successful completion, the Section is referenced so it remains 00332 until the system image is unloaded. 00333 00334 Arguments: 00335 00336 ImageName - Supplies the Unicode name of the image to load. 00337 00338 ImageFileName - Supplies the full path name (including the image name) 00339 of the image to load. 00340 00341 NamePrefix - Supplies the prefix to use with the image name on load 00342 operations. This is used to load the same image multiple 00343 times, by using different prefixes 00344 00345 LoadedBaseName - If present, supplies the base name to use on the 00346 loaded image instead of the base name found on the 00347 image name. 00348 00349 LoadInSessionSpace - Supplies whether to load this image in session space - 00350 ie: each session will have a different copy of this driver 00351 with pages shared as much as possible via copy on write. 00352 00353 ImageHandle - Returns an opaque pointer to the referenced section object 00354 of the image that was loaded. 00355 00356 ImageBaseAddress - Returns the image base within the system. 00357 00358 Return Value: 00359 00360 Status of the load operation. 00361 00362 --*/ 00363 00364 { 00365 PAGED_CODE(); 00366 00367 return MiLoadSystemImage ( 00368 ImageFileName, 00369 NamePrefix, 00370 LoadedBaseName, 00371 LoadInSessionSpace, 00372 ImageHandle, 00373 ImageBaseAddress, 00374 FALSE 00375 ); 00376 }

PUNICODE_STRING MmLocateUnloadedDriver (IN PVOID VirtualAddress)
 

Definition at line 2836 of file sysload.c.

References _UNLOADED_DRIVERS::EndAddress, Index, MI_UNLOADED_DRIVERS, MiLastUnloadedDriver, MiUnloadedDrivers, _UNLOADED_DRIVERS::Name, NULL, and _UNLOADED_DRIVERS::StartAddress.

Referenced by KeBugCheckEx(), and KiDumpParameterImages().

02842 : 02843 02844 This routine attempts to find the specified virtual address in the 02845 unloaded driver list. 02846 02847 Arguments: 02848 02849 VirtualAddress - Supplies a virtual address that might be within a driver 02850 that has already unloaded. 02851 02852 Return Value: 02853 02854 A pointer to a Unicode string containing the unloaded driver's name. 02855 02856 Environment: 02857 02858 Kernel mode, bugcheck time. 02859 02860 --*/ 02861 02862 { 02863 PUNLOADED_DRIVERS Entry; 02864 ULONG i; 02865 ULONG Index; 02866 02867 // 02868 // No serialization is needed because we've crashed. 02869 // 02870 02871 if (MiUnloadedDrivers == NULL) { 02872 return NULL; 02873 } 02874 02875 Index = MiLastUnloadedDriver - 1; 02876 02877 for (i = 0; i < MI_UNLOADED_DRIVERS; i += 1) { 02878 if (Index >= MI_UNLOADED_DRIVERS) { 02879 Index = MI_UNLOADED_DRIVERS - 1; 02880 } 02881 Entry = &MiUnloadedDrivers[Index]; 02882 if (Entry->Name.Buffer != NULL) { 02883 if ((VirtualAddress >= Entry->StartAddress) && 02884 (VirtualAddress < Entry->EndAddress)) { 02885 return &Entry->Name; 02886 } 02887 } 02888 Index -= 1; 02889 } 02890 02891 return NULL; 02892 }
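A hedged sketch of a bugcheck-time consumer in the spirit of the callers above; the helper name, parameter and output format are illustrative assumptions.

/*
 * Hedged sketch: map a faulting address back to a driver that has since
 * unloaded. No serialization is needed because this runs at bugcheck time.
 */
VOID
MyReportUnloadedDriver (
    IN PVOID FaultingAddress
    )
{
    PUNICODE_STRING UnloadedName;

    UnloadedName = MmLocateUnloadedDriver (FaultingAddress);

    if (UnloadedName != NULL) {
        DbgPrint ("%p is inside unloaded driver %wZ\n",
                  FaultingAddress,
                  UnloadedName);
    }
}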

NTKERNELAPI PVOID MmLockPagableDataSection (IN PVOID AddressWithinSection)
 

Definition at line 6864 of file iosup.c.

References DbgPrint, ExAcquireResourceShared, ExReleaseResource, KeBugCheckEx(), KeEnterCriticalRegion, KeLeaveCriticalRegion, MI_IS_PHYSICAL_ADDRESS, MiLookupDataTableEntry(), MM_DBG_LOCK_CODE, MmLockPagableSectionByHandle(), NULL, PAGED_CODE, PsLoadedModuleResource, RtlImageNtHeader(), SECTION_BASE_ADDRESS, and TRUE.

Referenced by SmbTraceStart().

06870 : 06871 06872 This functions locks the entire section that contains the specified 06873 section in memory. This allows pagable code to be brought into 06874 memory and to be used as if the code was not really pagable. This 06875 should not be done with a high degree of frequency. 06876 06877 Arguments: 06878 06879 AddressWithinSection - Supplies the address of a function 06880 contained within a section that should be brought in and locked 06881 in memory. 06882 06883 Return Value: 06884 06885 This function returns a value to be used in a subsequent call to 06886 MmUnlockPagableImageSection. 06887 06888 --*/ 06889 06890 { 06891 PLDR_DATA_TABLE_ENTRY DataTableEntry; 06892 ULONG i; 06893 PIMAGE_NT_HEADERS NtHeaders; 06894 PIMAGE_SECTION_HEADER NtSection; 06895 PIMAGE_SECTION_HEADER FoundSection; 06896 ULONG_PTR Rva; 06897 06898 PAGED_CODE(); 06899 06900 if (MI_IS_PHYSICAL_ADDRESS(AddressWithinSection)) { 06901 06902 // 06903 // Physical address, just return that as the handle. 06904 // 06905 06906 return AddressWithinSection; 06907 } 06908 06909 // 06910 // Search the loaded module list for the data table entry that describes 06911 // the DLL that was just unloaded. It is possible that an entry is not in 06912 // the list if a failure occurred at a point in loading the DLL just before 06913 // the data table entry was generated. 06914 // 06915 06916 FoundSection = NULL; 06917 06918 KeEnterCriticalRegion(); 06919 ExAcquireResourceShared (&PsLoadedModuleResource, TRUE); 06920 06921 DataTableEntry = MiLookupDataTableEntry (AddressWithinSection, TRUE); 06922 06923 Rva = (ULONG_PTR)((PUCHAR)AddressWithinSection - (ULONG_PTR)DataTableEntry->DllBase); 06924 06925 NtHeaders = (PIMAGE_NT_HEADERS)RtlImageNtHeader(DataTableEntry->DllBase); 06926 06927 NtSection = (PIMAGE_SECTION_HEADER)((ULONG_PTR)NtHeaders + 06928 sizeof(ULONG) + 06929 sizeof(IMAGE_FILE_HEADER) + 06930 NtHeaders->FileHeader.SizeOfOptionalHeader 06931 ); 06932 06933 for (i = 0; i < NtHeaders->FileHeader.NumberOfSections; i += 1) { 06934 06935 if ( Rva >= NtSection->VirtualAddress && 06936 Rva < NtSection->VirtualAddress + NtSection->SizeOfRawData ) { 06937 FoundSection = NtSection; 06938 06939 if (SECTION_BASE_ADDRESS(NtSection) != ((PUCHAR)DataTableEntry->DllBase + 06940 NtSection->VirtualAddress)) { 06941 06942 // 06943 // Overwrite the PointerToRelocations field (and on Win64, the 06944 // PointerToLinenumbers field also) so that it contains 06945 // the Va of this section and NumberOfLinenumbers so it contains 06946 // the Lock Count for the section. 06947 // 06948 06949 SECTION_BASE_ADDRESS(NtSection) = ((PUCHAR)DataTableEntry->DllBase + 06950 NtSection->VirtualAddress); 06951 NtSection->NumberOfLinenumbers = 0; 06952 } 06953 06954 // 06955 // Now lock in the code 06956 // 06957 06958 #if DBG 06959 if (MmDebug & MM_DBG_LOCK_CODE) { 06960 DbgPrint("MM Lock %wZ %8s %p -> %p : %p %3ld.\n", 06961 &DataTableEntry->BaseDllName, 06962 NtSection->Name, 06963 AddressWithinSection, 06964 NtSection, 06965 SECTION_BASE_ADDRESS(NtSection), 06966 NtSection->NumberOfLinenumbers); 06967 } 06968 #endif //DBG 06969 06970 MmLockPagableSectionByHandle ((PVOID)NtSection); 06971 06972 break; 06973 } 06974 NtSection += 1; 06975 } 06976 06977 ExReleaseResource (&PsLoadedModuleResource); 06978 KeLeaveCriticalRegion(); 06979 if (!FoundSection) { 06980 KeBugCheckEx (MEMORY_MANAGEMENT, 06981 0x1234, 06982 (ULONG_PTR)AddressWithinSection, 06983 0, 06984 0); 06985 } 06986 return (PVOID)FoundSection; 06987 }
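A hedged sketch of the usual lock/use/unlock pattern around a pageable data section. The section pragma follows the DDK's pageable-data convention and, like the names here, is an assumption of this sketch rather than something taken from the listing; MmUnlockPagableImageSection is the matching release call.

/*
 * Hedged sketch: place data in a pageable section at build time, lock the
 * containing section on demand, and unlock it when done.
 */
#ifdef ALLOC_DATA_PRAGMA
#pragma data_seg("PAGEDATA")        /* assumed DDK convention for paged data */
#endif
ULONG MyPagedCounter = 0;           /* lives in a pageable data section */
#ifdef ALLOC_DATA_PRAGMA
#pragma data_seg()
#endif

VOID
MyTouchPagedData (VOID)
{
    PVOID LockHandle;

    LockHandle = MmLockPagableDataSection (&MyPagedCounter);

    MyPagedCounter += 1;            /* the whole section is now resident */

    MmUnlockPagableImageSection (LockHandle);
}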

NTKERNELAPI VOID MmLockPagableSectionByHandle ( IN PVOID ImageSectionHandle )
 

Definition at line 6284 of file iosup.c.

References ASSERT, FALSE, KeEnterCriticalRegion, KeLeaveCriticalRegion, KePulseEvent(), KernelMode, KeWaitForSingleObject(), LOCK_PFN2, LOCK_SYSTEM_WS, MI_IS_PHYSICAL_ADDRESS, MI_IS_SYSTEM_CACHE_ADDRESS, MiGetPteAddress, MiLockCode(), MiMakeSystemAddressValidPfnSystemWs(), MM_LOCK_BY_REFCOUNT, MmCollidedLockEvent, MmCollidedLockWait, MmSystemRangeStart, NULL, SECTION_BASE_ADDRESS, TRUE, UNLOCK_PFN2, UNLOCK_PFN_AND_THEN_WAIT, UNLOCK_SYSTEM_WS, UNLOCK_SYSTEM_WS_NO_IRQL, and WrVirtualMemory.

Referenced by ExpGetLockInformation(), ExpGetLookasideInformation(), ExpGetPoolInformation(), ExpGetProcessInformation(), KeSetPhysicalCacheTypeRange(), KiAmdK6MtrrSetMemoryType(), MiEmptyAllWorkingSets(), MiFindContiguousMemory(), MiLoadSystemImage(), MiMapViewInSystemSpace(), MiSetPagingOfDriver(), MiShareSessionImage(), MiUnmapLockedPagesInUserSpace(), MiUnmapViewInSystemSpace(), MmAdjustWorkingSetSize(), MmAllocateNonCachedMemory(), MmAllocatePagesForMdl(), MmFreeDriverInitialization(), MmFreeNonCachedMemory(), MmFreePagesFromMdl(), MmLockPagableDataSection(), MmLockPagedPool(), MmMapViewOfSection(), MmResetDriverPaging(), MmShutdownSystem(), MmUnloadSystemImage(), MmUnlockPagedPool(), NtQueryVirtualMemory(), PspQueryPooledQuotaLimits(), PspQueryQuotaLimits(), PspQueryWorkingSetWatch(), and PspSetQuotaLimits().

06291 : 06292 06293 This routine checks to see if the specified pages are resident in 06294 the process's working set and if so the reference count for the 06295 page is incremented. The allows the virtual address to be accessed 06296 without getting a hard page fault (have to go to the disk... except 06297 for extremely rare case when the page table page is removed from the 06298 working set and migrates to the disk. 06299 06300 If the virtual address is that of the system wide global "cache" the 06301 virtual address of the "locked" pages is always guaranteed to 06302 be valid. 06303 06304 NOTE: This routine is not to be used for general locking of user 06305 addresses - use MmProbeAndLockPages. This routine is intended for 06306 well behaved system code like the file system caches which allocates 06307 virtual addresses for mapping files AND guarantees that the mapping 06308 will not be modified (deleted or changed) while the pages are locked. 06309 06310 Arguments: 06311 06312 ImageSectionHandle - Supplies the value returned by a previous call 06313 to MmLockPagableDataSection. This is a pointer to the Section 06314 header for the image. 06315 06316 Return Value: 06317 06318 None. 06319 06320 Environment: 06321 06322 Kernel mode, IRQL of DISPATCH_LEVEL or below. 06323 06324 --*/ 06325 06326 { 06327 PIMAGE_SECTION_HEADER NtSection; 06328 PVOID BaseAddress; 06329 ULONG SizeToLock; 06330 PMMPTE PointerPte; 06331 PMMPTE LastPte; 06332 KIRQL OldIrql; 06333 KIRQL OldIrqlWs; 06334 ULONG Collision; 06335 06336 if (MI_IS_PHYSICAL_ADDRESS(ImageSectionHandle)) { 06337 06338 // 06339 // No need to lock physical addresses. 06340 // 06341 06342 return; 06343 } 06344 06345 NtSection = (PIMAGE_SECTION_HEADER)ImageSectionHandle; 06346 06347 BaseAddress = SECTION_BASE_ADDRESS(NtSection); 06348 06349 ASSERT (!MI_IS_SYSTEM_CACHE_ADDRESS(BaseAddress)); 06350 06351 ASSERT (BaseAddress >= MmSystemRangeStart); 06352 06353 SizeToLock = NtSection->SizeOfRawData; 06354 PointerPte = MiGetPteAddress(BaseAddress); 06355 LastPte = MiGetPteAddress((PCHAR)BaseAddress + SizeToLock - 1); 06356 06357 ASSERT (SizeToLock != 0); 06358 06359 // 06360 // The address must be within the system space. 06361 // 06362 06363 RetryLock: 06364 06365 LOCK_SYSTEM_WS (OldIrqlWs); 06366 LOCK_PFN2 (OldIrql); 06367 06368 MiMakeSystemAddressValidPfnSystemWs (&NtSection->NumberOfLinenumbers); 06369 06370 // 06371 // The NumberOfLinenumbers field is used to store the 06372 // lock count. 06373 // 06374 // Value of 0 means unlocked, 06375 // Value of 1 means lock in progress by another thread. 06376 // Value of 2 or more means locked. 06377 // 06378 // If the value is 1, this thread must block until the other thread's 06379 // lock operation is complete. 06380 // 06381 06382 NtSection->NumberOfLinenumbers += 1; 06383 06384 if (NtSection->NumberOfLinenumbers >= 3) { 06385 06386 // 06387 // Already locked, increment counter and return. 06388 // 06389 06390 UNLOCK_PFN2 (OldIrql); 06391 UNLOCK_SYSTEM_WS (OldIrqlWs); 06392 return; 06393 } 06394 06395 if (NtSection->NumberOfLinenumbers == 2) { 06396 06397 // 06398 // A lock is in progress. 06399 // Reset back to 1 and wait. 06400 // 06401 06402 NtSection->NumberOfLinenumbers = 1; 06403 MmCollidedLockWait = TRUE; 06404 06405 KeEnterCriticalRegion(); 06406 06407 // 06408 // The unlock IRQLs are deliberately reversed as the lock and mutex 06409 // are being released in reverse order. 
06410 // 06411 06412 UNLOCK_SYSTEM_WS_NO_IRQL (); 06413 UNLOCK_PFN_AND_THEN_WAIT (OldIrqlWs); 06414 06415 KeWaitForSingleObject(&MmCollidedLockEvent, 06416 WrVirtualMemory, 06417 KernelMode, 06418 FALSE, 06419 (PLARGE_INTEGER)NULL); 06420 KeLeaveCriticalRegion(); 06421 goto RetryLock; 06422 } 06423 06424 // 06425 // Value was 0 when the lock was obtained. It is now 1 indicating 06426 // a lock is in progress. 06427 // 06428 06429 MiLockCode (PointerPte, LastPte, MM_LOCK_BY_REFCOUNT); 06430 06431 // 06432 // Set lock count to 2 (it was 1 when this started) and check 06433 // to see if any other threads tried to lock while this was happening. 06434 // 06435 06436 MiMakeSystemAddressValidPfnSystemWs (&NtSection->NumberOfLinenumbers); 06437 NtSection->NumberOfLinenumbers += 1; 06438 06439 ASSERT (NtSection->NumberOfLinenumbers == 2); 06440 06441 Collision = MmCollidedLockWait; 06442 MmCollidedLockWait = FALSE; 06443 06444 UNLOCK_PFN2 (OldIrql); 06445 UNLOCK_SYSTEM_WS (OldIrqlWs); 06446 06447 if (Collision) { 06448 06449 // 06450 // Wake up all waiters. 06451 // 06452 06453 KePulseEvent (&MmCollidedLockEvent, 0, FALSE); 06454 } 06455 06456 return; 06457 }
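The by-handle form is the cheap re-lock path: a driver captures the section handle once and reuses it for later lock/unlock cycles. A sketch under that assumption; the PAGEDATA section pragma and the names are illustrative:

    #include <ntddk.h>

    #ifdef ALLOC_PRAGMA
    #pragma data_seg ("PAGEDATA")       // assumed pageable data section name
    #endif
    static ULONG MyPagedCounter = 0;
    #ifdef ALLOC_PRAGMA
    #pragma data_seg ()
    #endif

    static PVOID MyDataSectionHandle;

    NTSTATUS
    DriverEntry (
        IN PDRIVER_OBJECT DriverObject,
        IN PUNICODE_STRING RegistryPath
        )
    {
        UNREFERENCED_PARAMETER (DriverObject);
        UNREFERENCED_PARAMETER (RegistryPath);

        //
        // Capture the section handle once; it stays valid for the life of the
        // image, so later lock/unlock cycles can use the by-handle routine.
        //
        MyDataSectionHandle = MmLockPagableDataSection ((PVOID) &MyPagedCounter);
        MmUnlockPagableImageSection (MyDataSectionHandle);

        return STATUS_SUCCESS;
    }

    VOID
    MyTouchPagedData (
        VOID
        )
    {
        MmLockPagableSectionByHandle (MyDataSectionHandle);
        MyPagedCounter += 1;            // resident while the lock count is held
        MmUnlockPagableImageSection (MyDataSectionHandle);
    }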

NTKERNELAPI VOID MmLockPagedPool ( IN PVOID Address,
IN SIZE_T SizeInBytes )
 

Definition at line 7821 of file iosup.c.

References ExPageLockHandle, LOCK_PFN, LOCK_SYSTEM_WS, MiGetPteAddress, MiLockCode(), MM_LOCK_BY_REFCOUNT, MmLockPagableSectionByHandle(), MmUnlockPagableImageSection(), UNLOCK_PFN, and UNLOCK_SYSTEM_WS.

Referenced by Ke386SetDescriptorProcess(), and MiSetImageProtect().

Routine Description:

    Locks the specified address (which MUST reside in paged pool) into
    memory until MmUnlockPagedPool is called.

Arguments:

    Address - Supplies the address in paged pool to lock.

    SizeInBytes - Supplies the size in bytes to lock.

Return Value:

    None.

Environment:

    Kernel mode, IRQL of APC_LEVEL or below.

--*/

{
    PMMPTE PointerPte;
    PMMPTE LastPte;
    KIRQL OldIrql;
    KIRQL OldIrqlWs;

    MmLockPagableSectionByHandle(ExPageLockHandle);
    PointerPte = MiGetPteAddress (Address);
    LastPte = MiGetPteAddress ((PVOID)((PCHAR)Address + (SizeInBytes - 1)));
    LOCK_SYSTEM_WS (OldIrqlWs);
    LOCK_PFN (OldIrql);
    MiLockCode (PointerPte, LastPte, MM_LOCK_BY_REFCOUNT);
    UNLOCK_PFN (OldIrql);
    UNLOCK_SYSTEM_WS (OldIrqlWs);
    MmUnlockPagableImageSection(ExPageLockHandle);
    return;
}
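A minimal sketch of the lock/unlock pairing around a paged-pool buffer; the buffer size and pool tag are illustrative:

    #include <ntddk.h>

    VOID
    MyPinPagedBuffer (
        VOID
        )
    {
        SIZE_T Size = 4 * PAGE_SIZE;
        PVOID Buffer;

        Buffer = ExAllocatePoolWithTag (PagedPool, Size, 'tseT');
        if (Buffer == NULL) {
            return;
        }

        MmLockPagedPool (Buffer, Size);     // resident until MmUnlockPagedPool

        RtlZeroMemory (Buffer, Size);       // no page faults on this range now

        MmUnlockPagedPool (Buffer, Size);
        ExFreePoolWithTag (Buffer, 'tseT');
    }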

VOID MmMakeKernelResourceSectionWritable ( VOID )
 

Definition at line 6495 of file sysload.c.

References KeFlushCurrentTb(), MI_IS_PHYSICAL_ADDRESS, MI_MAKE_VALID_PTE, MI_WRITE_VALID_PTE_NEW_PROTECTION, MiGetVirtualAddressMappedByPte, MM_READWRITE, NULL, and _MMPTE::u.

Referenced by KeGetBugMessageText().

06501 : 06502 06503 This function makes the kernel's resource section readwrite so the bugcheck 06504 code can write into it. 06505 06506 Arguments: 06507 06508 None. 06509 06510 Return Value: 06511 06512 None. 06513 06514 Environment: 06515 06516 Kernel mode. Any IRQL. 06517 06518 --*/ 06519 06520 { 06521 #if defined (_X86_) 06522 MMPTE TempPte; 06523 MMPTE PteContents; 06524 PMMPTE PointerPte; 06525 06526 if (MiKernelResourceStartPte == NULL) { 06527 return; 06528 } 06529 06530 PointerPte = MiKernelResourceStartPte; 06531 06532 if (MI_IS_PHYSICAL_ADDRESS (MiGetVirtualAddressMappedByPte (PointerPte))) { 06533 06534 // 06535 // Mapped physically, doesn't need to be made readwrite. 06536 // 06537 06538 return; 06539 } 06540 06541 // 06542 // Since the entry state and IRQL are unknown, just go through the 06543 // PTEs without a lock and make them all readwrite. 06544 // 06545 06546 do { 06547 PteContents = *PointerPte; 06548 #if defined(NT_UP) 06549 if (PteContents.u.Hard.Write == 0) 06550 #else 06551 if (PteContents.u.Hard.Writable == 0) 06552 #endif 06553 { 06554 MI_MAKE_VALID_PTE (TempPte, 06555 PteContents.u.Hard.PageFrameNumber, 06556 MM_READWRITE, 06557 PointerPte); 06558 #if !defined(NT_UP) 06559 TempPte.u.Hard.Writable = 1; 06560 #endif 06561 MI_WRITE_VALID_PTE_NEW_PROTECTION (PointerPte, TempPte); 06562 } 06563 PointerPte += 1; 06564 } while (PointerPte <= MiKernelResourceEndPte); 06565 06566 // 06567 // Don't do this more than once. 06568 // 06569 06570 MiKernelResourceStartPte = NULL; 06571 06572 // 06573 // Only flush this processor as the state of the others is unknown. 06574 // 06575 06576 KeFlushCurrentTb (); 06577 #endif 06578 }

NTKERNELAPI PVOID MmMapIoSpace ( IN PHYSICAL_ADDRESS PhysicalAddress,
IN SIZE_T NumberOfBytes,
IN MEMORY_CACHING_TYPE CacheType )
 

Definition at line 3415 of file iosup.c.

References ASSERT, BYTE_OFFSET, _MDL::ByteCount, _MDL::ByteOffset, COMPUTE_PAGES_SPANNED, ExAllocatePoolWithTag, FALSE, KeFeatureBits, KeFlushEntireTb(), KeInvalidateAllCaches(), KeSetPhysicalCacheTypeRange(), KF_PAT, _MDL::MappedSystemVa, MI_DISABLE_CACHING, MI_SET_PTE_WRITE_COMBINE, MI_WRITE_VALID_PTE, MiGetVirtualAddressMappedByPte, MiInsertPteTracker(), MiLockSystemSpace, MiReleaseDeadPteTrackers(), MiReleaseSystemPtes(), MiReserveSystemPtes(), MiSweepCacheMachineDependent(), MiTrackPtesAborted, MiUnlockSystemSpace, MiWriteCombiningPtes, MM_COLOR_ALIGNMENT, MM_COLOR_MASK_VIRTUAL, MmCached, MmMaximumCacheType, MmNonCached, MmTrackPtes, MmUSWCCached, MmWriteCombined, NonPagedPool, NT_SUCCESS, NTSTATUS(), NULL, PAGE_SHIFT, RtlGetCallersAddress(), _MDL::StartVa, Status, SystemPteSpace, TRUE, _MMPTE::u, and ValidKernelPte.

Referenced by CmpFindACPITable(), DriverEntry(), MmAllocateContiguousMemorySpecifyCache(), MmMapVideoDisplay(), and VerifierMapIoSpace().

03423 : 03424 03425 This function maps the specified physical address into the non-pagable 03426 portion of the system address space. 03427 03428 Arguments: 03429 03430 PhysicalAddress - Supplies the starting physical address to map. 03431 03432 NumberOfBytes - Supplies the number of bytes to map. 03433 03434 CacheType - Supplies MmNonCached if the physical address is to be mapped 03435 as non-cached, MmCached if the address should be cached, and 03436 MmWriteCombined if the address should be cached and 03437 write-combined as a frame buffer which is to be used only by 03438 the video port driver. All other callers should use 03439 MmUSWCCached. MmUSWCCached is available only if the PAT 03440 feature is present and available. 03441 03442 For I/O device registers, this is usually specified 03443 as MmNonCached. 03444 03445 Return Value: 03446 03447 Returns the virtual address which maps the specified physical addresses. 03448 The value NULL is returned if sufficient virtual address space for 03449 the mapping could not be found. 03450 03451 Environment: 03452 03453 Kernel mode, Should be IRQL of APC_LEVEL or below, but unfortunately 03454 callers are coming in at DISPATCH_LEVEL and it's too late to change the 03455 rules now. This means you can never make this routine pagable. 03456 03457 --*/ 03458 03459 { 03460 PFN_NUMBER NumberOfPages; 03461 PFN_NUMBER PageFrameIndex; 03462 PMMPTE PointerPte; 03463 PVOID BaseVa; 03464 MMPTE TempPte; 03465 KIRQL OldIrql; 03466 PMDL TempMdl; 03467 PFN_NUMBER MdlHack[(sizeof(MDL)/sizeof(PFN_NUMBER)) + 1]; 03468 PPFN_NUMBER Page; 03469 PLOCK_TRACKER Tracker; 03470 PVOID CallingAddress; 03471 PVOID CallersCaller; 03472 #ifdef i386 03473 NTSTATUS Status; 03474 #endif 03475 03476 #if !defined (_X86_) 03477 CallingAddress = (PVOID)_ReturnAddress(); 03478 CallersCaller = (PVOID)0; 03479 #endif 03480 03481 // 03482 // For compatibility for when CacheType used to be passed as a BOOLEAN 03483 // mask off the upper bits (TRUE == MmCached, FALSE == MmNonCached). 03484 // 03485 03486 CacheType &= 0xFF; 03487 03488 if (CacheType >= MmMaximumCacheType) { 03489 return (NULL); 03490 } 03491 03492 #if defined (i386) && !defined (_X86PAE_) 03493 ASSERT (PhysicalAddress.HighPart == 0); 03494 #endif 03495 03496 ASSERT (NumberOfBytes != 0); 03497 NumberOfPages = COMPUTE_PAGES_SPANNED (PhysicalAddress.LowPart, 03498 NumberOfBytes); 03499 03500 PointerPte = MiReserveSystemPtes((ULONG)NumberOfPages, 03501 SystemPteSpace, 03502 MM_COLOR_ALIGNMENT, 03503 (PhysicalAddress.LowPart & 03504 MM_COLOR_MASK_VIRTUAL), 03505 FALSE); 03506 if (PointerPte == NULL) { 03507 return(NULL); 03508 } 03509 03510 BaseVa = (PVOID)MiGetVirtualAddressMappedByPte (PointerPte); 03511 BaseVa = (PVOID)((PCHAR)BaseVa + BYTE_OFFSET(PhysicalAddress.LowPart)); 03512 03513 TempPte = ValidKernelPte; 03514 03515 #ifdef i386 03516 // 03517 // Set the physical range to proper caching type. If the PAT feature 03518 // is supported, then set the caching type in the PTE, otherwise modify 03519 // the MTRRs if applicable. If the cache type is MmUSWCCached and the 03520 // PAT is not supported then fail the call. 03521 // 03522 03523 if (KeFeatureBits & KF_PAT) { 03524 if ((CacheType == MmWriteCombined) || (CacheType == MmUSWCCached)) { 03525 if (MiWriteCombiningPtes == TRUE) { 03526 MI_SET_PTE_WRITE_COMBINE(TempPte); 03527 Status = STATUS_SUCCESS; 03528 } else { 03529 Status = STATUS_UNSUCCESSFUL; 03530 } 03531 } else { 03532 03533 // 03534 // For Non-MmFrameBufferCaching type use existing mm macros. 
03535 // 03536 03537 Status = STATUS_SUCCESS; 03538 } 03539 } else { 03540 03541 // Set the MTRRs if possible. 03542 03543 Status = KeSetPhysicalCacheTypeRange( 03544 PhysicalAddress, 03545 NumberOfBytes, 03546 CacheType 03547 ); 03548 } 03549 03550 // 03551 // If range could not be set, determine what to do 03552 // 03553 03554 if (!NT_SUCCESS(Status)) { 03555 03556 if ((Status == STATUS_NOT_SUPPORTED) && 03557 ((CacheType == MmNonCached) || (CacheType == MmCached))) { 03558 03559 // 03560 // The range may not have been set into the proper cache 03561 // type. If the range is either MmNonCached or MmCached just 03562 // continue as the PTE will be marked properly. 03563 // 03564 03565 NOTHING; 03566 03567 } else if (Status == STATUS_UNSUCCESSFUL && CacheType == MmCached) { 03568 03569 // 03570 // If setting a range to Cached was unsuccessful things are not 03571 // optimal, but not fatal. The range can be returned to the 03572 // caller and it will have whatever caching type it has - possibly 03573 // something below fully cached. 03574 // 03575 03576 NOTHING; 03577 03578 } else { 03579 03580 // 03581 // If there's still a problem, fail the request. 03582 // 03583 03584 MiReleaseSystemPtes(PointerPte, NumberOfPages, SystemPteSpace); 03585 03586 return(NULL); 03587 } 03588 } 03589 #endif 03590 03591 if (CacheType == MmNonCached) { 03592 MI_DISABLE_CACHING (TempPte); 03593 } 03594 03595 #if defined(_IA64_) 03596 if (CacheType != MmCached) { 03597 KeFlushEntireTb(FALSE, TRUE); 03598 } 03599 #endif 03600 03601 PageFrameIndex = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT); 03602 03603 do { 03604 ASSERT (PointerPte->u.Hard.Valid == 0); 03605 TempPte.u.Hard.PageFrameNumber = PageFrameIndex; 03606 MI_WRITE_VALID_PTE (PointerPte, TempPte); 03607 PointerPte += 1; 03608 PageFrameIndex += 1; 03609 NumberOfPages -= 1; 03610 } while (NumberOfPages != 0); 03611 03612 #if defined(i386) 03613 // 03614 // WriteCombined is a non self-snooping memory type. This memory type 03615 // requires a writeback invalidation of all the caches on all processors 03616 // and each accompanying TB flush if the PAT is supported. 03617 // 03618 03619 if ((KeFeatureBits & KF_PAT) && ((CacheType == MmWriteCombined) 03620 || (CacheType == MmUSWCCached)) && (MiWriteCombiningPtes == TRUE)) { 03621 KeFlushEntireTb (FALSE, TRUE); 03622 KeInvalidateAllCaches (TRUE); 03623 } 03624 #endif 03625 03626 #if defined(_IA64_) 03627 if (CacheType != MmCached) { 03628 MiSweepCacheMachineDependent(BaseVa, NumberOfBytes, CacheType); 03629 } 03630 #endif 03631 03632 if (MmTrackPtes != 0) { 03633 03634 // 03635 // First free any zombie blocks as no locks are being held. 
03636 // 03637 03638 MiReleaseDeadPteTrackers (); 03639 03640 Tracker = ExAllocatePoolWithTag (NonPagedPool, 03641 sizeof (PTE_TRACKER), 03642 'ySmM'); 03643 03644 if (Tracker != NULL) { 03645 #if defined (_X86_) 03646 RtlGetCallersAddress(&CallingAddress, &CallersCaller); 03647 #endif 03648 03649 TempMdl = (PMDL) &MdlHack; 03650 TempMdl->MappedSystemVa = BaseVa; 03651 TempMdl->StartVa = (PVOID)(ULONG_PTR)PhysicalAddress.QuadPart; 03652 TempMdl->ByteOffset = BYTE_OFFSET(PhysicalAddress.LowPart); 03653 TempMdl->ByteCount = (ULONG)NumberOfBytes; 03654 03655 Page = (PPFN_NUMBER) (TempMdl + 1); 03656 Page = (PPFN_NUMBER)-1; 03657 03658 MiLockSystemSpace(OldIrql); 03659 03660 MiInsertPteTracker (Tracker, 03661 TempMdl, 03662 COMPUTE_PAGES_SPANNED (PhysicalAddress.LowPart, 03663 NumberOfBytes), 03664 CallingAddress, 03665 CallersCaller); 03666 03667 MiUnlockSystemSpace(OldIrql); 03668 } 03669 else { 03670 MiTrackPtesAborted = TRUE; 03671 } 03672 } 03673 03674 return BaseVa; 03675 }
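Typical driver usage: map the translated register range non-cached at start-device time and release it with the matching MmUnmapIoSpace at stop/remove. A minimal sketch; the routine names are illustrative:

    #include <ntddk.h>

    NTSTATUS
    MyMapDeviceRegisters (
        IN PHYSICAL_ADDRESS TranslatedBase,
        IN SIZE_T Length,
        OUT PVOID *RegisterBase
        )
    {
        PVOID Va;

        //
        // Device registers are almost always mapped MmNonCached.
        //
        Va = MmMapIoSpace (TranslatedBase, Length, MmNonCached);
        if (Va == NULL) {
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        *RegisterBase = Va;
        return STATUS_SUCCESS;
    }

    VOID
    MyUnmapDeviceRegisters (
        IN PVOID RegisterBase,
        IN SIZE_T Length
        )
    {
        MmUnmapIoSpace (RegisterBase, Length);
    }

Access to the mapped registers should then go through the READ_REGISTER / WRITE_REGISTER macros rather than raw pointer dereferences.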

NTKERNELAPI PVOID MmMapLockedPages ( IN PMDL MemoryDescriptorList,
IN KPROCESSOR_MODE AccessMode )
 

Definition at line 2009 of file iosup.c.

References HighPagePriority, MmCached, MmMapLockedPagesSpecifyCache(), NULL, and TRUE.

Referenced by VerifierMapLockedPages().

Routine Description:

    This function maps physical pages described by a memory descriptor
    list into the system virtual address space or the user portion of
    the virtual address space.

Arguments:

    MemoryDescriptorList - Supplies a valid Memory Descriptor List which has
        been updated by MmProbeAndLockPages.

    AccessMode - Supplies an indicator of where to map the pages;
        KernelMode indicates that the pages should be mapped in the
        system part of the address space, UserMode indicates the
        pages should be mapped in the user part of the address space.

Return Value:

    Returns the base address where the pages are mapped.  The base address
    has the same offset as the virtual address in the MDL.

    This routine will raise an exception if the processor mode is USER_MODE
    and quota limits or VM limits are exceeded.

Environment:

    Kernel mode.  DISPATCH_LEVEL or below if access mode is KernelMode,
    APC_LEVEL or below if access mode is UserMode.

--*/

{
    return MmMapLockedPagesSpecifyCache (MemoryDescriptorList,
                                         AccessMode,
                                         MmCached,
                                         NULL,
                                         TRUE,
                                         HighPagePriority);
}
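A minimal sketch of the common kernel-mode use, assuming an MDL that the I/O manager has already probed and locked (direct I/O); names are illustrative:

    #include <ntddk.h>

    VOID
    MyScrubLockedBuffer (
        IN PMDL Mdl
        )
    {
        PUCHAR SystemVa;
        ULONG Length;

        //
        // This legacy path maps MmCached at HighPagePriority and will bugcheck
        // rather than return NULL if system PTEs are exhausted (unless the MDL
        // has MDL_MAPPING_CAN_FAIL set), so the return value is not checked.
        //
        SystemVa = (PUCHAR) MmMapLockedPages (Mdl, KernelMode);
        Length = MmGetMdlByteCount (Mdl);

        RtlZeroMemory (SystemVa, Length);

        MmUnmapLockedPages (SystemVa, Mdl);
    }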

NTKERNELAPI PVOID MmMapLockedPagesSpecifyCache ( IN PMDL MemoryDescriptorList,
IN KPROCESSOR_MODE AccessMode,
IN MEMORY_CACHING_TYPE CacheType,
IN PVOID BaseAddress,
IN ULONG BugCheckOnFailure,
IN MM_PAGE_PRIORITY Priority )
 

Definition at line 2058 of file iosup.c.

References ASSERT, COMPUTE_PAGES_SPANNED, ExAllocatePoolWithTag, ExFreePool(), FALSE, HighPagePriority, KeFlushEntireTb(), KeInvalidateAllCaches(), KernelMode, LOCK_PFN2, MDL_IO_SPACE, MDL_MAPPED_TO_SYSTEM_VA, MDL_MAPPING_CAN_FAIL, MDL_PAGES_LOCKED, MDL_PARTIAL, MDL_PARTIAL_HAS_BEEN_MAPPED, MDL_PHYSICAL_VIEW, MDL_SOURCE_IS_NONPAGED_POOL, MI_DISABLE_CACHING, MI_PFN_ELEMENT, MI_SET_PTE_WRITE_COMBINE, MI_WRITE_VALID_PTE, MiGetPteAddress, MiGetSystemPteAvailability(), MiGetVirtualAddressMappedByPte, MiInsertPteTracker(), MiLockSystemSpace, MiMapLockedPagesInUserSpace(), MiReleaseDeadPteTrackers(), MiReleaseSystemPtes(), MiReserveSystemPtes(), MiSweepCacheMachineDependent(), MiTrackPtesAborted, MiUnlockSystemSpace, MiWriteCombiningPtes, MM_COLOR_ALIGNMENT, MM_COLOR_MASK, MM_COLOR_MASK_VIRTUAL, MM_EMPTY_LIST, MM_KSEG0_BASE, MmCached, MmHardwareCoherentCached, MmNonCached, MmNonCachedUnordered, MmSystemLockPagesCount, MmTrackPtes, MmWriteCombined, NonPagedPool, NULL, PAGE_SHIFT, PAGE_SIZE, PTE_SHIFT, RtlGetCallersAddress(), SystemPteSpace, TRUE, _MMPTE::u, _MMPFN::u3, UNLOCK_PFN2, and ValidKernelPte.

Referenced by MiCloneProcessAddressSpace(), MiDoMappedCopy(), MmMapLockedPages(), NtStartProfile(), and VerifierMapLockedPagesSpecifyCache().

02069 : 02070 02071 This function maps physical pages described by a memory descriptor 02072 list into the system virtual address space or the user portion of 02073 the virtual address space. 02074 02075 Arguments: 02076 02077 MemoryDescriptorList - Supplies a valid Memory Descriptor List which has 02078 been updated by MmProbeAndLockPages. 02079 02080 AccessMode - Supplies an indicator of where to map the pages; 02081 KernelMode indicates that the pages should be mapped in the 02082 system part of the address space, UserMode indicates the 02083 pages should be mapped in the user part of the address space. 02084 02085 CacheType - Supplies the type of cache mapping to use for the MDL. 02086 MmCached indicates "normal" kernel or user mappings. 02087 02088 RequestedAddress - Supplies the base user address of the view. This is only 02089 used if the AccessMode is UserMode. If the initial 02090 value of this argument is not null, then the view will 02091 be allocated starting at the specified virtual 02092 address rounded down to the next 64kb address 02093 boundary. If the initial value of this argument is 02094 null, then the operating system will determine 02095 where to allocate the view. 02096 02097 BugCheckOnFailure - Supplies whether to bugcheck if the mapping cannot be 02098 obtained. This flag is only checked if the MDL's 02099 MDL_MAPPING_CAN_FAIL is zero, which implies that the 02100 default MDL behavior is to bugcheck. This flag then 02101 provides an additional avenue to avoid the bugcheck. 02102 Done this way in order to provide WDM compatibility. 02103 02104 Priority - Supplies an indication as to how important it is that this 02105 request succeed under low available PTE conditions. 02106 02107 Return Value: 02108 02109 Returns the base address where the pages are mapped. The base address 02110 has the same offset as the virtual address in the MDL. 02111 02112 This routine will raise an exception if the processor mode is USER_MODE 02113 and quota limits or VM limits are exceeded. 02114 02115 Environment: 02116 02117 Kernel mode. DISPATCH_LEVEL or below if access mode is KernelMode, 02118 APC_LEVEL or below if access mode is UserMode. 02119 02120 --*/ 02121 02122 { 02123 PFN_NUMBER NumberOfPages; 02124 PFN_NUMBER SavedPageCount; 02125 PPFN_NUMBER Page; 02126 PMMPTE PointerPte; 02127 PVOID BaseVa; 02128 MMPTE TempPte; 02129 PVOID StartingVa; 02130 PMMPFN Pfn2; 02131 KIRQL OldIrql; 02132 PFN_NUMBER NumberOfPtes; 02133 PVOID CallingAddress; 02134 PVOID CallersCaller; 02135 PVOID Tracker; 02136 02137 #if !defined (_X86_) 02138 CallingAddress = (PVOID)_ReturnAddress(); 02139 CallersCaller = (PVOID)0; 02140 #endif 02141 02142 StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa + 02143 MemoryDescriptorList->ByteOffset); 02144 02145 ASSERT (MemoryDescriptorList->ByteCount != 0); 02146 02147 if (AccessMode == KernelMode) { 02148 02149 Page = (PPFN_NUMBER)(MemoryDescriptorList + 1); 02150 NumberOfPages = COMPUTE_PAGES_SPANNED (StartingVa, 02151 MemoryDescriptorList->ByteCount); 02152 SavedPageCount = NumberOfPages; 02153 02154 // 02155 // Map the pages into the system part of the address space as 02156 // kernel read/write. 02157 // 02158 02159 ASSERT ((MemoryDescriptorList->MdlFlags & ( 02160 MDL_MAPPED_TO_SYSTEM_VA | 02161 MDL_SOURCE_IS_NONPAGED_POOL | 02162 MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0); 02163 ASSERT ((MemoryDescriptorList->MdlFlags & ( 02164 MDL_PAGES_LOCKED | 02165 MDL_PARTIAL)) != 0); 02166 02167 // 02168 // Map this with KSEG0 if possible. 
02169 // 02170 02171 #if defined(_ALPHA_) 02172 #define KSEG0_MAXPAGE ((PFN_NUMBER)((KSEG2_BASE - KSEG0_BASE) >> PAGE_SHIFT)) 02173 #endif 02174 02175 #if defined(_X86_) || defined(_IA64_) 02176 #define KSEG0_MAXPAGE MmKseg2Frame 02177 #endif 02178 02179 #if defined(_IA64_) 02180 #define MM_KSEG0_BASE KSEG0_BASE 02181 #endif 02182 02183 if ((NumberOfPages == 1) && (CacheType == MmCached) && 02184 (*Page < KSEG0_MAXPAGE)) { 02185 BaseVa = (PVOID)(MM_KSEG0_BASE + (*Page << PAGE_SHIFT) + 02186 MemoryDescriptorList->ByteOffset); 02187 MemoryDescriptorList->MappedSystemVa = BaseVa; 02188 MemoryDescriptorList->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA; 02189 02190 goto Update; 02191 } 02192 02193 // 02194 // Make sure there are enough PTEs of the requested size. 02195 // 02196 02197 if ((Priority != HighPagePriority) && 02198 (MiGetSystemPteAvailability ((ULONG)NumberOfPages, Priority) == FALSE)) { 02199 return NULL; 02200 } 02201 02202 PointerPte = MiReserveSystemPtes ( 02203 (ULONG)NumberOfPages, 02204 SystemPteSpace, 02205 MM_COLOR_ALIGNMENT, 02206 (PtrToUlong(StartingVa) & 02207 MM_COLOR_MASK_VIRTUAL), 02208 MemoryDescriptorList->MdlFlags & MDL_MAPPING_CAN_FAIL ? 0 : BugCheckOnFailure); 02209 02210 if (PointerPte == NULL) { 02211 02212 // 02213 // Not enough system PTES are available. 02214 // 02215 02216 return NULL; 02217 } 02218 BaseVa = (PVOID)((PCHAR)MiGetVirtualAddressMappedByPte (PointerPte) + 02219 MemoryDescriptorList->ByteOffset); 02220 02221 NumberOfPtes = NumberOfPages; 02222 02223 TempPte = ValidKernelPte; 02224 02225 switch (CacheType) { 02226 02227 case MmNonCached: 02228 MI_DISABLE_CACHING (TempPte); 02229 break; 02230 02231 case MmCached: 02232 break; 02233 02234 case MmWriteCombined: 02235 MI_SET_PTE_WRITE_COMBINE (TempPte); 02236 break; 02237 02238 case MmHardwareCoherentCached: 02239 break; 02240 02241 #if 0 02242 case MmNonCachedUnordered: 02243 break; 02244 #endif 02245 02246 default: 02247 break; 02248 } 02249 02250 #if defined(_IA64_) 02251 if (CacheType != MmCached) { 02252 KeFlushEntireTb(FALSE, TRUE); 02253 } 02254 #endif 02255 02256 #if DBG 02257 LOCK_PFN2 (OldIrql); 02258 #endif //DBG 02259 02260 do { 02261 02262 if (*Page == MM_EMPTY_LIST) { 02263 break; 02264 } 02265 TempPte.u.Hard.PageFrameNumber = *Page; 02266 ASSERT (PointerPte->u.Hard.Valid == 0); 02267 02268 #if DBG 02269 if ((MemoryDescriptorList->MdlFlags & (MDL_IO_SPACE | MDL_PHYSICAL_VIEW)) == 0) { 02270 Pfn2 = MI_PFN_ELEMENT (*Page); 02271 ASSERT (Pfn2->u3.e2.ReferenceCount != 0); 02272 ASSERT ((((ULONG_PTR)PointerPte >> PTE_SHIFT) & MM_COLOR_MASK) == 02273 (((ULONG)Pfn2->u3.e1.PageColor))); 02274 } 02275 #endif //DBG 02276 02277 MI_WRITE_VALID_PTE (PointerPte, TempPte); 02278 Page += 1; 02279 PointerPte += 1; 02280 NumberOfPages -= 1; 02281 } while (NumberOfPages != 0); 02282 02283 #if DBG 02284 UNLOCK_PFN2 (OldIrql); 02285 #endif //DBG 02286 02287 #if defined(i386) 02288 // 02289 // If write combined was specified then flush all caches and TBs. 02290 // 02291 02292 if (CacheType == MmWriteCombined && MiWriteCombiningPtes == TRUE) { 02293 KeFlushEntireTb (FALSE, TRUE); 02294 KeInvalidateAllCaches (TRUE); 02295 } 02296 #endif 02297 02298 #if defined(_IA64_) 02299 if (CacheType != MmCached) { 02300 MiSweepCacheMachineDependent(BaseVa, SavedPageCount * PAGE_SIZE, CacheType); 02301 } 02302 #endif 02303 if (MmTrackPtes != 0) { 02304 02305 // 02306 // First free any zombie blocks as no locks are being held. 
02307 // 02308 02309 MiReleaseDeadPteTrackers (); 02310 02311 Tracker = ExAllocatePoolWithTag (NonPagedPool, 02312 sizeof (PTE_TRACKER), 02313 'ySmM'); 02314 if (Tracker == NULL) { 02315 MiTrackPtesAborted = TRUE; 02316 } 02317 } 02318 02319 MiLockSystemSpace(OldIrql); 02320 if (MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) { 02321 02322 // 02323 // Another thread must have already mapped this. 02324 // Clean up the system PTES and release them. 02325 // 02326 02327 MiUnlockSystemSpace(OldIrql); 02328 02329 if (MmTrackPtes != 0) { 02330 if (Tracker != NULL) { 02331 ExFreePool(Tracker); 02332 } 02333 } 02334 02335 #if DBG 02336 if ((MemoryDescriptorList->MdlFlags & (MDL_IO_SPACE | MDL_PHYSICAL_VIEW)) == 0) { 02337 PMMPFN Pfn3; 02338 PFN_NUMBER j; 02339 PPFN_NUMBER Page1; 02340 02341 Page1 = (PPFN_NUMBER)(MemoryDescriptorList + 1); 02342 for (j = 0; j < SavedPageCount ;j += 1) { 02343 if (*Page == MM_EMPTY_LIST) { 02344 break; 02345 } 02346 Pfn3 = MI_PFN_ELEMENT (*Page1); 02347 ASSERT (Pfn3->u3.e2.ReferenceCount != 0); 02348 Page1 += 1; 02349 } 02350 } 02351 #endif //DBG 02352 PointerPte = MiGetPteAddress (BaseVa); 02353 02354 MiReleaseSystemPtes (PointerPte, 02355 (ULONG)SavedPageCount, 02356 SystemPteSpace); 02357 02358 return MemoryDescriptorList->MappedSystemVa; 02359 } 02360 02361 MemoryDescriptorList->MappedSystemVa = BaseVa; 02362 *(volatile ULONG *)&MmSystemLockPagesCount; //need to force order. 02363 MemoryDescriptorList->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA; 02364 02365 if ((MmTrackPtes != 0) && (Tracker != NULL)) { 02366 #if defined (_X86_) 02367 RtlGetCallersAddress(&CallingAddress, &CallersCaller); 02368 #endif 02369 MiInsertPteTracker (Tracker, 02370 MemoryDescriptorList, 02371 NumberOfPtes, 02372 CallingAddress, 02373 CallersCaller); 02374 } 02375 02376 MiUnlockSystemSpace(OldIrql); 02377 02378 Update: 02379 if ((MemoryDescriptorList->MdlFlags & MDL_PARTIAL) != 0) { 02380 MemoryDescriptorList->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED; 02381 } 02382 02383 return BaseVa; 02384 02385 } else { 02386 02387 return MiMapLockedPagesInUserSpace (MemoryDescriptorList, 02388 StartingVa, 02389 CacheType, 02390 RequestedAddress); 02391 } 02392 }
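A minimal sketch that opts out of the bugcheck-on-failure behavior so that running out of system PTEs surfaces as a NULL return the caller can handle; names are illustrative:

    #include <ntddk.h>

    PVOID
    MyMapMdlSafely (
        IN PMDL Mdl                 // already locked with MmProbeAndLockPages
        )
    {
        PVOID Va;

        Va = MmMapLockedPagesSpecifyCache (Mdl,
                                           KernelMode,
                                           MmCached,
                                           NULL,               // Mm chooses the address
                                           FALSE,              // no bugcheck on failure
                                           NormalPagePriority);

        //
        // NULL here means system PTEs are scarce; the caller must cope.
        // A successful mapping is released later with MmUnmapLockedPages.
        //
        return Va;
    }

New code normally reaches this routine through the MmGetSystemAddressForMdlSafe macro rather than calling it directly.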

VOID MmMapMemoryDumpMdl ( IN OUT PMDL MemoryDumpMdl )
 

Definition at line 7197 of file iosup.c.

References ASSERT, BYTES_TO_PAGES, KiFlushSingleTb(), MiGetVirtualAddressMappedByPte, MM_KERNEL_DEMAND_ZERO_PTE, MmCrashDumpPte, PAGE_SHIFT, PAGE_SIZE, TRUE, _MMPTE::u, and ValidKernelPte.

Referenced by IopMapPhysicalMemory(), and IopWritePageToDisk().

07203 : 07204 07205 For use by crash dump routine ONLY. Maps an MDL into a fixed 07206 portion of the address space. Only 1 MDL can be mapped at a 07207 time. 07208 07209 Arguments: 07210 07211 MemoryDumpMdl - Supplies the MDL to map. 07212 07213 Return Value: 07214 07215 None, fields in MDL updated. 07216 07217 --*/ 07218 07219 { 07220 PFN_NUMBER NumberOfPages; 07221 PMMPTE PointerPte; 07222 PCHAR BaseVa; 07223 MMPTE TempPte; 07224 PPFN_NUMBER Page; 07225 07226 NumberOfPages = BYTES_TO_PAGES (MemoryDumpMdl->ByteCount + MemoryDumpMdl->ByteOffset); 07227 07228 ASSERT (NumberOfPages <= 16); 07229 07230 PointerPte = MmCrashDumpPte; 07231 BaseVa = (PCHAR)MiGetVirtualAddressMappedByPte(PointerPte); 07232 MemoryDumpMdl->MappedSystemVa = (PCHAR)BaseVa + MemoryDumpMdl->ByteOffset; 07233 TempPte = ValidKernelPte; 07234 Page = (PPFN_NUMBER)(MemoryDumpMdl + 1); 07235 07236 // 07237 // If the pages don't span the entire dump virtual address range, 07238 // build a barrier. Otherwise use the default barrier provided at the 07239 // end of the dump virtual address range. 07240 // 07241 07242 if (NumberOfPages < 16) { 07243 KiFlushSingleTb (TRUE, BaseVa + (NumberOfPages << PAGE_SHIFT)); 07244 (PointerPte + NumberOfPages)->u.Long = MM_KERNEL_DEMAND_ZERO_PTE; 07245 } 07246 07247 do { 07248 07249 KiFlushSingleTb (TRUE, BaseVa); 07250 07251 TempPte.u.Hard.PageFrameNumber = *Page; 07252 07253 // 07254 // Note this PTE may be valid or invalid prior to the overwriting here. 07255 // 07256 07257 *PointerPte = TempPte; 07258 07259 Page += 1; 07260 PointerPte += 1; 07261 BaseVa += PAGE_SIZE; 07262 NumberOfPages -= 1; 07263 } while (NumberOfPages != 0); 07264 07265 return; 07266 }

NTKERNELAPI NTSTATUS MmMapUserAddressesToPage ( IN PVOID BaseAddress,
IN SIZE_T NumberOfBytes,
IN PVOID PageAddress )
 

Definition at line 5161 of file iosup.c.

References _EPROCESS::AddressSpaceDeleted, ASSERT, _MMVAD::EndingVpn, KeFlushEntireTb(), KeFlushSingleTb(), LOCK_PFN, LOCK_WS_AND_ADDRESS_SPACE, MI_VA_TO_VPN, MI_VPN_TO_VA, MI_VPN_TO_VA_ENDING, MiFillMemoryPte, MiGetPteAddress, MiLocateAddress(), MmGetPhysicalAddress(), NTSTATUS(), NULL, PAGE_SHIFT, PAGED_CODE, PsGetCurrentProcess, _MMVAD::StartingVpn, Status, TRUE, _MMVAD::u, _MMPTE::u, UNLOCK_PFN, UNLOCK_WS_AND_ADDRESS_SPACE, and VOID().

05169 : 05170 05171 This function maps a range of addresses in a physical memory VAD to the 05172 specified page address. This is typically used by a driver to nicely 05173 remove an application's access to things like video memory when the 05174 application is not responding to requests to relinquish it. 05175 05176 Note the entire range must be currently mapped (ie, all the PTEs must 05177 be valid) by the caller. 05178 05179 Arguments: 05180 05181 BaseAddress - Supplies the base virtual address where the physical 05182 address is mapped. 05183 05184 NumberOfBytes - Supplies the number of bytes to remap to the new address. 05185 05186 PageAddress - Supplies the virtual address of the page this is remapped to. 05187 This must be nonpaged memory. 05188 05189 Return Value: 05190 05191 Various NTSTATUS codes. 05192 05193 Environment: 05194 05195 Kernel mode, IRQL of APC_LEVEL or below. 05196 05197 --*/ 05198 05199 { 05200 PMMVAD Vad; 05201 PMMPTE PointerPte; 05202 MMPTE PteContents; 05203 PMMPTE LastPte; 05204 PEPROCESS Process; 05205 NTSTATUS Status; 05206 PVOID EndingAddress; 05207 PFN_NUMBER PageFrameNumber; 05208 SIZE_T NumberOfPtes; 05209 PHYSICAL_ADDRESS PhysicalAddress; 05210 KIRQL OldIrql; 05211 05212 PAGED_CODE(); 05213 05214 if (BaseAddress > MM_HIGHEST_USER_ADDRESS) { 05215 return STATUS_INVALID_PARAMETER_1; 05216 } 05217 05218 if ((ULONG_PTR)BaseAddress + NumberOfBytes > (ULONG64)MM_HIGHEST_USER_ADDRESS) { 05219 return STATUS_INVALID_PARAMETER_2; 05220 } 05221 05222 Process = PsGetCurrentProcess(); 05223 05224 EndingAddress = (PVOID)((PCHAR)BaseAddress + NumberOfBytes - 1); 05225 05226 LOCK_WS_AND_ADDRESS_SPACE (Process); 05227 05228 // 05229 // Make sure the address space was not deleted. 05230 // 05231 05232 if (Process->AddressSpaceDeleted != 0) { 05233 Status = STATUS_PROCESS_IS_TERMINATING; 05234 goto ErrorReturn; 05235 } 05236 05237 Vad = (PMMVAD)MiLocateAddress (BaseAddress); 05238 05239 if (Vad == NULL) { 05240 05241 // 05242 // No virtual address descriptor located. 05243 // 05244 05245 Status = STATUS_MEMORY_NOT_ALLOCATED; 05246 goto ErrorReturn; 05247 } 05248 05249 if (NumberOfBytes == 0) { 05250 05251 // 05252 // If the region size is specified as 0, the base address 05253 // must be the starting address for the region. The entire VAD 05254 // will then be repointed. 05255 // 05256 05257 if (MI_VA_TO_VPN (BaseAddress) != Vad->StartingVpn) { 05258 Status = STATUS_FREE_VM_NOT_AT_BASE; 05259 goto ErrorReturn; 05260 } 05261 05262 BaseAddress = MI_VPN_TO_VA (Vad->StartingVpn); 05263 EndingAddress = MI_VPN_TO_VA_ENDING (Vad->EndingVpn); 05264 NumberOfBytes = (PCHAR)EndingAddress - (PCHAR)BaseAddress + 1; 05265 } 05266 05267 // 05268 // Found the associated virtual address descriptor. 05269 // 05270 05271 if (Vad->EndingVpn < MI_VA_TO_VPN (EndingAddress)) { 05272 05273 // 05274 // The entire range to remap is not contained within a single 05275 // virtual address descriptor. Return an error. 05276 // 05277 05278 Status = STATUS_INVALID_PARAMETER_2; 05279 goto ErrorReturn; 05280 } 05281 05282 if (Vad->u.VadFlags.PhysicalMapping == 0) { 05283 05284 // 05285 // The virtual address descriptor is not a physical mapping. 
05286 // 05287 05288 Status = STATUS_INVALID_ADDRESS; 05289 goto ErrorReturn; 05290 } 05291 05292 PointerPte = MiGetPteAddress (BaseAddress); 05293 LastPte = MiGetPteAddress (EndingAddress); 05294 NumberOfPtes = LastPte - PointerPte + 1; 05295 05296 PhysicalAddress = MmGetPhysicalAddress (PageAddress); 05297 PageFrameNumber = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT); 05298 05299 PteContents = *PointerPte; 05300 PteContents.u.Hard.PageFrameNumber = PageFrameNumber; 05301 05302 #if DBG 05303 05304 // 05305 // All the PTEs must be valid or the filling will corrupt the 05306 // UsedPageTableCounts. 05307 // 05308 05309 do { 05310 ASSERT (PointerPte->u.Hard.Valid == 1); 05311 PointerPte += 1; 05312 } while (PointerPte < LastPte); 05313 PointerPte = MiGetPteAddress (BaseAddress); 05314 #endif 05315 05316 // 05317 // Fill the PTEs and flush at the end - no race here because it doesn't 05318 // matter whether the user app sees the old or the new data until we 05319 // return (writes going to either page is acceptable prior to return 05320 // from this function). There is no race with I/O and ProbeAndLockPages 05321 // because the PFN lock is acquired here. 05322 // 05323 05324 LOCK_PFN (OldIrql); 05325 05326 #if !defined (_X86PAE_) 05327 MiFillMemoryPte (PointerPte, 05328 NumberOfPtes * sizeof (MMPTE), 05329 PteContents.u.Long); 05330 #else 05331 05332 // 05333 // Note that the PAE architecture must very carefully fill these PTEs. 05334 // 05335 05336 do { 05337 ASSERT (PointerPte->u.Hard.Valid == 1); 05338 PointerPte += 1; 05339 (VOID)KeInterlockedSwapPte ((PHARDWARE_PTE)PointerPte, 05340 (PHARDWARE_PTE)&PteContents); 05341 } while (PointerPte < LastPte); 05342 PointerPte = MiGetPteAddress (BaseAddress); 05343 05344 #endif 05345 05346 if (NumberOfPtes == 1) { 05347 05348 (VOID)KeFlushSingleTb (BaseAddress, 05349 TRUE, 05350 TRUE, 05351 (PHARDWARE_PTE)PointerPte, 05352 PteContents.u.Flush); 05353 } 05354 else { 05355 KeFlushEntireTb (TRUE, TRUE); 05356 } 05357 05358 UNLOCK_PFN (OldIrql); 05359 05360 Status = STATUS_SUCCESS; 05361 05362 ErrorReturn: 05363 05364 UNLOCK_WS_AND_ADDRESS_SPACE (Process); 05365 05366 return Status; 05367 }
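A hedged sketch of the intended use: a driver that earlier gave an application a view of device memory points the entire view at a driver-owned dummy page when it wants the memory back. The routine and parameter names are illustrative:

    #include <ntddk.h>

    NTSTATUS
    MyRevokeUserFrameBufferAccess (
        IN PVOID UserViewBase,      // base of a view this driver mapped earlier
        IN SIZE_T ViewSize,         // size of that view
        IN PVOID DummyPage          // nonpaged, driver-owned substitute page
        )
    {
        //
        // Must run in the context of the process owning the view, at
        // APC_LEVEL or below, with every PTE in the range currently valid.
        //
        return MmMapUserAddressesToPage (UserViewBase, ViewSize, DummyPage);
    }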

NTKERNELAPI PVOID MmMapVideoDisplay ( IN PHYSICAL_ADDRESS PhysicalAddress,
IN SIZE_T NumberOfBytes,
IN MEMORY_CACHING_TYPE CacheType )
 

Definition at line 7516 of file iosup.c.

References ASSERT, BYTE_OFFSET, COMPUTE_PAGES_SPANNED, ExAllocatePoolWithTag, ExFreePool(), FALSE, MI_DISABLE_CACHING, MI_SET_GLOBAL_STATE, MI_WRITE_VALID_PTE, MiFillMemoryPte, MiGetSubsectionAddressForPte, MiGetVirtualAddressMappedByPte, MiProtoAddressForPte, MiReleaseSystemPtes(), MiReserveSystemPtes(), MM_NOCACHE, MM_READWRITE, MM_VA_MAPPED_BY_PDE, MM_ZERO_KERNEL_PTE, MmMapIoSpace(), MmPageSizeInfo, MMPTE, NonPagedPool, NULL, PAGE_SHIFT, PAGE_SIZE, PAGED_CODE, PagedPool, PTE_SHIFT, _SUBSECTION::PtesInSubsection, _SUBSECTION::StartingSector, _SUBSECTION::SubsectionBase, SystemPteSpace, TRUE, _MMPTE::u, _SUBSECTION::u, ValidKernelPte, X64K, and ZeroKernelPte.

07524 : 07525 07526 This function maps the specified physical address into the non-pagable 07527 portion of the system address space. 07528 07529 Arguments: 07530 07531 PhysicalAddress - Supplies the starting physical address to map. 07532 07533 NumberOfBytes - Supplies the number of bytes to map. 07534 07535 CacheType - Supplies MmNonCached if the physical address is to be mapped 07536 as non-cached, MmCached if the address should be cached, and 07537 MmWriteCombined if the address should be cached and 07538 write-combined as a frame buffer. For I/O device registers, 07539 this is usually specified as MmNonCached. 07540 07541 Return Value: 07542 07543 Returns the virtual address which maps the specified physical addresses. 07544 The value NULL is returned if sufficient virtual address space for 07545 the mapping could not be found. 07546 07547 Environment: 07548 07549 Kernel mode, IRQL of APC_LEVEL or below. 07550 07551 --*/ 07552 07553 { 07554 PMMPTE PointerPte; 07555 PVOID BaseVa; 07556 #ifdef LARGE_PAGES 07557 MMPTE TempPte; 07558 PFN_NUMBER PageFrameIndex; 07559 PFN_NUMBER NumberOfPages; 07560 ULONG size; 07561 PMMPTE protoPte; 07562 PMMPTE largePte; 07563 ULONG pageSize; 07564 PSUBSECTION Subsection; 07565 ULONG Alignment; 07566 ULONG EmPageSize; 07567 #endif LARGE_PAGES 07568 ULONG LargePages; 07569 07570 LargePages = FALSE; 07571 PointerPte = NULL; 07572 07573 #if defined (i386) && !defined (_X86PAE_) 07574 ASSERT (PhysicalAddress.HighPart == 0); 07575 #endif 07576 07577 PAGED_CODE(); 07578 07579 ASSERT (NumberOfBytes != 0); 07580 07581 #ifdef LARGE_PAGES 07582 NumberOfPages = COMPUTE_PAGES_SPANNED (PhysicalAddress.LowPart, 07583 NumberOfBytes); 07584 07585 TempPte = ValidKernelPte; 07586 MI_DISABLE_CACHING (TempPte); 07587 PageFrameIndex = (PFN_NUMBER)(PhysicalAddress.QuadPart >> PAGE_SHIFT); 07588 TempPte.u.Hard.PageFrameNumber = PageFrameIndex; 07589 07590 if ((NumberOfBytes > X64K) && (!MmLargeVideoMapped)) { 07591 size = (NumberOfBytes - 1) >> (PAGE_SHIFT + 1); 07592 pageSize = PAGE_SIZE; 07593 07594 while (size != 0) { 07595 size = size >> 2; 07596 pageSize = pageSize << 2; 07597 } 07598 07599 Alignment = pageSize << 1; 07600 if (Alignment < MM_VA_MAPPED_BY_PDE) { 07601 Alignment = MM_VA_MAPPED_BY_PDE; 07602 } 07603 07604 #if defined(_IA64_) 07605 07606 // 07607 // Convert pageSize to the EM specific page-size field format 07608 // 07609 07610 EmPageSize = 0; 07611 size = pageSize - 1 ; 07612 07613 while (size) { 07614 size = size >> 1; 07615 EmPageSize += 1; 07616 } 07617 07618 if (NumberOfBytes > pageSize) { 07619 07620 if (MmPageSizeInfo & (pageSize << 1)) { 07621 07622 // 07623 // if larger page size is supported in the implementation 07624 // 07625 07626 pageSize = pageSize << 1; 07627 EmPageSize += 1; 07628 07629 } 07630 else { 07631 07632 EmPageSize = EmPageSize | pageSize; 07633 07634 } 07635 } 07636 07637 pageSize = EmPageSize; 07638 #endif 07639 07640 NumberOfPages = Alignment >> PAGE_SHIFT; 07641 07642 PointerPte = MiReserveSystemPtes(NumberOfPages, 07643 SystemPteSpace, 07644 Alignment, 07645 0, 07646 FALSE); 07647 07648 if (PointerPte == NULL) { 07649 goto MapWithSmallPages; 07650 } 07651 07652 protoPte = ExAllocatePoolWithTag (PagedPool, 07653 sizeof (MMPTE), 07654 'bSmM'); 07655 07656 if (protoPte == NULL) { 07657 MiReleaseSystemPtes(PointerPte, NumberOfPages, SystemPteSpace); 07658 goto MapWithSmallPages; 07659 } 07660 07661 Subsection = ExAllocatePoolWithTag (NonPagedPool, 07662 sizeof(SUBSECTION) + (4 * sizeof(MMPTE)), 07663 'bSmM'); 07664 07665 if 
(Subsection == NULL) { 07666 ExFreePool (protoPte); 07667 MiReleaseSystemPtes(PointerPte, NumberOfPages, SystemPteSpace); 07668 goto MapWithSmallPages; 07669 } 07670 07671 MiFillMemoryPte (PointerPte, 07672 Alignment >> (PAGE_SHIFT - PTE_SHIFT), 07673 MM_ZERO_KERNEL_PTE); 07674 07675 // 07676 // Build large page descriptor and fill in all the PTEs. 07677 // 07678 07679 Subsection->StartingSector = pageSize; 07680 Subsection->EndingSector = (ULONG)NumberOfPages; 07681 Subsection->u.LongFlags = 0; 07682 Subsection->u.SubsectionFlags.LargePages = 1; 07683 Subsection->u.SubsectionFlags.Protection = MM_READWRITE | MM_NOCACHE; 07684 Subsection->PtesInSubsection = Alignment; 07685 Subsection->SubsectionBase = PointerPte; 07686 07687 largePte = (PMMPTE)(Subsection + 1); 07688 07689 // 07690 // Build the first 2 PTEs as entries for the TLB to 07691 // map the specified physical address. 07692 // 07693 07694 *largePte = TempPte; 07695 largePte += 1; 07696 07697 if (NumberOfBytes > pageSize) { 07698 *largePte = TempPte; 07699 largePte->u.Hard.PageFrameNumber += (pageSize >> PAGE_SHIFT); 07700 } else { 07701 *largePte = ZeroKernelPte; 07702 } 07703 07704 // 07705 // Build the first prototype PTE as a paging file format PTE 07706 // referring to the subsection. 07707 // 07708 07709 protoPte->u.Long = MiGetSubsectionAddressForPte(Subsection); 07710 protoPte->u.Soft.Prototype = 1; 07711 protoPte->u.Soft.Protection = MM_READWRITE | MM_NOCACHE; 07712 07713 // 07714 // Set the PTE up for all the user's PTE entries, proto pte 07715 // format pointing to the 3rd prototype PTE. 07716 // 07717 07718 TempPte.u.Long = MiProtoAddressForPte (protoPte); 07719 MI_SET_GLOBAL_STATE (TempPte, 1); 07720 LargePages = TRUE; 07721 MmLargeVideoMapped = TRUE; 07722 } 07723 07724 if (PointerPte != NULL) { 07725 BaseVa = (PVOID)MiGetVirtualAddressMappedByPte (PointerPte); 07726 BaseVa = (PVOID)((PCHAR)BaseVa + BYTE_OFFSET(PhysicalAddress.LowPart)); 07727 07728 do { 07729 ASSERT (PointerPte->u.Hard.Valid == 0); 07730 MI_WRITE_VALID_PTE (PointerPte, TempPte); 07731 PointerPte += 1; 07732 NumberOfPages -= 1; 07733 } while (NumberOfPages != 0); 07734 } else { 07735 07736 MapWithSmallPages: 07737 07738 #endif //LARGE_PAGES 07739 07740 BaseVa = MmMapIoSpace (PhysicalAddress, 07741 NumberOfBytes, 07742 CacheType); 07743 #ifdef LARGE_PAGES 07744 } 07745 #endif //LARGE_PAGES 07746 07747 return BaseVa; 07748 }
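A minimal sketch of the video-port-style usage (frame buffer mapped write-combined, released later with the matching MmUnmapVideoDisplay); names are illustrative:

    #include <ntddk.h>

    PVOID
    MyMapFrameBuffer (
        IN PHYSICAL_ADDRESS FrameBufferPhysical,
        IN SIZE_T FrameBufferLength
        )
    {
        //
        // Falls back internally to an MmMapIoSpace-style small-page mapping
        // when a large-page mapping cannot be built.
        //
        return MmMapVideoDisplay (FrameBufferPhysical,
                                  FrameBufferLength,
                                  MmWriteCombined);
    }

    VOID
    MyUnmapFrameBuffer (
        IN PVOID FrameBufferVa,
        IN SIZE_T FrameBufferLength
        )
    {
        MmUnmapVideoDisplay (FrameBufferVa, FrameBufferLength);
    }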

NTKERNELAPI NTSTATUS MmMapViewInSessionSpace ( IN PVOID Section,
OUT PVOID * MappedBase,
IN OUT PSIZE_T ViewSize )
 

Definition at line 3211 of file mapview.c.

References ASSERT, MiHydra, MiMapViewInSystemSpace(), MmIsAddressValid(), MmSession, MmSessionSpace, PAGED_CODE, PsGetCurrentProcess, _MM_SESSION_SPACE::Session, and TRUE.

Routine Description:

    This routine maps the specified section into the current process's
    session address space.

Arguments:

    Section - Supplies a pointer to the section to map.

    *MappedBase - Returns the address where the section was mapped.

    ViewSize - Supplies the size of the view to map.  If this
        is specified as zero, the whole section is mapped.
        Returns the actual size mapped.

Return Value:

    Status of the map view operation.

Environment:

    Kernel Mode, IRQL of dispatch level.

--*/

{
    PMMSESSION Session;

    PAGED_CODE();

    if (MiHydra == TRUE) {
        if (PsGetCurrentProcess()->Vm.u.Flags.ProcessInSession == 0) {
            return STATUS_NOT_MAPPED_VIEW;
        }
        ASSERT (MmIsAddressValid(MmSessionSpace) == TRUE);
        Session = &MmSessionSpace->Session;
    }
    else {
        Session = &MmSession;
    }

    return MiMapViewInSystemSpace (Section,
                                   Session,
                                   MappedBase,
                                   ViewSize);
}
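A minimal sketch of mapping a whole section into the caller's session, assuming SectionObject is an already-referenced section object and the caller runs in a session process; the unmap side is MmUnmapViewInSessionSpace. Names are illustrative:

    #include <ntddk.h>

    NTSTATUS
    MyMapSectionIntoSession (
        IN PVOID SectionObject,
        OUT PVOID *MappedBase,
        OUT PSIZE_T MappedSize
        )
    {
        NTSTATUS Status;
        PVOID Base = NULL;
        SIZE_T ViewSize = 0;            // zero means "map the whole section"

        Status = MmMapViewInSessionSpace (SectionObject, &Base, &ViewSize);
        if (!NT_SUCCESS (Status)) {
            return Status;
        }

        *MappedBase = Base;
        *MappedSize = ViewSize;         // actual size mapped
        return Status;
    }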

NTSTATUS MmMapViewInSystemCache ( IN PVOID SectionToMap,
OUT PVOID * CapturedBase,
IN OUT PLARGE_INTEGER SectionOffset,
IN OUT PULONG CapturedViewSize )
 

Definition at line 63 of file mapcache.c.

References ASSERT, _SEGMENT::ControlArea, KeBugCheckEx(), KeFlushEntireTb(), L, LOCK_PFN, MiGetPteAddress, MiGetVirtualAddressMappedByPte, MiProtoAddressForKernelPte, MM_COLOR_MASK, MM_EMPTY_LIST, MM_EMPTY_PTE_LIST, MmFirstFreeSystemCache, MmFlushSystemCache, MmSystemCacheEnd, MmSystemCachePteBase, _SUBSECTION::NextSubsection, NULL, _CONTROL_AREA::NumberOfMappedViews, _CONTROL_AREA::NumberOfSectionReferences, _CONTROL_AREA::NumberOfSystemCacheViews, PAGE_SHIFT, PAGE_SIZE, PSECTION, PTE_SHIFT, _SUBSECTION::PtesInSubsection, _CONTROL_AREA::Segment, _SUBSECTION::SubsectionBase, TRUE, _CONTROL_AREA::u, _MMPTE::u, UNLOCK_PFN, and ZeroKernelPte.

Referenced by CcGetVacbMiss().

00072 : 00073 00074 This function maps a view in the specified subject process to 00075 the section object. The page protection is identical to that 00076 of the prototype PTE. 00077 00078 This function is a kernel mode interface to allow LPC to map 00079 a section given the section pointer to map. 00080 00081 This routine assumes all arguments have been probed and captured. 00082 00083 Arguments: 00084 00085 SectionToMap - Supplies a pointer to the section object. 00086 00087 BaseAddress - Supplies a pointer to a variable that will receive 00088 the base address of the view. If the initial value 00089 of this argument is not null, then the view will 00090 be allocated starting at the specified virtual 00091 address rounded down to the next 64kb address 00092 boundary. If the initial value of this argument is 00093 null, then the operating system will determine 00094 where to allocate the view using the information 00095 specified by the ZeroBits argument value and the 00096 section allocation attributes (i.e. based and 00097 tiled). 00098 00099 SectionOffset - Supplies the offset from the beginning of the 00100 section to the view in bytes. This value must be a multiple 00101 of 256k. 00102 00103 ViewSize - Supplies a pointer to a variable that will receive 00104 the actual size in bytes of the view. 00105 The initial values of this argument specifies the 00106 size of the view in bytes and is rounded up to the 00107 next host page size boundary and must be less than or equal 00108 to 256k. 00109 00110 Return Value: 00111 00112 Returns the status 00113 00114 TBS 00115 00116 Environment: 00117 00118 Kernel mode. 00119 00120 --*/ 00121 00122 { 00123 PSECTION Section; 00124 ULONG PteOffset; 00125 KIRQL OldIrql; 00126 PMMPTE PointerPte; 00127 PMMPTE LastPte; 00128 PMMPTE ProtoPte; 00129 PMMPTE LastProto; 00130 PSUBSECTION Subsection; 00131 PVOID EndingVa; 00132 PCONTROL_AREA ControlArea; 00133 00134 Section = SectionToMap; 00135 00136 // 00137 // Assert the view size is less 256kb and the section offset 00138 // is aligned on a 256k boundary. 00139 // 00140 00141 ASSERT (*CapturedViewSize <= 256L*1024L); 00142 ASSERT ((SectionOffset->LowPart & (256L*1024L - 1)) == 0); 00143 00144 // 00145 // Make sure the section is not an image section or a page file 00146 // backed section. 00147 // 00148 00149 if (Section->u.Flags.Image) { 00150 return STATUS_NOT_MAPPED_DATA; 00151 } 00152 00153 ControlArea = Section->Segment->ControlArea; 00154 00155 ASSERT (*CapturedViewSize != 0); 00156 00157 ASSERT (ControlArea->u.Flags.GlobalOnlyPerSession == 0); 00158 00159 Subsection = (PSUBSECTION)(ControlArea + 1); 00160 00161 LOCK_PFN (OldIrql); 00162 00163 ASSERT (ControlArea->u.Flags.BeingCreated == 0); 00164 ASSERT (ControlArea->u.Flags.BeingDeleted == 0); 00165 ASSERT (ControlArea->u.Flags.BeingPurged == 0); 00166 00167 // 00168 // Find a free 256k base in the cache. 00169 // 00170 00171 if (MmFirstFreeSystemCache == (PMMPTE)MM_EMPTY_LIST) { 00172 UNLOCK_PFN (OldIrql); 00173 return STATUS_NO_MEMORY; 00174 } 00175 00176 if (MmFirstFreeSystemCache == MmFlushSystemCache) { 00177 00178 // 00179 // All system cache PTEs have been used, flush the entire 00180 // TB to remove any stale TB entries. 00181 // 00182 00183 KeFlushEntireTb (TRUE, TRUE); 00184 MmFlushSystemCache = NULL; 00185 } 00186 00187 PointerPte = MmFirstFreeSystemCache; 00188 00189 // 00190 // Update next free entry. 
00191 // 00192 00193 ASSERT (PointerPte->u.Hard.Valid == 0); 00194 00195 if (PointerPte->u.List.NextEntry == MM_EMPTY_PTE_LIST) { 00196 KeBugCheckEx (MEMORY_MANAGEMENT, 00197 0x778, 00198 (ULONG_PTR)PointerPte, 00199 0, 00200 0); 00201 MmFirstFreeSystemCache = (PMMPTE)MM_EMPTY_LIST; 00202 } 00203 else { 00204 MmFirstFreeSystemCache = MmSystemCachePteBase + PointerPte->u.List.NextEntry; 00205 ASSERT (MmFirstFreeSystemCache <= MiGetPteAddress (MmSystemCacheEnd)); 00206 } 00207 00208 // 00209 // Increment the count of the number of views for the 00210 // section object. This requires the PFN lock to be held. 00211 // 00212 00213 ControlArea->NumberOfMappedViews += 1; 00214 ControlArea->NumberOfSystemCacheViews += 1; 00215 ASSERT (ControlArea->NumberOfSectionReferences != 0); 00216 00217 UNLOCK_PFN (OldIrql); 00218 00219 *CapturedBase = MiGetVirtualAddressMappedByPte (PointerPte); 00220 00221 EndingVa = (PVOID)(((ULONG_PTR)*CapturedBase + 00222 *CapturedViewSize - 1L) | (PAGE_SIZE - 1L)); 00223 00224 // 00225 // An unoccupied address range has been found, put the PTEs in 00226 // the range into prototype PTEs. 00227 // 00228 00229 #if DBG 00230 00231 // 00232 // Zero out the next pointer field. 00233 // 00234 00235 PointerPte->u.List.NextEntry = 0; 00236 #endif //DBG 00237 00238 LastPte = MiGetPteAddress (EndingVa); 00239 00240 // 00241 // Calculate the first prototype PTE address. 00242 // 00243 00244 PteOffset = (ULONG)(SectionOffset->QuadPart >> PAGE_SHIFT); 00245 00246 // 00247 // Make sure the PTEs are not in the extended part of the 00248 // segment. 00249 // 00250 00251 while (PteOffset >= Subsection->PtesInSubsection) { 00252 PteOffset -= Subsection->PtesInSubsection; 00253 Subsection = Subsection->NextSubsection; 00254 } 00255 00256 ProtoPte = &Subsection->SubsectionBase[PteOffset]; 00257 00258 LastProto = &Subsection->SubsectionBase[Subsection->PtesInSubsection]; 00259 00260 while (PointerPte <= LastPte) { 00261 00262 if (ProtoPte >= LastProto) { 00263 00264 // 00265 // Handle extended subsections. 00266 // 00267 00268 Subsection = Subsection->NextSubsection; 00269 ProtoPte = Subsection->SubsectionBase; 00270 LastProto = &Subsection->SubsectionBase[ 00271 Subsection->PtesInSubsection]; 00272 } 00273 ASSERT (PointerPte->u.Long == ZeroKernelPte.u.Long); 00274 PointerPte->u.Long = MiProtoAddressForKernelPte (ProtoPte); 00275 00276 ASSERT (((ULONG_PTR)PointerPte & (MM_COLOR_MASK << PTE_SHIFT)) == 00277 (((ULONG_PTR)ProtoPte & (MM_COLOR_MASK << PTE_SHIFT)))); 00278 00279 PointerPte += 1; 00280 ProtoPte += 1; 00281 } 00282 00283 return STATUS_SUCCESS; 00284 }

NTKERNELAPI NTSTATUS MmMapViewInSystemSpace ( IN PVOID Section,
OUT PVOID * MappedBase,
IN PSIZE_T ViewSize )
 

NTKERNELAPI NTSTATUS MmMapViewOfSection ( IN PVOID SectionToMap,
IN PEPROCESS Process,
IN OUT PVOID * CapturedBase,
IN ULONG_PTR ZeroBits,
IN SIZE_T CommitSize,
IN OUT PLARGE_INTEGER SectionOffset,
IN OUT PSIZE_T CapturedViewSize,
IN SECTION_INHERIT InheritDisposition,
IN ULONG AllocationType,
IN ULONG Protect )
 

Definition at line 753 of file mapview.c.

References _SEGMENT::ControlArea, EXCEPTION_EXECUTE_HANDLER, ExPageLockHandle, FALSE, _SEGMENT::ImageCommitment, KeAttachProcess(), KeDetachProcess(), LOCK_ADDRESS_SPACE, MiMakeProtectionMask(), MiMapViewOfDataSection(), MiMapViewOfImageSection(), MiMapViewOfPhysicalSection(), MmLockPagableSectionByHandle(), MmUnlockPagableImageSection(), NTSTATUS(), PAGED_CODE, PSECTION, PsGetCurrentProcess, _CONTROL_AREA::Segment, TRUE, _CONTROL_AREA::u, UNLOCK_ADDRESS_SPACE, and UNLOCK_WS_AND_ADDRESS_SPACE.

Referenced by CommitReadOnlyMemory(), InitMapSharedSection(), MapDesktop(), MiGetWritablePagesInSection(), MiLoadImageSection(), MmCreatePeb(), MmInitializeProcessAddressSpace(), NtAcceptConnectPort(), NtMapViewOfSection(), NtMapViewOfSuperSection(), NtSecureConnectPort(), PspMapSystemDll(), and UserCreateHeap().

00768 : 00769 00770 This function maps a view in the specified subject process to 00771 the section object. 00772 00773 This function is a kernel mode interface to allow LPC to map 00774 a section given the section pointer to map. 00775 00776 This routine assumes all arguments have been probed and captured. 00777 00778 ******************************************************************** 00779 ******************************************************************** 00780 ******************************************************************** 00781 00782 NOTE: 00783 00784 CapturedViewSize, SectionOffset, and CapturedBase must be 00785 captured in non-paged system space (i.e., kernel stack). 00786 00787 ******************************************************************** 00788 ******************************************************************** 00789 ******************************************************************** 00790 00791 Arguments: 00792 00793 SectionToMap - Supplies a pointer to the section object. 00794 00795 Process - Supplies a pointer to the process object. 00796 00797 BaseAddress - Supplies a pointer to a variable that will receive 00798 the base address of the view. If the initial value 00799 of this argument is not null, then the view will 00800 be allocated starting at the specified virtual 00801 address rounded down to the next 64kb address 00802 boundary. If the initial value of this argument is 00803 null, then the operating system will determine 00804 where to allocate the view using the information 00805 specified by the ZeroBits argument value and the 00806 section allocation attributes (i.e. based and 00807 tiled). 00808 00809 ZeroBits - Supplies the number of high order address bits that 00810 must be zero in the base address of the section 00811 view. The value of this argument must be less than 00812 21 and is only used when the operating system 00813 determines where to allocate the view (i.e. when 00814 BaseAddress is null). 00815 00816 CommitSize - Supplies the size of the initially committed region 00817 of the view in bytes. This value is rounded up to 00818 the next host page size boundary. 00819 00820 SectionOffset - Supplies the offset from the beginning of the 00821 section to the view in bytes. This value is 00822 rounded down to the next host page size boundary. 00823 00824 ViewSize - Supplies a pointer to a variable that will receive 00825 the actual size in bytes of the view. If the value 00826 of this argument is zero, then a view of the 00827 section will be mapped starting at the specified 00828 section offset and continuing to the end of the 00829 section. Otherwise the initial value of this 00830 argument specifies the size of the view in bytes 00831 and is rounded up to the next host page size 00832 boundary. 00833 00834 InheritDisposition - Supplies a value that specifies how the 00835 view is to be shared by a child process created 00836 with a create process operation. 00837 00838 AllocationType - Supplies the type of allocation. 00839 00840 Protect - Supplies the protection desired for the region of 00841 initially committed pages. 
00842 00843 Return Value: 00844 00845 Returns the status 00846 00847 TBS 00848 00849 00850 --*/ 00851 { 00852 BOOLEAN Attached; 00853 PSECTION Section; 00854 PCONTROL_AREA ControlArea; 00855 ULONG ProtectionMask; 00856 NTSTATUS status; 00857 BOOLEAN ReleasedWsMutex; 00858 BOOLEAN WriteCombined; 00859 SIZE_T ImageCommitment; 00860 00861 PAGED_CODE(); 00862 00863 Attached = FALSE; 00864 ReleasedWsMutex = TRUE; 00865 00866 Section = (PSECTION)SectionToMap; 00867 00868 // 00869 // Check to make sure the section is not smaller than the view size. 00870 // 00871 00872 if ((LONGLONG)*CapturedViewSize > Section->SizeOfSection.QuadPart) { 00873 if ((AllocationType & MEM_RESERVE) == 0) { 00874 return STATUS_INVALID_VIEW_SIZE; 00875 } 00876 } 00877 00878 if (AllocationType & MEM_RESERVE) { 00879 if (((Section->InitialPageProtection & PAGE_READWRITE) | 00880 (Section->InitialPageProtection & PAGE_EXECUTE_READWRITE)) == 0) { 00881 00882 return STATUS_SECTION_PROTECTION; 00883 } 00884 } 00885 00886 if (Section->u.Flags.NoCache) { 00887 Protect |= PAGE_NOCACHE; 00888 } 00889 00890 // 00891 // Note that write combining is only relevant to physical memory sections 00892 // because they are never trimmed - the write combining bits in a PTE entry 00893 // are not preserved across trims. 00894 // 00895 00896 if (Protect & PAGE_WRITECOMBINE) { 00897 Protect &= ~PAGE_WRITECOMBINE; 00898 WriteCombined = TRUE; 00899 } 00900 else { 00901 WriteCombined = FALSE; 00902 } 00903 00904 // 00905 // Check the protection field. This could raise an exception. 00906 // 00907 00908 try { 00909 ProtectionMask = MiMakeProtectionMask (Protect); 00910 } except (EXCEPTION_EXECUTE_HANDLER) { 00911 return GetExceptionCode(); 00912 } 00913 00914 ControlArea = Section->Segment->ControlArea; 00915 ImageCommitment = Section->Segment->ImageCommitment; 00916 00917 // 00918 // If the specified process is not the current process, attach 00919 // to the specified process. 00920 // 00921 00922 if (PsGetCurrentProcess() != Process) { 00923 KeAttachProcess (&Process->Pcb); 00924 Attached = TRUE; 00925 } 00926 00927 // 00928 // Get the address creation mutex to block multiple threads 00929 // creating or deleting address space at the same time. 00930 // 00931 00932 LOCK_ADDRESS_SPACE (Process); 00933 00934 // 00935 // Make sure the address space was not deleted, if so, return an error. 00936 // 00937 00938 if (Process->AddressSpaceDeleted != 0) { 00939 status = STATUS_PROCESS_IS_TERMINATING; 00940 goto ErrorReturn; 00941 } 00942 00943 // 00944 // Map the view base on the type. 
00945 // 00946 00947 ReleasedWsMutex = FALSE; 00948 00949 if (ControlArea->u.Flags.PhysicalMemory) { 00950 00951 MmLockPagableSectionByHandle(ExPageLockHandle); 00952 status = MiMapViewOfPhysicalSection (ControlArea, 00953 Process, 00954 CapturedBase, 00955 SectionOffset, 00956 CapturedViewSize, 00957 ProtectionMask, 00958 ZeroBits, 00959 AllocationType, 00960 WriteCombined, 00961 &ReleasedWsMutex); 00962 MmUnlockPagableImageSection(ExPageLockHandle); 00963 00964 } else if (ControlArea->u.Flags.Image) { 00965 if (AllocationType & MEM_RESERVE) { 00966 status = STATUS_INVALID_PARAMETER_9; 00967 } 00968 else if (WriteCombined == TRUE) { 00969 status = STATUS_INVALID_PARAMETER_10; 00970 } else { 00971 00972 status = MiMapViewOfImageSection (ControlArea, 00973 Process, 00974 CapturedBase, 00975 SectionOffset, 00976 CapturedViewSize, 00977 Section, 00978 InheritDisposition, 00979 ZeroBits, 00980 ImageCommitment, 00981 &ReleasedWsMutex); 00982 } 00983 00984 } else { 00985 00986 // 00987 // Not an image section, therefore it is a data section. 00988 // 00989 00990 if (WriteCombined == TRUE) { 00991 status = STATUS_INVALID_PARAMETER_10; 00992 } 00993 else { 00994 status = MiMapViewOfDataSection (ControlArea, 00995 Process, 00996 CapturedBase, 00997 SectionOffset, 00998 CapturedViewSize, 00999 Section, 01000 InheritDisposition, 01001 ProtectionMask, 01002 CommitSize, 01003 ZeroBits, 01004 AllocationType, 01005 &ReleasedWsMutex 01006 ); 01007 } 01008 } 01009 01010 ErrorReturn: 01011 if (!ReleasedWsMutex) { 01012 UNLOCK_WS_AND_ADDRESS_SPACE (Process); 01013 } 01014 else { 01015 UNLOCK_ADDRESS_SPACE (Process); 01016 } 01017 01018 if (Attached) { 01019 KeDetachProcess(); 01020 } 01021 01022 return status; 01023 }
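
For reference, a minimal kernel-mode sketch of calling this interface (MmMapViewOfSection, judging by the argument list and the callers above). The exact prototype and parameter types are assumed, and Section and TargetProcess are hypothetical; note that, per the NOTE above, the base address, section offset, and view size are kept on the kernel stack.

    // Hedged sketch: map a full view of an already-referenced section
    // into TargetProcess. Section and TargetProcess are hypothetical;
    // the prototype is assumed from the argument description above.
    NTSTATUS MapWholeSection (IN PVOID Section, IN PEPROCESS TargetProcess)
    {
        NTSTATUS Status;
        PVOID BaseAddress = NULL;          // let the system choose the base
        SIZE_T ViewSize = 0;               // 0 => map to the end of the section
        LARGE_INTEGER SectionOffset;

        SectionOffset.QuadPart = 0;        // captured on the kernel stack, per the NOTE

        Status = MmMapViewOfSection (Section,
                                     TargetProcess,
                                     &BaseAddress,
                                     0,                // ZeroBits
                                     0,                // CommitSize
                                     &SectionOffset,
                                     &ViewSize,
                                     ViewUnmap,        // InheritDisposition
                                     0,                // AllocationType
                                     PAGE_READWRITE);  // Protect

        return Status;   // on success, BaseAddress and ViewSize describe the view
    }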

NTSTATUS MmMemoryUsage (IN PVOID  Buffer,
                        IN ULONG  Size,
                        IN ULONG  Type,
                        OUT PULONG  Length)
 

Definition at line 730 of file dmpaddr.c.

Referenced by NtQuerySystemInformation().

00736 {
00737     return STATUS_NOT_IMPLEMENTED;
00738 }

VOID MmOutPageKernelStack (IN PKTHREAD  Thread)
 

Definition at line 3180 of file procsup.c.

References ASSERT, Count, KeFlushEntireTb(), KeFlushMultipleTb(), KernelDemandZeroPte, LOCK_PFN, MAX_STACK_PAGES, MI_GET_PAGE_FRAME_FROM_PTE, MI_MAKE_VALID_PTE_TRANSITION, MI_PFN_ELEMENT, MI_SET_PFN_DELETED, MiDecrementShareAndValidCount, MiDecrementShareCount(), MiDecrementShareCountOnly, MiGetPteAddress, MM_BUMP_COUNTER, MM_KSTACK_OUTSWAPPED, MM_MAXIMUM_FLUSH_COUNT, MmKernelStackResident, MmResidentAvailablePages, NtGlobalFlag, _MMPFN::OriginalPte, PAGE_SHIFT, PAGE_SIZE, _MMPFN::PteFrame, TRUE, _MMPTE::u, UNLOCK_PFN, and ZeroPte.

Referenced by KiOutSwapKernelStacks().

03186 : 03187 03188 This routine makes the specified kernel stack non-resident and 03189 puts the pages on the transition list. Note, that if the 03190 CurrentStackPointer is within the first page of the stack, the 03191 contents of the second page of the stack is not useful and the 03192 page is freed. 03193 03194 Arguments: 03195 03196 Thread - Supplies a pointer to the thread whose stack should be 03197 removed. 03198 03199 Return Value: 03200 03201 None. 03202 03203 Environment: 03204 03205 Kernel mode. 03206 03207 --*/ 03208 03209 #if defined(_IA64_) 03210 #define MAX_STACK_PAGES ((KERNEL_LARGE_STACK_SIZE + KERNEL_LARGE_BSTORE_SIZE) / PAGE_SIZE) 03211 #else 03212 #define MAX_STACK_PAGES (KERNEL_LARGE_STACK_SIZE / PAGE_SIZE) 03213 #endif 03214 03215 { 03216 PMMPTE PointerPte; 03217 PMMPTE LastPte; 03218 PMMPTE EndOfStackPte; 03219 PMMPFN Pfn1; 03220 PFN_NUMBER PageFrameIndex; 03221 KIRQL OldIrql; 03222 MMPTE TempPte; 03223 PVOID BaseOfKernelStack; 03224 PMMPTE FlushPte[MAX_STACK_PAGES]; 03225 PVOID FlushVa[MAX_STACK_PAGES]; 03226 MMPTE FlushPteSave[MAX_STACK_PAGES]; 03227 ULONG StackSize; 03228 ULONG Count; 03229 PMMPTE LimitPte; 03230 PMMPTE LowestLivePte; 03231 03232 ASSERT (((PCHAR)Thread->StackBase - (PCHAR)Thread->StackLimit) <= 03233 (KERNEL_LARGE_STACK_SIZE + PAGE_SIZE)); 03234 03235 if (NtGlobalFlag & FLG_DISABLE_PAGE_KERNEL_STACKS) { 03236 return; 03237 } 03238 03239 // 03240 // The first page of the stack is the page before the base 03241 // of the stack. 03242 // 03243 03244 BaseOfKernelStack = ((PCHAR)Thread->StackBase - PAGE_SIZE); 03245 PointerPte = MiGetPteAddress (BaseOfKernelStack); 03246 LastPte = MiGetPteAddress ((PULONG)Thread->KernelStack - 1); 03247 if (Thread->LargeStack) { 03248 StackSize = KERNEL_LARGE_STACK_SIZE >> PAGE_SHIFT; 03249 03250 // 03251 // The stack pagein won't necessarily bring back all the pages. 03252 // Make sure that we account now for the ones that will disappear. 03253 // 03254 03255 LimitPte = MiGetPteAddress (Thread->StackLimit); 03256 03257 LowestLivePte = MiGetPteAddress ((PVOID)((PUCHAR)Thread->InitialStack - 03258 KERNEL_LARGE_STACK_COMMIT)); 03259 03260 if (LowestLivePte < LimitPte) { 03261 LowestLivePte = LimitPte; 03262 } 03263 } else { 03264 StackSize = KERNEL_STACK_SIZE >> PAGE_SHIFT; 03265 LowestLivePte = MiGetPteAddress (Thread->StackLimit); 03266 } 03267 EndOfStackPte = PointerPte - StackSize; 03268 03269 ASSERT (LowestLivePte <= LastPte); 03270 03271 // 03272 // Put a signature at the current stack location - 4. 
03273 // 03274 03275 *((PULONG_PTR)Thread->KernelStack - 1) = (ULONG_PTR)Thread; 03276 03277 Count = 0; 03278 03279 LOCK_PFN (OldIrql); 03280 03281 do { 03282 ASSERT (PointerPte->u.Hard.Valid == 1); 03283 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 03284 TempPte = *PointerPte; 03285 MI_MAKE_VALID_PTE_TRANSITION (TempPte, 0); 03286 03287 #ifdef PROTECT_KSTACKS 03288 TempPte.u.Soft.Protection = MM_KSTACK_OUTSWAPPED; 03289 { 03290 PMMPFN x; 03291 x = MI_PFN_ELEMENT(PageFrameIndex); 03292 x->OriginalPte.u.Soft.Protection = MM_KSTACK_OUTSWAPPED; 03293 } 03294 #endif 03295 03296 FlushPteSave[Count] = TempPte; 03297 FlushPte[Count] = PointerPte; 03298 FlushVa[Count] = BaseOfKernelStack; 03299 03300 MiDecrementShareCount (PageFrameIndex); 03301 PointerPte -= 1; 03302 Count += 1; 03303 BaseOfKernelStack = ((PCHAR)BaseOfKernelStack - PAGE_SIZE); 03304 } while (PointerPte >= LastPte); 03305 03306 while (PointerPte != EndOfStackPte) { 03307 if (PointerPte->u.Hard.Valid == 0) { 03308 break; 03309 } 03310 03311 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 03312 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 03313 MiDecrementShareAndValidCount (Pfn1->PteFrame); 03314 MI_SET_PFN_DELETED (Pfn1); 03315 MiDecrementShareCountOnly (MI_GET_PAGE_FRAME_FROM_PTE (PointerPte)); 03316 03317 FlushPteSave[Count] = KernelDemandZeroPte; 03318 03319 #ifdef PROTECT_KSTACKS 03320 FlushPteSave[Count].u.Soft.Protection = MM_KSTACK_OUTSWAPPED; 03321 #endif 03322 03323 FlushPte[Count] = PointerPte; 03324 03325 FlushVa[Count] = BaseOfKernelStack; 03326 Count += 1; 03327 03328 // 03329 // Account for any pages that won't ever come back in. 03330 // 03331 03332 if (PointerPte < LowestLivePte) { 03333 ASSERT (Thread->LargeStack); 03334 MmResidentAvailablePages += 1; 03335 MM_BUMP_COUNTER(12, 1); 03336 } 03337 03338 PointerPte -= 1; 03339 BaseOfKernelStack = ((PCHAR)BaseOfKernelStack - PAGE_SIZE); 03340 } 03341 03342 #if defined(_IA64_) 03343 // 03344 // do for RSE stack space too. 
03345 // 03346 03347 BaseOfKernelStack = Thread->StackBase; 03348 PointerPte = MiGetPteAddress (BaseOfKernelStack); 03349 LastPte = MiGetPteAddress ((PULONG)Thread->KernelBStore); 03350 03351 if (Thread->LargeStack) { 03352 StackSize = KERNEL_LARGE_BSTORE_SIZE >> PAGE_SHIFT; 03353 } else { 03354 StackSize = KERNEL_BSTORE_SIZE >> PAGE_SHIFT; 03355 } 03356 EndOfStackPte = PointerPte + StackSize; 03357 03358 do { 03359 ASSERT (PointerPte->u.Hard.Valid == 1); 03360 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 03361 TempPte = *PointerPte; 03362 MI_MAKE_VALID_PTE_TRANSITION (TempPte, 0); 03363 03364 #ifdef PROTECT_KSTACKS 03365 TempPte.u.Soft.Protection = MM_KSTACK_OUTSWAPPED; 03366 { 03367 PMMPFN x; 03368 x = MI_PFN_ELEMENT(PageFrameIndex); 03369 x->OriginalPte.u.Soft.Protection = MM_KSTACK_OUTSWAPPED; 03370 } 03371 #endif 03372 03373 FlushPteSave[Count] = TempPte; 03374 FlushPte[Count] = PointerPte; 03375 FlushVa[Count] = BaseOfKernelStack; 03376 03377 MiDecrementShareCount (PageFrameIndex); 03378 PointerPte += 1; 03379 Count += 1; 03380 BaseOfKernelStack = ((PCHAR)BaseOfKernelStack + PAGE_SIZE); 03381 } while (PointerPte <= LastPte); 03382 03383 while (PointerPte != EndOfStackPte) { 03384 if (PointerPte->u.Hard.Valid == 0) { 03385 break; 03386 } 03387 03388 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 03389 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 03390 MiDecrementShareAndValidCount (Pfn1->PteFrame); 03391 MI_SET_PFN_DELETED (Pfn1); 03392 MiDecrementShareCountOnly (MI_GET_PAGE_FRAME_FROM_PTE (PointerPte)); 03393 03394 FlushPteSave[Count] = KernelDemandZeroPte; 03395 03396 #ifdef PROTECT_KSTACKS 03397 FlushPteSave[Count].u.Soft.Protection = MM_KSTACK_OUTSWAPPED; 03398 #endif 03399 03400 FlushPte[Count] = PointerPte; 03401 FlushVa[Count] = BaseOfKernelStack; 03402 Count += 1; 03403 03404 PointerPte += 1; 03405 BaseOfKernelStack = ((PCHAR)BaseOfKernelStack + PAGE_SIZE); 03406 } 03407 03408 #endif // _IA64_ 03409 03410 ASSERT (Count <= MAX_STACK_PAGES); 03411 03412 if (Count < MM_MAXIMUM_FLUSH_COUNT) { 03413 KeFlushMultipleTb (Count, 03414 &FlushVa[0], 03415 TRUE, 03416 TRUE, 03417 &((PHARDWARE_PTE)FlushPte[0]), 03418 ZeroPte.u.Flush); 03419 } else { 03420 KeFlushEntireTb (TRUE, TRUE); 03421 } 03422 03423 // 03424 // Increase the available pages by the number of pages that where 03425 // deleted and turned into demand zero. 03426 // 03427 03428 MmKernelStackResident -= Count; 03429 03430 // 03431 // Put the right contents back into the PTEs 03432 // 03433 03434 do { 03435 Count -= 1; 03436 *FlushPte[Count] = FlushPteSave[Count]; 03437 } while (Count != 0); 03438 03439 03440 UNLOCK_PFN (OldIrql); 03441 return; 03442 }

VOID MmOutSwapProcess (IN PKPROCESS  Process)
 

Definition at line 3590 of file procsup.c.

References _MMSUPPORT::AllowWorkingSetAdjustment, ASSERT, _KPROCESS::DirectoryTableBase, FALSE, LOCK_EXPANSION, LOCK_PFN, MI_CONVERT_PHYSICAL_TO_PFN, MI_GET_PAGE_FRAME_FROM_PTE, MI_IS_PHYSICAL_ADDRESS, MI_MAKE_VALID_PTE_TRANSITION, MI_PFN_ELEMENT, MiDecrementShareCount(), MiGetPdeOffset, MiGetPpeOffset, MiGetPteAddress, MiGetPteOffset, MiHydra, MiMapPageInHyperSpace(), MiSessionOutSwapProcess(), MiUnmapPageInHyperSpace, MM_DBG_SWAP_PROCESS, MM_IO_IN_PROGRESS, MM_PROCESS_COMMIT_CHARGE, MM_READWRITE, MM_WS_SWAPPED_OUT, MmWorkingSetList, _EPROCESS::PaeTop, _EPROCESS::PageDirectoryPte, _EPROCESS::Pcb, PMMPTE, _EPROCESS::ProcessOutswapEnabled, _EPROCESS::ProcessOutswapped, _MMPFN::PteAddress, _MMPFN::PteFrame, TRUE, _MMSUPPORT::u, _MMPTE::u, _MMPFN::u2, _MMPFN::u3, UNLOCK_EXPANSION, UNLOCK_PFN, _EPROCESS::Vm, _MMSUPPORT::WorkingSetExpansionLinks, _EPROCESS::WorkingSetPage, and _MMSUPPORT::WorkingSetSize.

Referenced by KiOutSwapProcesses().

03596 : 03597 03598 This routine out swaps the specified process. 03599 03600 Arguments: 03601 03602 Process - Supplies a pointer to the process that is swapped out of memory. 03603 03604 Return Value: 03605 03606 None. 03607 03608 --*/ 03609 03610 { 03611 KIRQL OldIrql; 03612 KIRQL OldIrql2; 03613 PEPROCESS OutProcess; 03614 PMMPTE PointerPte; 03615 PMMPFN Pfn1; 03616 PFN_NUMBER HyperSpacePageTable; 03617 PMMPTE HyperSpacePageTableMap; 03618 PFN_NUMBER PpePage; 03619 PFN_NUMBER PdePage; 03620 PMMPTE PageDirectoryMap; 03621 PFN_NUMBER ProcessPage; 03622 MMPTE TempPte; 03623 #if defined (_X86PAE_) 03624 ULONG i; 03625 MMPTE TempPte2; 03626 PFN_NUMBER PdePage2; 03627 PFN_NUMBER HyperPage2; 03628 PPAE_ENTRY PaeVa; 03629 #endif 03630 03631 OutProcess = CONTAINING_RECORD (Process, EPROCESS, Pcb); 03632 03633 OutProcess->ProcessOutswapEnabled = TRUE; 03634 03635 #if DBG 03636 if ((MmDebug & MM_DBG_SWAP_PROCESS) != 0) { 03637 return; 03638 } 03639 #endif //DBG 03640 03641 if (MiHydra == TRUE && OutProcess->Vm.u.Flags.ProcessInSession == 1) { 03642 MiSessionOutSwapProcess (OutProcess); 03643 } 03644 03645 if ((OutProcess->Vm.WorkingSetSize == MM_PROCESS_COMMIT_CHARGE) && 03646 (OutProcess->Vm.AllowWorkingSetAdjustment)) { 03647 03648 LOCK_EXPANSION (OldIrql); 03649 03650 ASSERT (OutProcess->ProcessOutswapped == FALSE); 03651 03652 if (OutProcess->Vm.u.Flags.BeingTrimmed == TRUE) { 03653 03654 // 03655 // An outswap is not allowed at this point because the process 03656 // has been attached to and is being trimmed. 03657 // 03658 03659 UNLOCK_EXPANSION (OldIrql); 03660 return; 03661 } 03662 03663 // 03664 // Swap the process working set info and page parent/directory/table 03665 // pages from memory. 03666 // 03667 03668 OutProcess->ProcessOutswapped = TRUE; 03669 03670 UNLOCK_EXPANSION (OldIrql); 03671 03672 LOCK_PFN (OldIrql); 03673 03674 // 03675 // Remove the working set list page from the process. 03676 // 03677 03678 #if !defined (_X86PAE_) 03679 HyperSpacePageTable = 03680 MI_GET_PAGE_FRAME_FROM_PTE((PMMPTE)(&(OutProcess->Pcb.DirectoryTableBase[1]))); 03681 #else 03682 HyperSpacePageTable = (PFN_NUMBER)OutProcess->Pcb.DirectoryTableBase[1]; 03683 #endif 03684 03685 HyperSpacePageTableMap = MiMapPageInHyperSpace (HyperSpacePageTable, &OldIrql2); 03686 03687 TempPte = HyperSpacePageTableMap[MiGetPteOffset(MmWorkingSetList)]; 03688 03689 MI_MAKE_VALID_PTE_TRANSITION (TempPte, 03690 MM_READWRITE); 03691 03692 HyperSpacePageTableMap[MiGetPteOffset(MmWorkingSetList)] = TempPte; 03693 03694 #if defined (_X86PAE_) 03695 TempPte2 = HyperSpacePageTableMap[0]; 03696 03697 HyperPage2 = MI_GET_PAGE_FRAME_FROM_PTE((PMMPTE)&TempPte2); 03698 03699 MI_MAKE_VALID_PTE_TRANSITION (TempPte2, 03700 MM_READWRITE); 03701 03702 HyperSpacePageTableMap[0] = TempPte2; 03703 #endif 03704 03705 MiUnmapPageInHyperSpace (OldIrql2); 03706 03707 #if DBG 03708 Pfn1 = MI_PFN_ELEMENT (OutProcess->WorkingSetPage); 03709 ASSERT (Pfn1->u3.e1.Modified == 1); 03710 #endif 03711 MiDecrementShareCount (OutProcess->WorkingSetPage); 03712 03713 // 03714 // Remove the hyper space page from the process. 
03715 // 03716 03717 Pfn1 = MI_PFN_ELEMENT (HyperSpacePageTable); 03718 PdePage = Pfn1->PteFrame; 03719 ASSERT (PdePage); 03720 03721 PageDirectoryMap = MiMapPageInHyperSpace (PdePage, &OldIrql2); 03722 03723 TempPte = PageDirectoryMap[MiGetPdeOffset(MmWorkingSetList)]; 03724 03725 ASSERT (TempPte.u.Hard.Valid == 1); 03726 ASSERT (TempPte.u.Hard.PageFrameNumber == HyperSpacePageTable); 03727 03728 MI_MAKE_VALID_PTE_TRANSITION (TempPte, 03729 MM_READWRITE); 03730 03731 PageDirectoryMap[MiGetPdeOffset(MmWorkingSetList)] = TempPte; 03732 03733 ASSERT (Pfn1->u3.e1.Modified == 1); 03734 03735 MiDecrementShareCount (HyperSpacePageTable); 03736 03737 #if defined (_X86PAE_) 03738 03739 // 03740 // Remove the second hyper space page from the process. 03741 // 03742 03743 Pfn1 = MI_PFN_ELEMENT (HyperPage2); 03744 03745 ASSERT (Pfn1->u3.e1.Modified == 1); 03746 03747 PdePage = Pfn1->PteFrame; 03748 ASSERT (PdePage); 03749 03750 PageDirectoryMap[MiGetPdeOffset(HYPER_SPACE2)] = TempPte2; 03751 03752 MiDecrementShareCount (HyperPage2); 03753 03754 // 03755 // Remove the additional page directory pages. 03756 // 03757 03758 PaeVa = (PPAE_ENTRY)OutProcess->PaeTop; 03759 for (i = 0; i < PD_PER_SYSTEM - 1; i += 1) { 03760 03761 TempPte = PageDirectoryMap[i]; 03762 PdePage2 = MI_GET_PAGE_FRAME_FROM_PTE((PMMPTE)&TempPte); 03763 03764 MI_MAKE_VALID_PTE_TRANSITION (TempPte, 03765 MM_READWRITE); 03766 03767 PageDirectoryMap[i] = TempPte; 03768 Pfn1 = MI_PFN_ELEMENT (PdePage2); 03769 ASSERT (Pfn1->u3.e1.Modified == 1); 03770 03771 MiDecrementShareCount (PdePage2); 03772 PaeVa->PteEntry[i].u.Long = TempPte.u.Long; 03773 } 03774 03775 #if DBG 03776 TempPte = PageDirectoryMap[i]; 03777 PdePage2 = MI_GET_PAGE_FRAME_FROM_PTE((PMMPTE)&TempPte); 03778 Pfn1 = MI_PFN_ELEMENT (PdePage2); 03779 ASSERT (Pfn1->u3.e1.Modified == 1); 03780 #endif 03781 03782 #endif 03783 03784 #if defined (_WIN64) 03785 03786 MiUnmapPageInHyperSpace (OldIrql2); 03787 03788 // 03789 // Remove the page directory page (64-bit version). 03790 // 03791 03792 Pfn1 = MI_PFN_ELEMENT (PdePage); 03793 PpePage = Pfn1->PteFrame; 03794 ASSERT (PpePage); 03795 ASSERT (PpePage == MI_GET_PAGE_FRAME_FROM_PTE((PMMPTE)(&(OutProcess->Pcb.DirectoryTableBase[0])))); 03796 03797 PageDirectoryMap = MiMapPageInHyperSpace (PpePage, &OldIrql2); 03798 03799 TempPte = PageDirectoryMap[MiGetPpeOffset(MmWorkingSetList)]; 03800 03801 ASSERT (TempPte.u.Hard.Valid == 1); 03802 ASSERT (TempPte.u.Hard.PageFrameNumber == PdePage); 03803 03804 MI_MAKE_VALID_PTE_TRANSITION (TempPte, 03805 MM_READWRITE); 03806 03807 PageDirectoryMap[MiGetPpeOffset(MmWorkingSetList)] = TempPte; 03808 03809 ASSERT (Pfn1->u3.e1.Modified == 1); 03810 03811 MiDecrementShareCount (HyperSpacePageTable); 03812 03813 // 03814 // Remove the top level page directory parent page. 03815 // 03816 03817 TempPte = PageDirectoryMap[MiGetPpeOffset(PDE_TBASE)]; 03818 03819 MI_MAKE_VALID_PTE_TRANSITION (TempPte, 03820 MM_READWRITE); 03821 03822 PageDirectoryMap[MiGetPpeOffset(PDE_TBASE)] = TempPte; 03823 03824 Pfn1 = MI_PFN_ELEMENT (PpePage); 03825 03826 #else 03827 03828 // 03829 // Remove the top level page directory page. 
03830 // 03831 03832 TempPte = PageDirectoryMap[MiGetPdeOffset(PDE_BASE)]; 03833 03834 MI_MAKE_VALID_PTE_TRANSITION (TempPte, 03835 MM_READWRITE); 03836 03837 PageDirectoryMap[MiGetPdeOffset(PDE_BASE)] = TempPte; 03838 03839 Pfn1 = MI_PFN_ELEMENT (PdePage); 03840 03841 #endif 03842 03843 MiUnmapPageInHyperSpace (OldIrql2); 03844 03845 // 03846 // Decrement share count so the top level page directory page gets 03847 // removed. This can cause the PteCount to equal the sharecount as the 03848 // page directory page no longer contains itself, yet can have 03849 // itself as a transition page. 03850 // 03851 03852 Pfn1->u2.ShareCount -= 2; 03853 Pfn1->PteAddress = (PMMPTE)&OutProcess->PageDirectoryPte; 03854 03855 OutProcess->PageDirectoryPte = TempPte.u.Flush; 03856 03857 #if defined (_X86PAE_) 03858 PaeVa->PteEntry[i].u.Long = TempPte.u.Long; 03859 #endif 03860 03861 if (MI_IS_PHYSICAL_ADDRESS(OutProcess)) { 03862 ProcessPage = MI_CONVERT_PHYSICAL_TO_PFN (OutProcess); 03863 } else { 03864 PointerPte = MiGetPteAddress (OutProcess); 03865 ProcessPage = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 03866 } 03867 03868 Pfn1->PteFrame = ProcessPage; 03869 Pfn1 = MI_PFN_ELEMENT (ProcessPage); 03870 03871 // 03872 // Increment the share count for the process page. 03873 // 03874 03875 Pfn1->u2.ShareCount += 1; 03876 UNLOCK_PFN (OldIrql); 03877 03878 LOCK_EXPANSION (OldIrql); 03879 if (OutProcess->Vm.WorkingSetExpansionLinks.Flink > 03880 MM_IO_IN_PROGRESS) { 03881 03882 // 03883 // The entry must be on the list. 03884 // 03885 RemoveEntryList (&OutProcess->Vm.WorkingSetExpansionLinks); 03886 OutProcess->Vm.WorkingSetExpansionLinks.Flink = MM_WS_SWAPPED_OUT; 03887 } 03888 UNLOCK_EXPANSION (OldIrql); 03889 03890 OutProcess->WorkingSetPage = 0; 03891 OutProcess->Vm.WorkingSetSize = 0; 03892 #if defined(_IA64_) 03893 03894 // 03895 // Force assignment of new PID as we have removed 03896 // the page directory page. 03897 // Note that a TB flush would not work here as we 03898 // are in the wrong process context. 03899 // 03900 03901 Process->ProcessRegion.SequenceNumber = 0; 03902 #endif _IA64_ 03903 03904 } 03905 03906 return; 03907 }

NTKERNELAPI PVOID MmPageEntireDriver (IN PVOID  AddressWithinSection)
 

Definition at line 2472 of file sysload.c.

References FALSE, MI_IS_SESSION_IMAGE_ADDRESS, MiGetPteAddress, MiLookupDataTableEntry(), MiSetPagingOfDriver(), MmDisablePagingExecutive, NULL, PAGE_SHIFT, PAGED_CODE, PSECTION, and TRUE.

Referenced by DriverEntry(), MiLoadSystemImage(), and NtSetSystemInformation().

02478 : 02479 02480 This routine allows a driver to page out all of its code and 02481 data regardless of the attributes of the various image sections. 02482 02483 Note, this routine can be called multiple times with no 02484 intervening calls to MmResetDriverPaging. 02485 02486 Arguments: 02487 02488 AddressWithinSection - Supplies an address within the driver, e.g. 02489 DriverEntry. 02490 02491 Return Value: 02492 02493 Base address of driver. 02494 02495 Environment: 02496 02497 Kernel mode, APC_LEVEL or below. 02498 02499 --*/ 02500 02501 { 02502 PLDR_DATA_TABLE_ENTRY DataTableEntry; 02503 PMMPTE FirstPte; 02504 PMMPTE LastPte; 02505 PVOID BaseAddress; 02506 PSECTION SectionPointer; 02507 BOOLEAN SessionSpace; 02508 02509 PAGED_CODE(); 02510 02511 // 02512 // Don't page kernel mode code if disabled via registry. 02513 // 02514 02515 DataTableEntry = MiLookupDataTableEntry (AddressWithinSection, FALSE); 02516 02517 if (DataTableEntry == NULL) { 02518 return NULL; 02519 } 02520 02521 SectionPointer = (PSECTION)DataTableEntry->SectionPointer; 02522 02523 if (MmDisablePagingExecutive) { 02524 return DataTableEntry->DllBase; 02525 } 02526 02527 SessionSpace = MI_IS_SESSION_IMAGE_ADDRESS (AddressWithinSection); 02528 02529 if ((SectionPointer != NULL) && (SectionPointer != (PVOID)-1)) { 02530 02531 // 02532 // Driver is mapped as an image (ie: win32k), this is always pagable. 02533 // For session space, an image that has been loaded at its desired 02534 // address is also always pagable. If there was an address collision, 02535 // then we fall through because we have to explicitly page it. 02536 // 02537 02538 if (SessionSpace == TRUE) { 02539 if (SectionPointer->Segment && 02540 SectionPointer->Segment->BasedAddress == SectionPointer->Segment->SystemImageBase) { 02541 return DataTableEntry->DllBase; 02542 } 02543 } 02544 else { 02545 return DataTableEntry->DllBase; 02546 } 02547 } 02548 02549 BaseAddress = DataTableEntry->DllBase; 02550 FirstPte = MiGetPteAddress (BaseAddress); 02551 LastPte = (FirstPte - 1) + (DataTableEntry->SizeOfImage >> PAGE_SHIFT); 02552 02553 MiSetPagingOfDriver (FirstPte, LastPte, SessionSpace); 02554 02555 return BaseAddress; 02556 }
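
A short usage sketch: a driver typically calls MmPageEntireDriver from DriverEntry once it knows it will be idle, and calls MmResetDriverPaging (the counterpart mentioned above) to lock the image back down before activity resumes. Everything here other than the two Mm calls is a placeholder.

    // Hedged sketch - page the entire driver image and lock it back down
    // on first use. OnFirstOpen is a hypothetical routine name.
    NTSTATUS DriverEntry (IN PDRIVER_OBJECT DriverObject, IN PUNICODE_STRING RegistryPath)
    {
        UNREFERENCED_PARAMETER (RegistryPath);
        UNREFERENCED_PARAMETER (DriverObject);

        //
        // Any address within the driver identifies the image; DriverEntry
        // itself is convenient.
        //

        MmPageEntireDriver ((PVOID) DriverEntry);

        return STATUS_SUCCESS;
    }

    VOID OnFirstOpen (VOID)
    {
        //
        // Undo the paging before doing work that cannot tolerate page faults.
        //

        MmResetDriverPaging ((PVOID) DriverEntry);
    }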

NTKERNELAPI VOID MmProbeAndLockPages (IN OUT PMDL  MemoryDescriptorList,
                                      IN KPROCESSOR_MODE  AccessMode,
                                      IN LOCK_OPERATION  Operation)
 

Definition at line 238 of file iosup.c.

References ADDRESS_AND_SIZE_TO_SPAN_PAGES, ASSERT, CHAR, COMPUTE_PAGES_SPANNED, DbgPrint, _MI_PHYSICAL_VIEW::EndVa, EXCEPTION_EXECUTE_HANDLER, ExRaiseStatus(), failure, FALSE, IoReadAccess, KernelMode, LOCK_PFN2, MDL_IO_SPACE, MDL_MAPPED_TO_SYSTEM_VA, MDL_PAGES_LOCKED, MDL_PARTIAL, MDL_PHYSICAL_VIEW, MDL_SOURCE_IS_NONPAGED_POOL, MDL_WRITE_OPERATION, MI_ADD_LOCKED_PAGE_CHARGE, MI_CONVERT_PHYSICAL_TO_PFN, MI_GET_PAGE_FRAME_FROM_PTE, MI_INSTRUMENT_PROBE_RAISES, MI_IS_PHYSICAL_ADDRESS, MI_IS_SYSTEM_CACHE_ADDRESS, MI_NONPAGABLE_MEMORY_AVAILABLE, MI_PFN_ELEMENT, MiAddMdlTracker(), MiGetPdeAddress, MiGetPpeAddress, MiGetPteAddress, MiIsPteOnPdeBoundary, MiIsPteOnPpeBoundary, MM_EMPTY_LIST, MM_PTE_WRITE_MASK, MM_READWRITE, MmAccessFault(), MmHighestPhysicalPage, MmLockPagesLimit, MmReferenceCountCheck, MmResetPageFaultReadAhead, MmSavePageFaultReadAhead, MmSetPageFaultReadAhead, MmSystemLockPagesCount, MmTrackLockedPages, MmUnlockPages(), NT_SUCCESS, NTSTATUS(), NULL, _EPROCESS::NumberOfLockedPages, PAGE_SIZE, _EPROCESS::PhysicalVadList, ProbeForWriteChar, PsGetCurrentProcess, PsGetCurrentThread, RtlGetCallersAddress(), _MI_PHYSICAL_VIEW::StartVa, TRUE, _MMVAD::u, _MMPTE::u, _MMPFN::u3, UNLOCK_PFN2, and _MI_PHYSICAL_VIEW::Vad.

Referenced by BuildQueryDirectoryIrp(), CcMdlRead(), CcPrepareMdlWrite(), CcZeroData(), ExLockUserBuffer(), IoBuildAsynchronousFsdRequest(), IoBuildDeviceIoControlRequest(), IopSetEaOrQuotaInformationFile(), IopXxxControlFile(), MiDoMappedCopy(), MiGetWorkingSetInfo(), MmProbeAndLockProcessPages(), MmProbeAndLockSelectedPages(), NtNotifyChangeDirectoryFile(), NtQueryEaFile(), NtQueryQuotaInformationFile(), NtReadFile(), NtSetEaFile(), NtStartProfile(), NtWriteFile(), UdfCreateUserMdl(), VdmQueryDirectoryFile(), and VerifierProbeAndLockPages().

00246 : 00247 00248 This routine probes the specified pages, makes the pages resident and 00249 locks the physical pages mapped by the virtual pages in memory. The 00250 Memory descriptor list is updated to describe the physical pages. 00251 00252 Arguments: 00253 00254 MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List 00255 (MDL). The supplied MDL must supply a virtual 00256 address, byte offset and length field. The 00257 physical page portion of the MDL is updated when 00258 the pages are locked in memory. 00259 00260 AccessMode - Supplies the access mode in which to probe the arguments. 00261 One of KernelMode or UserMode. 00262 00263 Operation - Supplies the operation type. One of IoReadAccess, IoWriteAccess 00264 or IoModifyAccess. 00265 00266 Return Value: 00267 00268 None - exceptions are raised. 00269 00270 Environment: 00271 00272 Kernel mode. APC_LEVEL and below for pagable addresses, 00273 DISPATCH_LEVEL and below for non-pagable addresses. 00274 00275 --*/ 00276 00277 { 00278 PPFN_NUMBER Page; 00279 MMPTE PteContents; 00280 PMMPTE PointerPte; 00281 PMMPTE PointerPde; 00282 PMMPTE PointerPpe; 00283 PVOID Va; 00284 PVOID EndVa; 00285 PVOID AlignedVa; 00286 PMMPFN Pfn1; 00287 PFN_NUMBER PageFrameIndex; 00288 PEPROCESS CurrentProcess; 00289 KIRQL OldIrql; 00290 PFN_NUMBER NumberOfPagesToLock; 00291 PFN_NUMBER NumberOfPagesSpanned; 00292 NTSTATUS status; 00293 NTSTATUS ProbeStatus; 00294 PETHREAD Thread; 00295 ULONG SavedState; 00296 LOGICAL AddressIsPhysical; 00297 PLIST_ENTRY NextEntry; 00298 PMI_PHYSICAL_VIEW PhysicalView; 00299 PCHAR StartVa; 00300 PVOID CallingAddress; 00301 PVOID CallersCaller; 00302 00303 #if !defined (_X86_) 00304 CallingAddress = (PVOID)_ReturnAddress(); 00305 CallersCaller = (PVOID)0; 00306 #endif 00307 00308 #if DBG 00309 if (MiPrintLockedPages != 0) { 00310 MiVerifyLockedPageCharges (); 00311 } 00312 #endif 00313 00314 ASSERT (MemoryDescriptorList->ByteCount != 0); 00315 ASSERT (((ULONG)MemoryDescriptorList->ByteOffset & ~(PAGE_SIZE - 1)) == 0); 00316 00317 Page = (PPFN_NUMBER)(MemoryDescriptorList + 1); 00318 00319 ASSERT (((ULONG_PTR)MemoryDescriptorList->StartVa & (PAGE_SIZE - 1)) == 0); 00320 AlignedVa = (PVOID)MemoryDescriptorList->StartVa; 00321 00322 ASSERT ((MemoryDescriptorList->MdlFlags & ( 00323 MDL_PAGES_LOCKED | 00324 MDL_MAPPED_TO_SYSTEM_VA | 00325 MDL_SOURCE_IS_NONPAGED_POOL | 00326 MDL_PARTIAL | 00327 MDL_IO_SPACE)) == 0); 00328 00329 Va = (PCHAR)AlignedVa + MemoryDescriptorList->ByteOffset; 00330 StartVa = Va; 00331 00332 PointerPte = MiGetPteAddress (Va); 00333 00334 // 00335 // Endva is one byte past the end of the buffer, if ACCESS_MODE is not 00336 // kernel, make sure the EndVa is in user space AND the byte count 00337 // does not cause it to wrap. 00338 // 00339 00340 EndVa = (PVOID)((PCHAR)Va + MemoryDescriptorList->ByteCount); 00341 00342 if ((AccessMode != KernelMode) && 00343 ((EndVa > (PVOID)MM_USER_PROBE_ADDRESS) || (Va >= EndVa))) { 00344 *Page = MM_EMPTY_LIST; 00345 MI_INSTRUMENT_PROBE_RAISES(0); 00346 ExRaiseStatus (STATUS_ACCESS_VIOLATION); 00347 return; 00348 } 00349 00350 // 00351 // There is an optimization which could be performed here. If 00352 // the operation is for WriteAccess and the complete page is 00353 // being modified, we can remove the current page, if it is not 00354 // resident, and substitute a demand zero page. 00355 // Note, that after analysis by marking the thread and then 00356 // noting if a page read was done, this rarely occurs. 
00357 // 00358 00359 MemoryDescriptorList->Process = (PEPROCESS)NULL; 00360 00361 Thread = PsGetCurrentThread (); 00362 00363 if (!MI_IS_PHYSICAL_ADDRESS(Va)) { 00364 00365 AddressIsPhysical = FALSE; 00366 ProbeStatus = STATUS_SUCCESS; 00367 00368 NumberOfPagesToLock = COMPUTE_PAGES_SPANNED (Va, 00369 MemoryDescriptorList->ByteCount); 00370 00371 ASSERT (NumberOfPagesToLock != 0); 00372 00373 NumberOfPagesSpanned = NumberOfPagesToLock; 00374 00375 PointerPpe = MiGetPpeAddress (Va); 00376 PointerPde = MiGetPdeAddress (Va); 00377 00378 MmSavePageFaultReadAhead (Thread, &SavedState); 00379 MmSetPageFaultReadAhead (Thread, (ULONG)(NumberOfPagesToLock - 1)); 00380 00381 try { 00382 00383 do { 00384 00385 *Page = MM_EMPTY_LIST; 00386 00387 // 00388 // Make sure the page is resident. 00389 // 00390 00391 *(volatile CHAR *)Va; 00392 00393 if ((Operation != IoReadAccess) && 00394 (Va <= MM_HIGHEST_USER_ADDRESS)) { 00395 00396 // 00397 // Probe for write access as well. 00398 // 00399 00400 ProbeForWriteChar ((PCHAR)Va); 00401 } 00402 00403 NumberOfPagesToLock -= 1; 00404 00405 MmSetPageFaultReadAhead (Thread, (ULONG)(NumberOfPagesToLock - 1)); 00406 Va = (PVOID)(((ULONG_PTR)(PCHAR)Va + PAGE_SIZE) & ~(PAGE_SIZE - 1)); 00407 Page += 1; 00408 } while (Va < EndVa); 00409 00410 ASSERT (NumberOfPagesToLock == 0); 00411 00412 } except (EXCEPTION_EXECUTE_HANDLER) { 00413 ProbeStatus = GetExceptionCode(); 00414 } 00415 00416 // 00417 // We may still fault again below but it's generally rare. 00418 // Restore this thread's normal fault behavior now. 00419 // 00420 00421 MmResetPageFaultReadAhead (Thread, SavedState); 00422 00423 if (ProbeStatus != STATUS_SUCCESS) { 00424 MI_INSTRUMENT_PROBE_RAISES(1); 00425 ExRaiseStatus (ProbeStatus); 00426 return; 00427 } 00428 } 00429 else { 00430 AddressIsPhysical = TRUE; 00431 *Page = MM_EMPTY_LIST; 00432 } 00433 00434 Va = AlignedVa; 00435 Page = (PPFN_NUMBER)(MemoryDescriptorList + 1); 00436 00437 // 00438 // Indicate that this is a write operation. 00439 // 00440 00441 if (Operation != IoReadAccess) { 00442 MemoryDescriptorList->MdlFlags |= MDL_WRITE_OPERATION; 00443 } else { 00444 MemoryDescriptorList->MdlFlags &= ~(MDL_WRITE_OPERATION); 00445 } 00446 00447 // 00448 // Acquire the PFN database lock. 00449 // 00450 00451 LOCK_PFN2 (OldIrql); 00452 00453 if (Va <= MM_HIGHEST_USER_ADDRESS) { 00454 00455 // 00456 // These are addresses with user space, check to see if the 00457 // working set size will allow these pages to be locked. 00458 // 00459 00460 ASSERT (NumberOfPagesSpanned != 0); 00461 00462 CurrentProcess = PsGetCurrentProcess (); 00463 00464 // 00465 // Check for a transfer to/from a physical VAD - no reference counts 00466 // may be modified for these pages. 00467 // 00468 00469 NextEntry = CurrentProcess->PhysicalVadList.Flink; 00470 while (NextEntry != &CurrentProcess->PhysicalVadList) { 00471 00472 PhysicalView = CONTAINING_RECORD(NextEntry, 00473 MI_PHYSICAL_VIEW, 00474 ListEntry); 00475 00476 if ((PhysicalView->Vad->u.VadFlags.UserPhysicalPages == 0) && 00477 (PhysicalView->Vad->u.VadFlags.PhysicalMapping == 0)) { 00478 NextEntry = NextEntry->Flink; 00479 continue; 00480 } 00481 00482 if (StartVa < PhysicalView->StartVa) { 00483 00484 if ((PCHAR)EndVa - 1 >= PhysicalView->StartVa) { 00485 00486 // 00487 // The range encompasses a physical VAD. This is not 00488 // allowed. 
00489 // 00490 00491 UNLOCK_PFN2 (OldIrql); 00492 MI_INSTRUMENT_PROBE_RAISES(2); 00493 ExRaiseStatus (STATUS_ACCESS_VIOLATION); 00494 return; 00495 } 00496 00497 NextEntry = NextEntry->Flink; 00498 continue; 00499 } 00500 00501 if (StartVa <= PhysicalView->EndVa) { 00502 00503 // 00504 // Ensure that the entire range lies within the VAD. 00505 // 00506 00507 if ((PCHAR)EndVa - 1 > PhysicalView->EndVa) { 00508 00509 // 00510 // The range goes past the end of the VAD - not allowed. 00511 // 00512 00513 UNLOCK_PFN2 (OldIrql); 00514 MI_INSTRUMENT_PROBE_RAISES(3); 00515 ExRaiseStatus (STATUS_ACCESS_VIOLATION); 00516 return; 00517 } 00518 00519 if (PhysicalView->Vad->u.VadFlags.UserPhysicalPages == 1) { 00520 00521 // 00522 // All the PTEs must still be checked and reference 00523 // counts bumped on the pages. Just don't charge 00524 // against the working set. 00525 // 00526 00527 NextEntry = NextEntry->Flink; 00528 continue; 00529 } 00530 00531 // 00532 // The range lies within a physical VAD. 00533 // 00534 00535 if (Operation != IoReadAccess) { 00536 00537 // 00538 // Ensure the VAD is writable. Changing individual PTE 00539 // protections in a physical VAD is not allowed. 00540 // 00541 00542 if ((PhysicalView->Vad->u.VadFlags.Protection & MM_READWRITE) == 0) { 00543 UNLOCK_PFN2 (OldIrql); 00544 MI_INSTRUMENT_PROBE_RAISES(4); 00545 ExRaiseStatus (STATUS_ACCESS_VIOLATION); 00546 return; 00547 } 00548 } 00549 00550 // 00551 // Don't charge page locking for this transfer as it is all 00552 // physical, just initialize the MDL. Note the pages do not 00553 // have to be physically contiguous, so the frames must be 00554 // extracted from the PTEs. 00555 // 00556 00557 MemoryDescriptorList->MdlFlags |= (MDL_PHYSICAL_VIEW | MDL_PAGES_LOCKED); 00558 MemoryDescriptorList->Process = CurrentProcess; 00559 00560 do { 00561 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 00562 *Page = PageFrameIndex; 00563 Page += 1; 00564 PointerPte += 1; 00565 Va = (PVOID)((PCHAR)Va + PAGE_SIZE); 00566 } while (Va < EndVa); 00567 00568 UNLOCK_PFN2 (OldIrql); 00569 return; 00570 } 00571 NextEntry = NextEntry->Flink; 00572 } 00573 00574 CurrentProcess->NumberOfLockedPages += NumberOfPagesSpanned; 00575 00576 MemoryDescriptorList->Process = CurrentProcess; 00577 } 00578 00579 MemoryDescriptorList->MdlFlags |= MDL_PAGES_LOCKED; 00580 00581 do { 00582 00583 if (AddressIsPhysical == TRUE) { 00584 00585 // 00586 // On certain architectures, virtual addresses 00587 // may be physical and hence have no corresponding PTE. 00588 // 00589 00590 PageFrameIndex = MI_CONVERT_PHYSICAL_TO_PFN (Va); 00591 00592 } else { 00593 00594 #if defined (_WIN64) 00595 while ((PointerPpe->u.Hard.Valid == 0) || 00596 (PointerPde->u.Hard.Valid == 0) || 00597 (PointerPte->u.Hard.Valid == 0)) 00598 #else 00599 while ((PointerPde->u.Hard.Valid == 0) || 00600 (PointerPte->u.Hard.Valid == 0)) 00601 #endif 00602 { 00603 00604 // 00605 // PDE is not resident, release PFN lock touch the page and make 00606 // it appear. 00607 // 00608 00609 UNLOCK_PFN2 (OldIrql); 00610 00611 MmSetPageFaultReadAhead (Thread, 0); 00612 00613 status = MmAccessFault (FALSE, Va, KernelMode, (PVOID)0); 00614 00615 MmResetPageFaultReadAhead (Thread, SavedState); 00616 00617 if (!NT_SUCCESS(status)) { 00618 00619 // 00620 // An exception occurred. Unlock the pages locked 00621 // so far. 
00622 // 00623 00624 failure: 00625 if (MmTrackLockedPages == TRUE) { 00626 00627 // 00628 // Adjust the MDL length so that MmUnlockPages only 00629 // processes the part that was completed. 00630 // 00631 00632 ULONG PagesLocked; 00633 00634 PagesLocked = ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartVa, 00635 MemoryDescriptorList->ByteCount); 00636 00637 #if defined (_X86_) 00638 RtlGetCallersAddress(&CallingAddress, &CallersCaller); 00639 #endif 00640 MiAddMdlTracker (MemoryDescriptorList, 00641 CallingAddress, 00642 CallersCaller, 00643 PagesLocked, 00644 0); 00645 } 00646 00647 MmUnlockPages (MemoryDescriptorList); 00648 00649 // 00650 // Raise an exception of access violation to the caller. 00651 // 00652 00653 MI_INSTRUMENT_PROBE_RAISES(7); 00654 ExRaiseStatus (status); 00655 return; 00656 } 00657 00658 LOCK_PFN2 (OldIrql); 00659 } 00660 00661 PteContents = *PointerPte; 00662 ASSERT (PteContents.u.Hard.Valid == 1); 00663 00664 if (Va <= MM_HIGHEST_USER_ADDRESS) { 00665 if (Operation != IoReadAccess) { 00666 00667 if ((PteContents.u.Long & MM_PTE_WRITE_MASK) == 0) { 00668 00669 // 00670 // The caller has made the page protection more 00671 // restrictive, this should never be done once the 00672 // request has been issued ! Rather than wading 00673 // through the PFN database entry to see if it 00674 // could possibly work out, give the caller an 00675 // access violation. 00676 // 00677 00678 #if DBG 00679 DbgPrint ("MmProbeAndLockPages: PTE %p %p changed\n", 00680 PointerPte, 00681 PteContents.u.Long); 00682 ASSERT (FALSE); 00683 #endif 00684 00685 UNLOCK_PFN2 (OldIrql); 00686 status = STATUS_ACCESS_VIOLATION; 00687 goto failure; 00688 } 00689 } 00690 } 00691 00692 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (&PteContents); 00693 } 00694 00695 if (PageFrameIndex > MmHighestPhysicalPage) { 00696 00697 // 00698 // This is an I/O space address don't allow operations 00699 // on addresses not in the PFN database. 00700 // 00701 00702 MemoryDescriptorList->MdlFlags |= MDL_IO_SPACE; 00703 00704 } else { 00705 ASSERT ((MemoryDescriptorList->MdlFlags & MDL_IO_SPACE) == 0); 00706 00707 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 00708 00709 #if PFN_CONSISTENCY 00710 ASSERT(Pfn1->u3.e1.PageTablePage == 0); 00711 #endif 00712 00713 // 00714 // Check to make sure this page is not locked down an unusually 00715 // high number of times. 00716 // 00717 00718 if (Pfn1->u3.e2.ReferenceCount >= MmReferenceCountCheck) { 00719 UNLOCK_PFN2 (OldIrql); 00720 ASSERT (FALSE); 00721 status = STATUS_WORKING_SET_QUOTA; 00722 goto failure; 00723 } 00724 00725 // 00726 // Check to make sure the systemwide locked pages count is fluid. 00727 // 00728 00729 if (MI_NONPAGABLE_MEMORY_AVAILABLE() <= 0) { 00730 00731 // 00732 // If this page is for paged pool or privileged code/data, 00733 // then force it in. 00734 // 00735 00736 if ((Va > MM_HIGHEST_USER_ADDRESS) && 00737 (!MI_IS_SYSTEM_CACHE_ADDRESS(Va))) { 00738 MI_INSTRUMENT_PROBE_RAISES(8); 00739 goto ok; 00740 } 00741 00742 MI_INSTRUMENT_PROBE_RAISES(5); 00743 UNLOCK_PFN2 (OldIrql); 00744 status = STATUS_WORKING_SET_QUOTA; 00745 goto failure; 00746 } 00747 00748 // 00749 // Check to make sure any administrator-desired limit is obeyed. 00750 // 00751 00752 if (MmSystemLockPagesCount + 1 >= MmLockPagesLimit) { 00753 00754 // 00755 // If this page is for paged pool or privileged code/data, 00756 // then force it in. 
00757 // 00758 00759 if ((Va > MM_HIGHEST_USER_ADDRESS) && 00760 (!MI_IS_SYSTEM_CACHE_ADDRESS(Va))) { 00761 MI_INSTRUMENT_PROBE_RAISES(9); 00762 goto ok; 00763 } 00764 00765 MI_INSTRUMENT_PROBE_RAISES(6); 00766 UNLOCK_PFN2 (OldIrql); 00767 status = STATUS_WORKING_SET_QUOTA; 00768 goto failure; 00769 } 00770 00771 ok: 00772 MI_ADD_LOCKED_PAGE_CHARGE(Pfn1, 0); 00773 00774 Pfn1->u3.e2.ReferenceCount += 1; 00775 } 00776 00777 *Page = PageFrameIndex; 00778 00779 Page += 1; 00780 PointerPte += 1; 00781 if (MiIsPteOnPdeBoundary(PointerPte)) { 00782 PointerPde += 1; 00783 if (MiIsPteOnPpeBoundary(PointerPte)) { 00784 PointerPpe += 1; 00785 } 00786 } 00787 00788 Va = (PVOID)((PCHAR)Va + PAGE_SIZE); 00789 } while (Va < EndVa); 00790 00791 UNLOCK_PFN2 (OldIrql); 00792 00793 if ((MmTrackLockedPages == TRUE) && (AlignedVa <= MM_HIGHEST_USER_ADDRESS)) { 00794 00795 ASSERT (NumberOfPagesSpanned != 0); 00796 00797 #if defined (_X86_) 00798 RtlGetCallersAddress(&CallingAddress, &CallersCaller); 00799 #endif 00800 00801 MiAddMdlTracker (MemoryDescriptorList, 00802 CallingAddress, 00803 CallersCaller, 00804 NumberOfPagesSpanned, 00805 1); 00806 } 00807 00808 return; 00809 }
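
For context, the usual MDL pattern around this routine, sketched under the assumption that UserBuffer and Length describe a request being handled in the requestor's context. The probe is wrapped in try/except because failures are raised, not returned.

    // Hedged sketch of the standard probe-and-lock pattern. UserBuffer and
    // Length are assumed to come from the caller's request.
    NTSTATUS LockUserBuffer (IN PVOID UserBuffer, IN ULONG Length, OUT PMDL *MdlOut)
    {
        PMDL Mdl;

        Mdl = IoAllocateMdl (UserBuffer, Length, FALSE, FALSE, NULL);
        if (Mdl == NULL) {
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        try {

            //
            // Access violations and quota failures are raised, not returned.
            //

            MmProbeAndLockPages (Mdl, UserMode, IoWriteAccess);

        } except (EXCEPTION_EXECUTE_HANDLER) {
            IoFreeMdl (Mdl);
            return GetExceptionCode ();
        }

        *MdlOut = Mdl;      // caller later calls MmUnlockPages and IoFreeMdl
        return STATUS_SUCCESS;
    }

If a system-space mapping of the locked pages is needed, it comes from MmGetSystemAddressForMdlSafe before MmUnlockPages is called.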

NTKERNELAPI VOID MmProbeAndLockProcessPages (IN OUT PMDL  MemoryDescriptorList,
                                             IN PEPROCESS  Process,
                                             IN KPROCESSOR_MODE  AccessMode,
                                             IN LOCK_OPERATION  Operation)
 

Definition at line 813 of file iosup.c.

References EXCEPTION_EXECUTE_HANDLER, ExRaiseStatus(), FALSE, KeAttachProcess(), KeDetachProcess(), MmProbeAndLockPages(), NTSTATUS(), PsGetCurrentProcess, Status, and TRUE.

Referenced by VerifierProbeAndLockProcessPages().

00822 : 00823 00824 This routine probes and locks the address range specified by 00825 the MemoryDescriptorList in the specified Process for the AccessMode 00826 and Operation. 00827 00828 Arguments: 00829 00830 MemoryDescriptorList - Supplies a pre-initialized MDL that describes the 00831 address range to be probed and locked. 00832 00833 Process - Specifies the address of the process whose address range is 00834 to be locked. 00835 00836 AccessMode - The mode for which the probe should check access to the range. 00837 00838 Operation - Supplies the type of access which for which to check the range. 00839 00840 Return Value: 00841 00842 None. 00843 00844 --*/ 00845 00846 { 00847 LOGICAL Attached; 00848 NTSTATUS Status; 00849 00850 Attached = FALSE; 00851 Status = STATUS_SUCCESS; 00852 00853 if (Process != PsGetCurrentProcess ()) { 00854 KeAttachProcess (&Process->Pcb); 00855 Attached = TRUE; 00856 } 00857 00858 try { 00859 00860 MmProbeAndLockPages (MemoryDescriptorList, 00861 AccessMode, 00862 Operation); 00863 00864 } except (EXCEPTION_EXECUTE_HANDLER) { 00865 Status = GetExceptionCode(); 00866 } 00867 00868 if (Attached) { 00869 KeDetachProcess(); 00870 } 00871 00872 if (Status != STATUS_SUCCESS) { 00873 ExRaiseStatus (Status); 00874 } 00875 return; 00876 }
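
A hedged sketch of how a caller (such as the verifier thunk listed above) might use this wrapper to lock a buffer that lives in another process. RemoteVa, Length, and TargetProcess are hypothetical inputs; the routine itself performs the attach and detach.

    // Hedged sketch: lock Length bytes at RemoteVa inside TargetProcess.
    PMDL Mdl = IoAllocateMdl (RemoteVa, Length, FALSE, FALSE, NULL);

    if (Mdl != NULL) {
        try {
            MmProbeAndLockPages... /* see pattern above */
            MmProbeAndLockProcessPages (Mdl, TargetProcess, UserMode, IoReadAccess);
            // ... use the MDL ...
            MmUnlockPages (Mdl);
        } except (EXCEPTION_EXECUTE_HANDLER) {
            // probe failures are re-raised by MmProbeAndLockProcessPages
        }
        IoFreeMdl (Mdl);
    }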

NTKERNELAPI VOID MmProbeAndLockSelectedPages (IN OUT PMDL  MemoryDescriptorList,
                                              IN PFILE_SEGMENT_ELEMENT  SegmentArray,
                                              IN KPROCESSOR_MODE  AccessMode,
                                              IN LOCK_OPERATION  Operation)
 

Definition at line 1152 of file iosup.c.

References ASSERT, BYTES_TO_PAGES, MDL_IO_SPACE, MDL_MAPPED_TO_SYSTEM_VA, MDL_PAGES_LOCKED, MDL_PARTIAL, MDL_SOURCE_IS_NONPAGED_POOL, _MDL::MdlFlags, MiAddMdlTracker(), MiFreeMdlTracker(), MmInitializeMdl, MmProbeAndLockPages(), MmTrackLockedPages, MmUnlockPages(), PAGE_SHIFT, PAGE_SIZE, PAGED_CODE, _MDL::Process, RtlGetCallersAddress(), _MDL::StartVa, and TRUE.

Referenced by NtReadFileScatter(), NtWriteFileGather(), and VerifierProbeAndLockSelectedPages().

01161 : 01162 01163 This routine probes the specified pages, makes the pages resident and 01164 locks the physical pages mapped by the virtual pages in memory. The 01165 Memory descriptor list is updated to describe the physical pages. 01166 01167 Arguments: 01168 01169 MemoryDescriptorList - Supplies a pointer to a Memory Descriptor List 01170 (MDL). The MDL must supply the length. The 01171 physical page portion of the MDL is updated when 01172 the pages are locked in memory. 01173 01174 SegmentArray - Supplies a pointer to a list of buffer segments to be 01175 probed and locked. 01176 01177 AccessMode - Supplies the access mode in which to probe the arguments. 01178 One of KernelMode or UserMode. 01179 01180 Operation - Supplies the operation type. One of IoReadAccess, IoWriteAccess 01181 or IoModifyAccess. 01182 01183 Return Value: 01184 01185 None - exceptions are raised. 01186 01187 Environment: 01188 01189 Kernel mode. APC_LEVEL and below. 01190 01191 --*/ 01192 01193 { 01194 PMDL TempMdl; 01195 PFN_NUMBER MdlHack[(sizeof(MDL)/sizeof(PFN_NUMBER)) + 1]; 01196 PPFN_NUMBER Page; 01197 PFILE_SEGMENT_ELEMENT LastSegment; 01198 PVOID CallingAddress; 01199 PVOID CallersCaller; 01200 ULONG NumberOfPagesToLock; 01201 01202 PAGED_CODE(); 01203 01204 #if !defined (_X86_) 01205 CallingAddress = (PVOID)_ReturnAddress(); 01206 CallersCaller = (PVOID)0; 01207 #endif 01208 01209 NumberOfPagesToLock = 0; 01210 01211 ASSERT (MemoryDescriptorList->ByteCount != 0); 01212 ASSERT (((ULONG_PTR)MemoryDescriptorList->ByteOffset & ~(PAGE_SIZE - 1)) == 0); 01213 01214 ASSERT ((MemoryDescriptorList->MdlFlags & ( 01215 MDL_PAGES_LOCKED | 01216 MDL_MAPPED_TO_SYSTEM_VA | 01217 MDL_SOURCE_IS_NONPAGED_POOL | 01218 MDL_PARTIAL | 01219 MDL_IO_SPACE)) == 0); 01220 01221 // 01222 // Initialize TempMdl. 01223 // 01224 01225 TempMdl = (PMDL) &MdlHack; 01226 01227 MmInitializeMdl( TempMdl, SegmentArray->Buffer, PAGE_SIZE ); 01228 01229 Page = (PPFN_NUMBER) (MemoryDescriptorList + 1); 01230 01231 // 01232 // Calculate the end of the segment list. 01233 // 01234 01235 LastSegment = SegmentArray + 01236 BYTES_TO_PAGES(MemoryDescriptorList->ByteCount); 01237 01238 ASSERT(SegmentArray < LastSegment); 01239 01240 // 01241 // Build a small Mdl for each segment and call probe and lock pages. 01242 // Then copy the PFNs to the real mdl. The first page is processed 01243 // outside of the try/finally to ensure that the flags and process 01244 // field are correctly set in case MmUnlockPages needs to be called. 01245 // 01246 01247 // 01248 // Even systems without 64 bit pointers are required to zero the 01249 // upper 32 bits of the segment address so use alignment rather 01250 // than the buffer pointer. 01251 // 01252 01253 SegmentArray += 1; 01254 MmProbeAndLockPages( TempMdl, AccessMode, Operation ); 01255 01256 if (MmTrackLockedPages == TRUE) { 01257 01258 // 01259 // Since we move the page from the temp MDL to the real one below 01260 // and never free the temp one, fixup our accounting now. 01261 // 01262 01263 if (MiFreeMdlTracker (TempMdl, 1) == TRUE) { 01264 NumberOfPagesToLock += 1; 01265 } 01266 } 01267 01268 *Page++ = *((PPFN_NUMBER) (TempMdl + 1)); 01269 01270 // 01271 // Copy the flags and process fields. 
01272 // 01273 01274 MemoryDescriptorList->MdlFlags |= TempMdl->MdlFlags; 01275 MemoryDescriptorList->Process = TempMdl->Process; 01276 01277 try { 01278 01279 while (SegmentArray < LastSegment) { 01280 01281 // 01282 // Even systems without 64 bit pointers are required to zero the 01283 // upper 32 bits of the segment address so use alignment rather 01284 // than the buffer pointer. 01285 // 01286 01287 TempMdl->StartVa = (PVOID)(ULONG_PTR)SegmentArray->Buffer; 01288 TempMdl->MdlFlags = 0; 01289 01290 SegmentArray += 1; 01291 MmProbeAndLockPages( TempMdl, AccessMode, Operation ); 01292 01293 01294 if (MmTrackLockedPages == TRUE) { 01295 01296 // 01297 // Since we move the page from the temp MDL to the real one 01298 // below and never free the temp one, fixup our accounting now. 01299 // 01300 01301 if (MiFreeMdlTracker (TempMdl, 1) == TRUE) { 01302 NumberOfPagesToLock += 1; 01303 } 01304 } 01305 01306 *Page++ = *((PPFN_NUMBER) (TempMdl + 1)); 01307 } 01308 } finally { 01309 01310 if (abnormal_termination()) { 01311 01312 // 01313 // Adjust the MDL length so that MmUnlockPages only processes 01314 // the part that was completed. 01315 // 01316 01317 MemoryDescriptorList->ByteCount = 01318 (ULONG) (Page - (PPFN_NUMBER) (MemoryDescriptorList + 1)) << PAGE_SHIFT; 01319 01320 if (MmTrackLockedPages == TRUE) { 01321 #if defined (_X86_) 01322 RtlGetCallersAddress(&CallingAddress, &CallersCaller); 01323 #endif 01324 MiAddMdlTracker (MemoryDescriptorList, 01325 CallingAddress, 01326 CallersCaller, 01327 NumberOfPagesToLock, 01328 2); 01329 } 01330 01331 MmUnlockPages( MemoryDescriptorList ); 01332 } 01333 else if (MmTrackLockedPages == TRUE) { 01334 #if defined (_X86_) 01335 RtlGetCallersAddress(&CallingAddress, &CallersCaller); 01336 #endif 01337 MiAddMdlTracker (MemoryDescriptorList, 01338 CallingAddress, 01339 CallersCaller, 01340 NumberOfPagesToLock, 01341 3); 01342 } 01343 } 01344 }
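
A hedged sketch of the scatter/gather shape this routine expects, following the NtReadFileScatter usage above: one page-aligned FILE_SEGMENT_ELEMENT per page, with the MDL's ByteCount covering the whole transfer. The buffer names and the exact FILE_SEGMENT_ELEMENT layout are assumptions.

    // Hedged sketch: lock two page-aligned buffers described by a segment
    // array. Page0 and Page1 are hypothetical page-aligned addresses.
    FILE_SEGMENT_ELEMENT Segments[2];
    PMDL Mdl;

    Segments[0].Buffer = Page0;   // or PtrToPtr64(Page0), depending on header vintage
    Segments[1].Buffer = Page1;

    Mdl = IoAllocateMdl (Page0, 2 * PAGE_SIZE, FALSE, FALSE, NULL);
    if (Mdl != NULL) {
        try {
            MmProbeAndLockSelectedPages (Mdl, Segments, UserMode, IoWriteAccess);
            // ... the MDL's PFN array now holds one frame per segment ...
            MmUnlockPages (Mdl);
        } except (EXCEPTION_EXECUTE_HANDLER) {
            // probe failures are raised
        }
        IoFreeMdl (Mdl);
    }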

LOGICAL MmProtectSpecialPool (IN PVOID  VirtualAddress,
                              IN ULONG  NewProtect)
 

Definition at line 5228 of file allocpag.c.

References MiProtectSpecialPool(), and MiSpecialPoolPtes.

Referenced by IovpProtectedIrpMakeTouchable(), and IovpProtectedIrpMakeUntouchable().

05235 : 05236 05237 This function protects a special pool allocation. 05238 05239 Arguments: 05240 05241 VirtualAddress - Supplies the special pool address to protect. 05242 05243 NewProtect - Supplies the protection to set the pages to (PAGE_XX). 05244 05245 Return Value: 05246 05247 TRUE if the protection was successfully applied, FALSE if not. 05248 05249 Environment: 05250 05251 Kernel mode, IRQL at APC_LEVEL or below for pagable pool, DISPATCH or 05252 below for nonpagable pool. 05253 05254 Note that setting an allocation to NO_ACCESS implies that an accessible 05255 protection must be applied by the caller prior to this allocation being 05256 freed. 05257 05258 Note this is a nonpagable wrapper so that machines without special pool 05259 can still support code attempting to protect special pool at 05260 DISPATCH_LEVEL. 05261 05262 --*/ 05263 05264 { 05265 if (MiSpecialPoolPtes == 0) { 05266 05267 // 05268 // The special pool allocation code was never initialized. 05269 // 05270 05271 return (ULONG)-1; 05272 } 05273 05274 return MiProtectSpecialPool (VirtualAddress, NewProtect); 05275 }
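
A brief hedged sketch matching the verifier usage above: make a special pool allocation untouchable, then restore an accessible protection before it is freed, as the Environment note requires. Va is assumed to be an address returned by the special pool allocator (for example, ExAllocatePoolWithTag with special pool enabled for that tag).

    // Hedged sketch - Va is assumed to be a live special pool allocation.
    if (MmProtectSpecialPool (Va, PAGE_NOACCESS) == TRUE) {

        // ... any access to the allocation now faults ...

        //
        // An accessible protection must be restored before the block is
        // freed (see the Environment note above).
        //

        MmProtectSpecialPool (Va, PAGE_READWRITE);
    }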

BOOLEAN MmPurgeSection (IN PSECTION_OBJECT_POINTERS  SectionObjectPointer,
                        IN PLARGE_INTEGER Offset  OPTIONAL,
                        IN SIZE_T  RegionSize,
                        IN ULONG  IgnoreCacheViews)
 

Definition at line 1548 of file flushsec.c.

References ASSERT, BYTE_OFFSET, DbgPrint, DISPATCH_LEVEL, FALSE, _FILE_OBJECT::FileName, _CONTROL_AREA::FilePointer, FreePageList, KeBugCheckEx(), KeEnterCriticalRegion, KeLeaveCriticalRegion, KernelMode, KeWaitForSingleObject(), LOCK_PFN, MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE, MI_PFN_ELEMENT, MI_SET_PFN_DELETED, MI_WRITE_INVALID_PTE, MiCancelWriteOfMappedPfn(), MiCanFileBeTruncatedInternal(), MiCheckControlArea(), MiCheckProtoPtePageState(), MiDecrementShareCount(), MiInsertPageInList(), MiIsPteOnPdeBoundary, MiMakeSystemAddressValidPfn(), MiReleasePageFileSpace(), MiUnlinkPageFromList(), MM_DBG_FLUSH_SECTION, MmMappedFileIoComplete, MmPageLocationList, _CONTROL_AREA::ModifiedWriteCount, _SUBSECTION::NextSubsection, NULL, _CONTROL_AREA::NumberOfMappedViews, _CONTROL_AREA::NumberOfPfnReferences, _CONTROL_AREA::NumberOfSystemCacheViews, Offset, _MMPFN::OriginalPte, PAGE_SHIFT, PAGE_SIZE, _MMPFN::PteAddress, _MMPFN::PteFrame, _SUBSECTION::PtesInSubsection, _SUBSECTION::SubsectionBase, TRUE, _CONTROL_AREA::u, _MMPTE::u, _MMPFN::u3, UNLOCK_PFN, UNLOCK_PFN_AND_THEN_WAIT, and WrPageOut.

Referenced by CcPurgeCacheSection().

01557 : 01558 01559 This function determines if any views of the specified section 01560 are mapped, and if not, purges valid pages (even modified ones) 01561 from the specified section and returns any used pages to the free 01562 list. This is accomplished by examining the prototype PTEs 01563 from the specified offset to the end of the section, and if 01564 any prototype PTEs are in the transition state, putting the 01565 prototype PTE back into its original state and putting the 01566 physical page on the free list. 01567 01568 NOTE: 01569 01570 If there is an I/O operation ongoing for one of the pages, 01571 that page is eliminated from the segment and allowed to "float" 01572 until the i/o is complete. Once the share count goes to zero 01573 the page will be added to the free page list. 01574 01575 Arguments: 01576 01577 SectionObjectPointer - Supplies a pointer to the section objects. 01578 01579 Offset - Supplies the offset into the section in which to begin 01580 purging pages. If this argument is not present, then the 01581 whole section is purged without regard to the region size 01582 argument. 01583 01584 01585 RegionSize - Supplies the size of the region to purge. If this 01586 is specified as zero and Offset is specified, the 01587 region from Offset to the end of the file is purged. 01588 01589 Note: The largest value acceptable for RegionSize is 01590 0xFFFF0000; 01591 01592 IgnoreCacheViews - Supplies FALSE if mapped views in the system 01593 cache should cause the function to return FALSE. 01594 This is the normal case. 01595 Supplies TRUE if mapped views should be ignored 01596 and the flush should occur. NOTE THAT IF TRUE 01597 IS SPECIFIED AND ANY DATA PURGED IS CURRENTLY MAPPED 01598 AND VALID A BUGCHECK WILL OCCUR!! 01599 01600 Return Value: 01601 01602 Returns TRUE if either no section exists for the file object or 01603 the section is not mapped and the purge was done, FALSE otherwise. 01604 01605 Note that FALSE is returned if during the purge operation, a page 01606 could not be purged due to a non-zero reference count. 01607 01608 --*/ 01609 01610 { 01611 PCONTROL_AREA ControlArea; 01612 PMMPTE PointerPte; 01613 PMMPTE LastPte; 01614 PMMPTE FinalPte; 01615 MMPTE PteContents; 01616 PMMPFN Pfn1; 01617 KIRQL OldIrql; 01618 ULONG PteOffset; 01619 PSUBSECTION Subsection; 01620 PSUBSECTION LastSubsection; 01621 LARGE_INTEGER LocalOffset; 01622 BOOLEAN DeleteSegment = FALSE; 01623 BOOLEAN LockHeld; 01624 BOOLEAN ReturnValue; 01625 PFN_NUMBER PageFrameIndex; 01626 #if DBG 01627 PFN_NUMBER LastLocked = 0; 01628 #endif //DBG 01629 01630 // 01631 // This is needed in case a page is on the mapped page writer list - 01632 // the PFN lock will need to be released and APCs disabled. 01633 // 01634 01635 ASSERT (KeGetCurrentIrql() < DISPATCH_LEVEL); 01636 01637 // 01638 // Capture caller's file size, since we may modify it. 01639 // 01640 01641 if (ARGUMENT_PRESENT(Offset)) { 01642 01643 LocalOffset = *Offset; 01644 Offset = &LocalOffset; 01645 } 01646 01647 // 01648 // See if we can truncate this file to where the caller wants 01649 // us to. 01650 // 01651 01652 if (!MiCanFileBeTruncatedInternal(SectionObjectPointer, Offset, TRUE, &OldIrql)) { 01653 return FALSE; 01654 } 01655 01656 // 01657 // PFN LOCK IS NOW HELD! 
01658 // 01659 01660 ControlArea = (PCONTROL_AREA)(SectionObjectPointer->DataSectionObject); 01661 if (ControlArea == NULL) { 01662 UNLOCK_PFN (OldIrql); 01663 return TRUE; 01664 01665 // 01666 // Even though MiCanFileBeTruncatedInternal returned TRUE, there could 01667 // still be a system cache mapped view. We cannot truncate if 01668 // the Cache Manager has a view mapped. 01669 // 01670 01671 } else if ((IgnoreCacheViews == FALSE) && 01672 (ControlArea->NumberOfSystemCacheViews != 0)) { 01673 UNLOCK_PFN (OldIrql); 01674 return FALSE; 01675 } 01676 01677 // 01678 // Prevent races when the control area is being deleted as the clean 01679 // path releases the PFN lock midway through. File objects may still have 01680 // section object pointers and data section objects that point at this 01681 // control area, hence the purge can be issued. 01682 // 01683 // Check for this and fail the purge as the control area (and the section 01684 // object pointers/data section objects) will be going away momentarily. 01685 // Note that even though drivers have these data section objects, no one 01686 // currently has an open section for this control area and no one is 01687 // allowed to open one until the clean path finishes. 01688 // 01689 01690 if (ControlArea->u.Flags.BeingDeleted == 1) { 01691 UNLOCK_PFN (OldIrql); 01692 return FALSE; 01693 } 01694 01695 // 01696 // Purge the section - locate the subsection which 01697 // contains the PTEs. 01698 // 01699 01700 ASSERT (ControlArea->u.Flags.GlobalOnlyPerSession == 0); 01701 01702 Subsection = (PSUBSECTION)(ControlArea + 1); 01703 01704 if (!ARGUMENT_PRESENT (Offset)) { 01705 01706 // 01707 // If the offset is not specified, flush the complete file ignoring 01708 // the region size. 01709 // 01710 01711 PointerPte = &Subsection->SubsectionBase[0]; 01712 RegionSize = 0; 01713 01714 } else { 01715 01716 PteOffset = (ULONG)(Offset->QuadPart >> PAGE_SHIFT); 01717 01718 // 01719 // Make sure the PTEs are not in the extended part of the 01720 // segment. 01721 // 01722 01723 while (PteOffset >= Subsection->PtesInSubsection) { 01724 PteOffset -= Subsection->PtesInSubsection; 01725 Subsection = Subsection->NextSubsection; 01726 if (Subsection == NULL) { 01727 01728 // 01729 // The offset must be equal to the size of 01730 // the section, don't purge anything just return. 01731 // 01732 01733 //ASSERT (PteOffset == 0); 01734 UNLOCK_PFN (OldIrql); 01735 return TRUE; 01736 } 01737 } 01738 01739 ASSERT (PteOffset < Subsection->PtesInSubsection); 01740 PointerPte = &Subsection->SubsectionBase[PteOffset]; 01741 } 01742 01743 01744 // 01745 // Locate the address of the last prototype PTE to be flushed. 01746 // 01747 01748 if (RegionSize == 0) { 01749 01750 // 01751 // Flush to end of section. 01752 // 01753 01754 LastSubsection = Subsection; 01755 while (LastSubsection->NextSubsection != NULL) { 01756 LastSubsection = LastSubsection->NextSubsection; 01757 } 01758 01759 // 01760 // Set the final PTE to 1 beyond the last page. 01761 // 01762 01763 FinalPte = &LastSubsection->SubsectionBase 01764 [LastSubsection->PtesInSubsection]; 01765 } else { 01766 01767 // 01768 // Calculate the end of the region. 
01769 // 01770 01771 PteOffset += 01772 (ULONG) (((RegionSize + BYTE_OFFSET(Offset->LowPart)) - 1) >> PAGE_SHIFT); 01773 01774 LastSubsection = Subsection; 01775 01776 while (PteOffset >= LastSubsection->PtesInSubsection) { 01777 PteOffset -= LastSubsection->PtesInSubsection; 01778 if (LastSubsection->NextSubsection == NULL) { 01779 PteOffset = LastSubsection->PtesInSubsection - 1; 01780 break; 01781 } 01782 LastSubsection = LastSubsection->NextSubsection; 01783 } 01784 01785 ASSERT (PteOffset < LastSubsection->PtesInSubsection); 01786 01787 // 01788 // Point final PTE to 1 beyond the end. 01789 // 01790 01791 FinalPte = &LastSubsection->SubsectionBase[PteOffset + 1]; 01792 } 01793 01794 // 01795 // Increment the number of mapped views to 01796 // prevent the section from being deleted while the purge is 01797 // in progress. 01798 // 01799 01800 ControlArea->NumberOfMappedViews += 1; 01801 01802 // 01803 // Set being purged so no one can map a view 01804 // while the purge is going on. 01805 // 01806 01807 ControlArea->u.Flags.BeingPurged = 1; 01808 ControlArea->u.Flags.WasPurged = 1; 01809 01810 UNLOCK_PFN (OldIrql); 01811 LockHeld = FALSE; 01812 ReturnValue = TRUE; 01813 01814 for (;;) { 01815 01816 if (LastSubsection != Subsection) { 01817 01818 // 01819 // Flush to the last PTE in this subsection. 01820 // 01821 01822 LastPte = &Subsection->SubsectionBase[Subsection->PtesInSubsection]; 01823 } else { 01824 01825 // 01826 // Flush to the end of the range. 01827 // 01828 01829 LastPte = FinalPte; 01830 } 01831 01832 // 01833 // If the page table page containing the PTEs is not 01834 // resident, then no PTEs can be in the valid or transition 01835 // state! Skip over the PTEs. 01836 // 01837 01838 if (!MiCheckProtoPtePageState(PointerPte, LockHeld)) { 01839 PointerPte = (PMMPTE)(((ULONG_PTR)PointerPte | (PAGE_SIZE - 1)) + 1); 01840 } 01841 01842 while (PointerPte < LastPte) { 01843 01844 // 01845 // If the page table page containing the PTEs is not 01846 // resident, then no PTEs can be in the valid or transition 01847 // state! Skip over the PTEs. 01848 // 01849 01850 if (MiIsPteOnPdeBoundary(PointerPte)) { 01851 if (!MiCheckProtoPtePageState(PointerPte, LockHeld)) { 01852 PointerPte = (PMMPTE)((PCHAR)PointerPte + PAGE_SIZE); 01853 continue; 01854 } 01855 } 01856 01857 PteContents = *PointerPte; 01858 01859 if (PteContents.u.Hard.Valid == 1) { 01860 01861 // 01862 // A valid PTE was found, it must be mapped in the 01863 // system cache. Just exit the loop and return FALSE 01864 // and let the caller fix this. 01865 // 01866 01867 ReturnValue = FALSE; 01868 break; 01869 } 01870 01871 if ((PteContents.u.Soft.Prototype == 0) && 01872 (PteContents.u.Soft.Transition == 1)) { 01873 01874 if (!LockHeld) { 01875 LockHeld = TRUE; 01876 LOCK_PFN (OldIrql); 01877 MiMakeSystemAddressValidPfn (PointerPte); 01878 continue; 01879 } 01880 01881 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE(&PteContents); 01882 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 01883 01884 if ((Pfn1->OriginalPte.u.Soft.Prototype != 1) || 01885 (Pfn1->OriginalPte.u.Hard.Valid != 0) || 01886 (Pfn1->PteAddress != PointerPte)) { 01887 01888 // 01889 // The pool containing the prototype PTEs has been 01890 // corrupted. Pool corruption like this is fatal. 
01891 // 01892 01893 KeBugCheckEx (POOL_CORRUPTION_IN_FILE_AREA, 01894 0x2, 01895 (ULONG_PTR)PointerPte, 01896 (ULONG_PTR)Pfn1->PteAddress, 01897 (ULONG_PTR)PteContents.u.Long); 01898 } 01899 01900 #if DBG 01901 if ((Pfn1->u3.e2.ReferenceCount != 0) && 01902 (Pfn1->u3.e1.WriteInProgress == 0)) { 01903 01904 // 01905 // There must be an I/O in progress on this 01906 // page. 01907 // 01908 01909 if (MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE(&PteContents) != LastLocked) { 01910 UNLOCK_PFN (OldIrql); 01911 01912 #if DBG 01913 if (MmDebug & MM_DBG_FLUSH_SECTION) { 01914 DbgPrint("MM:PURGE - page %lx locked, file:%Z\n", 01915 PageFrameIndex, 01916 &ControlArea->FilePointer->FileName 01917 ); 01918 } 01919 #endif 01920 LastLocked = MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (&PteContents); 01921 //DbgBreakPoint(); 01922 LOCK_PFN (OldIrql); 01923 MiMakeSystemAddressValidPfn (PointerPte); 01924 continue; 01925 } 01926 } 01927 #endif //DBG 01928 01929 // 01930 // If the modified page writer has page locked for I/O 01931 // wait for the I/O's to be completed and the pages 01932 // to be unlocked. The eliminates a race condition 01933 // when the modified page writer locks the pages, then 01934 // a purge occurs and completes before the mapped 01935 // writer thread runs. 01936 // 01937 01938 if (Pfn1->u3.e1.WriteInProgress == 1) { 01939 01940 // 01941 // A 3 or more thread deadlock can occur where: 01942 // 01943 // 1. The mapped page writer thread has issued a write 01944 // and is in the filesystem code waiting for a resource. 01945 // 01946 // 2. Thread 2 owns the resource above but is waiting for 01947 // the filesystem's quota mutex. 01948 // 01949 // 3. Thread 3 owns the quota mutex and is right here 01950 // doing a purge from the cache manager when he notices 01951 // the page to be purged is either already being written 01952 // or is in the mapped page writer list. If it is 01953 // already being written everything will unjam. If it 01954 // is still on the mapped page writer list awaiting 01955 // processing, then it must be cancelled - otherwise 01956 // if this thread were to wait, deadlock can occur. 01957 // 01958 // The alternative to all this is for the filesystems to 01959 // always release the quota mutex before purging but the 01960 // filesystem overhead to do this is substantial. 01961 // 01962 01963 if (MiCancelWriteOfMappedPfn (PageFrameIndex) == TRUE) { 01964 01965 // 01966 // Stopping any failed writes (even deliberately 01967 // cancelled ones) automatically cause a delay. A 01968 // successful stop also results in the PFN lock 01969 // being released and reacquired. So loop back to 01970 // the top now as the world may have changed. 01971 // 01972 01973 MiMakeSystemAddressValidPfn (PointerPte); 01974 continue; 01975 } 01976 01977 ASSERT (ControlArea->ModifiedWriteCount != 0); 01978 ASSERT (Pfn1->u3.e2.ReferenceCount != 0); 01979 01980 ControlArea->u.Flags.SetMappedFileIoComplete = 1; 01981 01982 KeEnterCriticalRegion(); 01983 UNLOCK_PFN_AND_THEN_WAIT(OldIrql); 01984 01985 KeWaitForSingleObject(&MmMappedFileIoComplete, 01986 WrPageOut, 01987 KernelMode, 01988 FALSE, 01989 (PLARGE_INTEGER)NULL); 01990 LOCK_PFN (OldIrql); 01991 KeLeaveCriticalRegion(); 01992 MiMakeSystemAddressValidPfn (PointerPte); 01993 continue; 01994 } 01995 01996 if (Pfn1->u3.e1.ReadInProgress == 1) { 01997 01998 // 01999 // The page currently is being read in from the 02000 // disk. Treat this just like a valid PTE and 02001 // return false. 
02002 // 02003 02004 ReturnValue = FALSE; 02005 break; 02006 } 02007 02008 ASSERT (!((Pfn1->OriginalPte.u.Soft.Prototype == 0) && 02009 (Pfn1->OriginalPte.u.Soft.Transition == 1))); 02010 02011 MI_WRITE_INVALID_PTE (PointerPte, Pfn1->OriginalPte); 02012 02013 ASSERT (Pfn1->OriginalPte.u.Hard.Valid == 0); 02014 02015 ControlArea->NumberOfPfnReferences -= 1; 02016 ASSERT ((LONG)ControlArea->NumberOfPfnReferences >= 0); 02017 02018 MiUnlinkPageFromList (Pfn1); 02019 02020 MI_SET_PFN_DELETED (Pfn1); 02021 02022 MiDecrementShareCount (Pfn1->PteFrame); 02023 02024 // 02025 // If the reference count for the page is zero, insert 02026 // it into the free page list, otherwise leave it alone 02027 // and when the reference count is decremented to zero 02028 // the page will go to the free list. 02029 // 02030 02031 if (Pfn1->u3.e2.ReferenceCount == 0) { 02032 MiReleasePageFileSpace (Pfn1->OriginalPte); 02033 MiInsertPageInList (MmPageLocationList[FreePageList], 02034 MI_GET_PAGE_FRAME_FROM_TRANSITION_PTE (&PteContents)); 02035 } 02036 } 02037 PointerPte += 1; 02038 02039 if ((MiIsPteOnPdeBoundary(PointerPte)) && (LockHeld)) { 02040 02041 // 02042 // Unlock PFN so large requests will not block other 02043 // threads on MP systems. 02044 // 02045 02046 UNLOCK_PFN (OldIrql); 02047 LockHeld = FALSE; 02048 } 02049 02050 } //end while 02051 02052 if (LockHeld) { 02053 UNLOCK_PFN (OldIrql); 02054 LockHeld = FALSE; 02055 } 02056 02057 if ((LastSubsection != Subsection) && (ReturnValue)) { 02058 02059 // 02060 // Get the next subsection in the list. 02061 // 02062 02063 Subsection = Subsection->NextSubsection; 02064 PointerPte = Subsection->SubsectionBase; 02065 02066 } else { 02067 02068 // 02069 // The last range has been flushed, exit the top FOR loop 02070 // and return. 02071 // 02072 02073 break; 02074 } 02075 } //end for 02076 02077 LOCK_PFN (OldIrql); 02078 02079 ASSERT ((LONG)ControlArea->NumberOfMappedViews >= 1); 02080 ControlArea->NumberOfMappedViews -= 1; 02081 02082 ControlArea->u.Flags.BeingPurged = 0; 02083 02084 // 02085 // Check to see if the control area should be deleted. This 02086 // will release the PFN lock. 02087 // 02088 02089 MiCheckControlArea (ControlArea, NULL, OldIrql); 02090 return ReturnValue; 02091 }
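
The prototype-PTE range walked by this purge follows the same page-span arithmetic as the BYTE_OFFSET and ADDRESS_AND_SIZE_TO_SPAN_PAGES macros defined in this header. Below is a minimal user-mode sketch of that calculation; the function name and the 4KB page constants are illustrative only and are not part of the kernel.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12u
#define PAGE_SIZE  (1u << PAGE_SHIFT)

/*
 * Illustrative only: compute the first and last prototype PTE indexes
 * that a purge of RegionSize bytes starting at file Offset would walk,
 * using the same rounding as the kernel code above.
 */
static void ComputePurgeRange(uint64_t Offset, uint64_t RegionSize,
                              unsigned *FirstPte, unsigned *LastPte)
{
    unsigned First      = (unsigned)(Offset >> PAGE_SHIFT);
    unsigned ByteOffset = (unsigned)(Offset & (PAGE_SIZE - 1));   /* BYTE_OFFSET() */

    /* Adding the byte offset before rounding makes a region that ends
       partway into a page still cover that final page. */
    unsigned Span = (unsigned)(((RegionSize + ByteOffset) - 1) >> PAGE_SHIFT);

    *FirstPte = First;
    *LastPte  = First + Span;        /* inclusive index of the last PTE */
}

int main(void)
{
    unsigned First, Last;

    ComputePurgeRange(0x1800, 0x2000, &First, &Last);            /* 8KB at offset 6KB */
    printf("purge walks prototype PTEs %u..%u\n", First, Last);  /* prints 1..3 */
    return 0;
}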

VOID MmPurgeWorkingSet ( IN PEPROCESS Process, IN PVOID BaseAddress, IN SIZE_T RegionSize )

SIZE_T MmQuerySpecialPoolBlockSize ( IN PVOID P )
 

Definition at line 2927 of file allocpag.c.

References ASSERT, Header, MI_SPECIAL_POOL_PAGABLE, MI_SPECIAL_POOL_VERIFIER, MmSpecialPoolEnd, MmSpecialPoolStart, PAGE_ALIGN, PAGE_SIZE, POOL_OVERHEAD, and PPOOL_HEADER.

Referenced by ExQueryPoolBlockSize().

:

    This routine returns the size of a special pool allocation.

Arguments:

    VirtualAddress - Supplies the special pool virtual address to query.

Return Value:

    The size in bytes of the allocation.

Environment:

    Kernel mode, APC_LEVEL or below for pagable addresses, DISPATCH_LEVEL or
    below for nonpaged addresses.

--*/

{
    PPOOL_HEADER Header;

    ASSERT ((P >= MmSpecialPoolStart) && (P < MmSpecialPoolEnd));

    if (((ULONG_PTR)P & (PAGE_SIZE - 1))) {
        Header = PAGE_ALIGN (P);
    }
    else {
        Header = (PPOOL_HEADER)((PCHAR)PAGE_ALIGN (P) + PAGE_SIZE - POOL_OVERHEAD);
    }

    return (SIZE_T)(Header->Ulong1 & ~(MI_SPECIAL_POOL_PAGABLE | MI_SPECIAL_POOL_VERIFIER));
}
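
The header lookup above encodes the special-pool layout: each allocation sits alone on a page and is caught either at the start of the page (header at the end of the page, POOL_OVERHEAD bytes before the next page) or at the end of the page (header at the page start). A hedged user-mode sketch of that rule follows; the PAGE_SIZE and POOL_OVERHEAD values here are assumed for illustration.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE     4096u
#define POOL_OVERHEAD 16u     /* assumed pool header size, for illustration only */

/*
 * Mirror of the lookup above: a pointer that is not page aligned was
 * caught at the end of its page, so the header is at the page start;
 * a page-aligned pointer keeps its header at the end of the page.
 */
static uintptr_t SpecialPoolHeaderFor(uintptr_t P)
{
    uintptr_t PageStart = P & ~(uintptr_t)(PAGE_SIZE - 1);

    if (P & (PAGE_SIZE - 1)) {
        return PageStart;                              /* header at page start */
    }
    return PageStart + PAGE_SIZE - POOL_OVERHEAD;      /* header at page end */
}

int main(void)
{
    printf("%#lx\n", (unsigned long)SpecialPoolHeaderFor(0x10000f80));  /* 0x10000000 */
    printf("%#lx\n", (unsigned long)SpecialPoolHeaderFor(0x10001000));  /* 0x10001ff0 */
    return 0;
}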

NTKERNELAPI MM_SYSTEMSIZE MmQuerySystemSize ( VOID )
 

Definition at line 3073 of file mminit.c.

References MmSystemSize.

Referenced by CcAllocateInitializeBcb(), CcInitializeCacheManager(), ExInitializeRegion(), ExpWorkerInitialization(), IoInitSystem(), KeBalanceSetManager(), ObInitSystem(), PspInitPhase0(), and UdfInitializeGlobalData().

{
    //
    // 12Mb is small
    // 12-19 is medium
    // > 19 is large
    //
    return MmSystemSize;
}
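
A typical use of this routine is to scale per-driver data structures to the machine. Here is a minimal sketch; the depth values are the caller's own policy, not anything defined by the memory manager.

#include <ntddk.h>

/* Hypothetical helper: choose a lookaside/cache depth from the reported
   system size.  Only the three MM_SYSTEMSIZE values come from Mm. */
ULONG MyPickCacheDepth (VOID)
{
    switch (MmQuerySystemSize ()) {
    case MmSmallSystem:
        return 16;
    case MmMediumSystem:
        return 64;
    case MmLargeSystem:
    default:
        return 256;
    }
}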

BOOLEAN MmRaisePoolQuota ( IN POOL_TYPE PoolType, IN SIZE_T OldQuotaLimit, OUT PSIZE_T NewQuotaLimit )

Definition at line 1118 of file mmquota.c.

References _MM_PAGED_POOL_INFO::AllocatedPagedPool, FALSE, MmAllocatedNonPagedPool, MmAvailablePages, MmMaximumNonPagedPoolInBytes, MMNONPAGED_QUOTA_INCREASE, MMPAGED_QUOTA_CHECK, MMPAGED_QUOTA_INCREASE, MmPagedPoolInfo, MmSizeOfPagedPoolInBytes, MmTotalNonPagedPoolQuota, MmTotalPagedPoolQuota, PAGE_SHIFT, PagedPool, and TRUE.

Referenced by PsChargePoolQuota(), PsChargeSharedPoolQuota(), and PspSetQuotaLimits().

01126 : 01127 01128 This function is called (with a spinlock) whenever PS detects a quota 01129 limit has been exceeded. The purpose of this function is to attempt to 01130 increase the specified quota. 01131 01132 Arguments: 01133 01134 PoolType - Supplies the pool type of the quota to be raised 01135 01136 OldQuotaLimit - Supplies the current quota limit for this pool type 01137 01138 NewQuotaLimit - Returns the new limit 01139 01140 Return Value: 01141 01142 TRUE - The API succeeded and the quota limit was raised. 01143 01144 FALSE - We were unable to raise the quota limit. 01145 01146 Environment: 01147 01148 Kernel mode, QUOTA SPIN LOCK HELD!! 01149 01150 --*/ 01151 01152 { 01153 SIZE_T Limit; 01154 PMM_PAGED_POOL_INFO PagedPoolInfo; 01155 01156 if (PoolType == PagedPool) { 01157 01158 // 01159 // Check commit limit and make sure at least 1mb is available. 01160 // Check to make sure 4mb of paged pool still exists. 01161 // 01162 01163 PagedPoolInfo = &MmPagedPoolInfo; 01164 01165 if ((MmSizeOfPagedPoolInBytes >> PAGE_SHIFT) < 01166 (PagedPoolInfo->AllocatedPagedPool + ((MMPAGED_QUOTA_CHECK) >> PAGE_SHIFT))) { 01167 01168 return FALSE; 01169 } 01170 01171 MmTotalPagedPoolQuota += (MMPAGED_QUOTA_INCREASE); 01172 *NewQuotaLimit = OldQuotaLimit + (MMPAGED_QUOTA_INCREASE); 01173 return TRUE; 01174 01175 } else { 01176 01177 if ( (ULONG_PTR)(MmAllocatedNonPagedPool + ((1*1024*1024) >> PAGE_SHIFT)) < (MmMaximumNonPagedPoolInBytes >> PAGE_SHIFT)) { 01178 goto aok; 01179 } 01180 01181 // 01182 // Make sure 200 pages and 5mb of nonpaged pool expansion 01183 // available. Raise quota by 64k. 01184 // 01185 01186 if ((MmAvailablePages < 200) || 01187 (MmResidentAvailablePages < ((MMNONPAGED_QUOTA_CHECK) >> PAGE_SHIFT))) { 01188 01189 return FALSE; 01190 } 01191 01192 if (MmAvailablePages > ((4*1024*1024) >> PAGE_SHIFT)) { 01193 Limit = (1*1024*1024) >> PAGE_SHIFT; 01194 } else { 01195 Limit = (4*1024*1024) >> PAGE_SHIFT; 01196 } 01197 01198 if ((ULONG_PTR)((MmMaximumNonPagedPoolInBytes >> PAGE_SHIFT)) < 01199 (MmAllocatedNonPagedPool + Limit)) { 01200 01201 return FALSE; 01202 } 01203 aok: 01204 MmTotalNonPagedPoolQuota += (MMNONPAGED_QUOTA_INCREASE); 01205 *NewQuotaLimit = OldQuotaLimit + (MMNONPAGED_QUOTA_INCREASE); 01206 return TRUE; 01207 } 01208 }
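
The callers listed above (for example PsChargePoolQuota) invoke this routine while holding the quota spin lock and retry the charge after each successful raise. Below is a hedged sketch of that pattern; the MY_QUOTA_BLOCK layout and MyChargePagedQuota are invented for illustration, and the MmRaisePoolQuota prototype is reproduced from this page because the routine is not a public export.

#include <ntddk.h>

/* Prototype as documented on this page (kernel-internal routine). */
BOOLEAN MmRaisePoolQuota (IN POOL_TYPE PoolType,
                          IN SIZE_T OldQuotaLimit,
                          OUT PSIZE_T NewQuotaLimit);

/* Invented quota block, loosely modelling the PS per-process quota data. */
typedef struct _MY_QUOTA_BLOCK {
    KSPIN_LOCK Lock;
    SIZE_T     PagedUsage;
    SIZE_T     PagedLimit;
} MY_QUOTA_BLOCK, *PMY_QUOTA_BLOCK;

NTSTATUS MyChargePagedQuota (PMY_QUOTA_BLOCK Qb, SIZE_T Amount)
{
    KIRQL OldIrql;
    SIZE_T NewLimit;

    KeAcquireSpinLock (&Qb->Lock, &OldIrql);

    while (Qb->PagedUsage + Amount > Qb->PagedLimit) {

        /* Ask Mm to raise the limit; it bumps it by MMPAGED_QUOTA_INCREASE
           or refuses because paged pool itself is running low. */
        if (!MmRaisePoolQuota (PagedPool, Qb->PagedLimit, &NewLimit)) {
            KeReleaseSpinLock (&Qb->Lock, OldIrql);
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        Qb->PagedLimit = NewLimit;
    }

    Qb->PagedUsage += Amount;
    KeReleaseSpinLock (&Qb->Lock, OldIrql);
    return STATUS_SUCCESS;
}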

VOID MmReleaseDumpAddresses ( IN PFN_NUMBER Pages )
 

Definition at line 7270 of file iosup.c.

References KiFlushSingleTb(), MiGetVirtualAddressMappedByPte, MM_ZERO_PTE, MmCrashDumpPte, PAGE_SIZE, TRUE, and _MMPTE::u.

:

    For use by the hibernate routine ONLY.  Puts zeros back into the
    used dump PTEs.

Arguments:

    Pages - Supplies the number of dump PTEs to zero.

Return Value:

    None

--*/

{
    PMMPTE PointerPte;
    PCHAR BaseVa;

    PointerPte = MmCrashDumpPte;
    BaseVa = (PCHAR)MiGetVirtualAddressMappedByPte(PointerPte);

    while (Pages) {

        KiFlushSingleTb (TRUE, BaseVa);

        PointerPte->u.Long = MM_ZERO_PTE;
        PointerPte += 1;
        BaseVa += PAGE_SIZE;
        Pages -= 1;
    }
}

NTKERNELAPI NTSTATUS MmRemovePhysicalMemory ( IN PPHYSICAL_ADDRESS StartAddress, IN OUT PLARGE_INTEGER NumberOfBytes )

Definition at line 437 of file dynmem.c.

References ASSERT, ASSERT64, BadPageList, _PHYSICAL_MEMORY_RUN::BasePage, BYTE_OFFSET, DbgPrint, ExAllocatePoolWithTag, ExFreePool(), FALSE, FreePageList, KeDelayExecutionThread(), KeFlushSingleTb(), KernelMode, LOCK_PFN, MI_GET_PAGE_FRAME_FROM_PTE, MI_IS_PHYSICAL_ADDRESS, MI_NONPAGABLE_MEMORY_AVAILABLE, MI_PFN_ELEMENT, MI_SET_PFN_DELETED, MiDecrementReferenceCount(), MiDelayPageFaults, MiEmptyAllWorkingSets(), MiFlushAllPages(), MiGetPteAddress, MiInsertPageInList(), MiRemovePhysicalPages(), MiReturnCommitment(), MiTrimRemovalPagesOnly, MiUnlinkPageFromList(), MmChargeCommitmentLock, MmDynamicMemoryMutex, MmDynamicPfn, MmHalfSecond, MmHighestPhysicalPage, MmHighestPossiblePhysicalPage, MmNumberOfPhysicalPages, MmPageLocationList, MmPfnDatabase, MmPhysicalMemoryBlock, MmResidentAvailablePages, MmTotalCommitLimit, MmTotalCommitLimitMaximum, MmTotalCommittedPages, NonPagedPool, NTSTATUS(), NULL, _PHYSICAL_MEMORY_DESCRIPTOR::NumberOfPages, _PHYSICAL_MEMORY_DESCRIPTOR::NumberOfRuns, _MMPFN::OriginalPte, PAGE_ALIGN, PAGE_SHIFT, PAGE_SIZE, _PHYSICAL_MEMORY_RUN::PageCount, PASSIVE_LEVEL, PFN_REMOVED, _MMPFN::PteAddress, _MMPFN::PteFrame, ROUND_TO_PAGES, _PHYSICAL_MEMORY_DESCRIPTOR::Run, StandbyPageList, Status, TRUE, _MMPTE::u, _MMPFN::u1, _MMPFN::u2, _MMPFN::u3, UNLOCK_PFN, and ZeroKernelPte.

00444 : 00445 00446 This routine attempts to remove the specified physical address range 00447 from the system. 00448 00449 Arguments: 00450 00451 StartAddress - Supplies the starting physical address. 00452 00453 NumberOfBytes - Supplies a pointer to the number of bytes being removed. 00454 00455 Return Value: 00456 00457 NTSTATUS. 00458 00459 Environment: 00460 00461 Kernel mode. PASSIVE level. No locks held. 00462 00463 --*/ 00464 00465 { 00466 ULONG i; 00467 ULONG Additional; 00468 PFN_NUMBER Page; 00469 PFN_NUMBER LastPage; 00470 PFN_NUMBER OriginalLastPage; 00471 PFN_NUMBER start; 00472 PFN_NUMBER PagesReleased; 00473 PMMPFN Pfn1; 00474 PMMPFN StartPfn; 00475 PMMPFN EndPfn; 00476 KIRQL OldIrql; 00477 PFN_NUMBER StartPage; 00478 PFN_NUMBER EndPage; 00479 PFN_COUNT NumberOfPages; 00480 SPFN_NUMBER MaxPages; 00481 PFN_NUMBER PageFrameIndex; 00482 PFN_NUMBER RemovedPages; 00483 LOGICAL Inserted; 00484 NTSTATUS Status; 00485 PMMPTE PointerPte; 00486 PMMPTE EndPte; 00487 PVOID VirtualAddress; 00488 PPHYSICAL_MEMORY_DESCRIPTOR OldPhysicalMemoryBlock; 00489 PPHYSICAL_MEMORY_DESCRIPTOR NewPhysicalMemoryBlock; 00490 PPHYSICAL_MEMORY_RUN NewRun; 00491 LOGICAL PfnDatabaseIsPhysical; 00492 00493 ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL); 00494 00495 ASSERT (BYTE_OFFSET(NumberOfBytes->LowPart) == 0); 00496 ASSERT (BYTE_OFFSET(StartAddress->LowPart) == 0); 00497 00498 if (MI_IS_PHYSICAL_ADDRESS(MmPfnDatabase)) { 00499 00500 // 00501 // The system must be configured for dynamic memory addition. This is 00502 // not strictly required to remove the memory, but it's better to check 00503 // for it now under the assumption that the administrator is probably 00504 // going to want to add this range of memory back in - better to give 00505 // the error now and refuse the removal than to refuse the addition 00506 // later. 00507 // 00508 00509 if (MmDynamicPfn == FALSE) { 00510 return STATUS_NOT_SUPPORTED; 00511 } 00512 00513 PfnDatabaseIsPhysical = TRUE; 00514 } 00515 else { 00516 PfnDatabaseIsPhysical = FALSE; 00517 } 00518 00519 StartPage = (PFN_NUMBER)(StartAddress->QuadPart >> PAGE_SHIFT); 00520 NumberOfPages = (PFN_COUNT)(NumberOfBytes->QuadPart >> PAGE_SHIFT); 00521 00522 EndPage = StartPage + NumberOfPages; 00523 00524 if (EndPage - 1 > MmHighestPossiblePhysicalPage) { 00525 00526 // 00527 // Truncate the request into something that can be mapped by the PFN 00528 // database. 00529 // 00530 00531 EndPage = MmHighestPossiblePhysicalPage + 1; 00532 NumberOfPages = (PFN_COUNT)(EndPage - StartPage); 00533 } 00534 00535 // 00536 // The range cannot wrap. 00537 // 00538 00539 if (StartPage >= EndPage) { 00540 return STATUS_INVALID_PARAMETER_1; 00541 } 00542 00543 StartPfn = MI_PFN_ELEMENT (StartPage); 00544 EndPfn = MI_PFN_ELEMENT (EndPage); 00545 00546 ExAcquireFastMutex (&MmDynamicMemoryMutex); 00547 00548 #if DBG 00549 MiDynmemData[0] += 1; 00550 #endif 00551 00552 // 00553 // Decrease all commit limits to reflect the removed memory. 
00554 // 00555 00556 ExAcquireSpinLock (&MmChargeCommitmentLock, &OldIrql); 00557 00558 ASSERT (MmTotalCommitLimit <= MmTotalCommitLimitMaximum); 00559 00560 if ((NumberOfPages + 100 > MmTotalCommitLimit - MmTotalCommittedPages) || 00561 (MmTotalCommittedPages > MmTotalCommitLimit)) { 00562 00563 #if DBG 00564 MiDynmemData[1] += 1; 00565 #endif 00566 ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql); 00567 ExReleaseFastMutex (&MmDynamicMemoryMutex); 00568 return STATUS_INSUFFICIENT_RESOURCES; 00569 } 00570 00571 MmTotalCommitLimit -= NumberOfPages; 00572 MmTotalCommitLimitMaximum -= NumberOfPages; 00573 00574 ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql); 00575 00576 // 00577 // Check for outstanding promises that cannot be broken. 00578 // 00579 00580 LOCK_PFN (OldIrql); 00581 00582 MaxPages = MI_NONPAGABLE_MEMORY_AVAILABLE() - 100; 00583 00584 if ((SPFN_NUMBER)NumberOfPages > MaxPages) { 00585 #if DBG 00586 MiDynmemData[2] += 1; 00587 #endif 00588 UNLOCK_PFN (OldIrql); 00589 Status = STATUS_INSUFFICIENT_RESOURCES; 00590 goto giveup2; 00591 } 00592 00593 MmResidentAvailablePages -= NumberOfPages; 00594 MmNumberOfPhysicalPages -= NumberOfPages; 00595 00596 // 00597 // The range must be contained in a single entry. It is permissible for 00598 // it to be part of a single entry, but it must not cross multiple entries. 00599 // 00600 00601 Additional = (ULONG)-2; 00602 00603 start = 0; 00604 do { 00605 00606 Page = MmPhysicalMemoryBlock->Run[start].BasePage; 00607 LastPage = Page + MmPhysicalMemoryBlock->Run[start].PageCount; 00608 00609 if ((StartPage >= Page) && (EndPage <= LastPage)) { 00610 if ((StartPage == Page) && (EndPage == LastPage)) { 00611 Additional = (ULONG)-1; 00612 } 00613 else if ((StartPage == Page) || (EndPage == LastPage)) { 00614 Additional = 0; 00615 } 00616 else { 00617 Additional = 1; 00618 } 00619 break; 00620 } 00621 00622 start += 1; 00623 00624 } while (start != MmPhysicalMemoryBlock->NumberOfRuns); 00625 00626 if (Additional == (ULONG)-2) { 00627 #if DBG 00628 MiDynmemData[3] += 1; 00629 #endif 00630 MmResidentAvailablePages += NumberOfPages; 00631 MmNumberOfPhysicalPages += NumberOfPages; 00632 UNLOCK_PFN (OldIrql); 00633 Status = STATUS_CONFLICTING_ADDRESSES; 00634 goto giveup2; 00635 } 00636 00637 for (Pfn1 = StartPfn; Pfn1 < EndPfn; Pfn1 += 1) { 00638 Pfn1->u3.e1.RemovalRequested = 1; 00639 } 00640 00641 // 00642 // The free and zero lists must be pruned now before releasing the PFN 00643 // lock otherwise if another thread allocates the page from these lists, 00644 // the allocation will clear the RemovalRequested flag forever. 00645 // 00646 00647 RemovedPages = MiRemovePhysicalPages (StartPage, EndPage); 00648 00649 if (RemovedPages != NumberOfPages) { 00650 00651 #if DBG 00652 retry: 00653 #endif 00654 00655 Pfn1 = StartPfn; 00656 00657 InterlockedIncrement (&MiDelayPageFaults); 00658 00659 for (i = 0; i < 5; i += 1) { 00660 00661 UNLOCK_PFN (OldIrql); 00662 00663 // 00664 // Attempt to move pages to the standby list. Note that only the 00665 // pages with RemovalRequested set are moved. 
00666 // 00667 00668 MiTrimRemovalPagesOnly = TRUE; 00669 00670 MiEmptyAllWorkingSets (); 00671 00672 MiTrimRemovalPagesOnly = FALSE; 00673 00674 MiFlushAllPages (); 00675 00676 KeDelayExecutionThread (KernelMode, FALSE, &MmHalfSecond); 00677 00678 LOCK_PFN (OldIrql); 00679 00680 RemovedPages += MiRemovePhysicalPages (StartPage, EndPage); 00681 00682 if (RemovedPages == NumberOfPages) { 00683 break; 00684 } 00685 00686 // 00687 // RemovedPages doesn't include pages that were freed directly to 00688 // the bad page list via MiDecrementReferenceCount. So use the above 00689 // check purely as an optimization - and walk here when necessary. 00690 // 00691 00692 for ( ; Pfn1 < EndPfn; Pfn1 += 1) { 00693 if (Pfn1->u3.e1.PageLocation != BadPageList) { 00694 break; 00695 } 00696 } 00697 00698 if (Pfn1 == EndPfn) { 00699 RemovedPages = NumberOfPages; 00700 break; 00701 } 00702 } 00703 00704 InterlockedDecrement (&MiDelayPageFaults); 00705 } 00706 00707 if (RemovedPages != NumberOfPages) { 00708 #if DBG 00709 MiDynmemData[4] += 1; 00710 if (MiShowStuckPages != 0) { 00711 00712 RemovedPages = 0; 00713 for (Pfn1 = StartPfn; Pfn1 < EndPfn; Pfn1 += 1) { 00714 if (Pfn1->u3.e1.PageLocation != BadPageList) { 00715 RemovedPages += 1; 00716 } 00717 } 00718 00719 ASSERT (RemovedPages != 0); 00720 00721 DbgPrint("MmRemovePhysicalMemory : could not get %d of %d pages\n", 00722 RemovedPages, NumberOfPages); 00723 00724 if (MiShowStuckPages & 0x2) { 00725 00726 ULONG PfnsPrinted; 00727 ULONG EnoughShown; 00728 PMMPFN FirstPfn; 00729 PFN_COUNT PfnCount; 00730 00731 PfnCount = 0; 00732 PfnsPrinted = 0; 00733 EnoughShown = 100; 00734 00735 if (MiShowStuckPages & 0x4) { 00736 EnoughShown = (ULONG)-1; 00737 } 00738 00739 DbgPrint("Stuck PFN list: "); 00740 for (Pfn1 = StartPfn; Pfn1 < EndPfn; Pfn1 += 1) { 00741 if (Pfn1->u3.e1.PageLocation != BadPageList) { 00742 if (PfnCount == 0) { 00743 FirstPfn = Pfn1; 00744 } 00745 PfnCount += 1; 00746 } 00747 else { 00748 if (PfnCount != 0) { 00749 DbgPrint("%x -> %x ; ", FirstPfn - MmPfnDatabase, 00750 (FirstPfn - MmPfnDatabase) + PfnCount - 1); 00751 PfnsPrinted += 1; 00752 if (PfnsPrinted == EnoughShown) { 00753 break; 00754 } 00755 PfnCount = 0; 00756 } 00757 } 00758 } 00759 if (PfnCount != 0) { 00760 DbgPrint("%x -> %x ; ", FirstPfn - MmPfnDatabase, 00761 (FirstPfn - MmPfnDatabase) + PfnCount - 1); 00762 } 00763 DbgPrint("\n"); 00764 } 00765 if (MiShowStuckPages & 0x8) { 00766 DbgBreakPoint (); 00767 } 00768 if (MiShowStuckPages & 0x10) { 00769 goto retry; 00770 } 00771 } 00772 #endif 00773 UNLOCK_PFN (OldIrql); 00774 Status = STATUS_NO_MEMORY; 00775 goto giveup; 00776 } 00777 00778 #if DBG 00779 for (Pfn1 = StartPfn; Pfn1 < EndPfn; Pfn1 += 1) { 00780 ASSERT (Pfn1->u3.e1.PageLocation == BadPageList); 00781 } 00782 #endif 00783 00784 // 00785 // All the pages in the range have been removed. Update the physical 00786 // memory blocks and other associated housekeeping. 00787 // 00788 00789 if (Additional == 0) { 00790 00791 // 00792 // The range can be split off from an end of an existing chunk so no 00793 // pool growth or shrinkage is required. 00794 // 00795 00796 NewPhysicalMemoryBlock = MmPhysicalMemoryBlock; 00797 OldPhysicalMemoryBlock = NULL; 00798 } 00799 else { 00800 00801 // 00802 // The range cannot be split off from an end of an existing chunk so 00803 // pool growth or shrinkage is required. 
00804 // 00805 00806 UNLOCK_PFN (OldIrql); 00807 00808 i = (sizeof(PHYSICAL_MEMORY_DESCRIPTOR) + 00809 (sizeof(PHYSICAL_MEMORY_RUN) * (MmPhysicalMemoryBlock->NumberOfRuns + Additional))); 00810 00811 NewPhysicalMemoryBlock = ExAllocatePoolWithTag (NonPagedPool, 00812 i, 00813 ' mM'); 00814 00815 if (NewPhysicalMemoryBlock == NULL) { 00816 Status = STATUS_INSUFFICIENT_RESOURCES; 00817 #if DBG 00818 MiDynmemData[5] += 1; 00819 #endif 00820 goto giveup; 00821 } 00822 00823 OldPhysicalMemoryBlock = MmPhysicalMemoryBlock; 00824 RtlZeroMemory (NewPhysicalMemoryBlock, i); 00825 00826 LOCK_PFN (OldIrql); 00827 } 00828 00829 // 00830 // Remove or split the requested range from the existing memory block. 00831 // 00832 00833 NewPhysicalMemoryBlock->NumberOfRuns = MmPhysicalMemoryBlock->NumberOfRuns + Additional; 00834 NewPhysicalMemoryBlock->NumberOfPages = MmPhysicalMemoryBlock->NumberOfPages - NumberOfPages; 00835 00836 NewRun = &NewPhysicalMemoryBlock->Run[0]; 00837 start = 0; 00838 Inserted = FALSE; 00839 00840 do { 00841 00842 Page = MmPhysicalMemoryBlock->Run[start].BasePage; 00843 LastPage = Page + MmPhysicalMemoryBlock->Run[start].PageCount; 00844 00845 if (Inserted == FALSE) { 00846 00847 if ((StartPage >= Page) && (EndPage <= LastPage)) { 00848 00849 if ((StartPage == Page) && (EndPage == LastPage)) { 00850 ASSERT (Additional == -1); 00851 start += 1; 00852 continue; 00853 } 00854 else if ((StartPage == Page) || (EndPage == LastPage)) { 00855 ASSERT (Additional == 0); 00856 if (StartPage == Page) { 00857 MmPhysicalMemoryBlock->Run[start].BasePage += NumberOfPages; 00858 } 00859 MmPhysicalMemoryBlock->Run[start].PageCount -= NumberOfPages; 00860 } 00861 else { 00862 ASSERT (Additional == 1); 00863 00864 OriginalLastPage = LastPage; 00865 00866 MmPhysicalMemoryBlock->Run[start].PageCount = 00867 StartPage - MmPhysicalMemoryBlock->Run[start].BasePage; 00868 00869 *NewRun = MmPhysicalMemoryBlock->Run[start]; 00870 NewRun += 1; 00871 00872 NewRun->BasePage = EndPage; 00873 NewRun->PageCount = OriginalLastPage - EndPage; 00874 NewRun += 1; 00875 00876 start += 1; 00877 continue; 00878 } 00879 00880 Inserted = TRUE; 00881 } 00882 } 00883 00884 *NewRun = MmPhysicalMemoryBlock->Run[start]; 00885 NewRun += 1; 00886 start += 1; 00887 00888 } while (start != MmPhysicalMemoryBlock->NumberOfRuns); 00889 00890 // 00891 // Repoint the MmPhysicalMemoryBlock at the new chunk. 00892 // Free the old block after releasing the PFN lock. 00893 // 00894 00895 MmPhysicalMemoryBlock = NewPhysicalMemoryBlock; 00896 00897 if (EndPage - 1 == MmHighestPhysicalPage) { 00898 MmHighestPhysicalPage = StartPage - 1; 00899 } 00900 00901 // 00902 // Throw away all the removed pages that are currently enqueued. 00903 // 00904 00905 for (Pfn1 = StartPfn; Pfn1 < EndPfn; Pfn1 += 1) { 00906 00907 ASSERT (Pfn1->u3.e1.PageLocation == BadPageList); 00908 ASSERT (Pfn1->u3.e1.RemovalRequested == 1); 00909 00910 MiUnlinkPageFromList (Pfn1); 00911 00912 ASSERT (Pfn1->u1.Flink == 0); 00913 ASSERT (Pfn1->u2.Blink == 0); 00914 ASSERT (Pfn1->u3.e2.ReferenceCount == 0); 00915 ASSERT64 (Pfn1->UsedPageTableEntries == 0); 00916 00917 Pfn1->PteAddress = PFN_REMOVED; 00918 Pfn1->u3.e2.ShortFlags = 0; 00919 Pfn1->OriginalPte.u.Long = ZeroKernelPte.u.Long; 00920 Pfn1->PteFrame = 0; 00921 } 00922 00923 // 00924 // Now that the removed pages have been discarded, eliminate the PFN 00925 // entries that mapped them. Straddling entries left over from an 00926 // adjacent earlier removal are not collapsed at this point. 
00927 // 00928 // 00929 00930 PagesReleased = 0; 00931 00932 if (PfnDatabaseIsPhysical == FALSE) { 00933 00934 VirtualAddress = (PVOID)ROUND_TO_PAGES(MI_PFN_ELEMENT(StartPage)); 00935 PointerPte = MiGetPteAddress (VirtualAddress); 00936 EndPte = MiGetPteAddress (PAGE_ALIGN(MI_PFN_ELEMENT(EndPage))); 00937 00938 while (PointerPte < EndPte) { 00939 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 00940 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 00941 ASSERT (Pfn1->u2.ShareCount == 1); 00942 ASSERT (Pfn1->u3.e2.ReferenceCount == 1); 00943 Pfn1->u2.ShareCount = 0; 00944 MI_SET_PFN_DELETED (Pfn1); 00945 #if DBG 00946 Pfn1->u3.e1.PageLocation = StandbyPageList; 00947 #endif //DBG 00948 MiDecrementReferenceCount (PageFrameIndex); 00949 00950 KeFlushSingleTb (VirtualAddress, 00951 TRUE, 00952 TRUE, 00953 (PHARDWARE_PTE)PointerPte, 00954 ZeroKernelPte.u.Flush); 00955 00956 PagesReleased += 1; 00957 PointerPte += 1; 00958 VirtualAddress = (PVOID)((PCHAR)VirtualAddress + PAGE_SIZE); 00959 } 00960 00961 MmResidentAvailablePages += PagesReleased; 00962 } 00963 00964 #if DBG 00965 MiDynmemData[6] += 1; 00966 #endif 00967 00968 UNLOCK_PFN (OldIrql); 00969 00970 if (PagesReleased != 0) { 00971 MiReturnCommitment (PagesReleased); 00972 } 00973 00974 ExReleaseFastMutex (&MmDynamicMemoryMutex); 00975 00976 if (OldPhysicalMemoryBlock != NULL) { 00977 ExFreePool (OldPhysicalMemoryBlock); 00978 } 00979 00980 NumberOfBytes->QuadPart = (ULONGLONG)NumberOfPages * PAGE_SIZE; 00981 00982 return STATUS_SUCCESS; 00983 00984 giveup: 00985 00986 // 00987 // All the pages in the range were not obtained. Back everything out. 00988 // 00989 00990 PageFrameIndex = StartPage; 00991 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 00992 00993 LOCK_PFN (OldIrql); 00994 00995 while (PageFrameIndex < EndPage) { 00996 00997 ASSERT (Pfn1->u3.e1.RemovalRequested == 1); 00998 00999 Pfn1->u3.e1.RemovalRequested = 0; 01000 01001 if ((Pfn1->u3.e1.PageLocation == BadPageList) && 01002 (Pfn1->u3.e1.ParityError == 0)) { 01003 01004 MiUnlinkPageFromList (Pfn1); 01005 MiInsertPageInList (MmPageLocationList[FreePageList], 01006 PageFrameIndex); 01007 } 01008 01009 Pfn1 += 1; 01010 PageFrameIndex += 1; 01011 } 01012 01013 MmResidentAvailablePages += NumberOfPages; 01014 MmNumberOfPhysicalPages += NumberOfPages; 01015 01016 UNLOCK_PFN (OldIrql); 01017 01018 giveup2: 01019 01020 ExAcquireSpinLock (&MmChargeCommitmentLock, &OldIrql); 01021 MmTotalCommitLimit += NumberOfPages; 01022 MmTotalCommitLimitMaximum += NumberOfPages; 01023 ExReleaseSpinLock (&MmChargeCommitmentLock, OldIrql); 01024 01025 ExReleaseFastMutex (&MmDynamicMemoryMutex); 01026 01027 return Status; 01028 }
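
For callers, the contract is the one described in the Arguments and Environment sections above: page-aligned start and length, PASSIVE_LEVEL, no locks held, and NumberOfBytes updated on return to reflect what was actually removed. A minimal hedged sketch follows; the range chosen is arbitrary.

#include <ntddk.h>

/* Sketch: attempt to remove 64 MB of RAM starting at the 1 GB mark.
   Must be called at PASSIVE_LEVEL with no locks held; both values must
   be page aligned.  On success NumberOfBytes may have been truncated to
   what the PFN database could actually cover. */
NTSTATUS MyRemoveRange (VOID)
{
    PHYSICAL_ADDRESS Start;
    LARGE_INTEGER Bytes;
    NTSTATUS Status;

    Start.QuadPart = 0x40000000;           /* 1 GB */
    Bytes.QuadPart = 64 * 1024 * 1024;     /* 64 MB */

    Status = MmRemovePhysicalMemory (&Start, &Bytes);

    if (NT_SUCCESS (Status)) {
        DbgPrint ("removed %I64u bytes at physical %I64x\n",
                  Bytes.QuadPart, Start.QuadPart);
    }

    return Status;
}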

NTKERNELAPI VOID MmResetDriverPaging ( IN PVOID AddressWithinSection )
 

Definition at line 2560 of file sysload.c.

References ASSERT, DbgPrint, ExPageLockHandle, FALSE, LOCK_PFN, LOCK_SYSTEM_WS, MI_IS_PHYSICAL_ADDRESS, MiGetPteAddress, MiLockCode(), MiLookupDataTableEntry(), MM_LOCK_BY_NONPAGE, MmDisablePagingExecutive, MmLockPagableSectionByHandle(), MmUnlockPagableImageSection(), NULL, PAGED_CODE, RtlImageNtHeader(), UNLOCK_PFN, and UNLOCK_SYSTEM_WS.

02566 : 02567 02568 This routines resets the driver paging to what the image specified. 02569 Hence image sections such as the IAT, .text, .data will be locked 02570 down in memory. 02571 02572 Note, there is no requirement that MmPageEntireDriver was called. 02573 02574 Arguments: 02575 02576 AddressWithinSection - Supplies an address within the driver, e.g. 02577 DriverEntry. 02578 02579 Return Value: 02580 02581 None. 02582 02583 Environment: 02584 02585 Kernel mode, APC_LEVEL or below. 02586 02587 --*/ 02588 02589 { 02590 PLDR_DATA_TABLE_ENTRY DataTableEntry; 02591 PMMPTE LastPte; 02592 PMMPTE PointerPte; 02593 PVOID Base; 02594 ULONG i; 02595 PIMAGE_NT_HEADERS NtHeaders; 02596 PIMAGE_SECTION_HEADER FoundSection; 02597 KIRQL OldIrql; 02598 KIRQL OldIrqlWs; 02599 02600 PAGED_CODE(); 02601 02602 // 02603 // Don't page kernel mode code if disabled via registry. 02604 // 02605 02606 if (MmDisablePagingExecutive) { 02607 return; 02608 } 02609 02610 if (MI_IS_PHYSICAL_ADDRESS(AddressWithinSection)) { 02611 return; 02612 } 02613 02614 // 02615 // If the driver has pagable code, make it paged. 02616 // 02617 02618 DataTableEntry = MiLookupDataTableEntry (AddressWithinSection, FALSE); 02619 02620 if ((DataTableEntry->SectionPointer != NULL) && 02621 (DataTableEntry->SectionPointer != (PVOID)-1)) { 02622 02623 // 02624 // Driver is mapped by image hence already paged. 02625 // 02626 02627 return; 02628 } 02629 02630 Base = DataTableEntry->DllBase; 02631 02632 NtHeaders = (PIMAGE_NT_HEADERS)RtlImageNtHeader(Base); 02633 02634 FoundSection = (PIMAGE_SECTION_HEADER)((PCHAR)NtHeaders + 02635 sizeof(ULONG) + 02636 sizeof(IMAGE_FILE_HEADER) + 02637 NtHeaders->FileHeader.SizeOfOptionalHeader 02638 ); 02639 02640 i = NtHeaders->FileHeader.NumberOfSections; 02641 PointerPte = NULL; 02642 02643 while (i > 0) { 02644 #if DBG 02645 if ((*(PULONG)FoundSection->Name == 'tini') || 02646 (*(PULONG)FoundSection->Name == 'egap')) { 02647 DbgPrint("driver %wZ has lower case sections (init or pagexxx)\n", 02648 &DataTableEntry->FullDllName); 02649 } 02650 #endif 02651 02652 // 02653 // Don't lock down code for sections marked as discardable or 02654 // sections marked with the first 4 characters PAGE or .eda 02655 // (for the .edata section) or INIT. 02656 // 02657 02658 if (((FoundSection->Characteristics & IMAGE_SCN_MEM_DISCARDABLE) != 0) || 02659 (*(PULONG)FoundSection->Name == 'EGAP') || 02660 (*(PULONG)FoundSection->Name == 'ade.') || 02661 (*(PULONG)FoundSection->Name == 'TINI')) { 02662 02663 NOTHING; 02664 02665 } else { 02666 02667 // 02668 // This section is nonpagable. 02669 // 02670 02671 PointerPte = MiGetPteAddress ( 02672 (PCHAR)Base + FoundSection->VirtualAddress); 02673 LastPte = MiGetPteAddress ((PCHAR)Base + 02674 FoundSection->VirtualAddress + 02675 (FoundSection->SizeOfRawData - 1)); 02676 ASSERT (PointerPte <= LastPte); 02677 MmLockPagableSectionByHandle(ExPageLockHandle); 02678 LOCK_SYSTEM_WS (OldIrqlWs); 02679 LOCK_PFN (OldIrql); 02680 MiLockCode (PointerPte, LastPte, MM_LOCK_BY_NONPAGE); 02681 UNLOCK_PFN (OldIrql); 02682 UNLOCK_SYSTEM_WS (OldIrqlWs); 02683 MmUnlockPagableImageSection(ExPageLockHandle); 02684 } 02685 i -= 1; 02686 FoundSection += 1; 02687 } 02688 return; 02689 }
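
The usual pairing is with MmPageEntireDriver: a driver makes its whole image pagable while it owns no devices, then calls this routine to restore the image-specified paging once a device arrives. A hedged sketch of that pattern follows; MyAddDevice is a hypothetical AddDevice routine and the device-creation details are omitted.

#include <ntddk.h>

DRIVER_INITIALIZE DriverEntry;
DRIVER_ADD_DEVICE MyAddDevice;     /* hypothetical AddDevice routine */

NTSTATUS MyAddDevice (PDRIVER_OBJECT DriverObject, PDEVICE_OBJECT Pdo)
{
    UNREFERENCED_PARAMETER (DriverObject);
    UNREFERENCED_PARAMETER (Pdo);

    /* A device now exists - lock the image sections (IAT, .text, .data)
       back down exactly as the image headers specify. */
    MmResetDriverPaging ((PVOID)DriverEntry);

    return STATUS_SUCCESS;
}

NTSTATUS DriverEntry (PDRIVER_OBJECT DriverObject, PUNICODE_STRING RegistryPath)
{
    UNREFERENCED_PARAMETER (RegistryPath);

    DriverObject->DriverExtension->AddDevice = MyAddDevice;

    /* No devices yet - let the entire driver be paged out. */
    MmPageEntireDriver ((PVOID)DriverEntry);

    return STATUS_SUCCESS;
}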

LOGICAL MmResourcesAvailable ( IN POOL_TYPE PoolType, IN SIZE_T NumberOfBytes, IN EX_POOL_PRIORITY Priority )

Definition at line 589 of file allocpag.c.

References ASSERT, BASE_POOL_TYPE_MASK, BYTES_TO_PAGES, EX_POOL_PRIORITY, FALSE, HighPoolPriority, KeSetEvent(), LOCK_PFN2, MI_MEMORY_MAKER, MI_SESSION_POOL_SIZE, MI_UNUSED_SEGMENTS_SURPLUS, MiIssuePageExtendRequestNoWait(), MmAllocatedNonPagedPool, MmMaximumNonPagedPoolInBytes, MmPagedPoolInfo, MmSessionSpace, MmSizeOfPagedPoolInBytes, MmTotalCommitLimitMaximum, MmTotalCommittedPages, MmUnusedSegmentCleanup, MmUnusedSegmentForceFree, MmUnusedSegmentList, MUST_SUCCEED_POOL_TYPE_MASK, NonPagedPool, NormalPoolPriority, PAGE_SHIFT, PagedPool, _MM_SESSION_SPACE::PagedPoolBytes, PsGetCurrentThread, SESSION_POOL_MASK, TRUE, and UNLOCK_PFN2.

Referenced by ExAllocatePoolWithTagPriority().

00597 : 00598 00599 This function examines various resources to determine if this 00600 pool allocation should be allowed to proceed. 00601 00602 Arguments: 00603 00604 PoolType - Supplies the type of pool to retrieve information about. 00605 00606 NumberOfBytes - Supplies the number of bytes to allocate. 00607 00608 Priority - Supplies an indication as to how important it is that this 00609 request succeed under low available resource conditions. 00610 Return Value: 00611 00612 TRUE if the pool allocation should be allowed to proceed, FALSE if not. 00613 00614 --*/ 00615 00616 { 00617 KIRQL OldIrql; 00618 PFN_NUMBER NumberOfPages; 00619 SIZE_T FreePoolInBytes; 00620 PETHREAD Thread; 00621 LOGICAL SignalDereferenceThread; 00622 00623 ASSERT (Priority != HighPoolPriority); 00624 ASSERT ((PoolType & MUST_SUCCEED_POOL_TYPE_MASK) == 0); 00625 00626 NumberOfPages = BYTES_TO_PAGES (NumberOfBytes); 00627 00628 if ((PoolType & BASE_POOL_TYPE_MASK) == NonPagedPool) { 00629 FreePoolInBytes = MmMaximumNonPagedPoolInBytes - (MmAllocatedNonPagedPool << PAGE_SHIFT); 00630 } 00631 else if (PoolType & SESSION_POOL_MASK) { 00632 FreePoolInBytes = MI_SESSION_POOL_SIZE - MmSessionSpace->PagedPoolBytes; 00633 } 00634 else { 00635 FreePoolInBytes = MmSizeOfPagedPoolInBytes - (MmPagedPoolInfo.AllocatedPagedPool << PAGE_SHIFT); 00636 } 00637 00638 // 00639 // Check available VA space. 00640 // 00641 00642 if (Priority == NormalPoolPriority) { 00643 if ((SIZE_T)NumberOfBytes + 512*1024 > FreePoolInBytes) { 00644 Thread = PsGetCurrentThread (); 00645 if (!MI_MEMORY_MAKER(Thread)) { 00646 goto nopool; 00647 } 00648 } 00649 } 00650 else { 00651 if ((SIZE_T)NumberOfBytes + 2*1024*1024 > FreePoolInBytes) { 00652 Thread = PsGetCurrentThread (); 00653 if (!MI_MEMORY_MAKER(Thread)) { 00654 goto nopool; 00655 } 00656 } 00657 } 00658 00659 // 00660 // Paged allocations (session and normal) can also fail for lack of commit. 00661 // 00662 00663 if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) { 00664 if (MmTotalCommittedPages + NumberOfPages > MmTotalCommitLimitMaximum) { 00665 Thread = PsGetCurrentThread (); 00666 if (!MI_MEMORY_MAKER(Thread)) { 00667 MiIssuePageExtendRequestNoWait (NumberOfPages); 00668 goto nopool; 00669 } 00670 } 00671 } 00672 00673 return TRUE; 00674 00675 nopool: 00676 00677 // 00678 // Running low on pool - if this request is not for session pool, 00679 // force unused segment trimming when appropriate. 00680 // 00681 00682 if ((PoolType & SESSION_POOL_MASK) == 0) { 00683 00684 if (MI_UNUSED_SEGMENTS_SURPLUS()) { 00685 KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE); 00686 } 00687 else { 00688 SignalDereferenceThread = FALSE; 00689 LOCK_PFN2 (OldIrql); 00690 if (MmUnusedSegmentForceFree == 0) { 00691 if (!IsListEmpty(&MmUnusedSegmentList)) { 00692 SignalDereferenceThread = TRUE; 00693 MmUnusedSegmentForceFree = 30; 00694 } 00695 } 00696 UNLOCK_PFN2 (OldIrql); 00697 if (SignalDereferenceThread == TRUE) { 00698 KeSetEvent (&MmUnusedSegmentCleanup, 0, FALSE); 00699 } 00700 } 00701 } 00702 00703 return FALSE; 00704 }
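
As the Referenced-by list shows, ExAllocatePoolWithTagPriority consults this routine so that low-priority requests fail early rather than consuming the last few megabytes of pool or commit. Below is a hedged sketch of that kind of caller; the prototype is reproduced from this page because the routine is not a public export, and the helper name and pool tag are invented.

#include <ntddk.h>

/* Prototype as documented on this page (kernel-internal routine). */
LOGICAL MmResourcesAvailable (IN POOL_TYPE PoolType,
                              IN SIZE_T NumberOfBytes,
                              IN EX_POOL_PRIORITY Priority);

/* Sketch: an optional, low-priority allocation checks with Mm first and
   simply returns NULL when pool or commit is already tight. */
PVOID MyAllocateOptionalBuffer (SIZE_T NumberOfBytes)
{
    if (!MmResourcesAvailable (PagedPool, NumberOfBytes, LowPoolPriority)) {
        return NULL;
    }

    return ExAllocatePoolWithTag (PagedPool, NumberOfBytes, 'fuBM');
}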

NTKERNELAPI VOID MmReturnMemoryForHibernate ( IN PMDL Mdl )
 

Definition at line 8038 of file iosup.c.

References LOCK_PFN2, MiDecrementReferenceCount(), PAGE_SHIFT, and UNLOCK_PFN2.

:

    Returns memory obtained from MmGatherMemoryForHibernate.

Arguments:

    Mdl - Supplies an MDL, the start VA field should be NULL.  The length
          field indicates how many pages to return.

Return Value:

    None.

Environment:

    Kernel mode, IRQL of APC_LEVEL or below.

--*/

{
    KIRQL OldIrql;
    PFN_NUMBER PagesNeeded;
    PPFN_NUMBER Pages;

    PagesNeeded = (Mdl->ByteCount >> PAGE_SHIFT);
    Pages = (PPFN_NUMBER)(Mdl + 1);

    LOCK_PFN2 (OldIrql);
    do {
        MiDecrementReferenceCount (*Pages);
        Pages += 1;
        PagesNeeded -= 1;
    } while (PagesNeeded);
    UNLOCK_PFN2 (OldIrql);
    return;
}
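
This is the release half of the MmGatherMemoryForHibernate pairing. The following is a heavily hedged sketch of the round trip: the MmGatherMemoryForHibernate prototype shown is an assumption based on its use in this code base rather than a documented export, and the on-stack MDL wrapper is purely illustrative.

#include <ntddk.h>

/* Assumed kernel-internal prototypes (not public exports). */
BOOLEAN MmGatherMemoryForHibernate (IN PMDL Mdl, IN BOOLEAN Wait);
VOID MmReturnMemoryForHibernate (IN PMDL Mdl);

/* Sketch: grab four physical pages for a hibernate-style operation and
   hand them back.  StartVa stays NULL and ByteCount carries the page
   count, matching what the routine above expects. */
VOID MyHibernateScratchPages (VOID)
{
    struct {
        MDL        Mdl;
        PFN_NUMBER Pfns[4];
    } MdlBuffer;

    RtlZeroMemory (&MdlBuffer, sizeof (MdlBuffer));
    MmInitializeMdl (&MdlBuffer.Mdl, NULL, 4 * PAGE_SIZE);

    if (MmGatherMemoryForHibernate (&MdlBuffer.Mdl, TRUE)) {

        /* The PFNs are now in MmGetMdlPfnArray(&MdlBuffer.Mdl); each
           carries a reference that this call drops. */
        MmReturnMemoryForHibernate (&MdlBuffer.Mdl);
    }
}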

VOID MmReturnPoolQuota ( IN POOL_TYPE PoolType, IN SIZE_T ReturnedQuota )

Definition at line 1212 of file mmquota.c.

References MmTotalNonPagedPoolQuota, MmTotalPagedPoolQuota, and PagedPool.

Referenced by PsReturnPoolQuota().

:

    Returns pool quota.

Arguments:

    PoolType - Supplies the pool type of the quota to be returned.

    ReturnedQuota - Number of bytes returned.

Return Value:

    NONE.

Environment:

    Kernel mode, QUOTA SPIN LOCK HELD!!

--*/

{

    if (PoolType == PagedPool) {
        MmTotalPagedPoolQuota -= ReturnedQuota;
    } else {
        MmTotalNonPagedPoolQuota -= ReturnedQuota;
    }

    return;
}

NTKERNELAPI HANDLE MmSecureVirtualMemory ( IN PVOID Address, IN SIZE_T Size, IN ULONG ProbeMode )

Definition at line 4177 of file mapview.c.

References ASSERT, CHAR, _MMVAD::EndingVpn, _MMSECURE_ENTRY::EndVpn, ExAllocatePoolWithTag, EXCEPTION_EXECUTE_HANDLER, ExFreePool(), FALSE, Handle, _MMVAD::LeftChild, _MMSECURE_ENTRY::List, LOCK_ADDRESS_SPACE, LOCK_WS_UNSAFE, MI_VA_TO_VPN, MiDoesPdeExistAndMakeValid(), MiDoesPpeExistAndMakeValid, MiGetPdeAddress, MiGetPteAddress, MiGetVirtualAddressMappedByPte, MiInsertConflictInList(), MiIsPteOnPdeBoundary, MiLocateAddress(), MiPhysicalViewAdjuster(), MiRemoveConflictFromList(), MM_EXECUTE_READWRITE, MM_EXECUTE_WRITECOPY, MM_NOACCESS, MM_READWRITE, MMSECURE_ENTRY, MMVAD, MMVADKEY, NonPagedPool, NULL, PAGE_SIZE, PAGED_CODE, _MMVAD::Parent, PMMSECURE_ENTRY, ProbeForWrite(), PsGetCurrentProcess, _MMVAD::RightChild, Size, _MMVAD::StartingVpn, _MMSECURE_ENTRY::StartVpn, _MMVAD::u, _MMPTE::u, _MMVAD::u2, _MMSECURE_ENTRY::u2, _MMVAD::u3, UNLOCK_ADDRESS_SPACE, UNLOCK_WS_UNSAFE, _EPROCESS::VadFreeHint, _EPROCESS::VadHint, and _EPROCESS::VadRoot.

04185 : 04186 04187 This routine probes the requested address range and protects 04188 the specified address range from having its protection made 04189 more restricted and being deleted. 04190 04191 MmUnsecureVirtualMemory is used to allow the range to return 04192 to a normal state. 04193 04194 Arguments: 04195 04196 Address - Supplies the base address to probe and secure. 04197 04198 Size - Supplies the size of the range to secure. 04199 04200 ProbeMode - Supplies one of PAGE_READONLY or PAGE_READWRITE. 04201 04202 Return Value: 04203 04204 Returns a handle to be used to unsecure the range. 04205 If the range could not be locked because of protection 04206 problems or noncommitted memory, the value (HANDLE)0 04207 is returned. 04208 04209 Environment: 04210 04211 Kernel Mode. 04212 04213 --*/ 04214 04215 { 04216 ULONG_PTR EndAddress; 04217 PVOID StartAddress; 04218 CHAR Temp; 04219 ULONG Probe; 04220 HANDLE Handle; 04221 PMMVAD Vad; 04222 PMMVAD NewVad; 04223 PMMSECURE_ENTRY Secure; 04224 PEPROCESS Process; 04225 PMMPTE PointerPpe; 04226 PMMPTE PointerPde; 04227 PMMPTE PointerPte; 04228 PMMPTE LastPte; 04229 MMLOCK_CONFLICT Conflict; 04230 ULONG Waited; 04231 04232 PAGED_CODE(); 04233 04234 if ((ULONG_PTR)Address + Size > (ULONG_PTR)MM_HIGHEST_USER_ADDRESS || (ULONG_PTR)Address + Size <= (ULONG_PTR)Address) { 04235 return (HANDLE)0; 04236 } 04237 04238 Handle = (HANDLE)0; 04239 04240 Probe = (ProbeMode == PAGE_READONLY); 04241 04242 Process = PsGetCurrentProcess(); 04243 StartAddress = Address; 04244 04245 LOCK_ADDRESS_SPACE (Process); 04246 04247 // 04248 // Check for a private committed VAD first instead of probing to avoid all 04249 // the page faults and zeroing. If we find one, then we run the PTEs 04250 // instead. 04251 // 04252 04253 if (Size >= 64 * 1024) { 04254 EndAddress = (ULONG_PTR)StartAddress + Size - 1; 04255 Vad = MiLocateAddress (StartAddress); 04256 04257 if (Vad == NULL) { 04258 goto Return1; 04259 } 04260 04261 if (Vad->u.VadFlags.UserPhysicalPages == 1) { 04262 goto Return1; 04263 } 04264 04265 if (Vad->u.VadFlags.MemCommit == 0) { 04266 goto LongWay; 04267 } 04268 04269 if (Vad->u.VadFlags.PrivateMemory == 0) { 04270 goto LongWay; 04271 } 04272 04273 if (Vad->u.VadFlags.PhysicalMapping == 1) { 04274 goto LongWay; 04275 } 04276 04277 ASSERT (Vad->u.VadFlags.Protection); 04278 04279 if ((MI_VA_TO_VPN (StartAddress) < Vad->StartingVpn) || 04280 (MI_VA_TO_VPN (EndAddress) > Vad->EndingVpn)) { 04281 goto Return1; 04282 } 04283 04284 if (Vad->u.VadFlags.Protection == MM_NOACCESS) { 04285 goto LongWay; 04286 } 04287 04288 if (ProbeMode == PAGE_READONLY) { 04289 if (Vad->u.VadFlags.Protection > MM_EXECUTE_WRITECOPY) { 04290 goto LongWay; 04291 } 04292 } 04293 else { 04294 if (Vad->u.VadFlags.Protection != MM_READWRITE && 04295 Vad->u.VadFlags.Protection != MM_EXECUTE_READWRITE) { 04296 goto LongWay; 04297 } 04298 } 04299 04300 // 04301 // Check individual page permissions. 04302 // 04303 04304 PointerPde = MiGetPdeAddress (StartAddress); 04305 PointerPpe = MiGetPteAddress (PointerPde); 04306 PointerPte = MiGetPteAddress (StartAddress); 04307 LastPte = MiGetPteAddress (EndAddress); 04308 04309 LOCK_WS_UNSAFE (Process); 04310 04311 do { 04312 04313 while (MiDoesPpeExistAndMakeValid (PointerPpe, 04314 Process, 04315 FALSE, 04316 &Waited) == FALSE) { 04317 // 04318 // Page directory parent entry is empty, go to the next one. 
04319 // 04320 04321 PointerPpe += 1; 04322 PointerPde = MiGetVirtualAddressMappedByPte (PointerPpe); 04323 PointerPte = MiGetVirtualAddressMappedByPte (PointerPde); 04324 if (PointerPte > LastPte) { 04325 UNLOCK_WS_UNSAFE (Process); 04326 goto EditVad; 04327 } 04328 } 04329 04330 Waited = 0; 04331 04332 while (MiDoesPdeExistAndMakeValid (PointerPde, 04333 Process, 04334 FALSE, 04335 &Waited) == FALSE) { 04336 // 04337 // This page directory entry is empty, go to the next one. 04338 // 04339 04340 PointerPde += 1; 04341 PointerPpe = MiGetPteAddress (PointerPde); 04342 PointerPte = MiGetVirtualAddressMappedByPte (PointerPde); 04343 if (PointerPte > LastPte) { 04344 UNLOCK_WS_UNSAFE (Process); 04345 goto EditVad; 04346 } 04347 #if defined (_WIN64) 04348 if (MiIsPteOnPdeBoundary (PointerPde)) { 04349 Waited = 1; 04350 break; 04351 } 04352 #endif 04353 } 04354 04355 } while (Waited != 0); 04356 04357 while (PointerPte <= LastPte) { 04358 04359 if (MiIsPteOnPdeBoundary (PointerPte)) { 04360 04361 PointerPde = MiGetPteAddress (PointerPte); 04362 PointerPpe = MiGetPteAddress (PointerPde); 04363 04364 do { 04365 04366 while (MiDoesPpeExistAndMakeValid (PointerPpe, 04367 Process, 04368 FALSE, 04369 &Waited) == FALSE) { 04370 // 04371 // Page directory parent entry is empty, go to the next one. 04372 // 04373 04374 PointerPpe += 1; 04375 PointerPde = MiGetVirtualAddressMappedByPte (PointerPpe); 04376 PointerPte = MiGetVirtualAddressMappedByPte (PointerPde); 04377 04378 if (PointerPte > LastPte) { 04379 UNLOCK_WS_UNSAFE (Process); 04380 goto EditVad; 04381 } 04382 } 04383 04384 Waited = 0; 04385 04386 while (MiDoesPdeExistAndMakeValid (PointerPde, 04387 Process, 04388 FALSE, 04389 &Waited) == FALSE) { 04390 // 04391 // This page directory entry is empty, go to the next one. 04392 // 04393 04394 PointerPde += 1; 04395 PointerPpe = MiGetPteAddress (PointerPde); 04396 PointerPte = MiGetVirtualAddressMappedByPte (PointerPde); 04397 if (PointerPte > LastPte) { 04398 UNLOCK_WS_UNSAFE (Process); 04399 goto EditVad; 04400 } 04401 #if defined (_WIN64) 04402 if (MiIsPteOnPdeBoundary (PointerPde)) { 04403 Waited = 1; 04404 break; 04405 } 04406 #endif 04407 } 04408 04409 } while (Waited != 0); 04410 } 04411 if (PointerPte->u.Long) { 04412 UNLOCK_WS_UNSAFE (Process); 04413 goto LongWay; 04414 } 04415 PointerPte += 1; 04416 } 04417 UNLOCK_WS_UNSAFE (Process); 04418 } 04419 else { 04420 LongWay: 04421 04422 MiInsertConflictInList (&Conflict); 04423 04424 try { 04425 04426 if (ProbeMode == PAGE_READONLY) { 04427 04428 EndAddress = (ULONG_PTR)Address + Size - 1; 04429 EndAddress = (EndAddress & ~(PAGE_SIZE - 1)) + PAGE_SIZE; 04430 04431 do { 04432 Temp = *(volatile CHAR *)Address; 04433 Address = (PVOID)(((ULONG_PTR)Address & ~(PAGE_SIZE - 1)) + PAGE_SIZE); 04434 } while ((ULONG_PTR)Address != EndAddress); 04435 } else { 04436 ProbeForWrite (Address, (ULONG)Size, 1); // ****** temp ****** 04437 } 04438 04439 } except (EXCEPTION_EXECUTE_HANDLER) { 04440 MiRemoveConflictFromList (&Conflict); 04441 goto Return1; 04442 } 04443 04444 MiRemoveConflictFromList (&Conflict); 04445 04446 // 04447 // Locate VAD and add in secure descriptor. 
04448 // 04449 04450 EndAddress = (ULONG_PTR)StartAddress + Size - 1; 04451 Vad = MiLocateAddress (StartAddress); 04452 04453 if (Vad == NULL) { 04454 goto Return1; 04455 } 04456 04457 if (Vad->u.VadFlags.UserPhysicalPages == 1) { 04458 goto Return1; 04459 } 04460 04461 if ((MI_VA_TO_VPN (StartAddress) < Vad->StartingVpn) || 04462 (MI_VA_TO_VPN (EndAddress) > Vad->EndingVpn)) { 04463 04464 // 04465 // Not within the section virtual address descriptor, 04466 // return an error. 04467 // 04468 04469 goto Return1; 04470 } 04471 } 04472 04473 EditVad: 04474 04475 // 04476 // If this is a short VAD, it needs to be reallocated as a large 04477 // VAD. 04478 // 04479 04480 if ((Vad->u.VadFlags.PrivateMemory) && (!Vad->u.VadFlags.NoChange)) { 04481 04482 NewVad = ExAllocatePoolWithTag (NonPagedPool, 04483 sizeof(MMVAD), 04484 MMVADKEY); 04485 if (NewVad == NULL) { 04486 goto Return1; 04487 } 04488 04489 RtlZeroMemory (NewVad, sizeof(MMVAD)); 04490 RtlCopyMemory (NewVad, Vad, sizeof(MMVAD_SHORT)); 04491 NewVad->u.VadFlags.NoChange = 1; 04492 NewVad->u2.VadFlags2.OneSecured = 1; 04493 NewVad->u2.VadFlags2.StoredInVad = 1; 04494 NewVad->u2.VadFlags2.ReadOnly = Probe; 04495 NewVad->u3.Secured.StartVpn = (ULONG_PTR)StartAddress; 04496 NewVad->u3.Secured.EndVpn = EndAddress; 04497 04498 // 04499 // Replace the current VAD with this expanded VAD. 04500 // 04501 04502 LOCK_WS_UNSAFE (Process); 04503 if (Vad->Parent) { 04504 if (Vad->Parent->RightChild == Vad) { 04505 Vad->Parent->RightChild = NewVad; 04506 } else { 04507 ASSERT (Vad->Parent->LeftChild == Vad); 04508 Vad->Parent->LeftChild = NewVad; 04509 } 04510 } else { 04511 Process->VadRoot = NewVad; 04512 } 04513 if (Vad->LeftChild) { 04514 Vad->LeftChild->Parent = NewVad; 04515 } 04516 if (Vad->RightChild) { 04517 Vad->RightChild->Parent = NewVad; 04518 } 04519 if (Process->VadHint == Vad) { 04520 Process->VadHint = NewVad; 04521 } 04522 if (Process->VadFreeHint == Vad) { 04523 Process->VadFreeHint = NewVad; 04524 } 04525 04526 if ((Vad->u.VadFlags.PhysicalMapping == 1) || 04527 (Vad->u.VadFlags.WriteWatch == 1)) { 04528 04529 MiPhysicalViewAdjuster (Process, Vad, NewVad); 04530 } 04531 04532 UNLOCK_WS_UNSAFE (Process); 04533 ExFreePool (Vad); 04534 Handle = (HANDLE)&NewVad->u2.LongFlags2; 04535 goto Return1; 04536 } 04537 04538 // 04539 // This is already a large VAD, add the secure entry. 04540 // 04541 04542 if (Vad->u2.VadFlags2.OneSecured) { 04543 04544 // 04545 // This VAD already is secured. Move the info out of the 04546 // block into pool. 04547 // 04548 04549 Secure = ExAllocatePoolWithTag (NonPagedPool, 04550 sizeof (MMSECURE_ENTRY), 04551 'eSmM'); 04552 if (Secure == NULL) { 04553 goto Return1; 04554 } 04555 04556 ASSERT (Vad->u.VadFlags.NoChange == 1); 04557 Vad->u2.VadFlags2.OneSecured = 0; 04558 Vad->u2.VadFlags2.MultipleSecured = 1; 04559 Secure->u2.LongFlags2 = (ULONG) Vad->u.LongFlags; 04560 Secure->u2.VadFlags2.StoredInVad = 0; 04561 Secure->StartVpn = Vad->u3.Secured.StartVpn; 04562 Secure->EndVpn = Vad->u3.Secured.EndVpn; 04563 04564 InitializeListHead (&Vad->u3.List); 04565 InsertTailList (&Vad->u3.List, 04566 &Secure->List); 04567 } 04568 04569 if (Vad->u2.VadFlags2.MultipleSecured) { 04570 04571 // 04572 // This VAD already has a secured element in its list, allocate and 04573 // add in the new secured element. 
04574 // 04575 04576 Secure = ExAllocatePoolWithTag (NonPagedPool, 04577 sizeof (MMSECURE_ENTRY), 04578 'eSmM'); 04579 if (Secure == NULL) { 04580 goto Return1; 04581 } 04582 04583 Secure->u2.LongFlags2 = 0; 04584 Secure->u2.VadFlags2.ReadOnly = Probe; 04585 Secure->StartVpn = (ULONG_PTR)StartAddress; 04586 Secure->EndVpn = EndAddress; 04587 04588 InsertTailList (&Vad->u3.List, 04589 &Secure->List); 04590 Handle = (HANDLE)Secure; 04591 04592 } else { 04593 04594 // 04595 // This list does not have a secure element. Put it in the VAD. 04596 // 04597 04598 Vad->u.VadFlags.NoChange = 1; 04599 Vad->u2.VadFlags2.OneSecured = 1; 04600 Vad->u2.VadFlags2.StoredInVad = 1; 04601 Vad->u2.VadFlags2.ReadOnly = Probe; 04602 Vad->u3.Secured.StartVpn = (ULONG_PTR)StartAddress; 04603 Vad->u3.Secured.EndVpn = EndAddress; 04604 Handle = (HANDLE)&Vad->u2.LongFlags2; 04605 } 04606 04607 Return1: 04608 UNLOCK_ADDRESS_SPACE (Process); 04609 return Handle; 04610 }
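
The handle returned here is released with MmUnsecureVirtualMemory. Below is a minimal hedged sketch of the pairing from a driver that needs a user buffer's protection to stay put across a long operation; MyProcessBuffer is a hypothetical helper and the call is assumed to run in the context of the process that owns the buffer.

#include <ntifs.h>

VOID MyProcessBuffer (PVOID Buffer, SIZE_T Length);   /* hypothetical */

/* Sketch: prevent other threads in the process from deleting the range
   or making its protection more restrictive while we work on it. */
NTSTATUS MySecureAndUse (PVOID UserBuffer, SIZE_T Length)
{
    HANDLE Secure;

    Secure = MmSecureVirtualMemory (UserBuffer, Length, PAGE_READWRITE);

    if (Secure == NULL) {
        /* Not committed, wrong protection, or not a valid user range. */
        return STATUS_INVALID_PARAMETER;
    }

    MyProcessBuffer (UserBuffer, Length);

    MmUnsecureVirtualMemory (Secure);
    return STATUS_SUCCESS;
}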

NTSTATUS MmSessionCreate ( OUT PULONG SessionId )
 

Definition at line 974 of file session.c.

References ASSERT, FALSE, KeEnterCriticalRegion, KeLeaveCriticalRegion, LOCK_EXPANSION, MI_SESSION_SPACE_END, MiDereferenceSession(), MiGetPdeAddress, MiGetPpeAddress, MiHydra, MiSessionCount, MiSessionCreateInternal(), MiSessionInitializeWorkingSetList(), MmIsAddressValid(), MmSessionBase, MmSessionSpace, NT_SUCCESS, NTSTATUS(), PsGetCurrentProcess, Status, _MMSUPPORT::u, _MMPTE::u, _MM_SESSION_SPACE::u, UNLOCK_EXPANSION, _EPROCESS::Vm, and ZeroKernelPte.

Referenced by NtSetSystemInformation().

00980 : 00981 00982 Called from NtSetSystemInformation() to create a session space 00983 in the calling process with the specified SessionId. An error is returned 00984 if the calling process already has a session Space. 00985 00986 Arguments: 00987 00988 SessionId - Supplies a pointer to place the resulting session id in. 00989 00990 Return Value: 00991 00992 Various NTSTATUS error codes. 00993 00994 Environment: 00995 00996 Kernel mode, no mutexes held. 00997 00998 --*/ 00999 01000 { 01001 KIRQL OldIrql; 01002 NTSTATUS Status; 01003 PEPROCESS CurrentProcess; 01004 #if DBG 01005 PMMPTE StartPde; 01006 PMMPTE EndPde; 01007 #endif 01008 01009 if (MiHydra == FALSE) { 01010 return STATUS_INVALID_SYSTEM_SERVICE; 01011 } 01012 01013 CurrentProcess = PsGetCurrentProcess(); 01014 01015 // 01016 // A simple check to see if the calling process already has a session space. 01017 // No need to go through all this if it does. Creation races are caught 01018 // below and recovered from regardless. 01019 // 01020 01021 if (CurrentProcess->Vm.u.Flags.ProcessInSession == 1) { 01022 return STATUS_ALREADY_COMMITTED; 01023 } 01024 01025 if (CurrentProcess->Vm.u.Flags.SessionLeader == 0) { 01026 01027 // 01028 // Only the session manager can create a session. 01029 // 01030 01031 return STATUS_INVALID_SYSTEM_SERVICE; 01032 } 01033 01034 #if DBG 01035 ASSERT (MmIsAddressValid(MmSessionSpace) == FALSE); 01036 01037 #if defined (_WIN64) 01038 ASSERT ((MiGetPpeAddress(MmSessionBase))->u.Long == ZeroKernelPte.u.Long); 01039 #else 01040 StartPde = MiGetPdeAddress (MmSessionBase); 01041 EndPde = MiGetPdeAddress (MI_SESSION_SPACE_END); 01042 01043 while (StartPde < EndPde) { 01044 ASSERT (StartPde->u.Long == ZeroKernelPte.u.Long); 01045 StartPde += 1; 01046 } 01047 #endif 01048 01049 #endif 01050 01051 KeEnterCriticalRegion(); 01052 01053 Status = MiSessionCreateInternal (SessionId); 01054 01055 if (!NT_SUCCESS(Status)) { 01056 KeLeaveCriticalRegion(); 01057 return Status; 01058 } 01059 01060 LOCK_EXPANSION (OldIrql); 01061 01062 MiSessionCount += 1; 01063 01064 UNLOCK_EXPANSION (OldIrql); 01065 01066 // 01067 // Add the session space to the working set list. 01068 // 01069 01070 Status = MiSessionInitializeWorkingSetList (); 01071 01072 if (!NT_SUCCESS(Status)) { 01073 MiDereferenceSession (); 01074 KeLeaveCriticalRegion(); 01075 return Status; 01076 } 01077 01078 KeLeaveCriticalRegion(); 01079 01080 MmSessionSpace->u.Flags.Initialized = 1; 01081 01082 LOCK_EXPANSION (OldIrql); 01083 01084 CurrentProcess->Vm.u.Flags.ProcessInSession = 1; 01085 01086 UNLOCK_EXPANSION (OldIrql); 01087 01088 return Status; 01089 }
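
The checks at the top of the routine define the calling contract: the current process must have been marked a session leader via MmSessionLeader and must not already be attached to a session. The following is a rough, heavily hedged sketch of that ordering; both routines are kernel-internal and in reality are reached through NtSetSystemInformation, not from a driver.

#include <ntddk.h>

/* Internal prototypes, as documented on this page. */
VOID MmSessionLeader (IN PEPROCESS Process);
NTSTATUS MmSessionCreate (OUT PULONG SessionId);

/* Illustrative only: running in the session manager's context, mark the
   process as leader and then create a session.  MmSessionCreate fails
   with STATUS_ALREADY_COMMITTED if the process is already in a session. */
NTSTATUS MyCreateSessionHere (PULONG SessionId)
{
    MmSessionLeader (PsGetCurrentProcess ());

    return MmSessionCreate (SessionId);
}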

NTSTATUS MmSessionDelete ( IN ULONG SessionId )
 

VOID MmSessionLeader ( IN PEPROCESS Process )
 

Definition at line 109 of file session.c.

:

    Mark the argument process as having the ability to create or delete
    session spaces.  This is only granted to the session manager process.

Arguments:

    Process - Supplies a pointer to the privileged process.

Return Value:

    None.

Environment:

    Kernel mode.

--*/

{
    Process->Vm.u.Flags.SessionLeader = 1;
}

VOID MmSessionSetUnloadAddress ( IN PDRIVER_OBJECT pWin32KDevice )
 

Definition at line 140 of file session.c.

References ASSERT, MiHydra, MmIsAddressValid(), MmSessionSpace, PsGetCurrentProcess, TRUE, and _MM_SESSION_SPACE::Win32KDriverObject.

Referenced by NtSetSystemInformation().

:

    Copy the win32k.sys driver object to the session structure for use during
    unload.

Arguments:

    pWin32KDevice - Supplies a pointer to the win32k driver object.

Return Value:

    None.

Environment:

    Kernel mode.

--*/

{
    if (MiHydra == TRUE && PsGetCurrentProcess()->Vm.u.Flags.ProcessInSession == 1) {

        ASSERT (MmIsAddressValid(MmSessionSpace) == TRUE);

        RtlMoveMemory (&MmSessionSpace->Win32KDriverObject,
                       pWin32KDevice,
                       sizeof(DRIVER_OBJECT));
    }
}

BOOLEAN MmSetAddressRangeModified ( IN PVOID Address, IN SIZE_T Length )
 

Definition at line 5920 of file iosup.c.

References Count, FALSE, KeFlushEntireTb(), KeFlushMultipleTb(), KeFlushSingleTb(), LOCK_PFN2, MI_IS_PTE_DIRTY, MI_PFN_ELEMENT, MI_SET_PTE_CLEAN, MI_WRITE_VALID_PTE_NEW_PROTECTION, MiGetPteAddress, MiReleasePageFileSpace(), MM_MAXIMUM_FLUSH_COUNT, NULL, _MMPFN::OriginalPte, PAGE_SIZE, TRUE, _MMPTE::u, _MMPFN::u3, UNLOCK_PFN2, VOID(), and ZeroPte.

Referenced by CcFlushCache(), CcMapAndCopy(), CcPurgeAndClearCacheSection(), CcUnpinRepinnedBcb(), and CcZeroData().

05927 : 05928 05929 This routine sets the modified bit in the PFN database for the 05930 pages that correspond to the specified address range. 05931 05932 Note that the dirty bit in the PTE is cleared by this operation. 05933 05934 Arguments: 05935 05936 Address - Supplies the address of the start of the range. This 05937 range must reside within the system cache. 05938 05939 Length - Supplies the length of the range. 05940 05941 Return Value: 05942 05943 TRUE if at least one PTE was dirty in the range, FALSE otherwise. 05944 05945 Environment: 05946 05947 Kernel mode. APC_LEVEL and below for pagable addresses, 05948 DISPATCH_LEVEL and below for non-pagable addresses. 05949 05950 --*/ 05951 05952 { 05953 PMMPTE PointerPte; 05954 PMMPTE LastPte; 05955 PMMPFN Pfn1; 05956 PMMPTE FlushPte; 05957 MMPTE PteContents; 05958 MMPTE FlushContents; 05959 KIRQL OldIrql; 05960 PVOID VaFlushList[MM_MAXIMUM_FLUSH_COUNT]; 05961 ULONG Count; 05962 BOOLEAN Result; 05963 05964 Count = 0; 05965 Result = FALSE; 05966 05967 // 05968 // Loop on the copy on write case until the page is only 05969 // writable. 05970 // 05971 05972 PointerPte = MiGetPteAddress (Address); 05973 LastPte = MiGetPteAddress ((PVOID)((PCHAR)Address + Length - 1)); 05974 05975 LOCK_PFN2 (OldIrql); 05976 05977 do { 05978 05979 PteContents = *PointerPte; 05980 05981 if (PteContents.u.Hard.Valid == 1) { 05982 05983 Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber); 05984 Pfn1->u3.e1.Modified = 1; 05985 05986 if ((Pfn1->OriginalPte.u.Soft.Prototype == 0) && 05987 (Pfn1->u3.e1.WriteInProgress == 0)) { 05988 MiReleasePageFileSpace (Pfn1->OriginalPte); 05989 Pfn1->OriginalPte.u.Soft.PageFileHigh = 0; 05990 } 05991 05992 #ifdef NT_UP 05993 // 05994 // On uniprocessor systems no need to flush if this processor 05995 // doesn't think the PTE is dirty. 05996 // 05997 05998 if (MI_IS_PTE_DIRTY (PteContents)) { 05999 Result = TRUE; 06000 #else //NT_UP 06001 Result |= (BOOLEAN)(MI_IS_PTE_DIRTY (PteContents)); 06002 #endif //NT_UP 06003 MI_SET_PTE_CLEAN (PteContents); 06004 MI_WRITE_VALID_PTE_NEW_PROTECTION (PointerPte, PteContents); 06005 FlushContents = PteContents; 06006 FlushPte = PointerPte; 06007 06008 // 06009 // Clear the write bit in the PTE so new writes can be tracked. 06010 // 06011 06012 if (Count != MM_MAXIMUM_FLUSH_COUNT) { 06013 VaFlushList[Count] = Address; 06014 Count += 1; 06015 } 06016 #ifdef NT_UP 06017 } 06018 #endif //NT_UP 06019 } 06020 PointerPte += 1; 06021 Address = (PVOID)((PCHAR)Address + PAGE_SIZE); 06022 } while (PointerPte <= LastPte); 06023 06024 if (Count != 0) { 06025 if (Count == 1) { 06026 06027 (VOID)KeFlushSingleTb (VaFlushList[0], 06028 FALSE, 06029 TRUE, 06030 (PHARDWARE_PTE)FlushPte, 06031 FlushContents.u.Flush); 06032 06033 } else if (Count != MM_MAXIMUM_FLUSH_COUNT) { 06034 06035 KeFlushMultipleTb (Count, 06036 &VaFlushList[0], 06037 FALSE, 06038 TRUE, 06039 NULL, 06040 *(PHARDWARE_PTE)&ZeroPte.u.Flush); 06041 06042 } else { 06043 KeFlushEntireTb (FALSE, TRUE); 06044 } 06045 } 06046 UNLOCK_PFN2 (OldIrql); 06047 return Result; 06048 }
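
A minimal usage sketch (not taken from the source) of the routine above: a cache-manager-style caller marks a dirty range of the system cache as modified before scheduling a flush. FlushVa and FlushLength are hypothetical locals; the range must lie within the system cache and the caller must obey the IRQL rules noted above.

    BOOLEAN WasDirty;

    //
    // Hypothetical range within the system cache.
    //
    WasDirty = MmSetAddressRangeModified (FlushVa, FlushLength);

    if (WasDirty) {
        //
        // At least one PTE in the range was dirty.  The PFN modified bits
        // are now set, the hardware dirty bits have been cleared, and the
        // TB entries flushed, so any new write will re-dirty the PTEs.
        //
    }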

NTSTATUS MmSetBankedSection ( IN HANDLE ProcessHandle, IN PVOID VirtualAddress, IN ULONG BankLength, IN BOOLEAN ReadWriteBank, IN PBANKED_SECTION_ROUTINE BankRoutine, IN PVOID Context )
 

Definition at line 7311 of file iosup.c.

References ASSERT, _MMBANKED_SECTION::BankedRoutine, _MMBANKED_SECTION::BankShift, _MMBANKED_SECTION::BankSize, _MMBANKED_SECTION::BankTemplate, _MMBANKED_SECTION::BasedPte, _MMBANKED_SECTION::BasePhysicalPage, _MMBANKED_SECTION::Context, _MMBANKED_SECTION::CurrentMappedPte, _MMVAD::EndingVpn, ExAllocatePoolWithTag, KeAttachProcess(), KeDetachProcess(), KeFlushEntireTb(), KernelMode, LOCK_WS_AND_ADDRESS_SPACE, MI_GET_PAGE_FRAME_FROM_PTE, MI_MAKE_VALID_PTE, MI_SET_PTE_DIRTY, MI_VA_TO_VPN, MI_VPN_TO_VA, MiGetPteAddress, MiLocateAddress(), MM_READWRITE, MMBANKED_SECTION, NonPagedPool, NT_SUCCESS, NTSTATUS(), NULL, ObDereferenceObject, ObReferenceObjectByHandle(), PAGE_SHIFT, PAGE_SIZE, PAGED_CODE, PBANKED_SECTION_ROUTINE, PMMBANKED_SECTION, PsProcessType, PTE_SHIFT, _MMVAD::StartingVpn, Status, TRUE, _MMVAD::u, _MMPTE::u, _MMVAD::u4, UNLOCK_WS_AND_ADDRESS_SPACE, and ZeroPte.

07322 : 07323 07324 This function declares a mapped video buffer as a banked 07325 section. This allows banked video devices (i.e., even 07326 though the video controller has a megabyte or so of memory, 07327 only a small bank (like 64k) can be mapped at any one time. 07328 07329 In order to overcome this problem, the pager handles faults 07330 to this memory, unmaps the current bank, calls off to the 07331 video driver and then maps in the new bank. 07332 07333 This function creates the necessary structures to allow the 07334 video driver to be called from the pager. 07335 07336 ********************* NOTE NOTE NOTE ************************* 07337 At this time only read/write banks are supported! 07338 07339 Arguments: 07340 07341 ProcessHandle - Supplies a handle to the process in which to 07342 support the banked video function. 07343 07344 VirtualAddress - Supplies the virtual address where the video 07345 buffer is mapped in the specified process. 07346 07347 BankLength - Supplies the size of the bank. 07348 07349 ReadWriteBank - Supplies TRUE if the bank is read and write. 07350 07351 BankRoutine - Supplies a pointer to the routine that should be 07352 called by the pager. 07353 07354 Context - Supplies a context to be passed by the pager to the 07355 BankRoutine. 07356 07357 Return Value: 07358 07359 Returns the status of the function. 07360 07361 Environment: 07362 07363 Kernel mode, APC_LEVEL or below. 07364 07365 --*/ 07366 07367 { 07368 NTSTATUS Status; 07369 PEPROCESS Process; 07370 PMMVAD Vad; 07371 PMMPTE PointerPte; 07372 PMMPTE LastPte; 07373 MMPTE TempPte; 07374 ULONG_PTR size; 07375 LONG count; 07376 ULONG NumberOfPtes; 07377 PMMBANKED_SECTION Bank; 07378 07379 PAGED_CODE (); 07380 07381 UNREFERENCED_PARAMETER (ReadWriteBank); 07382 07383 // 07384 // Reference the specified process handle for VM_OPERATION access. 07385 // 07386 07387 Status = ObReferenceObjectByHandle ( ProcessHandle, 07388 PROCESS_VM_OPERATION, 07389 PsProcessType, 07390 KernelMode, 07391 (PVOID *)&Process, 07392 NULL ); 07393 07394 if (!NT_SUCCESS(Status)) { 07395 return Status; 07396 } 07397 07398 KeAttachProcess (&Process->Pcb); 07399 07400 // 07401 // Get the address creation mutex to block multiple threads from 07402 // creating or deleting address space at the same time and 07403 // get the working set mutex so virtual address descriptors can 07404 // be inserted and walked. Block APCs so an APC which takes a page 07405 // fault does not corrupt various structures. 07406 // 07407 07408 LOCK_WS_AND_ADDRESS_SPACE (Process); 07409 07410 // 07411 // Make sure the address space was not deleted, if so, return an error. 
07412 // 07413 07414 if (Process->AddressSpaceDeleted != 0) { 07415 Status = STATUS_PROCESS_IS_TERMINATING; 07416 goto ErrorReturn; 07417 } 07418 07419 Vad = MiLocateAddress (VirtualAddress); 07420 07421 if ((Vad == NULL) || 07422 (Vad->StartingVpn != MI_VA_TO_VPN (VirtualAddress)) || 07423 (Vad->u.VadFlags.PhysicalMapping == 0)) { 07424 Status = STATUS_NOT_MAPPED_DATA; 07425 goto ErrorReturn; 07426 } 07427 07428 size = PAGE_SIZE + ((Vad->EndingVpn - Vad->StartingVpn) << PAGE_SHIFT); 07429 if ((size % BankLength) != 0) { 07430 Status = STATUS_INVALID_VIEW_SIZE; 07431 goto ErrorReturn; 07432 } 07433 07434 count = -1; 07435 NumberOfPtes = BankLength; 07436 07437 do { 07438 NumberOfPtes = NumberOfPtes >> 1; 07439 count += 1; 07440 } while (NumberOfPtes != 0); 07441 07442 // 07443 // Turn VAD into Banked VAD 07444 // 07445 07446 NumberOfPtes = BankLength >> PAGE_SHIFT; 07447 07448 Bank = ExAllocatePoolWithTag (NonPagedPool, 07449 sizeof (MMBANKED_SECTION) + 07450 (NumberOfPtes - 1) * sizeof(MMPTE), 07451 ' mM'); 07452 if (Bank == NULL) { 07453 Status = STATUS_INSUFFICIENT_RESOURCES; 07454 goto ErrorReturn; 07455 } 07456 07457 Bank->BankShift = PTE_SHIFT + count - PAGE_SHIFT; 07458 07459 PointerPte = MiGetPteAddress(MI_VPN_TO_VA (Vad->StartingVpn)); 07460 ASSERT (PointerPte->u.Hard.Valid == 1); 07461 07462 Vad->u4.Banked = Bank; 07463 Bank->BasePhysicalPage = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 07464 Bank->BasedPte = PointerPte; 07465 Bank->BankSize = BankLength; 07466 Bank->BankedRoutine = BankRoutine; 07467 Bank->Context = Context; 07468 Bank->CurrentMappedPte = PointerPte; 07469 07470 // 07471 // Build the template PTEs structure. 07472 // 07473 07474 count = 0; 07475 TempPte = ZeroPte; 07476 07477 MI_MAKE_VALID_PTE (TempPte, 07478 Bank->BasePhysicalPage, 07479 MM_READWRITE, 07480 PointerPte); 07481 07482 if (TempPte.u.Hard.Write) { 07483 MI_SET_PTE_DIRTY (TempPte); 07484 } 07485 07486 do { 07487 Bank->BankTemplate[count] = TempPte; 07488 TempPte.u.Hard.PageFrameNumber += 1; 07489 count += 1; 07490 } while ((ULONG)count < NumberOfPtes ); 07491 07492 LastPte = MiGetPteAddress (MI_VPN_TO_VA (Vad->EndingVpn)); 07493 07494 // 07495 // Set all PTEs within this range to zero. Any faults within 07496 // this range will call the banked routine before making the 07497 // page valid. 07498 // 07499 07500 RtlFillMemory (PointerPte, 07501 (size >> (PAGE_SHIFT - PTE_SHIFT)), 07502 (UCHAR)ZeroPte.u.Long); 07503 07504 KeFlushEntireTb (TRUE, TRUE); 07505 07506 Status = STATUS_SUCCESS; 07507 ErrorReturn: 07508 07509 UNLOCK_WS_AND_ADDRESS_SPACE (Process); 07510 KeDetachProcess(); 07511 ObDereferenceObject (Process); 07512 return Status; 07513 }
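
A minimal, hypothetical sketch of how a banked SVGA miniport might register its bank-switch callback for a previously mapped frame buffer. VideoProcessHandle, FrameBufferVa, SvgaSwitchBank and SvgaDeviceExtension are assumptions, not names from the source; the 64K bank size must evenly divide the mapped view, and only read/write banks are supported.

    NTSTATUS Status;

    Status = MmSetBankedSection (VideoProcessHandle,
                                 FrameBufferVa,
                                 64 * 1024,            // BankLength
                                 TRUE,                 // ReadWriteBank
                                 SvgaSwitchBank,       // PBANKED_SECTION_ROUTINE
                                 SvgaDeviceExtension); // Context passed to the callback

    if (!NT_SUCCESS (Status)) {
        //
        // The VAD was not a physical mapping, the bank size did not divide
        // the view evenly, or the banked-section pool could not be allocated.
        //
    }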

NTKERNELAPI VOID FASTCALL MmSetHardFaultNotifyRoutine ( IN PHARD_FAULT_NOTIFY_ROUTINE NotifyRoutine )
 

VOID MmSetKernelDumpRange ( IN PVOID DumpContext )
 

VOID MmSetMemoryPriorityProcess ( IN PEPROCESS Process, IN UCHAR MemoryPriority )
 

Definition at line 5567 of file procsup.c.

References FALSE, KeAttachProcess(), KeDetachProcess(), LOCK_EXPANSION, LOCK_WS, _MMSUPPORT::MaximumWorkingSetSize, MEMORY_PRIORITY_BACKGROUND, _MMSUPPORT::MinimumWorkingSetSize, MiTrimWorkingSet(), MmAvailablePages, MmMoreThanEnoughFreePages, MmNumberOfPhysicalPages, MmSmallSystem, MmSystemSize, MmWorkingSetList, MmWorkingSetReductionMax, PAGE_SIZE, PsGetCurrentProcess, _MMWSL::Quota, TRUE, UNLOCK_EXPANSION, UNLOCK_WS, and _MMSUPPORT::WorkingSetSize.

Referenced by NtSetInformationProcess(), and PsSetProcessPriorityByClass().

05574 : 05575 05576 Sets the memory priority of a process. 05577 05578 Arguments: 05579 05580 Process - Supplies the process to update 05581 05582 MemoryPriority - Supplies the new memory priority of the process 05583 05584 Return Value: 05585 05586 None. 05587 05588 --*/ 05589 05590 { 05591 KIRQL OldIrql; 05592 UCHAR OldPriority; 05593 05594 if (MmSystemSize == MmSmallSystem && MmNumberOfPhysicalPages < ((15*1024*1024)/PAGE_SIZE)) { 05595 05596 // 05597 // If this is a small system, make every process BACKGROUND. 05598 // 05599 05600 MemoryPriority = MEMORY_PRIORITY_BACKGROUND; 05601 } 05602 05603 LOCK_EXPANSION (OldIrql); 05604 05605 OldPriority = Process->Vm.MemoryPriority; 05606 Process->Vm.MemoryPriority = MemoryPriority; 05607 05608 UNLOCK_EXPANSION (OldIrql); 05609 05610 #ifndef _MI_USE_CLAIMS_ 05611 if (OldPriority > MemoryPriority && MmAvailablePages < MmMoreThanEnoughFreePages) { 05612 // 05613 // The priority is being lowered, see if the working set 05614 // should be trimmed. 05615 // 05616 05617 PMMSUPPORT VmSupport; 05618 ULONG i; 05619 ULONG Trim; 05620 LOGICAL Attached; 05621 05622 VmSupport = &Process->Vm; 05623 i = VmSupport->WorkingSetSize - VmSupport->MaximumWorkingSetSize; 05624 if ((LONG)i > 0) { 05625 Trim = i; 05626 if (Trim > MmWorkingSetReductionMax) { 05627 Trim = MmWorkingSetReductionMax; 05628 } 05629 if (Process != PsGetCurrentProcess()) { 05630 KeAttachProcess (&Process->Pcb); 05631 Attached = TRUE; 05632 } 05633 else { 05634 Attached = FALSE; 05635 } 05636 LOCK_WS (Process); 05637 05638 Trim = MiTrimWorkingSet (Trim, 05639 VmSupport, 05640 FALSE); 05641 05642 MmWorkingSetList->Quota = VmSupport->WorkingSetSize; 05643 if (MmWorkingSetList->Quota < VmSupport->MinimumWorkingSetSize) { 05644 MmWorkingSetList->Quota = VmSupport->MinimumWorkingSetSize; 05645 } 05646 05647 UNLOCK_WS (Process); 05648 if (Attached == TRUE) { 05649 KeDetachProcess(); 05650 } 05651 } 05652 } 05653 #endif 05654 return; 05655 }
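
A minimal sketch (hypothetical caller, in the spirit of the process-priority paths listed above): drop a process to background memory priority. TargetProcess is an assumed PEPROCESS; the routine itself attaches and trims the working set if it now exceeds its maximum.

    //
    // Lower the memory priority; on small systems this is forced to
    // MEMORY_PRIORITY_BACKGROUND regardless of the value passed in.
    //
    MmSetMemoryPriorityProcess (TargetProcess, MEMORY_PRIORITY_BACKGROUND);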

NTKERNELAPI VOID FASTCALL MmSetPageFaultNotifyRoutine ( IN PPAGE_FAULT_NOTIFY_ROUTINE NotifyRoutine )
 

BOOLEAN MmSetPageProtection ( IN PVOID VirtualAddress, IN SIZE_T NumberOfBytes, IN ULONG NewProtect )
 

Definition at line 4027 of file iosup.c.

References BYTES_TO_PAGES, EXCEPTION_EXECUTE_HANDLER, FALSE, KeFlushSingleTb(), LOCK_PFN, MI_IS_PHYSICAL_ADDRESS, MI_MAKE_VALID_PTE, MiGetPteAddress, MiMakeProtectionMask(), PAGE_SHIFT, TRUE, _MMPTE::u, and UNLOCK_PFN.

Referenced by KiI386PentiumLockErrataFixup().

04035 : 04036 04037 This function sets the specified virtual address range to the desired 04038 protection. This assumes that the virtual addresses are backed by PTEs 04039 which can be set (ie: not in kseg0 or large pages). 04040 04041 Arguments: 04042 04043 VirtualAddress - Supplies the start address to protect. 04044 04045 NumberOfBytes - Supplies the number of bytes to set. 04046 04047 NewProtect - Supplies the protection to set the pages to (PAGE_XX). 04048 04049 Return Value: 04050 04051 TRUE if the protection was applied, FALSE if not. 04052 04053 Environment: 04054 04055 Kernel mode, IRQL of APC_LEVEL or below. 04056 04057 --*/ 04058 04059 { 04060 PFN_NUMBER i; 04061 PFN_NUMBER NumberOfPages; 04062 PMMPTE PointerPte; 04063 MMPTE TempPte; 04064 MMPTE NewPteContents; 04065 KIRQL OldIrql; 04066 ULONG ProtectionMask; 04067 04068 if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) { 04069 return FALSE; 04070 } 04071 04072 try { 04073 ProtectionMask = MiMakeProtectionMask (NewProtect); 04074 } except (EXCEPTION_EXECUTE_HANDLER) { 04075 return FALSE; 04076 } 04077 04078 PointerPte = MiGetPteAddress (VirtualAddress); 04079 NumberOfPages = BYTES_TO_PAGES (NumberOfBytes); 04080 04081 LOCK_PFN (OldIrql); 04082 04083 for (i = 0; i < NumberOfPages; i += 1) { 04084 TempPte.u.Long = PointerPte->u.Long; 04085 04086 MI_MAKE_VALID_PTE (NewPteContents, 04087 TempPte.u.Hard.PageFrameNumber, 04088 ProtectionMask, 04089 PointerPte); 04090 04091 KeFlushSingleTb ((PVOID)((PUCHAR)VirtualAddress + (i << PAGE_SHIFT)), 04092 TRUE, 04093 TRUE, 04094 (PHARDWARE_PTE)PointerPte, 04095 NewPteContents.u.Flush); 04096 04097 PointerPte += 1; 04098 } 04099 04100 UNLOCK_PFN (OldIrql); 04101 04102 return TRUE; 04103 }
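
A minimal sketch, not from the source, of the patch-style usage suggested by the caller listed above: make one page of kernel code writable, apply a fix-up, then restore read-only protection. PatchAddress is hypothetical and must be backed by ordinary PTEs (not KSEG0 or large pages).

    if (MmSetPageProtection (PatchAddress, PAGE_SIZE, PAGE_READWRITE)) {

        // ... apply the fix-up to the now-writable page ...

        MmSetPageProtection (PatchAddress, PAGE_SIZE, PAGE_READONLY);
    }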

LOGICAL MmSetSpecialPool ( IN LOGICAL Enable )
 

Definition at line 3141 of file allocpag.c.

References LOCK_PFN2, MiSpecialPoolEnabled, and UNLOCK_PFN2.

03147 : 03148 03149 This routine enables/disables special pool. This allows callers to ensure 03150 that subsequent allocations do not come from special pool. It is relied 03151 upon by callers that require KSEG0 addresses. 03152 03153 Arguments: 03154 03155 Enable - Supplies TRUE to enable special pool, FALSE to disable it. 03156 03157 Return Value: 03158 03159 Current special pool state (enabled or disabled). 03160 03161 Environment: 03162 03163 Kernel mode, IRQL of DISPATCH_LEVEL or below. 03164 03165 --*/ 03166 03167 { 03168 KIRQL OldIrql; 03169 LOGICAL OldEnable; 03170 03171 LOCK_PFN2 (OldIrql); 03172 03173 OldEnable = MiSpecialPoolEnabled; 03174 03175 MiSpecialPoolEnabled = Enable; 03176 03177 UNLOCK_PFN2 (OldIrql); 03178 03179 return OldEnable; 03180 }
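
A minimal sketch (assumed caller) of the intended pattern: a component that must receive a KSEG0 address disables special pool around its allocation and then restores the previous state. Buffer, Length and the pool tag are hypothetical.

    LOGICAL OldState;
    PVOID Buffer;

    OldState = MmSetSpecialPool (FALSE);

    Buffer = ExAllocatePoolWithTag (NonPagedPool, Length, 'pmTx');

    MmSetSpecialPool (OldState);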

NTSTATUS MmSetVerifierInformation ( IN OUT PVOID SystemInformation, IN ULONG SystemInformationLength )
 

Definition at line 4226 of file verifier.c.

References EXCEPTION_EXECUTE_HANDLER, ExRaiseStatus(), FALSE, KeEnterCriticalRegion, KeLeaveCriticalRegion, KeReleaseMutant(), KernelMode, KeWaitForSingleObject(), MmSystemLoadLock, MmVerifierData, NTSTATUS(), NULL, PAGED_CODE, Status, VerifierModifyableOptions, VerifierOptionChanges, and WrVirtualMemory.

Referenced by NtSetSystemInformation().

04233 : 04234 04235 This routine sets any driver verifier flags that can be done without 04236 rebooting. 04237 04238 Arguments: 04239 04240 SystemInformation - Gets and returns the driver verification flags. 04241 04242 SystemInformationLength - Supplies the length of the SystemInformation 04243 buffer. 04244 04245 Return Value: 04246 04247 Returns the status of the operation. 04248 04249 Environment: 04250 04251 The SystemInformation buffer is in user space and our caller has wrapped 04252 a try-except around this entire routine. Capture any exceptions here and 04253 release resources accordingly. 04254 04255 --*/ 04256 04257 { 04258 ULONG UserFlags; 04259 ULONG NewFlags; 04260 ULONG NewFlagsOn; 04261 ULONG NewFlagsOff; 04262 NTSTATUS Status; 04263 PULONG UserVerifyBuffer; 04264 04265 PAGED_CODE(); 04266 04267 if (SystemInformationLength < sizeof (ULONG)) { 04268 ExRaiseStatus (STATUS_INFO_LENGTH_MISMATCH); 04269 } 04270 04271 UserVerifyBuffer = (PULONG)SystemInformation; 04272 04273 // 04274 // Synchronize all changes to the flags here. 04275 // 04276 04277 Status = STATUS_SUCCESS; 04278 04279 KeEnterCriticalRegion(); 04280 04281 KeWaitForSingleObject (&MmSystemLoadLock, 04282 WrVirtualMemory, 04283 KernelMode, 04284 FALSE, 04285 (PLARGE_INTEGER)NULL); 04286 04287 try { 04288 04289 UserFlags = *UserVerifyBuffer; 04290 04291 // 04292 // Ensure nothing is being set or cleared that isn't supported. 04293 // 04294 // 04295 04296 NewFlagsOn = UserFlags & VerifierModifyableOptions; 04297 04298 NewFlags = MmVerifierData.Level | NewFlagsOn; 04299 04300 // 04301 // Any bits set in NewFlagsOff must be zeroed in the NewFlags. 04302 // 04303 04304 NewFlagsOff = ((~UserFlags) & VerifierModifyableOptions); 04305 04306 NewFlags &= ~NewFlagsOff; 04307 04308 if (NewFlags != MmVerifierData.Level) { 04309 VerifierOptionChanges += 1; 04310 MmVerifierData.Level = NewFlags; 04311 *UserVerifyBuffer = NewFlags; 04312 } 04313 04314 } except (EXCEPTION_EXECUTE_HANDLER) { 04315 Status = GetExceptionCode(); 04316 } 04317 04318 KeReleaseMutant (&MmSystemLoadLock, 1, FALSE, FALSE); 04319 04320 KeLeaveCriticalRegion(); 04321 04322 return Status; 04323 }
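
The on/off mask computation above is easiest to see with concrete numbers. The following trace uses illustrative values only and is not taken from the source.

    //
    //   MmVerifierData.Level        = 0x05   current verifier flags
    //   UserFlags                   = 0x03   flags requested by the caller
    //   VerifierModifyableOptions   = 0x06   bits changeable at run time
    //
    //   NewFlagsOn  = UserFlags  & VerifierModifyableOptions   = 0x02
    //   NewFlagsOff = (~UserFlags) & VerifierModifyableOptions = 0x04
    //   NewFlags    = (Level | NewFlagsOn) & ~NewFlagsOff      = 0x03
    //
    // Bits outside VerifierModifyableOptions (here 0x01) are preserved, so
    // only the runtime-modifiable options track the caller's request.
    //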

BOOLEAN MmShutdownSystem ( VOID )
 

Definition at line 41 of file shutdown.c.

References ASSERT, _MMPAGING_FILE::Bitmap, _MDL::ByteCount, _SUBSECTION::ControlArea, ExPageLockHandle, FALSE, _MMPAGING_FILE::File, _CONTROL_AREA::FilePointer, _MMPFNLIST::Flink, IoSynchronousPageWrite(), KeClearEvent, KeDelayExecutionThread(), KeInitializeEvent, KernelMode, KeSetEvent(), KeWaitForSingleObject(), LOCK_PFN, _MDL::MappedSystemVa, MDL_MAPPED_TO_SYSTEM_VA, MDL_PAGES_LOCKED, _MDL::MdlFlags, MI_ADD_LOCKED_PAGE_CHARGE_FOR_MODIFIED_PAGE, MI_PFN_ELEMENT, MI_REMOVE_LOCKED_PAGE_CHARGE, MI_SET_PFN_DELETED, MiCheckControlArea(), MiDecrementReferenceCount(), MiGetSubsectionAddress, MiRemoveZeroPage(), MiStartingOffset(), MiUnlinkPageFromList(), MM_EMPTY_LIST, MM_MAXIMUM_WRITE_CLUSTER, MmAvailablePages, MmFreeGoal, MmInitializeMdl, MmLockPagableSectionByHandle(), MmModifiedPageListHead, MmModifiedPageWriterEvent, MmModifiedWriteClusterSize, MmNumberOfPagingFiles, MmPagingFile, MmSystemShutdown, MmTwentySeconds, MmUnlockPagableImageSection(), MmUnmapLockedPages(), MmZeroPageFile, NT_SUCCESS, NTSTATUS(), NULL, _CONTROL_AREA::NumberOfMappedViews, _CONTROL_AREA::NumberOfPfnReferences, _MMPFN::OriginalPte, PAGE_SHIFT, PAGE_SIZE, _MMPFN::PteAddress, _MMPAGING_FILE::Size, _MDL::StartVa, Status, TRUE, _MMPTE::u, _CONTROL_AREA::u, _MMPFN::u1, _MMPFN::u2, _MMPFN::u3, UNLOCK_PFN, and WrPageOut.

00047 : 00048 00049 This function performs the shutdown of memory management. This 00050 is accomplished by writing out all modified pages which are 00051 destined for files other than the paging file. 00052 00053 Arguments: 00054 00055 None. 00056 00057 Return Value: 00058 00059 TRUE if the pages were successfully written, FALSE otherwise. 00060 00061 --*/ 00062 00063 { 00064 PFN_NUMBER ModifiedPage; 00065 PMMPFN Pfn1; 00066 PSUBSECTION Subsection; 00067 PCONTROL_AREA ControlArea; 00068 PPFN_NUMBER Page; 00069 PFN_NUMBER MdlHack[(sizeof(MDL)/sizeof(PFN_NUMBER)) + MM_MAXIMUM_WRITE_CLUSTER]; 00070 PMDL Mdl; 00071 NTSTATUS Status; 00072 KEVENT IoEvent; 00073 IO_STATUS_BLOCK IoStatus; 00074 KIRQL OldIrql; 00075 LARGE_INTEGER StartingOffset; 00076 ULONG count; 00077 PFN_NUMBER j; 00078 ULONG k; 00079 PFN_NUMBER first; 00080 ULONG write; 00081 PMMPAGING_FILE PagingFile; 00082 00083 // 00084 // Don't do this more than once. 00085 // 00086 00087 if (!MmSystemShutdown) { 00088 00089 MmLockPagableSectionByHandle(ExPageLockHandle); 00090 00091 Mdl = (PMDL)&MdlHack; 00092 Page = (PPFN_NUMBER)(Mdl + 1); 00093 00094 KeInitializeEvent (&IoEvent, NotificationEvent, FALSE); 00095 00096 MmInitializeMdl(Mdl, 00097 NULL, 00098 PAGE_SIZE); 00099 00100 Mdl->MdlFlags |= MDL_PAGES_LOCKED; 00101 00102 LOCK_PFN (OldIrql); 00103 00104 ModifiedPage = MmModifiedPageListHead.Flink; 00105 while (ModifiedPage != MM_EMPTY_LIST) { 00106 00107 // 00108 // There are modified pages. 00109 // 00110 00111 Pfn1 = MI_PFN_ELEMENT (ModifiedPage); 00112 00113 if (Pfn1->OriginalPte.u.Soft.Prototype == 1) { 00114 00115 // 00116 // This page is destined for a file. 00117 // 00118 00119 Subsection = MiGetSubsectionAddress (&Pfn1->OriginalPte); 00120 ControlArea = Subsection->ControlArea; 00121 if ((!ControlArea->u.Flags.Image) && 00122 (!ControlArea->u.Flags.NoModifiedWriting)) { 00123 00124 MiUnlinkPageFromList (Pfn1); 00125 00126 // 00127 // Issue the write. 00128 // 00129 00130 Pfn1->u3.e1.Modified = 0; 00131 00132 // 00133 // Up the reference count for the physical page as there 00134 // is I/O in progress. 00135 // 00136 00137 MI_ADD_LOCKED_PAGE_CHARGE_FOR_MODIFIED_PAGE (Pfn1, 26); 00138 Pfn1->u3.e2.ReferenceCount += 1; 00139 00140 *Page = ModifiedPage; 00141 ControlArea->NumberOfMappedViews += 1; 00142 ControlArea->NumberOfPfnReferences += 1; 00143 00144 UNLOCK_PFN (OldIrql); 00145 00146 StartingOffset.QuadPart = MiStartingOffset (Subsection, 00147 Pfn1->PteAddress); 00148 00149 Mdl->StartVa = (PVOID)ULongToPtr(Pfn1->u3.e1.PageColor << PAGE_SHIFT); 00150 KeClearEvent (&IoEvent); 00151 Status = IoSynchronousPageWrite ( 00152 ControlArea->FilePointer, 00153 Mdl, 00154 &StartingOffset, 00155 &IoEvent, 00156 &IoStatus ); 00157 00158 // 00159 // Ignore all I/O failures - there is nothing that can be 00160 // done at this point. 00161 // 00162 00163 if (!NT_SUCCESS(Status)) { 00164 KeSetEvent (&IoEvent, 0, FALSE); 00165 } 00166 00167 Status = KeWaitForSingleObject (&IoEvent, 00168 WrPageOut, 00169 KernelMode, 00170 FALSE, 00171 (PLARGE_INTEGER)&MmTwentySeconds); 00172 00173 if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) { 00174 MmUnmapLockedPages (Mdl->MappedSystemVa, Mdl); 00175 } 00176 00177 if (Status == STATUS_TIMEOUT) { 00178 00179 // 00180 // The write did not complete in 20 seconds, assume 00181 // that the file systems are hung and return an 00182 // error. 
00183 // 00184 00185 LOCK_PFN (OldIrql); 00186 Pfn1->u3.e1.Modified = 1; 00187 MI_REMOVE_LOCKED_PAGE_CHARGE (Pfn1, 27); 00188 MiDecrementReferenceCount (ModifiedPage); 00189 ControlArea->NumberOfMappedViews -= 1; 00190 ControlArea->NumberOfPfnReferences -= 1; 00191 00192 // 00193 // This routine returns with the PFN lock released! 00194 // 00195 00196 MiCheckControlArea (ControlArea, NULL, OldIrql); 00197 00198 MmUnlockPagableImageSection(ExPageLockHandle); 00199 00200 return FALSE; 00201 } 00202 00203 LOCK_PFN (OldIrql); 00204 MI_REMOVE_LOCKED_PAGE_CHARGE (Pfn1, 27); 00205 MiDecrementReferenceCount (ModifiedPage); 00206 ControlArea->NumberOfMappedViews -= 1; 00207 ControlArea->NumberOfPfnReferences -= 1; 00208 00209 // 00210 // This routine returns with the PFN lock released! 00211 // 00212 00213 MiCheckControlArea (ControlArea, NULL, OldIrql); 00214 LOCK_PFN (OldIrql); 00215 00216 // 00217 // Restart scan at the front of the list. 00218 // 00219 00220 ModifiedPage = MmModifiedPageListHead.Flink; 00221 continue; 00222 } 00223 } 00224 ModifiedPage = Pfn1->u1.Flink; 00225 } 00226 00227 UNLOCK_PFN (OldIrql); 00228 00229 // 00230 // If a high number of modified pages still exist, start the 00231 // modified page writer and wait for 5 seconds. 00232 // 00233 00234 if (MmAvailablePages < (MmFreeGoal * 2)) { 00235 LARGE_INTEGER FiveSeconds = {(ULONG)(-5 * 1000 * 1000 * 10), -1}; 00236 00237 KeSetEvent (&MmModifiedPageWriterEvent, 0, FALSE); 00238 KeDelayExecutionThread (KernelMode, 00239 FALSE, 00240 (PLARGE_INTEGER)&FiveSeconds); 00241 } 00242 00243 // 00244 // Indicate to the modified page writer that the system has 00245 // shutdown. 00246 // 00247 00248 MmSystemShutdown = 1; 00249 00250 // 00251 // Check to see if the paging file should be overwritten. 00252 // Only free blocks are written. 00253 // 00254 00255 if (MmZeroPageFile) { 00256 00257 // 00258 // Get pages to complete the write request. 00259 // 00260 00261 Mdl->StartVa = NULL; 00262 j = 0; 00263 k = 0; 00264 Page = (PPFN_NUMBER)(Mdl + 1); 00265 00266 LOCK_PFN (OldIrql); 00267 00268 if (MmAvailablePages < (MmModifiedWriteClusterSize + 20)) { 00269 UNLOCK_PFN(OldIrql); 00270 MmUnlockPagableImageSection(ExPageLockHandle); 00271 return TRUE; 00272 } 00273 00274 do { 00275 *Page = MiRemoveZeroPage ((ULONG)j); 00276 Pfn1 = MI_PFN_ELEMENT (*Page); 00277 Pfn1->u3.e2.ReferenceCount = 1; 00278 ASSERT (Pfn1->u2.ShareCount == 0); 00279 Pfn1->OriginalPte.u.Long = 0; 00280 MI_SET_PFN_DELETED (Pfn1); 00281 Page += 1; 00282 j += 1; 00283 } while (j < MmModifiedWriteClusterSize); 00284 00285 while (k < MmNumberOfPagingFiles) { 00286 00287 PagingFile = MmPagingFile[k]; 00288 00289 count = 0; 00290 write = FALSE; 00291 00292 for (j = 1; j < PagingFile->Size; j += 1) { 00293 00294 if (RtlCheckBit (PagingFile->Bitmap, j) == 0) { 00295 00296 if (count == 0) { 00297 first = j; 00298 } 00299 count += 1; 00300 if (count == MmModifiedWriteClusterSize) { 00301 write = TRUE; 00302 } 00303 } else { 00304 if (count != 0) { 00305 00306 // 00307 // Issue a write. 
00308 // 00309 00310 write = TRUE; 00311 } 00312 } 00313 00314 if ((j == (PagingFile->Size - 1)) && 00315 (count != 0)) { 00316 write = TRUE; 00317 } 00318 00319 if (write) { 00320 00321 UNLOCK_PFN (OldIrql); 00322 00323 StartingOffset.QuadPart = (LONGLONG)first << PAGE_SHIFT; 00324 Mdl->ByteCount = count << PAGE_SHIFT; 00325 KeClearEvent (&IoEvent); 00326 00327 Status = IoSynchronousPageWrite (PagingFile->File, 00328 Mdl, 00329 &StartingOffset, 00330 &IoEvent, 00331 &IoStatus); 00332 00333 // 00334 // Ignore all I/O failures - there is nothing that can 00335 // be done at this point. 00336 // 00337 00338 if (!NT_SUCCESS(Status)) { 00339 KeSetEvent (&IoEvent, 0, FALSE); 00340 } 00341 00342 Status = KeWaitForSingleObject (&IoEvent, 00343 WrPageOut, 00344 KernelMode, 00345 FALSE, 00346 (PLARGE_INTEGER)&MmTwentySeconds); 00347 00348 if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) { 00349 MmUnmapLockedPages (Mdl->MappedSystemVa, Mdl); 00350 } 00351 00352 if (Status == STATUS_TIMEOUT) { 00353 00354 // 00355 // The write did not complete in 20 seconds, assume 00356 // that the file systems are hung and return an 00357 // error. 00358 // 00359 00360 j = 0; 00361 Page = (PPFN_NUMBER)(Mdl + 1); 00362 LOCK_PFN (OldIrql); 00363 do { 00364 MiDecrementReferenceCount (*Page); 00365 Page += 1; 00366 j += 1; 00367 } while (j < MmModifiedWriteClusterSize); 00368 UNLOCK_PFN (OldIrql); 00369 00370 MmUnlockPagableImageSection(ExPageLockHandle); 00371 return FALSE; 00372 } 00373 00374 count = 0; 00375 write = FALSE; 00376 LOCK_PFN (OldIrql); 00377 } 00378 } 00379 k += 1; 00380 } 00381 j = 0; 00382 Page = (PPFN_NUMBER)(Mdl + 1); 00383 do { 00384 MiDecrementReferenceCount (*Page); 00385 Page += 1; 00386 j += 1; 00387 } while (j < MmModifiedWriteClusterSize); 00388 UNLOCK_PFN (OldIrql); 00389 } 00390 MmUnlockPagableImageSection(ExPageLockHandle); 00391 } 00392 return TRUE; 00393 } }

NTKERNELAPI SIZE_T MmSizeOfMdl ( IN PVOID Base, IN SIZE_T Length )
 

Definition at line 5815 of file iosup.c.

References ADDRESS_AND_SIZE_TO_SPAN_PAGES.

Referenced by ExLockUserBuffer(), MiCreateImageFileMap(), MmCreateMdl(), and NtStartProfile().

05822 : 05823 05824 This function returns the number of bytes required for an MDL for a 05825 given buffer and size. 05826 05827 Arguments: 05828 05829 Base - Supplies the base virtual address for the buffer. 05830 05831 Length - Supplies the size of the buffer in bytes. 05832 05833 Return Value: 05834 05835 Returns the number of bytes required to contain the MDL. 05836 05837 Environment: 05838 05839 Kernel mode. Any IRQL level. 05840 05841 --*/ 05842 05843 { 05844 return( sizeof( MDL ) + 05845 (ADDRESS_AND_SIZE_TO_SPAN_PAGES( Base, Length ) * 05846 sizeof( PFN_NUMBER )) 05847 ); 05848 }
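
A minimal sketch (hypothetical buffer, pool tag and error handling elided) showing the usual pairing with MmInitializeMdl: size the MDL for a buffer, allocate it from nonpaged pool, then initialize it.

    SIZE_T MdlBytes;
    PMDL Mdl;

    MdlBytes = MmSizeOfMdl (Buffer, Length);

    Mdl = ExAllocatePoolWithTag (NonPagedPool, MdlBytes, 'ldMp');

    if (Mdl != NULL) {
        MmInitializeMdl (Mdl, Buffer, Length);
    }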

ULONG MmSizeOfTriageInformation ( VOID )
 

Referenced by IopWriteTriageDump().

ULONG MmSizeOfUnloadedDriverInformation ( VOID )
 

Referenced by IopWriteTriageDump().

LOGICAL MmTrimAllSystemPagableMemory ( IN LOGICAL PurgeTransition )
 

Definition at line 2986 of file wsmanage.c.

References APC_LEVEL, ASSERT, ExReleaseResourceLite(), ExTryToAcquireResourceExclusiveLite(), FALSE, KeLowerIrql(), KeTryToAcquireSpinLock(), _MMWORKING_SET_EXPANSION_HEAD::ListHead, LOCK_EXPANSION, LOCK_PFN, MI_PFN_ELEMENT, MI_SET_PFN_DELETED, MiDecrementReferenceCount(), MiEmptyWorkingSet(), MiRemovePageFromList(), MiTrimAllPageFaultCount, MiTrimInProgressCount, MM_NO_WS_EXPANSION, MM_SET_EXPANSION_OWNER, MM_WS_EXPANSION_IN_PROGRESS, MmExpansionLock, MmStandbyPageListHead, MmSystemCacheWs, MmSystemLockOwner, MmSystemWsLock, MmWorkingSetExpansionHead, NULL, _MMPFN::OriginalPte, _MMSUPPORT::PageFaultCount, PsGetCurrentThread, _MMPFNLIST::Total, TRUE, _MMSUPPORT::u, _MMPFN::u2, _MMPFN::u3, UNLOCK_EXPANSION, UNLOCK_PFN, _MMSUPPORT::WorkingSetExpansionLinks, _MMSUPPORT::WorkingSetSize, WSLE_NUMBER, and ZeroPte.

Referenced by ViTrimAllSystemPagableMemory().

02992 : 02993 02994 This routine unmaps all pagable system memory. This does not unmap user 02995 memory or locked down kernel memory. Thus, the memory being unmapped 02996 resides in paged pool, pagable kernel/driver code & data, special pool 02997 and the system cache. 02998 02999 Note that pages with a reference count greater than 1 are skipped (ie: 03000 they remain valid, as they are assumed to be locked down). This prevents 03001 us from unmapping all of the system cache entries, etc. 03002 03003 Non-locked down kernel stacks must be outpaged by modifying the balance 03004 set manager to operate in conjunction with a support routine. This is not 03005 done here. 03006 03007 Arguments: 03008 03009 PurgeTransition - Supplies whether to purge all the clean pages from the 03010 transition list. 03011 03012 Return Value: 03013 03014 TRUE if accomplished, FALSE if not. 03015 03016 Environment: 03017 03018 Kernel mode. APC_LEVEL or below. 03019 03020 --*/ 03021 03022 { 03023 KIRQL OldIrql; 03024 KIRQL OldIrql2; 03025 PLIST_ENTRY Next; 03026 PMMSUPPORT VmSupport; 03027 WSLE_NUMBER PagesInUse; 03028 PFN_NUMBER PageFrameIndex; 03029 LOGICAL LockAvailable; 03030 PMMPFN Pfn1; 03031 PETHREAD CurrentThread; 03032 ULONG flags; 03033 03034 // 03035 // It's ok to check this without acquiring the system WS lock. 03036 // 03037 03038 if (MiTrimAllPageFaultCount == MmSystemCacheWs.PageFaultCount) { 03039 return FALSE; 03040 } 03041 03042 // 03043 // Working set mutexes will be acquired which require APC_LEVEL or below. 03044 // 03045 03046 if (KeGetCurrentIrql() > APC_LEVEL) { 03047 return FALSE; 03048 } 03049 03050 // 03051 // Just return if it's too early during system initialization or if 03052 // another thread/processor is racing here to do the work for us. 03053 // 03054 03055 if (InterlockedIncrement (&MiTrimInProgressCount) > 1) { 03056 InterlockedDecrement (&MiTrimInProgressCount); 03057 return FALSE; 03058 } 03059 03060 #if defined(_X86_) 03061 03062 _asm { 03063 pushfd 03064 pop flags 03065 } 03066 03067 if ((flags & EFLAGS_INTERRUPT_MASK) == 0) { 03068 InterlockedDecrement (&MiTrimInProgressCount); 03069 return FALSE; 03070 } 03071 03072 #endif 03073 03074 LockAvailable = KeTryToAcquireSpinLock (&MmExpansionLock, &OldIrql); 03075 03076 if (LockAvailable == FALSE) { 03077 InterlockedDecrement (&MiTrimInProgressCount); 03078 return FALSE; 03079 } 03080 03081 MM_SET_EXPANSION_OWNER (); 03082 03083 CurrentThread = PsGetCurrentThread(); 03084 03085 // 03086 // If the system cache resource is owned by this thread then don't bother 03087 // trying to trim now. Note that checking the MmSystemLockOwner is not 03088 // sufficient as this flag is cleared just before actually releasing it. 
03089 // 03090 03091 if ((CurrentThread == MmSystemLockOwner) || 03092 (ExTryToAcquireResourceExclusiveLite(&MmSystemWsLock) == FALSE)) { 03093 UNLOCK_EXPANSION (OldIrql); 03094 InterlockedDecrement (&MiTrimInProgressCount); 03095 return FALSE; 03096 } 03097 03098 Next = MmWorkingSetExpansionHead.ListHead.Flink; 03099 03100 while (Next != &MmWorkingSetExpansionHead.ListHead) { 03101 if (Next == &MmSystemCacheWs.WorkingSetExpansionLinks) { 03102 break; 03103 } 03104 Next = Next->Flink; 03105 } 03106 03107 if (Next != &MmSystemCacheWs.WorkingSetExpansionLinks) { 03108 ExReleaseResourceLite(&MmSystemWsLock); 03109 UNLOCK_EXPANSION (OldIrql); 03110 InterlockedDecrement (&MiTrimInProgressCount); 03111 return FALSE; 03112 } 03113 03114 RemoveEntryList (Next); 03115 03116 VmSupport = &MmSystemCacheWs; 03117 VmSupport->WorkingSetExpansionLinks.Flink = MM_NO_WS_EXPANSION; 03118 VmSupport->WorkingSetExpansionLinks.Blink = MM_WS_EXPANSION_IN_PROGRESS; 03119 ASSERT (VmSupport->u.Flags.BeingTrimmed == 0); 03120 VmSupport->u.Flags.BeingTrimmed = 1; 03121 03122 MiTrimAllPageFaultCount = VmSupport->PageFaultCount; 03123 03124 PagesInUse = VmSupport->WorkingSetSize; 03125 03126 // 03127 // There are 2 issues here that are carefully dealt with : 03128 // 03129 // 1. APCs must be disabled while any resources are held to prevent 03130 // suspend APCs from deadlocking the system. 03131 // 2. Once the system cache has been marked MM_WS_EXPANSION_IN_PROGRESS, 03132 // either the thread must not be preempted or the system cache working 03133 // set lock must be held throughout. Otherwise a high priority thread 03134 // can fault on a system code and data address and the two pages will 03135 // thrash forever (at high priority) because no system working set 03136 // expansion is allowed while MM_WS_EXPANSION_IN_PROGRESS is set. 03137 // The decision was to hold the system working set lock throughout. 03138 // 03139 03140 MmSystemLockOwner = PsGetCurrentThread (); 03141 03142 UNLOCK_EXPANSION (APC_LEVEL); 03143 03144 MiEmptyWorkingSet (VmSupport, FALSE); 03145 03146 LOCK_EXPANSION (OldIrql2); 03147 ASSERT (OldIrql2 == APC_LEVEL); 03148 03149 ASSERT (VmSupport->WorkingSetExpansionLinks.Flink == MM_NO_WS_EXPANSION); 03150 03151 ASSERT (VmSupport->u.Flags.BeingTrimmed == 1); 03152 VmSupport->u.Flags.BeingTrimmed = 0; 03153 03154 ASSERT (VmSupport->WorkingSetExpansionLinks.Blink == 03155 MM_WS_EXPANSION_IN_PROGRESS); 03156 03157 InsertTailList (&MmWorkingSetExpansionHead.ListHead, 03158 &VmSupport->WorkingSetExpansionLinks); 03159 03160 UNLOCK_EXPANSION (APC_LEVEL); 03161 03162 // 03163 // Since MiEmptyWorkingSet will attempt to recursively acquire and release 03164 // the MmSystemWsLock, the MmSystemLockOwner field may get cleared. 03165 // This means here the resource must be explicitly released instead of 03166 // using UNLOCK_SYSTEM_WS. 03167 // 03168 03169 MmSystemLockOwner = NULL; 03170 ExReleaseResourceLite (&MmSystemWsLock); 03171 KeLowerIrql (OldIrql); 03172 ASSERT (KeGetCurrentIrql() <= APC_LEVEL); 03173 03174 if (PurgeTransition == TRUE) { 03175 03176 // 03177 // Run the transition list and free all the entries so transition 03178 // faults are not satisfied for any of the non modified pages that were 03179 // freed. 
03180 // 03181 03182 LOCK_PFN (OldIrql); 03183 03184 while (MmStandbyPageListHead.Total != 0) { 03185 03186 PageFrameIndex = MiRemovePageFromList (&MmStandbyPageListHead); 03187 03188 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 03189 03190 ASSERT (Pfn1->u2.ShareCount == 0); 03191 ASSERT (Pfn1->u3.e2.ReferenceCount == 0); 03192 03193 Pfn1->u3.e2.ReferenceCount += 1; 03194 Pfn1->OriginalPte = ZeroPte; 03195 Pfn1->u3.e1.Modified = 0; 03196 MI_SET_PFN_DELETED (Pfn1); 03197 03198 MiDecrementReferenceCount (PageFrameIndex); 03199 } 03200 03201 UNLOCK_PFN (OldIrql); 03202 } 03203 03204 InterlockedDecrement (&MiTrimInProgressCount); 03205 03206 return TRUE; 03207 } }
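
A minimal sketch of a verifier-style caller (hypothetical): empty all pagable system memory and purge the clean transition pages as well. The call must be made at APC_LEVEL or below; FALSE simply means no trim happened this time (too early in boot, wrong IRQL, or another trim already in progress).

    if (MmTrimAllSystemPagableMemory (TRUE) == FALSE) {
        //
        // Nothing was trimmed; try again later if desired.
        //
    }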

NTSTATUS MmUnloadSystemImage ( IN PVOID Section )
 

Definition at line 2896 of file sysload.c.

References ASSERT, _MM_SESSION_SPACE::CommittedPages, DbgPrint, ExAcquireResourceExclusive, ExFreePool(), ExPageLockHandle, ExpCheckForResource(), ExReleaseResource, FALSE, _IMAGE_ENTRY_IN_SESSION::ImageCountInThisSession, KeBugCheckEx(), KeCheckForTimer(), KeEnterCriticalRegion, KeLeaveCriticalRegion, KeReleaseMutant(), KernelMode, KeWaitForSingleObject(), LOADED_AT_BOOT, LOCK_PFN, LOCK_SESSION_SPACE_WS, MI_IS_SESSION_IMAGE_ADDRESS, MiActiveVerifierThunks, MiClearImports(), MiDeleteSystemPagableVm(), MiDereferenceImports(), MiGetPteAddress, MiReleaseSystemPtes(), MiRememberUnloadedDriver(), MiRemoveImageSessionWide(), MiReturnCommitment(), MiSessionLookupImage(), MiSessionWideGetImageSize(), MiVerifierCheckThunks(), MiVerifyingDriverUnloading(), MM_BUMP_COUNTER, MM_BUMP_SESS_COUNTER, MM_DBG_COMMIT_RETURN_DRIVER_UNLOAD, MM_DBG_COMMIT_RETURN_DRIVER_UNLOAD1, MM_DBG_SESSION_COMMIT_IMAGE_UNLOAD, MM_TRACK_COMMIT, MmDisablePagingExecutive, MmDriverCommit, MmLockPagableSectionByHandle(), MmResidentAvailablePages, MmSessionSpace, MmSnapUnloads, MmSystemLoadLock, MmTotalSystemDriverPages, MmUnlockPagableImageSection(), NT_SUCCESS, NTSTATUS(), NULL, ObDereferenceObject, PAGE_SHIFT, PERFINFO_IMAGE_UNLOAD, PLOAD_IMPORTS, PSECTION, PsLoadedModuleList, PsLoadedModuleResource, PsLoadedModuleSpinLock, RtlFreeAnsiString(), RtlUnicodeStringToAnsiString(), _MM_SESSION_SPACE::SessionId, Status, SystemPteSpace, TRUE, UNLOCK_PFN, UNLOCK_SESSION_SPACE_WS, WrVirtualMemory, and ZeroKernelPte.

Referenced by IoFreeDumpStack(), IopDeleteDriver(), IopLoadDriver(), MiCallDllUnloadAndUnloadDll(), MiLoadSystemImage(), MiResolveImageReferences(), MiSessionUnloadAllImages(), and NtSetSystemInformation().

02902 : 02903 02904 This routine unloads a previously loaded system image and returns 02905 the allocated resources. 02906 02907 Arguments: 02908 02909 ImageHandle - Supplies a pointer to the section object of the 02910 image to unload. 02911 02912 Return Value: 02913 02914 Various NTSTATUS codes. 02915 02916 --*/ 02917 02918 { 02919 PLDR_DATA_TABLE_ENTRY DataTableEntry; 02920 PMMPTE LastPte; 02921 PFN_NUMBER PagesRequired; 02922 PFN_NUMBER ResidentPages; 02923 PMMPTE PointerPte; 02924 PFN_NUMBER NumberOfPtes; 02925 KIRQL OldIrql; 02926 PVOID BasedAddress; 02927 SIZE_T NumberOfBytes; 02928 BOOLEAN MustFree; 02929 SIZE_T CommittedPages; 02930 BOOLEAN ViewDeleted; 02931 PIMAGE_ENTRY_IN_SESSION DriverImage; 02932 NTSTATUS Status; 02933 PVOID StillQueued; 02934 PSECTION SectionPointer; 02935 02936 // 02937 // Arbitrary process context so prevent suspend APCs now. 02938 // 02939 02940 KeEnterCriticalRegion(); 02941 02942 KeWaitForSingleObject (&MmSystemLoadLock, 02943 WrVirtualMemory, 02944 KernelMode, 02945 FALSE, 02946 (PLARGE_INTEGER)NULL); 02947 02948 MmLockPagableSectionByHandle(ExPageLockHandle); 02949 02950 ViewDeleted = FALSE; 02951 DataTableEntry = (PLDR_DATA_TABLE_ENTRY)ImageHandle; 02952 BasedAddress = DataTableEntry->DllBase; 02953 02954 #if DBGXX 02955 // 02956 // MiUnloadSystemImageByForce violates this check so remove it for now. 02957 // 02958 02959 if (PsLoadedModuleList.Flink) { 02960 LOGICAL Found; 02961 PLIST_ENTRY NextEntry; 02962 PLDR_DATA_TABLE_ENTRY DataTableEntry2; 02963 02964 Found = FALSE; 02965 NextEntry = PsLoadedModuleList.Flink; 02966 while (NextEntry != &PsLoadedModuleList) { 02967 02968 DataTableEntry2 = CONTAINING_RECORD(NextEntry, 02969 LDR_DATA_TABLE_ENTRY, 02970 InLoadOrderLinks); 02971 if (DataTableEntry == DataTableEntry2) { 02972 Found = TRUE; 02973 break; 02974 } 02975 NextEntry = NextEntry->Flink; 02976 } 02977 ASSERT (Found == TRUE); 02978 } 02979 #endif 02980 02981 #if DBG_SYSLOAD 02982 if (DataTableEntry->SectionPointer == NULL) { 02983 DbgPrint ("MM: Called to unload boot driver %wZ\n", 02984 &DataTableEntry->FullDllName); 02985 } 02986 else { 02987 DbgPrint ("MM: Called to unload non-boot driver %wZ\n", 02988 &DataTableEntry->FullDllName); 02989 } 02990 #endif 02991 02992 // 02993 // Any driver loaded at boot that did not have its import list 02994 // and LoadCount reconstructed cannot be unloaded because we don't 02995 // know how many other drivers may be linked to it. 02996 // 02997 02998 if (DataTableEntry->LoadedImports == (PVOID)LOADED_AT_BOOT) { 02999 MmUnlockPagableImageSection(ExPageLockHandle); 03000 KeReleaseMutant (&MmSystemLoadLock, 1, FALSE, FALSE); 03001 KeLeaveCriticalRegion(); 03002 return STATUS_SUCCESS; 03003 } 03004 03005 ASSERT (DataTableEntry->LoadCount != 0); 03006 03007 if (MI_IS_SESSION_IMAGE_ADDRESS (BasedAddress)) { 03008 03009 // 03010 // A printer driver may be referenced multiple times for the 03011 // same session space. Only unload the last reference. 
03012 // 03013 03014 DriverImage = MiSessionLookupImage (BasedAddress); 03015 03016 ASSERT (DriverImage); 03017 03018 ASSERT (DriverImage->ImageCountInThisSession); 03019 03020 if (DriverImage->ImageCountInThisSession > 1) { 03021 03022 DriverImage->ImageCountInThisSession -= 1; 03023 MmUnlockPagableImageSection(ExPageLockHandle); 03024 KeReleaseMutant (&MmSystemLoadLock, 1, FALSE, FALSE); 03025 KeLeaveCriticalRegion(); 03026 03027 return STATUS_SUCCESS; 03028 } 03029 03030 // 03031 // The reference count for this image has dropped to zero in this 03032 // session, so we can delete this session's view of the image. 03033 // 03034 03035 Status = MiSessionWideGetImageSize (BasedAddress, 03036 &NumberOfBytes, 03037 &CommittedPages); 03038 03039 if (!NT_SUCCESS(Status)) { 03040 03041 KeBugCheckEx (MEMORY_MANAGEMENT, 03042 0x41286, 03043 (ULONG_PTR)MmSessionSpace->SessionId, 03044 (ULONG_PTR)BasedAddress, 03045 0); 03046 } 03047 03048 // 03049 // Free the session space taken up by the image, unmapping it from 03050 // the current VA space - note this does not remove page table pages 03051 // from the session PageTables[]. Each data page is only freed 03052 // if there are no other references to it (ie: from any other 03053 // sessions). 03054 // 03055 03056 PointerPte = MiGetPteAddress (BasedAddress); 03057 LastPte = MiGetPteAddress ((ULONG_PTR)BasedAddress + NumberOfBytes); 03058 03059 PagesRequired = MiDeleteSystemPagableVm (PointerPte, 03060 (PFN_NUMBER)(LastPte - PointerPte), 03061 ZeroKernelPte, 03062 TRUE, 03063 &ResidentPages); 03064 03065 if (MmDisablePagingExecutive == 0) { 03066 03067 SectionPointer = (PSECTION)DataTableEntry->SectionPointer; 03068 03069 if ((SectionPointer == NULL) || 03070 (SectionPointer == (PVOID)-1) || 03071 (SectionPointer->Segment == NULL) || 03072 (SectionPointer->Segment->BasedAddress != SectionPointer->Segment->SystemImageBase)) { 03073 03074 MmTotalSystemDriverPages -= (ULONG)(PagesRequired - ResidentPages); 03075 } 03076 } 03077 03078 LOCK_SESSION_SPACE_WS (OldIrql); 03079 MmSessionSpace->CommittedPages -= CommittedPages; 03080 03081 MM_BUMP_SESS_COUNTER(MM_DBG_SESSION_COMMIT_IMAGE_UNLOAD, 03082 CommittedPages); 03083 03084 UNLOCK_SESSION_SPACE_WS (OldIrql); 03085 03086 ViewDeleted = TRUE; 03087 03088 // 03089 // Return the commitment we took out on the pagefile when the 03090 // image was allocated. 03091 // 03092 03093 MiReturnCommitment (CommittedPages); 03094 MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_DRIVER_UNLOAD, CommittedPages); 03095 03096 // 03097 // Tell the session space image handler that we are releasing 03098 // our claim to the image. 
03099 // 03100 03101 Status = MiRemoveImageSessionWide (BasedAddress); 03102 03103 ASSERT (NT_SUCCESS (Status)); 03104 } 03105 03106 ASSERT (DataTableEntry->LoadCount != 0); 03107 03108 DataTableEntry->LoadCount -= 1; 03109 03110 if (DataTableEntry->LoadCount != 0) { 03111 MmUnlockPagableImageSection(ExPageLockHandle); 03112 KeReleaseMutant (&MmSystemLoadLock, 1, FALSE, FALSE); 03113 KeLeaveCriticalRegion(); 03114 return STATUS_SUCCESS; 03115 } 03116 03117 #if DBG 03118 if (MI_IS_SESSION_IMAGE_ADDRESS (BasedAddress)) { 03119 ASSERT (MiSessionLookupImage (BasedAddress) == NULL); 03120 } 03121 #endif 03122 03123 if (MmSnapUnloads) { 03124 #if 0 03125 StillQueued = KeCheckForTimer (DataTableEntry->DllBase, 03126 DataTableEntry->SizeOfImage); 03127 03128 if (StillQueued != NULL) { 03129 KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION, 03130 0x18, 03131 (ULONG_PTR)StillQueued, 03132 (ULONG_PTR)-1, 03133 (ULONG_PTR)DataTableEntry->DllBase); 03134 } 03135 03136 StillQueued = ExpCheckForResource (DataTableEntry->DllBase, 03137 DataTableEntry->SizeOfImage); 03138 03139 if (StillQueued != NULL) { 03140 KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION, 03141 0x19, 03142 (ULONG_PTR)StillQueued, 03143 (ULONG_PTR)-1, 03144 (ULONG_PTR)DataTableEntry->DllBase); 03145 } 03146 #endif 03147 } 03148 03149 if (DataTableEntry->Flags & LDRP_IMAGE_VERIFYING) { 03150 MiVerifyingDriverUnloading (DataTableEntry); 03151 } 03152 03153 if (MiActiveVerifierThunks != 0) { 03154 MiVerifierCheckThunks (DataTableEntry); 03155 } 03156 03157 // 03158 // Unload symbols from debugger. 03159 // 03160 03161 if (DataTableEntry->Flags & LDRP_DEBUG_SYMBOLS_LOADED) { 03162 03163 // 03164 // TEMP TEMP TEMP rip out when debugger converted 03165 // 03166 03167 ANSI_STRING AnsiName; 03168 NTSTATUS Status; 03169 03170 Status = RtlUnicodeStringToAnsiString( &AnsiName, 03171 &DataTableEntry->BaseDllName, 03172 TRUE ); 03173 03174 if (NT_SUCCESS( Status)) { 03175 DbgUnLoadImageSymbols( &AnsiName, 03176 BasedAddress, 03177 (ULONG)-1); 03178 RtlFreeAnsiString( &AnsiName ); 03179 } 03180 } 03181 03182 // 03183 // No unload can happen till after Mm has finished Phase 1 initialization. 03184 // Therefore, large pages are already in effect (if this platform supports 03185 // it). 03186 // 03187 03188 if (ViewDeleted == FALSE) { 03189 03190 NumberOfPtes = DataTableEntry->SizeOfImage >> PAGE_SHIFT; 03191 03192 if (MmSnapUnloads) { 03193 MiRememberUnloadedDriver (&DataTableEntry->BaseDllName, 03194 BasedAddress, 03195 (ULONG)(NumberOfPtes << PAGE_SHIFT)); 03196 } 03197 03198 if (DataTableEntry->Flags & LDRP_SYSTEM_MAPPED) { 03199 03200 PointerPte = MiGetPteAddress (BasedAddress); 03201 03202 PagesRequired = MiDeleteSystemPagableVm (PointerPte, 03203 NumberOfPtes, 03204 ZeroKernelPte, 03205 FALSE, 03206 &ResidentPages); 03207 03208 MmTotalSystemDriverPages -= (ULONG)(PagesRequired - ResidentPages); 03209 03210 // 03211 // Note that drivers loaded at boot that have not been relocated 03212 // have no system PTEs or commit charged. 03213 // 03214 03215 MiReleaseSystemPtes (PointerPte, 03216 (ULONG)NumberOfPtes, 03217 SystemPteSpace); 03218 03219 LOCK_PFN (OldIrql); 03220 MmResidentAvailablePages += ResidentPages; 03221 MM_BUMP_COUNTER(21, ResidentPages); 03222 UNLOCK_PFN (OldIrql); 03223 03224 // 03225 // Only return commitment for drivers that weren't loaded by the 03226 // boot loader. 
03227 // 03228 03229 if (DataTableEntry->SectionPointer != NULL) { 03230 MiReturnCommitment (PagesRequired); 03231 MM_TRACK_COMMIT (MM_DBG_COMMIT_RETURN_DRIVER_UNLOAD1, PagesRequired); 03232 MmDriverCommit -= (ULONG)PagesRequired; 03233 } 03234 } 03235 else { 03236 03237 // 03238 // This must be a boot driver that was not relocated into 03239 // system PTEs. If large or super pages are enabled, the 03240 // image pages must be freed without referencing the 03241 // non-existent page table pages. If large/super pages are 03242 // not enabled, note that system PTEs were not used to map the 03243 // image and thus, cannot be freed. 03244 03245 // 03246 // This is further complicated by the fact that the INIT and/or 03247 // discardable portions of these images may have already been freed. 03248 // 03249 } 03250 } 03251 03252 // 03253 // Search the loaded module list for the data table entry that describes 03254 // the DLL that was just unloaded. It is possible an entry is not in the 03255 // list if a failure occurred at a point in loading the DLL just before 03256 // the data table entry was generated. 03257 // 03258 03259 if (DataTableEntry->InLoadOrderLinks.Flink != NULL) { 03260 KeEnterCriticalRegion(); 03261 ExAcquireResourceExclusive (&PsLoadedModuleResource, TRUE); 03262 03263 ExAcquireSpinLock (&PsLoadedModuleSpinLock, &OldIrql); 03264 03265 RemoveEntryList(&DataTableEntry->InLoadOrderLinks); 03266 ExReleaseSpinLock (&PsLoadedModuleSpinLock, OldIrql); 03267 03268 ExReleaseResource (&PsLoadedModuleResource); 03269 KeLeaveCriticalRegion(); 03270 03271 MustFree = TRUE; 03272 } 03273 else { 03274 MustFree = FALSE; 03275 } 03276 03277 // 03278 // Handle unloading of any dependent DLLs that we loaded automatically 03279 // for this image. 03280 // 03281 03282 MiDereferenceImports ((PLOAD_IMPORTS)DataTableEntry->LoadedImports); 03283 03284 MiClearImports (DataTableEntry); 03285 03286 // 03287 // Free this loader entry. 03288 // 03289 03290 if (MustFree == TRUE) { 03291 03292 if (DataTableEntry->FullDllName.Buffer != NULL) { 03293 ExFreePool (DataTableEntry->FullDllName.Buffer); 03294 } 03295 03296 if (DataTableEntry->BaseDllName.Buffer != NULL) { 03297 ExFreePool (DataTableEntry->BaseDllName.Buffer); 03298 } 03299 03300 // 03301 // Dereference the section object if there is one. 03302 // There should only be one for win32k.sys and Hydra session images. 03303 // 03304 03305 if ((DataTableEntry->SectionPointer != NULL) && 03306 (DataTableEntry->SectionPointer != (PVOID)-1)) { 03307 03308 ObDereferenceObject (DataTableEntry->SectionPointer); 03309 } 03310 03311 ExFreePool((PVOID)DataTableEntry); 03312 } 03313 03314 MmUnlockPagableImageSection(ExPageLockHandle); 03315 03316 KeReleaseMutant (&MmSystemLoadLock, 1, FALSE, FALSE); 03317 KeLeaveCriticalRegion(); 03318 03319 PERFINFO_IMAGE_UNLOAD(BasedAddress); 03320 03321 return STATUS_SUCCESS; 03322 }
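
A minimal sketch of an unload caller, with DriverSection standing in (as an assumption) for the handle saved when the image was loaded. No load/unload serialization is taken here because the routine acquires MmSystemLoadLock itself.

    NTSTATUS Status;

    Status = MmUnloadSystemImage (DriverSection);

    ASSERT (NT_SUCCESS (Status));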

VOID MmUnlockCachedPage ( IN PVOID AddressInCache )
 

Definition at line 1833 of file mapcache.c.

References ASSERT, KeBugCheckEx(), LOCK_PFN, MI_PFN_ELEMENT, MI_REMOVE_LOCKED_PAGE_CHARGE, MiGetPteAddress, _MMPTE::u, _MMPFN::u3, and UNLOCK_PFN.

Referenced by CcFreeActiveVacb().

01839 : 01840 01841 This routine unlocks a previous locked cached page. 01842 01843 Arguments: 01844 01845 AddressInCache - Supplies the address where the page was locked 01846 in the system cache. This must be the same 01847 address that MmCopyToCachedPage was called with. 01848 01849 Return Value: 01850 01851 None. 01852 01853 --*/ 01854 01855 { 01856 PMMPTE PointerPte; 01857 PMMPFN Pfn1; 01858 KIRQL OldIrql; 01859 01860 PointerPte = MiGetPteAddress (AddressInCache); 01861 01862 ASSERT (PointerPte->u.Hard.Valid == 1); 01863 Pfn1 = MI_PFN_ELEMENT (PointerPte->u.Hard.PageFrameNumber); 01864 01865 LOCK_PFN (OldIrql); 01866 01867 if (Pfn1->u3.e2.ReferenceCount <= 1) { 01868 KeBugCheckEx (MEMORY_MANAGEMENT, 01869 0x777, 01870 (ULONG_PTR)PointerPte->u.Hard.PageFrameNumber, 01871 Pfn1->u3.e2.ReferenceCount, 01872 (ULONG_PTR)AddressInCache); 01873 return; 01874 } 01875 01876 MI_REMOVE_LOCKED_PAGE_CHARGE(Pfn1, 25); 01877 Pfn1->u3.e2.ReferenceCount -= 1; 01878 01879 UNLOCK_PFN (OldIrql); 01880 return; 01881 } }

NTKERNELAPI VOID MmUnlockPagableImageSection ( IN PVOID ImageSectionHandle )
 

Definition at line 7065 of file iosup.c.

References ASSERT, FALSE, KePulseEvent(), LOCK_PFN2, MI_GET_PAGE_FRAME_FROM_PTE, MI_IS_PHYSICAL_ADDRESS, MI_PFN_ELEMENT, MI_REMOVE_LOCKED_PAGE_CHARGE, MiDecrementReferenceCount(), MiGetPteAddress, MmCollidedLockEvent, MmCollidedLockWait, MmLockedCode, SECTION_BASE_ADDRESS, _MMPTE::u, _MMPFN::u3, and UNLOCK_PFN2.

Referenced by ExpGetLockInformation(), ExpGetLookasideInformation(), ExpGetPoolInformation(), ExpGetProcessInformation(), IoUnregisterShutdownNotification(), Ke386ConfigureCyrixProcessor(), KeSetPhysicalCacheTypeRange(), KiAmdK6MtrrSetMemoryType(), MiEmptyAllWorkingSets(), MiFindContiguousMemory(), MiFreeInitializationCode(), MiLoadSystemImage(), MiMapViewInSystemSpace(), MiSetPagingOfDriver(), MiShareSessionImage(), MiUnmapLockedPagesInUserSpace(), MiUnmapViewInSystemSpace(), MmAdjustWorkingSetSize(), MmAllocateNonCachedMemory(), MmAllocatePagesForMdl(), MmFreeDriverInitialization(), MmFreeNonCachedMemory(), MmFreePagesFromMdl(), MmLockPagedPool(), MmMapViewOfSection(), MmResetDriverPaging(), MmShutdownSystem(), MmUnloadSystemImage(), MmUnlockPagedPool(), NtQueryVirtualMemory(), PspQueryPooledQuotaLimits(), PspQueryQuotaLimits(), PspQueryWorkingSetWatch(), PspSetQuotaLimits(), and SmbTraceStop().

07071 : 07072 07073 This function unlocks from memory, the pages locked by a preceding call to 07074 MmLockPagableDataSection. 07075 07076 Arguments: 07077 07078 ImageSectionHandle - Supplies the value returned by a previous call 07079 to MmLockPagableDataSection. 07080 07081 Return Value: 07082 07083 None. 07084 07085 --*/ 07086 07087 { 07088 PIMAGE_SECTION_HEADER NtSection; 07089 PMMPTE PointerPte; 07090 PMMPTE LastPte; 07091 PFN_NUMBER PageFrameIndex; 07092 PMMPFN Pfn1; 07093 KIRQL OldIrql; 07094 PVOID BaseAddress; 07095 ULONG SizeToUnlock; 07096 ULONG Collision; 07097 07098 if (MI_IS_PHYSICAL_ADDRESS(ImageSectionHandle)) { 07099 07100 // 07101 // No need to lock physical addresses. 07102 // 07103 07104 return; 07105 } 07106 07107 NtSection = (PIMAGE_SECTION_HEADER)ImageSectionHandle; 07108 07109 BaseAddress = SECTION_BASE_ADDRESS(NtSection); 07110 SizeToUnlock = NtSection->SizeOfRawData; 07111 07112 PointerPte = MiGetPteAddress(BaseAddress); 07113 LastPte = MiGetPteAddress((PCHAR)BaseAddress + SizeToUnlock - 1); 07114 07115 // 07116 // Address must be within the system cache. 07117 // 07118 07119 LOCK_PFN2 (OldIrql); 07120 07121 // 07122 // The NumberOfLinenumbers field is used to store the 07123 // lock count. 07124 // 07125 07126 ASSERT (NtSection->NumberOfLinenumbers >= 2); 07127 NtSection->NumberOfLinenumbers -= 1; 07128 07129 if (NtSection->NumberOfLinenumbers != 1) { 07130 UNLOCK_PFN2 (OldIrql); 07131 return; 07132 } 07133 07134 do { 07135 ASSERT (PointerPte->u.Hard.Valid == 1); 07136 07137 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 07138 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 07139 07140 ASSERT (Pfn1->u3.e2.ReferenceCount > 1); 07141 07142 MI_REMOVE_LOCKED_PAGE_CHARGE (Pfn1, 37); 07143 07144 MiDecrementReferenceCount (PageFrameIndex); 07145 07146 PointerPte += 1; 07147 07148 } while (PointerPte <= LastPte); 07149 07150 NtSection->NumberOfLinenumbers -= 1; 07151 ASSERT (NtSection->NumberOfLinenumbers == 0); 07152 Collision = MmCollidedLockWait; 07153 MmCollidedLockWait = FALSE; 07154 MmLockedCode -= SizeToUnlock; 07155 07156 UNLOCK_PFN2 (OldIrql); 07157 07158 if (Collision) { 07159 KePulseEvent (&MmCollidedLockEvent, 0, FALSE); 07160 } 07161 07162 return; 07163 }
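
A minimal sketch of the usual lock/unlock pairing, assuming the MmLockPagableDataSection interface mentioned above. SomePagableRoutine is a hypothetical function that resides in a pagable section; the handle it returns is what must be passed back here.

    PVOID LockHandle;

    LockHandle = MmLockPagableDataSection ((PVOID)SomePagableRoutine);

    // ... the section is now resident and may be referenced at raised IRQL ...

    MmUnlockPagableImageSection (LockHandle);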

NTKERNELAPI VOID MmUnlockPagedPool ( IN PVOID Address, IN SIZE_T Size )
 

Definition at line 7869 of file iosup.c.

References ASSERT, ExPageLockHandle, LOCK_PFN2, MI_GET_PAGE_FRAME_FROM_PTE, MI_PFN_ELEMENT, MI_REMOVE_LOCKED_PAGE_CHARGE, MiDecrementReferenceCount(), MiGetPteAddress, MmLockPagableSectionByHandle(), MmUnlockPagableImageSection(), _MMPTE::u, _MMPFN::u3, and UNLOCK_PFN2.

Referenced by Ke386SetDescriptorProcess(), and MiSetImageProtect().

07876 : 07877 07878 Unlocks paged pool that was locked with MmLockPagedPool. 07879 07880 Arguments: 07881 07882 Address - Supplies the address in paged pool to unlock. 07883 07884 Size - Supplies the size to unlock. 07885 07886 Return Value: 07887 07888 None. 07889 07890 Environment: 07891 07892 Kernel mode, IRQL of APC_LEVEL or below. 07893 07894 --*/ 07895 07896 { 07897 PMMPTE PointerPte; 07898 PMMPTE LastPte; 07899 KIRQL OldIrql; 07900 PFN_NUMBER PageFrameIndex; 07901 PMMPFN Pfn1; 07902 07903 MmLockPagableSectionByHandle(ExPageLockHandle); 07904 PointerPte = MiGetPteAddress (Address); 07905 LastPte = MiGetPteAddress ((PVOID)((PCHAR)Address + (Size - 1))); 07906 LOCK_PFN2 (OldIrql); 07907 07908 do { 07909 ASSERT (PointerPte->u.Hard.Valid == 1); 07910 07911 PageFrameIndex = MI_GET_PAGE_FRAME_FROM_PTE (PointerPte); 07912 Pfn1 = MI_PFN_ELEMENT (PageFrameIndex); 07913 07914 ASSERT (Pfn1->u3.e2.ReferenceCount > 1); 07915 07916 MI_REMOVE_LOCKED_PAGE_CHARGE (Pfn1, 35); 07917 07918 MiDecrementReferenceCount (PageFrameIndex); 07919 07920 PointerPte += 1; 07921 } while (PointerPte <= LastPte); 07922 07923 UNLOCK_PFN2 (OldIrql); 07924 MmUnlockPagableImageSection(ExPageLockHandle); 07925 return; 07926 }
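
A minimal sketch of the lock/unlock pairing, assuming MmLockPagedPool mirrors the signature above. PagedBuffer and BufferSize are hypothetical.

    MmLockPagedPool (PagedBuffer, BufferSize);

    // ... the paged-pool range stays resident while it is locked ...

    MmUnlockPagedPool (PagedBuffer, BufferSize);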

NTKERNELAPI VOID MmUnlockPages (IN PMDL MemoryDescriptorList)
 

NTKERNELAPI VOID MmUnmapIoSpace (IN PVOID BaseAddress, IN SIZE_T NumberOfBytes)
 

Definition at line 3678 of file iosup.c.

References ASSERT, COMPUTE_PAGES_SPANNED, MiGetPteAddress, MiInsertDeadPteTrackingBlock(), MiLockSystemSpace, MiReleaseSystemPtes(), MiRemovePteTracker(), MiUnlockSystemSpace, MmTrackPtes, NULL, PAGED_CODE, and SystemPteSpace.

Referenced by CmpFindACPITable(), CmpMatchAcpiCreatorIdRule(), CmpMatchAcpiCreatorRevisionRule(), CmpMatchAcpiOemIdRule(), CmpMatchAcpiOemRevisionRule(), CmpMatchAcpiOemTableIdRule(), CmpMatchAcpiRevisionRule(), DriverEntry(), MmFreeContiguousMemorySpecifyCache(), MmUnmapVideoDisplay(), and VerifierUnmapIoSpace().

03685 : 03686 03687 This function unmaps a range of physical address which were previously 03688 mapped via an MmMapIoSpace function call. 03689 03690 Arguments: 03691 03692 BaseAddress - Supplies the base virtual address where the physical 03693 address was previously mapped. 03694 03695 NumberOfBytes - Supplies the number of bytes which were mapped. 03696 03697 Return Value: 03698 03699 None. 03700 03701 Environment: 03702 03703 Kernel mode, Should be IRQL of APC_LEVEL or below, but unfortunately 03704 callers are coming in at DISPATCH_LEVEL and it's too late to change the 03705 rules now. This means you can never make this routine pagable. 03706 03707 --*/ 03708 03709 { 03710 PFN_NUMBER NumberOfPages; 03711 PMMPTE FirstPte; 03712 KIRQL OldIrql; 03713 PVOID PoolBlock; 03714 03715 PAGED_CODE(); 03716 ASSERT (NumberOfBytes != 0); 03717 NumberOfPages = COMPUTE_PAGES_SPANNED (BaseAddress, NumberOfBytes); 03718 FirstPte = MiGetPteAddress (BaseAddress); 03719 MiReleaseSystemPtes(FirstPte, (ULONG)NumberOfPages, SystemPteSpace); 03720 03721 if (MmTrackPtes != 0) { 03722 MiLockSystemSpace(OldIrql); 03723 03724 PoolBlock = MiRemovePteTracker (NULL, 03725 FirstPte, 03726 NumberOfPages); 03727 MiUnlockSystemSpace(OldIrql); 03728 03729 // 03730 // Can't free the pool block here because we may be getting called 03731 // from the fault path in MiWaitForInPageComplete holding the PFN 03732 // lock. Queue the block for later release. 03733 // 03734 03735 if (PoolBlock) { 03736 MiInsertDeadPteTrackingBlock (PoolBlock); 03737 } 03738 } 03739 03740 return; 03741 }
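The usual caller-side pattern is a map/unmap pair around the lifetime of a memory-mapped register block. This is a minimal sketch, assuming the translated physical address and length come from a CmResourceTypeMemory descriptor; the HYPO_EXTENSION type and routine names are placeholders.

    #include <ntddk.h>

    typedef struct _HYPO_EXTENSION {
        PVOID  Registers;            // virtual mapping of the register block
        SIZE_T RegisterLength;
    } HYPO_EXTENSION, *PHYPO_EXTENSION;

    NTSTATUS HypoMapRegisters (PHYPO_EXTENSION Ext,
                               PHYSICAL_ADDRESS Translated,
                               SIZE_T Length)
    {
        Ext->Registers = MmMapIoSpace (Translated, Length, MmNonCached);
        if (Ext->Registers == NULL) {
            return STATUS_INSUFFICIENT_RESOURCES;
        }
        Ext->RegisterLength = Length;
        return STATUS_SUCCESS;
    }

    VOID HypoUnmapRegisters (PHYPO_EXTENSION Ext)
    {
        if (Ext->Registers != NULL) {
            // Pass the same base and length that were mapped; the system PTEs
            // backing the mapping are released, and any PTE-tracking block is
            // queued for later freeing, as the listing above shows.
            MmUnmapIoSpace (Ext->Registers, Ext->RegisterLength);
            Ext->Registers = NULL;
        }
    }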

NTKERNELAPI VOID MmUnmapLockedPages (IN PVOID BaseAddress, IN PMDL MemoryDescriptorList)
 

Definition at line 3124 of file iosup.c.

References ASSERT, COMPUTE_PAGES_SPANNED, LOCK_PFN2, MDL_IO_SPACE, MDL_LOCK_HELD, MDL_MAPPED_TO_SYSTEM_VA, MDL_PARENT_MAPPED_SYSTEM_VA, MDL_PARTIAL_HAS_BEEN_MAPPED, MDL_PHYSICAL_VIEW, MI_GET_PAGE_FRAME_FROM_PTE, MI_IS_PHYSICAL_ADDRESS, MI_PFN_ELEMENT, MiGetPteAddress, MiInsertDeadPteTrackingBlock(), MiLockSystemSpace, MiReleaseSystemPtes(), MiRemovePteTracker(), MiUnlockSystemSpace, MiUnmapLockedPagesInUserSpace(), MmTrackPtes, SystemPteSpace, _MMPTE::u, _MMPFN::u3, and UNLOCK_PFN2.

Referenced by CcZeroData(), ExpProfileDelete(), MiCheckForCrashDump(), MiCleanSection(), MiCloneProcessAddressSpace(), MiCreateImageFileMap(), MiDoMappedCopy(), MiFlushSectionInternal(), MiMakeOutswappedPageResident(), MiWaitForInPageComplete(), MiWriteComplete(), MmShutdownSystem(), MmUnlockPages(), NtStartProfile(), NtStopProfile(), and VerifierUnmapLockedPages().

03131 : 03132 03133 This routine unmaps locked pages which were previously mapped via 03134 a MmMapLockedPages call. 03135 03136 Arguments: 03137 03138 BaseAddress - Supplies the base address where the pages were previously 03139 mapped. 03140 03141 MemoryDescriptorList - Supplies a valid Memory Descriptor List which has 03142 been updated by MmProbeAndLockPages. 03143 03144 Return Value: 03145 03146 None. 03147 03148 Environment: 03149 03150 Kernel mode. DISPATCH_LEVEL or below if base address is within 03151 system space; APC_LEVEL or below if base address is user space. 03152 03153 --*/ 03154 03155 { 03156 PFN_NUMBER NumberOfPages; 03157 PFN_NUMBER i; 03158 PPFN_NUMBER Page; 03159 PMMPTE PointerPte; 03160 PMMPTE PointerBase; 03161 PVOID StartingVa; 03162 KIRQL OldIrql; 03163 PVOID PoolBlock; 03164 03165 ASSERT (MemoryDescriptorList->ByteCount != 0); 03166 ASSERT ((MemoryDescriptorList->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0); 03167 03168 if (MI_IS_PHYSICAL_ADDRESS (BaseAddress)) { 03169 03170 // 03171 // MDL is not mapped into virtual space, just clear the fields 03172 // and return. 03173 // 03174 03175 MemoryDescriptorList->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA | 03176 MDL_PARTIAL_HAS_BEEN_MAPPED); 03177 return; 03178 } 03179 03180 if (BaseAddress > MM_HIGHEST_USER_ADDRESS) { 03181 03182 StartingVa = (PVOID)((PCHAR)MemoryDescriptorList->StartVa + 03183 MemoryDescriptorList->ByteOffset); 03184 03185 NumberOfPages = COMPUTE_PAGES_SPANNED (StartingVa, 03186 MemoryDescriptorList->ByteCount); 03187 03188 PointerBase = MiGetPteAddress (BaseAddress); 03189 03190 03191 ASSERT ((MemoryDescriptorList->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) != 0); 03192 03193 03194 #if DBG 03195 PointerPte = PointerBase; 03196 i = NumberOfPages; 03197 Page = (PPFN_NUMBER)(MemoryDescriptorList + 1); 03198 if ((MemoryDescriptorList->MdlFlags & MDL_LOCK_HELD) == 0) { 03199 LOCK_PFN2 (OldIrql); 03200 } 03201 03202 while (i != 0) { 03203 ASSERT (PointerPte->u.Hard.Valid == 1); 03204 ASSERT (*Page == MI_GET_PAGE_FRAME_FROM_PTE (PointerPte)); 03205 if ((MemoryDescriptorList->MdlFlags & (MDL_IO_SPACE | MDL_PHYSICAL_VIEW)) == 0) { 03206 PMMPFN Pfn3; 03207 Pfn3 = MI_PFN_ELEMENT (*Page); 03208 ASSERT (Pfn3->u3.e2.ReferenceCount != 0); 03209 } 03210 03211 Page += 1; 03212 PointerPte += 1; 03213 i -= 1; 03214 } 03215 03216 if ((MemoryDescriptorList->MdlFlags & MDL_LOCK_HELD) == 0) { 03217 UNLOCK_PFN2 (OldIrql); 03218 } 03219 #endif //DBG 03220 03221 MemoryDescriptorList->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA | 03222 MDL_PARTIAL_HAS_BEEN_MAPPED); 03223 03224 if (MmTrackPtes != 0) { 03225 MiLockSystemSpace(OldIrql); 03226 PoolBlock = MiRemovePteTracker (MemoryDescriptorList, 03227 PointerBase, 03228 NumberOfPages); 03229 MiUnlockSystemSpace(OldIrql); 03230 03231 // 03232 // Can't free the pool block here because we may be getting called 03233 // from the fault path in MiWaitForInPageComplete holding the PFN 03234 // lock. Queue the block for later release. 03235 // 03236 03237 if (PoolBlock) { 03238 MiInsertDeadPteTrackingBlock (PoolBlock); 03239 } 03240 } 03241 03242 MiReleaseSystemPtes (PointerBase, (ULONG)NumberOfPages, SystemPteSpace); 03243 return; 03244 03245 } else { 03246 03247 MiUnmapLockedPagesInUserSpace (BaseAddress, 03248 MemoryDescriptorList); 03249 } 03250 }
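MmUnmapLockedPages and MmUnlockPages sit at the tail end of the standard MDL lifecycle. The sketch below walks that lifecycle for a user buffer; it is illustrative only (the Hypo* name is a placeholder) and relies on MmUnlockPages removing any system-space mapping recorded in the MDL (the Referenced-by list above shows MmUnmapLockedPages being called from MmUnlockPages), so an explicit MmUnmapLockedPages call is only needed when a mapping must be torn down earlier.

    #include <ntddk.h>

    NTSTATUS HypoCaptureUserBuffer (PVOID UserVa, ULONG Length)
    {
        PMDL  Mdl;
        PVOID SystemVa;

        Mdl = IoAllocateMdl (UserVa, Length, FALSE, FALSE, NULL);
        if (Mdl == NULL) {
            return STATUS_INSUFFICIENT_RESOURCES;
        }

        __try {
            // Pins the physical pages and fills in the MDL's PFN array.
            MmProbeAndLockPages (Mdl, UserMode, IoWriteAccess);
        } __except (EXCEPTION_EXECUTE_HANDLER) {
            IoFreeMdl (Mdl);
            return GetExceptionCode ();
        }

        // Map the locked pages into system space; this is what sets
        // MDL_MAPPED_TO_SYSTEM_VA, the flag MmUnmapLockedPages later clears.
        SystemVa = MmGetSystemAddressForMdlSafe (Mdl, NormalPagePriority);
        if (SystemVa != NULL) {
            RtlZeroMemory (SystemVa, Length);     // ... use the buffer ...
        }

        MmUnlockPages (Mdl);                      // unmaps and unlocks
        IoFreeMdl (Mdl);
        return STATUS_SUCCESS;
    }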

NTKERNELAPI VOID MmUnmapVideoDisplay (IN PVOID BaseAddress, IN SIZE_T NumberOfBytes)
 

Definition at line 7751 of file iosup.c.

References ASSERT, COMPUTE_PAGES_SPANNED, ExFreePool(), FALSE, KSEG0_BASE, MiGetPteAddress, MiGetSubsectionAddress, MiPteToProto, MiReleaseSystemPtes(), MmUnmapIoSpace(), PAGED_CODE, _SUBSECTION::SubsectionBase, SystemPteSpace, _MMPTE::u, and X64K.

07758 : 07759 07760 This function unmaps a range of physical address which were previously 07761 mapped via an MmMapVideoDisplay function call. 07762 07763 Arguments: 07764 07765 BaseAddress - Supplies the base virtual address where the physical 07766 address was previously mapped. 07767 07768 NumberOfBytes - Supplies the number of bytes which were mapped. 07769 07770 Return Value: 07771 07772 None. 07773 07774 Environment: 07775 07776 Kernel mode, IRQL of APC_LEVEL or below. 07777 07778 --*/ 07779 07780 { 07781 07782 #ifdef LARGE_PAGES 07783 PFN_NUMBER NumberOfPages; 07784 ULONG i; 07785 PMMPTE FirstPte; 07786 KIRQL OldIrql; 07787 PMMPTE LargePte; 07788 PSUBSECTION Subsection; 07789 07790 PAGED_CODE(); 07791 07792 ASSERT (NumberOfBytes != 0); 07793 NumberOfPages = COMPUTE_PAGES_SPANNED (BaseAddress, NumberOfBytes); 07794 FirstPte = MiGetPteAddress (BaseAddress); 07795 07796 if ((NumberOfBytes > X64K) && (FirstPte->u.Hard.Valid == 0)) { 07797 07798 ASSERT (MmLargeVideoMapped); 07799 LargePte = MiPteToProto (FirstPte); 07800 Subsection = MiGetSubsectionAddress (LargePte); 07801 ASSERT (Subsection->SubsectionBase == FirstPte); 07802 07803 NumberOfPages = Subsection->EndingSector; 07804 ExFreePool (Subsection); 07805 ExFreePool (LargePte); 07806 MmLargeVideoMapped = FALSE; 07807 KeFillFixedEntryTb ((PHARDWARE_PTE)FirstPte, (PVOID)KSEG0_BASE, LARGE_ENTRY); 07808 } 07809 MiReleaseSystemPtes(FirstPte, NumberOfPages, SystemPteSpace); 07810 return; 07811 07812 #else // LARGE_PAGES 07813 07814 MmUnmapIoSpace (BaseAddress, NumberOfBytes); 07815 return; 07816 #endif //LARGE_PAGES 07817 }
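For completeness, a minimal sketch of the matching map side. The MmMapVideoDisplay signature used here (physical address, length, caching type returning PVOID) is an assumption based on the standard DDK declaration, and the names are placeholders; on builds without LARGE_PAGES the unmap, as the listing shows, simply forwards to MmUnmapIoSpace.

    #include <ntddk.h>

    static PVOID HypoFrameBuffer;                 // hypothetical driver global

    NTSTATUS HypoMapFrameBuffer (PHYSICAL_ADDRESS FrameBufferPhys,
                                 SIZE_T FrameBufferLength)
    {
        // Write-combined caching is the usual choice for a frame buffer.
        HypoFrameBuffer = MmMapVideoDisplay (FrameBufferPhys,
                                             FrameBufferLength,
                                             MmWriteCombined);
        return (HypoFrameBuffer != NULL) ? STATUS_SUCCESS
                                         : STATUS_INSUFFICIENT_RESOURCES;
    }

    VOID HypoUnmapFrameBuffer (SIZE_T FrameBufferLength)
    {
        MmUnmapVideoDisplay (HypoFrameBuffer, FrameBufferLength);
    }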

NTKERNELAPI NTSTATUS MmUnmapViewInSessionSpace (IN PVOID MappedBase)
 

Definition at line 3639 of file mapview.c.

References ASSERT, MiHydra, MiUnmapViewInSystemSpace(), MmIsAddressValid(), MmSession, MmSessionSpace, PAGED_CODE, PsGetCurrentProcess, _MM_SESSION_SPACE::Session, and TRUE.

03645 : 03646 03647 This routine unmaps the specified section from the system's address space. 03648 03649 Arguments: 03650 03651 MappedBase - Supplies the address of the view to unmap. 03652 03653 Return Value: 03654 03655 Status of the map view operation. 03656 03657 Environment: 03658 03659 Kernel Mode, IRQL of dispatch level. 03660 03661 --*/ 03662 03663 { 03664 PMMSESSION Session; 03665 03666 PAGED_CODE(); 03667 03668 if (MiHydra == TRUE) { 03669 if (PsGetCurrentProcess()->Vm.u.Flags.ProcessInSession == 0) { 03670 return STATUS_NOT_MAPPED_VIEW; 03671 } 03672 ASSERT (MmIsAddressValid(MmSessionSpace) == TRUE); 03673 Session = &MmSessionSpace->Session; 03674 } 03675 else { 03676 Session = &MmSession; 03677 } 03678 03679 return MiUnmapViewInSystemSpace (Session, MappedBase); 03680 }
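A minimal usage sketch, assuming the usual three-argument MmMapViewInSessionSpace form (section object, returned base, in/out view size) and a section pointer referenced elsewhere (for example via ObReferenceObjectByHandle against MmSectionObjectType); the routine name is a placeholder and the map prototype is declared locally as an assumption.

    #include <ntddk.h>

    // Assumed prototype for the map side; the unmap prototype matches the
    // documentation above.
    NTKERNELAPI NTSTATUS MmMapViewInSessionSpace (IN PVOID Section,
                                                  OUT PVOID *MappedBase,
                                                  IN OUT PSIZE_T ViewSize);
    NTKERNELAPI NTSTATUS MmUnmapViewInSessionSpace (IN PVOID MappedBase);

    NTSTATUS HypoMapInSession (PVOID SectionObject,
                               PVOID *MappedBase,
                               SIZE_T *ViewSize)
    {
        NTSTATUS Status;

        Status = MmMapViewInSessionSpace (SectionObject, MappedBase, ViewSize);
        if (!NT_SUCCESS (Status)) {
            return Status;
        }

        /* ... use the view ... */

        // Teardown needs only the base address.  As the listing shows, the
        // call fails with STATUS_NOT_MAPPED_VIEW when MiHydra (Terminal
        // Server) is enabled and the current process is not in a session.
        return MmUnmapViewInSessionSpace (*MappedBase);
    }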

VOID MmUnmapViewInSystemCache (IN PVOID BaseAddress, IN PVOID SectionToUnmap, IN ULONG AddToFront)
 

Definition at line 381 of file mapcache.c.

References APC_LEVEL, ASSERT, _SEGMENT::ControlArea, FALSE, LOCK_PFN, LOCK_SYSTEM_WS, MI_CAPTURE_DIRTY_BIT_TO_PFN, MI_GET_PAGE_FRAME_FROM_PTE, MI_PFN_ELEMENT, MI_SET_PTE_IN_WORKING_SET, MI_WRITE_INVALID_PTE, MiCheckControlArea(), MiDecrementShareAndValidCount, MiDecrementShareCount(), MiGetPteAddress, MiLocateWsle(), MiReleaseWsle(), MiRemoveWsle(), MM_EMPTY_PTE_LIST, MmFlushSystemCache, MmFrontOfList, MmLastFreeSystemCache, MmSystemCachePteBase, MmSystemCacheWorkingSetList, MmSystemCacheWs, NULL, _CONTROL_AREA::NumberOfMappedViews, _CONTROL_AREA::NumberOfSystemCacheViews, PAGE_SIZE, PSECTION, _CONTROL_AREA::Segment, TRUE, _MMPTE::u, _MMPFN::u1, _MMPFN::u2, UNLOCK_PFN, UNLOCK_SYSTEM_WS, WSLE_NUMBER, X256K, and ZeroKernelPte.

Referenced by CcUnmapVacb().

00389 : 00390 00391 This function unmaps a view from the system cache. 00392 00393 NOTE: When this function is called, no pages may be locked in 00394 the cache for the specified view. 00395 00396 Arguments: 00397 00398 BaseAddress - Supplies the base address of the section in the 00399 system cache. 00400 00401 SectionToUnmap - Supplies a pointer to the section which the 00402 base address maps. 00403 00404 AddToFront - Supplies TRUE if the unmapped pages should be 00405 added to the front of the standby list (i.e., their 00406 value in the cache is low). FALSE otherwise 00407 00408 Return Value: 00409 00410 none. 00411 00412 Environment: 00413 00414 Kernel mode. 00415 00416 --*/ 00417 00418 { 00419 PMMPTE PointerPte; 00420 PMMPFN Pfn1; 00421 PMMPTE FirstPte; 00422 MMPTE PteContents; 00423 KIRQL OldIrql; 00424 KIRQL OldIrqlWs; 00425 PFN_NUMBER i; 00426 WSLE_NUMBER WorkingSetIndex; 00427 PCONTROL_AREA ControlArea; 00428 ULONG WsHeld; 00429 PFN_NUMBER PdeFrameNumber; 00430 00431 WsHeld = FALSE; 00432 00433 ASSERT (KeGetCurrentIrql() <= APC_LEVEL); 00434 00435 PointerPte = MiGetPteAddress (BaseAddress); 00436 FirstPte = PointerPte; 00437 ControlArea = ((PSECTION)SectionToUnmap)->Segment->ControlArea; 00438 PdeFrameNumber = MI_GET_PAGE_FRAME_FROM_PTE (MiGetPteAddress (PointerPte)); 00439 00440 // 00441 // Get the control area for the segment which is mapped here. 00442 // 00443 00444 i = 0; 00445 00446 do { 00447 00448 // 00449 // The cache is organized in chunks of 256k bytes, clear 00450 // the first chunk then check to see if this is the last 00451 // chunk. 00452 // 00453 00454 // 00455 // The page table page is always resident for the system cache. 00456 // Check each PTE, it is in one of two states, either valid or 00457 // prototype PTE format. 00458 // 00459 00460 PteContents = *(volatile MMPTE *)PointerPte; 00461 if (PteContents.u.Hard.Valid == 1) { 00462 00463 if (!WsHeld) { 00464 WsHeld = TRUE; 00465 LOCK_SYSTEM_WS (OldIrqlWs); 00466 continue; 00467 } 00468 00469 Pfn1 = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber); 00470 00471 WorkingSetIndex = MiLocateWsle (BaseAddress, 00472 MmSystemCacheWorkingSetList, 00473 Pfn1->u1.WsIndex ); 00474 MiRemoveWsle (WorkingSetIndex, 00475 MmSystemCacheWorkingSetList ); 00476 MiReleaseWsle (WorkingSetIndex, &MmSystemCacheWs); 00477 00478 MI_SET_PTE_IN_WORKING_SET (PointerPte, 0); 00479 00480 // 00481 // The PTE is valid. 00482 // 00483 00484 LOCK_PFN (OldIrql); 00485 00486 // 00487 // Capture the state of the modified bit for this PTE. 00488 // 00489 00490 MI_CAPTURE_DIRTY_BIT_TO_PFN (PointerPte, Pfn1); 00491 00492 // 00493 // Decrement the share and valid counts of the page table 00494 // page which maps this PTE. 00495 // 00496 00497 MiDecrementShareAndValidCount (PdeFrameNumber); 00498 00499 // 00500 // Decrement the share count for the physical page. 
00501 // 00502 00503 #if DBG 00504 if (ControlArea->NumberOfMappedViews == 1) { 00505 PMMPFN Pfn; 00506 Pfn = MI_PFN_ELEMENT (PteContents.u.Hard.PageFrameNumber); 00507 ASSERT (Pfn->u2.ShareCount == 1); 00508 } 00509 #endif //DBG 00510 00511 00512 MmFrontOfList = AddToFront; 00513 MiDecrementShareCount (MI_GET_PAGE_FRAME_FROM_PTE (&PteContents)); 00514 MmFrontOfList = FALSE; 00515 UNLOCK_PFN (OldIrql); 00516 } else { 00517 if (WsHeld) { 00518 UNLOCK_SYSTEM_WS (OldIrqlWs); 00519 WsHeld = FALSE; 00520 } 00521 00522 ASSERT ((PteContents.u.Long == ZeroKernelPte.u.Long) || 00523 (PteContents.u.Soft.Prototype == 1)); 00524 NOTHING; 00525 } 00526 MI_WRITE_INVALID_PTE (PointerPte, ZeroKernelPte); 00527 00528 PointerPte += 1; 00529 BaseAddress = (PVOID)((PCHAR)BaseAddress + PAGE_SIZE); 00530 i += 1; 00531 } while (i < (X256K / PAGE_SIZE)); 00532 00533 if (WsHeld) { 00534 UNLOCK_SYSTEM_WS (OldIrqlWs); 00535 } 00536 00537 FirstPte->u.List.NextEntry = MM_EMPTY_PTE_LIST; 00538 00539 LOCK_PFN (OldIrql); 00540 00541 // 00542 // Free this entry to the end of the list. 00543 // 00544 00545 if (MmFlushSystemCache == NULL) { 00546 00547 // 00548 // If there is no entry marked to initiate a TB flush when 00549 // reused, mark this entry as the one. This way the TB 00550 // only needs to be flushed when the list wraps. 00551 // 00552 00553 MmFlushSystemCache = FirstPte; 00554 } 00555 00556 MmLastFreeSystemCache->u.List.NextEntry = FirstPte - MmSystemCachePteBase; 00557 MmLastFreeSystemCache = FirstPte; 00558 00559 // 00560 // Decrement the number of mapped views for the segment 00561 // and check to see if the segment should be deleted. 00562 // 00563 00564 ControlArea->NumberOfMappedViews -= 1; 00565 ControlArea->NumberOfSystemCacheViews -= 1; 00566 00567 // 00568 // Check to see if the control area (segment) should be deleted. 00569 // This routine releases the PFN lock. 00570 // 00571 00572 MiCheckControlArea (ControlArea, NULL, OldIrql); 00573 00574 return; 00575 }

NTKERNELAPI NTSTATUS MmUnmapViewInSystemSpace (IN PVOID MappedBase)
 

Definition at line 3608 of file mapview.c.

References MiUnmapViewInSystemSpace(), MmSession, and PAGED_CODE.

Referenced by NtMapViewOfSection().

03614 : 03615 03616 This routine unmaps the specified section from the system's address space. 03617 03618 Arguments: 03619 03620 MappedBase - Supplies the address of the view to unmap. 03621 03622 Return Value: 03623 03624 Status of the map view operation. 03625 03626 Environment: 03627 03628 Kernel Mode, IRQL of dispatch level. 03629 03630 --*/ 03631 03632 { 03633 PAGED_CODE(); 03634 03635 return MiUnmapViewInSystemSpace (&MmSession, MappedBase); 03636 }
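The system-space variant follows the same pattern without the session restriction. A minimal sketch, assuming the caller holds a referenced section object pointer and the usual three-argument MmMapViewInSystemSpace form (declared locally here as an assumption); names are placeholders and error handling is abbreviated.

    #include <ntddk.h>

    // Assumed prototype for the map side; the unmap prototype matches the
    // documentation above.
    NTKERNELAPI NTSTATUS MmMapViewInSystemSpace (IN PVOID Section,
                                                 OUT PVOID *MappedBase,
                                                 IN OUT PSIZE_T ViewSize);
    NTKERNELAPI NTSTATUS MmUnmapViewInSystemSpace (IN PVOID MappedBase);

    NTSTATUS HypoWithSystemView (PVOID SectionObject, SIZE_T ViewSize)
    {
        NTSTATUS Status;
        PVOID    MappedBase;

        Status = MmMapViewInSystemSpace (SectionObject, &MappedBase, &ViewSize);
        if (!NT_SUCCESS (Status)) {
            return Status;
        }

        /* ... read or write through MappedBase ... */

        // Only the view's base address is needed to locate and tear down the
        // mapping in the MmSession view structures.
        return MmUnmapViewInSystemSpace (MappedBase);
    }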

NTKERNELAPI NTSTATUS MmUnmapViewOfSection (IN PEPROCESS Process, IN PVOID BaseAddress)
 

Definition at line 90 of file umapview.c.

References DbgkUnMapViewOfSection(), _MMVAD::EndingVpn, ExFreePool(), FALSE, KeAttachProcess(), KeDetachProcess(), LOCK_WS_AND_ADDRESS_SPACE, LOCK_WS_UNSAFE, MI_VPN_TO_VA, MI_VPN_TO_VA_ENDING, MiCheckSecuredVad(), MiDeleteFor4kPage(), MiGetNextVad, MiGetPreviousVad, MiLocateAddress(), MiRemoveMappedView(), MiRemoveVad(), MiReturnPageTablePageCommitment(), MM_SECURE_DELETE_CHECK, NT_SUCCESS, NTSTATUS(), NULL, PAGE_SHIFT, PAGE_SIZE, PAGED_CODE, PsGetCurrentProcess, _MMVAD::StartingVpn, TRUE, _MMVAD::u, UNLOCK_WS_AND_ADDRESS_SPACE, and UNLOCK_WS_UNSAFE.

Referenced by CommitReadOnlyMemory(), FreeView(), LpcpDeletePort(), MiGetWritablePagesInSection(), MiLoadImageSection(), NtUnmapViewOfSection(), and UserCreateHeap().

00097 : 00098 00099 This function unmaps a previously created view to a section. 00100 00101 Arguments: 00102 00103 Process - Supplies a referenced pointer to a process object. 00104 00105 BaseAddress - Supplies the base address of the view. 00106 00107 Return Value: 00108 00109 Returns the status 00110 00111 TBS 00112 00113 00114 --*/ 00115 00116 { 00117 PMMVAD Vad; 00118 PMMVAD PreviousVad; 00119 PMMVAD NextVad; 00120 SIZE_T RegionSize; 00121 PVOID UnMapImageBase; 00122 NTSTATUS status; 00123 BOOLEAN Attached = FALSE; 00124 00125 PAGED_CODE(); 00126 00127 UnMapImageBase = NULL; 00128 00129 // 00130 // If the specified process is not the current process, attach 00131 // to the specified process. 00132 // 00133 00134 if (PsGetCurrentProcess() != Process) { 00135 KeAttachProcess (&Process->Pcb); 00136 Attached = TRUE; 00137 } 00138 00139 // 00140 // Get the address creation mutex to block multiple threads from 00141 // creating or deleting address space at the same time and 00142 // get the working set mutex so virtual address descriptors can 00143 // be inserted and walked. 00144 // Raise IRQL to block APCs. 00145 // 00146 // Get the working set mutex, no page faults allowed for now until 00147 // working set mutex released. 00148 // 00149 00150 00151 LOCK_WS_AND_ADDRESS_SPACE (Process); 00152 00153 // 00154 // Make sure the address space was not deleted, if so, return an error. 00155 // 00156 00157 if (Process->AddressSpaceDeleted != 0) { 00158 status = STATUS_PROCESS_IS_TERMINATING; 00159 goto ErrorReturn; 00160 } 00161 00162 // 00163 // Find the associated vad. 00164 // 00165 00166 Vad = MiLocateAddress (BaseAddress); 00167 00168 if ((Vad == (PMMVAD)NULL) || (Vad->u.VadFlags.PrivateMemory)) { 00169 00170 // 00171 // No Virtual Address Descriptor located for Base Address. 00172 // 00173 00174 status = STATUS_NOT_MAPPED_VIEW; 00175 goto ErrorReturn; 00176 } 00177 00178 if (Vad->u.VadFlags.NoChange == 1) { 00179 00180 // 00181 // An attempt is being made to delete a secured VAD, check 00182 // the whole VAD to see if this deletion is allowed. 00183 // 00184 00185 status = MiCheckSecuredVad ((PMMVAD)Vad, 00186 MI_VPN_TO_VA (Vad->StartingVpn), 00187 ((Vad->EndingVpn - Vad->StartingVpn) << PAGE_SHIFT) + 00188 (PAGE_SIZE - 1), 00189 MM_SECURE_DELETE_CHECK); 00190 00191 if (!NT_SUCCESS (status)) { 00192 goto ErrorReturn; 00193 } 00194 } 00195 00196 // 00197 // If this Vad is for an image section, then 00198 // get the base address of the section 00199 // 00200 00201 if ((Vad->u.VadFlags.ImageMap == 1) && (Process == PsGetCurrentProcess())) { 00202 UnMapImageBase = MI_VPN_TO_VA (Vad->StartingVpn); 00203 } 00204 00205 RegionSize = PAGE_SIZE + ((Vad->EndingVpn - Vad->StartingVpn) << PAGE_SHIFT); 00206 00207 PreviousVad = MiGetPreviousVad (Vad); 00208 NextVad = MiGetNextVad (Vad); 00209 00210 00211 MiRemoveVad (Vad); 00212 00213 // 00214 // Return commitment for page table pages if possible. 
00215 // 00216 00217 MiReturnPageTablePageCommitment (MI_VPN_TO_VA (Vad->StartingVpn), 00218 MI_VPN_TO_VA_ENDING (Vad->EndingVpn), 00219 Process, 00220 PreviousVad, 00221 NextVad); 00222 00223 MiRemoveMappedView (Process, Vad); 00224 00225 #if defined(_MIALT4K_) 00226 00227 if (Process->Wow64Process != NULL) { 00228 00229 UNLOCK_WS_UNSAFE (Process); 00230 00231 MiDeleteFor4kPage(MI_VPN_TO_VA (Vad->StartingVpn), 00232 MI_VPN_TO_VA_ENDING (Vad->EndingVpn), 00233 Process); 00234 00235 LOCK_WS_UNSAFE (Process); 00236 00237 } 00238 00239 #endif 00240 00241 ExFreePool (Vad); 00242 00243 // 00244 // Update the current virtual size in the process header. 00245 // 00246 00247 Process->VirtualSize -= RegionSize; 00248 status = STATUS_SUCCESS; 00249 00250 ErrorReturn: 00251 00252 UNLOCK_WS_AND_ADDRESS_SPACE (Process); 00253 00254 if ( UnMapImageBase ) { 00255 DbgkUnMapViewOfSection(UnMapImageBase); 00256 } 00257 if (Attached == TRUE) { 00258 KeDetachProcess(); 00259 } 00260 00261 return status; 00262 }
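A minimal teardown-only sketch: a kernel component that earlier mapped a view into a target process (for example with MmMapViewOfSection, not shown) removes it by process pointer and view base. The routine attaches to the target process itself when needed, as the listing shows, so the caller does not have to; the Hypo* name is a placeholder and the prototype is declared locally for self-containment.

    #include <ntddk.h>

    // Prototype as documented above (mm.h).
    NTKERNELAPI NTSTATUS MmUnmapViewOfSection (IN PEPROCESS Process,
                                               IN PVOID BaseAddress);

    NTSTATUS HypoRemoveView (PEPROCESS TargetProcess, PVOID ViewBase)
    {
        NTSTATUS Status;

        Status = MmUnmapViewOfSection (TargetProcess, ViewBase);

        // STATUS_NOT_MAPPED_VIEW: no VAD describes ViewBase (or it is private
        // memory).  STATUS_PROCESS_IS_TERMINATING: the address space has
        // already been deleted.
        return Status;
    }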

NTKERNELAPI VOID MmUnsecureVirtualMemory (IN HANDLE SecureHandle)
 

Definition at line 4614 of file mapview.c.

References ASSERT, ExFreePool(), List, _MMSECURE_ENTRY::List, LOCK_ADDRESS_SPACE, _MMSECURE_ENTRY::LongFlags2, MiLocateAddress(), PAGED_CODE, PsGetCurrentProcess, _MMSECURE_ENTRY::StartVpn, _MMVAD::u, _MMSECURE_ENTRY::u2, _MMVAD::u2, _MMVAD::u3, and UNLOCK_ADDRESS_SPACE.

04620 : 04621 04622 This routine unsecures memory previous secured via a call to 04623 MmSecureVirtualMemory. 04624 04625 Arguments: 04626 04627 SecureHandle - Supplies the handle returned in MmSecureVirtualMemory. 04628 04629 Return Value: 04630 04631 None. 04632 04633 Environment: 04634 04635 Kernel Mode. 04636 04637 --*/ 04638 04639 { 04640 PMMSECURE_ENTRY Secure; 04641 PEPROCESS Process; 04642 PMMVAD Vad; 04643 04644 PAGED_CODE(); 04645 04646 Secure = (PMMSECURE_ENTRY)SecureHandle; 04647 Process = PsGetCurrentProcess (); 04648 LOCK_ADDRESS_SPACE (Process); 04649 04650 if (Secure->u2.VadFlags2.StoredInVad) { 04651 Vad = CONTAINING_RECORD( Secure, 04652 MMVAD, 04653 u2.LongFlags2); 04654 } else { 04655 Vad = MiLocateAddress ((PVOID)Secure->StartVpn); 04656 } 04657 04658 ASSERT (Vad); 04659 ASSERT (Vad->u.VadFlags.NoChange == 1); 04660 04661 if (Vad->u2.VadFlags2.OneSecured) { 04662 ASSERT (Secure == (PMMSECURE_ENTRY)&Vad->u2.LongFlags2); 04663 Vad->u2.VadFlags2.OneSecured = 0; 04664 ASSERT (Vad->u2.VadFlags2.MultipleSecured == 0); 04665 if (Vad->u2.VadFlags2.SecNoChange == 0) { 04666 04667 // 04668 // No more secure entries in this list, remove the state. 04669 // 04670 04671 Vad->u.VadFlags.NoChange = 0; 04672 } 04673 } else { 04674 ASSERT (Vad->u2.VadFlags2.MultipleSecured == 1); 04675 04676 if (Secure == (PMMSECURE_ENTRY)&Vad->u2.LongFlags2) { 04677 04678 // 04679 // This was a single block that got converted into a list. 04680 // Reset the entry. 04681 // 04682 04683 Secure = CONTAINING_RECORD (Vad->u3.List.Flink, 04684 MMSECURE_ENTRY, 04685 List); 04686 } 04687 RemoveEntryList (&Secure->List); 04688 ExFreePool (Secure); 04689 if (IsListEmpty (&Vad->u3.List)) { 04690 04691 // 04692 // No more secure entries, reset the state. 04693 // 04694 04695 Vad->u2.VadFlags2.MultipleSecured = 0; 04696 04697 if ((Vad->u2.VadFlags2.SecNoChange == 0) && 04698 (Vad->u.VadFlags.PrivateMemory == 0)) { 04699 04700 // 04701 // No more secure entries in this list, remove the state 04702 // if and only if this VAD is not private. If this VAD 04703 // is private, removing the state NoChange flag indicates 04704 // that this is a short VAD which it no longer is. 04705 // 04706 04707 Vad->u.VadFlags.NoChange = 0; 04708 } 04709 } 04710 } 04711 04712 UNLOCK_ADDRESS_SPACE (Process); 04713 return; 04714 } }
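A minimal sketch of the pairing with MmSecureVirtualMemory: secure a user-mode range so its protection cannot be tightened and its VAD cannot be deleted while the kernel uses it, then release the handle. The prototypes are declared locally to keep the fragment self-contained, the routine name is a placeholder, and the access to the range is abbreviated.

    #include <ntddk.h>

    // Prototypes matching the signatures documented in this reference.
    NTKERNELAPI HANDLE MmSecureVirtualMemory (IN PVOID Address,
                                              IN SIZE_T Size,
                                              IN ULONG ProbeMode);
    NTKERNELAPI VOID MmUnsecureVirtualMemory (IN HANDLE SecureHandle);

    NTSTATUS HypoUseSecuredRange (PVOID UserVa, SIZE_T Length)
    {
        HANDLE SecureHandle;

        // PAGE_READWRITE asks that the range stay writable for the duration.
        SecureHandle = MmSecureVirtualMemory (UserVa, Length, PAGE_READWRITE);
        if (SecureHandle == NULL) {
            return STATUS_INVALID_PARAMETER;
        }

        /* ... access the range, still under __try/__except for safety ... */

        // Undoes the secure: clears the OneSecured/MultipleSecured state kept
        // in the VAD, as the listing above shows.
        MmUnsecureVirtualMemory (SecureHandle);
        return STATUS_SUCCESS;
    }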

BOOLEAN MmVerifyImageIsOkForMpUse (IN PVOID BaseAddress)
 

Definition at line 4923 of file sysload.c.

References FALSE, KeNumberProcessors, NULL, PAGED_CODE, RtlImageNtHeader(), and TRUE.

Referenced by MmCheckSystemImage().

04926 { 04927 PIMAGE_NT_HEADERS NtHeaders; 04928 04929 PAGED_CODE(); 04930 04931 // 04932 // If the file is an image file, then subtract the two checksum words 04933 // in the optional header from the computed checksum before adding 04934 // the file length, and set the value of the header checksum. 04935 // 04936 04937 NtHeaders = RtlImageNtHeader(BaseAddress); 04938 if (NtHeaders != NULL) { 04939 if ( KeNumberProcessors > 1 && 04940 (NtHeaders->FileHeader.Characteristics & IMAGE_FILE_UP_SYSTEM_ONLY) ) { 04941 return FALSE; 04942 } 04943 } 04944 return TRUE; 04945 }

VOID MmWorkingSetManager (VOID)
 

Definition at line 397 of file wsmanage.c.

References _EPROCESS::AddressSpaceDeleted, _MMSUPPORT::AllowWorkingSetAdjustment, APC_LEVEL, ASSERT, DbgPrint, _MMWS_TRIM_CRITERIA::DesiredFreeGoal, ExTryToAcquireResourceExclusiveLite(), FALSE, _MMWS_TRIM_CRITERIA::FaultBased, _EPROCESS::ImageFileName, KeDelayExecutionThread(), KeDetachProcess(), KeForceAttachProcess(), KeLowerIrql(), KeQuerySystemTime(), KeRaiseIrql(), KernelMode, KeSetEvent(), _MMSUPPORT::LastTrimFaultCount, _MMSUPPORT::LastTrimTime, _MMWORKING_SET_EXPANSION_HEAD::ListHead, LOCK_EXPANSION, MiAttachSession(), MiCheckAndSetSystemTrimCriteria(), MiCheckCounter, MiCheckProcessTrimCriteria(), MiCheckSystemCacheWsTrimCriteria(), MiCheckSystemTrimEndCriteria(), MiDetachSession(), MiDetermineWsTrimAmount(), MiHydra, _MMSUPPORT::MinimumWorkingSetSize, MiTrimWorkingSet(), MM_DBG_WS_EXPANSION, MM_FORCE_TRIM, MM_NO_WS_EXPANSION, MM_SET_SESSION_RESOURCE_OWNER, MM_TRIM_COUNTER_MAXIMUM_LARGE_MEM, MM_WS_EXPANSION_IN_PROGRESS, MmAvailablePages, MmIsAddressValid(), MmMinimumFreePages, MmModifiedPageListHead, MmModifiedPageMaximum, MmModifiedPageWriterEvent, MmSessionSpace, MmShortTime, MmSystemCacheWorkingSetList, MmSystemCacheWs, MmSystemLockOwner, MmSystemWsLock, MmWorkingSetExpansionHead, MmWorkingSetList, NULL, _MMSUPPORT::PageFaultCount, _EPROCESS::Pcb, PERFINFO_WSMANAGE_ACTUALTRIM, PERFINFO_WSMANAGE_DECL, PERFINFO_WSMANAGE_FINALACTION, PERFINFO_WSMANAGE_PROCESS_RESET, PERFINFO_WSMANAGE_TOTRIM, PERFINFO_WSMANAGE_TRIMEND_CLAIMS, PERFINFO_WSMANAGE_TRIMEND_FAULTS, PERFINFO_WSMANAGE_TRIMWS, _MM_SESSION_SPACE::ProcessOutSwapCount, _EPROCESS::ProcessOutswapEnabled, _EPROCESS::ProcessOutswapped, PsGetCurrentProcess, PsGetCurrentThread, _MMWSL::Quota, _MM_SESSION_SPACE::SessionId, _MMPFNLIST::Total, TRUE, _MMSUPPORT::u, UNLOCK_EXPANSION, UNLOCK_SESSION_SPACE_WS, UNLOCK_SYSTEM_WS, UNLOCK_WS, _EPROCESS::Vm, _MMSUPPORT::VmWorkingSetList, _MMSUPPORT::WorkingSetExpansionLinks, _EPROCESS::WorkingSetLock, _MMSUPPORT::WorkingSetSize, and _MM_SESSION_SPACE::WsLock.

Referenced by KeBalanceSetManager().

00403 : 00404 00405 Implements the NT working set manager thread. When the number 00406 of free pages becomes critical and ample pages can be obtained by 00407 reducing working sets, the working set manager's event is set, and 00408 this thread becomes active. 00409 00410 Arguments: 00411 00412 None. 00413 00414 Return Value: 00415 00416 None. 00417 00418 Environment: 00419 00420 Kernel mode. 00421 00422 --*/ 00423 00424 { 00425 00426 PEPROCESS CurrentProcess; 00427 PEPROCESS ProcessToTrim; 00428 PLIST_ENTRY ListEntry; 00429 LOGICAL Attached; 00430 ULONG Trim; 00431 KIRQL OldIrql; 00432 PMMSUPPORT VmSupport; 00433 PMMWSL WorkingSetList; 00434 LARGE_INTEGER CurrentTime; 00435 ULONG count; 00436 LOGICAL DoTrimming; 00437 PMM_SESSION_SPACE SessionSpace; 00438 LOGICAL InformSessionOfRelease; 00439 #if DBG 00440 ULONG LastTrimFaultCount; 00441 #endif // DBG 00442 MMWS_TRIM_CRITERIA TrimCriteria; 00443 PERFINFO_WSMANAGE_DECL(); 00444 00445 #if DBG 00446 MmWorkingSetThread = PsGetCurrentThread (); 00447 #endif 00448 00449 ASSERT (MiHydra == FALSE || MmIsAddressValid (MmSessionSpace) == FALSE); 00450 00451 CurrentProcess = PsGetCurrentProcess (); 00452 00453 Trim = 0; 00454 00455 // 00456 // Set the trim criteria: If there are plenty of pages, the existing 00457 // sets are aged and FALSE is returned to signify no trim is necessary. 00458 // Otherwise, the working set expansion list is ordered so the best 00459 // candidates for trimming are placed at the front and TRUE is returned. 00460 // 00461 00462 DoTrimming = MiCheckAndSetSystemTrimCriteria(&TrimCriteria); 00463 00464 if (DoTrimming) { 00465 00466 Attached = 0; 00467 00468 KeQuerySystemTime (&CurrentTime); 00469 00470 ASSERT (MiHydra == FALSE || MmIsAddressValid (MmSessionSpace) == FALSE); 00471 00472 LOCK_EXPANSION (OldIrql); 00473 while (!IsListEmpty (&MmWorkingSetExpansionHead.ListHead)) { 00474 00475 // 00476 // Remove the entry at the head and trim it. 00477 // 00478 00479 ListEntry = RemoveHeadList (&MmWorkingSetExpansionHead.ListHead); 00480 00481 if (ListEntry == &MmSystemCacheWs.WorkingSetExpansionLinks) { 00482 VmSupport = &MmSystemCacheWs; 00483 ASSERT (VmSupport->u.Flags.SessionSpace == 0); 00484 ASSERT (VmSupport->u.Flags.TrimHard == 0); 00485 SessionSpace = NULL; 00486 } 00487 else { 00488 VmSupport = CONTAINING_RECORD(ListEntry, 00489 MMSUPPORT, 00490 WorkingSetExpansionLinks); 00491 00492 if (VmSupport->u.Flags.SessionSpace == 0) { 00493 ProcessToTrim = CONTAINING_RECORD(VmSupport, 00494 EPROCESS, 00495 Vm); 00496 00497 ASSERT (VmSupport == &ProcessToTrim->Vm); 00498 ASSERT (ProcessToTrim->AddressSpaceDeleted == 0); 00499 SessionSpace = NULL; 00500 } 00501 else { 00502 ASSERT (MiHydra == TRUE); 00503 SessionSpace = CONTAINING_RECORD(VmSupport, 00504 MM_SESSION_SPACE, 00505 Vm); 00506 } 00507 } 00508 00509 // 00510 // Note that other routines that set this bit must remove the 00511 // entry from the expansion list first. 00512 // 00513 00514 ASSERT (VmSupport->u.Flags.BeingTrimmed == 0); 00515 00516 // 00517 // Check to see if we've been here before. 00518 // 00519 00520 if ((*(PLARGE_INTEGER)&VmSupport->LastTrimTime).QuadPart == 00521 (*(PLARGE_INTEGER)&CurrentTime).QuadPart) { 00522 00523 InsertHeadList (&MmWorkingSetExpansionHead.ListHead, 00524 &VmSupport->WorkingSetExpansionLinks); 00525 00526 // 00527 // If we aren't finished we may sleep in this call. 00528 // 00529 00530 if (MiCheckSystemTrimEndCriteria(&TrimCriteria, OldIrql)) { 00531 00532 // 00533 // No more pages are needed so we're done. 
00534 // 00535 00536 break; 00537 } 00538 00539 // 00540 // Start a new round of trimming. 00541 // 00542 00543 KeQuerySystemTime (&CurrentTime); 00544 00545 continue; 00546 } 00547 00548 PERFINFO_WSMANAGE_TRIMWS(ProcessToTrim, SessionSpace, VmSupport); 00549 00550 if (SessionSpace) { 00551 00552 if (MiCheckProcessTrimCriteria(&TrimCriteria, 00553 VmSupport, 00554 NULL, 00555 &CurrentTime) == FALSE) { 00556 00557 InsertTailList (&MmWorkingSetExpansionHead.ListHead, 00558 &VmSupport->WorkingSetExpansionLinks); 00559 continue; 00560 } 00561 00562 VmSupport->LastTrimTime = CurrentTime; 00563 VmSupport->u.Flags.BeingTrimmed = 1; 00564 00565 VmSupport->WorkingSetExpansionLinks.Flink = MM_NO_WS_EXPANSION; 00566 VmSupport->WorkingSetExpansionLinks.Blink = 00567 MM_WS_EXPANSION_IN_PROGRESS; 00568 UNLOCK_EXPANSION (OldIrql); 00569 00570 ProcessToTrim = NULL; 00571 00572 // 00573 // Attach directly to the session space to be trimmed. 00574 // 00575 00576 MiAttachSession (SessionSpace); 00577 00578 // 00579 // Try for the session working set lock. 00580 // 00581 00582 WorkingSetList = VmSupport->VmWorkingSetList; 00583 00584 KeRaiseIrql (APC_LEVEL, &OldIrql); 00585 00586 if (!ExTryToAcquireResourceExclusiveLite (&SessionSpace->WsLock)) { 00587 // 00588 // This session space's working set lock was not 00589 // granted, don't trim it. 00590 // 00591 00592 KeLowerIrql (OldIrql); 00593 00594 MiDetachSession (); 00595 00596 LOCK_EXPANSION (OldIrql); 00597 00598 ASSERT (VmSupport->u.Flags.BeingTrimmed == 1); 00599 00600 VmSupport->u.Flags.BeingTrimmed = 0; 00601 00602 VmSupport->AllowWorkingSetAdjustment = MM_FORCE_TRIM; 00603 00604 goto WorkingSetLockFailed; 00605 } 00606 00607 VmSupport->LastTrimFaultCount = VmSupport->PageFaultCount; 00608 00609 MM_SET_SESSION_RESOURCE_OWNER(); 00610 PERFINFO_WSMANAGE_PROCESS_RESET(VmSupport); 00611 } 00612 else if (VmSupport != &MmSystemCacheWs) { 00613 00614 // 00615 // Check to see if this is a forced trim or 00616 // if we are trimming because check counter is 00617 // at the maximum. 00618 // 00619 00620 if (MiCheckProcessTrimCriteria(&TrimCriteria, 00621 VmSupport, 00622 ProcessToTrim, 00623 &CurrentTime) == FALSE) { 00624 00625 InsertTailList (&MmWorkingSetExpansionHead.ListHead, 00626 &VmSupport->WorkingSetExpansionLinks); 00627 continue; 00628 } 00629 00630 VmSupport->LastTrimTime = CurrentTime; 00631 VmSupport->u.Flags.BeingTrimmed = 1; 00632 00633 VmSupport->WorkingSetExpansionLinks.Flink = MM_NO_WS_EXPANSION; 00634 VmSupport->WorkingSetExpansionLinks.Blink = 00635 MM_WS_EXPANSION_IN_PROGRESS; 00636 UNLOCK_EXPANSION (OldIrql); 00637 WorkingSetList = MmWorkingSetList; 00638 InformSessionOfRelease = FALSE; 00639 00640 // 00641 // Attach to the process in preparation for trimming. 00642 // 00643 00644 if (ProcessToTrim != CurrentProcess) { 00645 00646 Attached = KeForceAttachProcess (&ProcessToTrim->Pcb); 00647 00648 if (Attached == 0) { 00649 LOCK_EXPANSION (OldIrql); 00650 VmSupport->u.Flags.BeingTrimmed = 0; 00651 VmSupport->AllowWorkingSetAdjustment = MM_FORCE_TRIM; 00652 goto WorkingSetLockFailed; 00653 } 00654 if (ProcessToTrim->ProcessOutswapEnabled == TRUE) { 00655 ASSERT (ProcessToTrim->ProcessOutswapped == FALSE); 00656 if (MiHydra == TRUE && VmSupport->u.Flags.ProcessInSession == 1 && VmSupport->u.Flags.SessionLeader == 0) { 00657 InformSessionOfRelease = TRUE; 00658 } 00659 } 00660 } 00661 00662 // 00663 // Attempt to acquire the working set lock. If the 00664 // lock cannot be acquired, skip over this process. 
00665 // 00666 00667 count = 0; 00668 do { 00669 if (ExTryToAcquireFastMutex(&ProcessToTrim->WorkingSetLock) != FALSE) { 00670 break; 00671 } 00672 KeDelayExecutionThread (KernelMode, FALSE, &MmShortTime); 00673 count += 1; 00674 if (count == 5) { 00675 00676 // 00677 // Could not get the lock, skip this process. 00678 // 00679 00680 if (InformSessionOfRelease == TRUE) { 00681 LOCK_EXPANSION (OldIrql); 00682 ASSERT (ProcessToTrim->ProcessOutswapEnabled == TRUE); 00683 ProcessToTrim->ProcessOutswapEnabled = FALSE; 00684 ASSERT (MmSessionSpace->ProcessOutSwapCount >= 1); 00685 MmSessionSpace->ProcessOutSwapCount -= 1; 00686 UNLOCK_EXPANSION (OldIrql); 00687 InformSessionOfRelease = FALSE; 00688 } 00689 00690 if (Attached) { 00691 KeDetachProcess (); 00692 Attached = 0; 00693 } 00694 00695 LOCK_EXPANSION (OldIrql); 00696 VmSupport->u.Flags.BeingTrimmed = 0; 00697 VmSupport->AllowWorkingSetAdjustment = MM_FORCE_TRIM; 00698 goto WorkingSetLockFailed; 00699 } 00700 } while (TRUE); 00701 00702 ASSERT (VmSupport->u.Flags.BeingTrimmed == 1); 00703 00704 #if DBG 00705 LastTrimFaultCount = VmSupport->LastTrimFaultCount; 00706 #endif // DBG 00707 VmSupport->LastTrimFaultCount = VmSupport->PageFaultCount; 00708 00709 PERFINFO_WSMANAGE_PROCESS_RESET(VmSupport); 00710 } 00711 else { 00712 00713 // 00714 // System cache, 00715 // 00716 00717 #if DBG 00718 LastTrimFaultCount = VmSupport->LastTrimFaultCount; 00719 #endif // DBG 00720 00721 PERFINFO_WSMANAGE_PROCESS_RESET(VmSupport); 00722 00723 // 00724 // Always try to trim the system cache when using claims. 00725 // Fault-based trimming might skip it from time to time. 00726 // 00727 00728 #ifndef _MI_USE_CLAIMS_ 00729 00730 if (!MiCheckSystemCacheWsTrimCriteria(VmSupport)) { 00731 00732 // 00733 // Don't trim the system cache. 00734 // 00735 00736 InsertTailList (&MmWorkingSetExpansionHead.ListHead, 00737 &VmSupport->WorkingSetExpansionLinks); 00738 continue; 00739 } 00740 #endif 00741 00742 VmSupport->LastTrimTime = CurrentTime; 00743 00744 // 00745 // Indicate that this working set is being trimmed. 00746 // 00747 00748 VmSupport->u.Flags.BeingTrimmed = 1; 00749 00750 UNLOCK_EXPANSION (OldIrql); 00751 00752 ProcessToTrim = NULL; 00753 WorkingSetList = MmSystemCacheWorkingSetList; 00754 00755 KeRaiseIrql (APC_LEVEL, &OldIrql); 00756 if (!ExTryToAcquireResourceExclusiveLite (&MmSystemWsLock)) { 00757 00758 // 00759 // System working set lock was not granted, don't trim 00760 // the system cache. 00761 // 00762 00763 KeLowerIrql (OldIrql); 00764 LOCK_EXPANSION (OldIrql); 00765 VmSupport->u.Flags.BeingTrimmed = 0; 00766 InsertTailList (&MmWorkingSetExpansionHead.ListHead, 00767 &VmSupport->WorkingSetExpansionLinks); 00768 continue; 00769 } 00770 00771 MmSystemLockOwner = PsGetCurrentThread(); 00772 00773 VmSupport->LastTrimFaultCount = VmSupport->PageFaultCount; 00774 00775 VmSupport->WorkingSetExpansionLinks.Flink = MM_NO_WS_EXPANSION; 00776 VmSupport->WorkingSetExpansionLinks.Blink = 00777 MM_WS_EXPANSION_IN_PROGRESS; 00778 } 00779 00780 // 00781 // Determine how many pages we want to trim from this working set. 00782 // 00783 00784 Trim = MiDetermineWsTrimAmount(&TrimCriteria, 00785 VmSupport, 00786 ProcessToTrim 00787 ); 00788 00789 #if DBG 00790 if (MmDebug & MM_DBG_WS_EXPANSION) { 00791 if (Trim) { 00792 if (VmSupport->u.Flags.SessionSpace == 0) { 00793 DbgPrint(" Trimming Process %16s %5d Faults, WS %6d, Trimming %5d ==> %5d\n", 00794 ProcessToTrim ? 
ProcessToTrim->ImageFileName : (PUCHAR)"System Cache", 00795 VmSupport->PageFaultCount - LastTrimFaultCount, 00796 VmSupport->WorkingSetSize, 00797 Trim, 00798 VmSupport->WorkingSetSize-Trim 00799 ); 00800 } 00801 else { 00802 DbgPrint(" Trimming Session 0x%x (id %d) %5d Faults, WS %6d, Trimming %5d ==> %5d\n", 00803 SessionSpace, 00804 SessionSpace->SessionId, 00805 VmSupport->PageFaultCount - LastTrimFaultCount, 00806 VmSupport->WorkingSetSize, 00807 Trim, 00808 VmSupport->WorkingSetSize-Trim 00809 ); 00810 } 00811 } 00812 } 00813 #endif //DBG 00814 00815 #ifdef _MI_USE_CLAIMS_ 00816 00817 // 00818 // If there's something to trim... 00819 // 00820 00821 if (Trim != 0 && 00822 (MmAvailablePages < TrimCriteria.ClaimBased.DesiredFreeGoal)) { 00823 00824 // 00825 // We haven't reached our goal, so trim now. 00826 // 00827 00828 PERFINFO_WSMANAGE_TOTRIM(Trim); 00829 00830 Trim = MiTrimWorkingSet (Trim, 00831 VmSupport, 00832 TrimCriteria.ClaimBased.TrimAge 00833 ); 00834 00835 PERFINFO_WSMANAGE_ACTUALTRIM(Trim); 00836 } 00837 00838 // 00839 // Estimating the current claim is always done here by taking a 00840 // sample of the working set. Aging is only done if the trim 00841 // pass warrants it (ie: the first pass only). 00842 // 00843 00844 MiAgeAndEstimateAvailableInWorkingSet( 00845 VmSupport, 00846 TrimCriteria.ClaimBased.DoAging, 00847 &TrimCriteria.ClaimBased.NewTotalClaim, 00848 &TrimCriteria.ClaimBased.NewTotalEstimatedAvailable 00849 ); 00850 #else 00851 if (Trim != 0) { 00852 00853 PERFINFO_WSMANAGE_TOTRIM(Trim); 00854 00855 Trim = MiTrimWorkingSet ( 00856 Trim, 00857 VmSupport, 00858 (BOOLEAN)(MiCheckCounter < MM_TRIM_COUNTER_MAXIMUM_LARGE_MEM) 00859 ); 00860 00861 PERFINFO_WSMANAGE_ACTUALTRIM(Trim); 00862 } 00863 #endif 00864 00865 // 00866 // Set the quota to the current size. 00867 // 00868 00869 WorkingSetList->Quota = VmSupport->WorkingSetSize; 00870 if (WorkingSetList->Quota < VmSupport->MinimumWorkingSetSize) { 00871 WorkingSetList->Quota = VmSupport->MinimumWorkingSetSize; 00872 } 00873 00874 if (SessionSpace) { 00875 00876 ASSERT (VmSupport->u.Flags.SessionSpace == 1); 00877 00878 UNLOCK_SESSION_SPACE_WS (OldIrql); 00879 00880 MiDetachSession (); 00881 } 00882 else if (VmSupport != &MmSystemCacheWs) { 00883 00884 ASSERT (VmSupport->u.Flags.SessionSpace == 0); 00885 UNLOCK_WS (ProcessToTrim); 00886 00887 if (InformSessionOfRelease == TRUE) { 00888 LOCK_EXPANSION (OldIrql); 00889 ASSERT (ProcessToTrim->ProcessOutswapEnabled == TRUE); 00890 ProcessToTrim->ProcessOutswapEnabled = FALSE; 00891 ASSERT (MmSessionSpace->ProcessOutSwapCount >= 1); 00892 MmSessionSpace->ProcessOutSwapCount -= 1; 00893 UNLOCK_EXPANSION (OldIrql); 00894 InformSessionOfRelease = FALSE; 00895 } 00896 00897 if (Attached) { 00898 KeDetachProcess (); 00899 Attached = 0; 00900 } 00901 00902 } 00903 else { 00904 ASSERT (VmSupport->u.Flags.SessionSpace == 0); 00905 UNLOCK_SYSTEM_WS (OldIrql); 00906 } 00907 00908 LOCK_EXPANSION (OldIrql); 00909 00910 ASSERT (VmSupport->u.Flags.BeingTrimmed == 1); 00911 VmSupport->u.Flags.BeingTrimmed = 0; 00912 00913 WorkingSetLockFailed: 00914 00915 ASSERT (VmSupport->WorkingSetExpansionLinks.Flink == MM_NO_WS_EXPANSION); 00916 00917 if (VmSupport->WorkingSetExpansionLinks.Blink == 00918 MM_WS_EXPANSION_IN_PROGRESS) { 00919 00920 // 00921 // If the working set size is still above the minimum, 00922 // add this back at the tail of the list. 
00923 // 00924 00925 InsertTailList (&MmWorkingSetExpansionHead.ListHead, 00926 &VmSupport->WorkingSetExpansionLinks); 00927 } 00928 else { 00929 00930 // 00931 // The value in the blink is the address of an event 00932 // to set. 00933 // 00934 00935 ASSERT (VmSupport != &MmSystemCacheWs); 00936 00937 KeSetEvent ((PKEVENT)VmSupport->WorkingSetExpansionLinks.Blink, 00938 0, 00939 FALSE); 00940 } 00941 00942 #ifndef _MI_USE_CLAIMS_ 00943 TrimCriteria.FaultBased.TotalReduction += Trim; 00944 00945 // 00946 // Zero this in case the next attach fails. 00947 // 00948 00949 Trim = 0; 00950 00951 if (MiCheckCounter < MM_TRIM_COUNTER_MAXIMUM_LARGE_MEM) { 00952 if ((MmAvailablePages > TrimCriteria.FaultBased.DesiredFreeGoal) || 00953 (TrimCriteria.FaultBased.TotalReduction > TrimCriteria.FaultBased.DesiredReductionGoal)) { 00954 00955 00956 // 00957 // Ample pages now exist. 00958 // 00959 00960 PERFINFO_WSMANAGE_FINALACTION(WS_ACTION_AMPLE_PAGES_EXIST); 00961 break; 00962 } 00963 } 00964 #endif 00965 00966 } 00967 00968 #ifdef _MI_USE_CLAIMS_ 00969 MmTotalClaim = TrimCriteria.ClaimBased.NewTotalClaim; 00970 MmTotalEstimatedAvailable = TrimCriteria.ClaimBased.NewTotalEstimatedAvailable; 00971 PERFINFO_WSMANAGE_TRIMEND_CLAIMS(&TrimCriteria); 00972 #else 00973 MiCheckCounter = 0; 00974 PERFINFO_WSMANAGE_TRIMEND_FAULTS(&TrimCriteria); 00975 #endif 00976 00977 UNLOCK_EXPANSION (OldIrql); 00978 } 00979 00980 // 00981 // Signal the modified page writer as we have moved pages 00982 // to the modified list and memory was critical. 00983 // 00984 00985 if ((MmAvailablePages < MmMinimumFreePages) || 00986 (MmModifiedPageListHead.Total >= MmModifiedPageMaximum)) { 00987 KeSetEvent (&MmModifiedPageWriterEvent, 0, FALSE); 00988 } 00989 00990 ASSERT (CurrentProcess == PsGetCurrentProcess ()); 00991 00992 return; 00993 }

VOID MmWriteTriageInformation (IN PVOID)
 

Referenced by IopWriteTriageDump().

VOID MmWriteUnloadedDriverInformation (IN PVOID)
 

Referenced by IopWriteTriageDump().

VOID MmZeroPageThread (VOID)
 

Definition at line 29 of file zeropage.c.

References ASSERT, _KTHREAD::BasePriority, FALSE, _MMPFNLIST::Flink, KeBugCheckEx(), KeGetCurrentThread, KernelMode, KeSetPriorityThread(), KeWaitForMultipleObjects(), KeZeroPageFromIdleThread, LOCK_PFN_WITH_TRY, MI_GET_SECONDARY_COLOR, MI_PFN_ELEMENT, MiFindInitializationCode(), MiFreeInitializationCode(), MiInsertPageInList(), MiMapPageToZeroInHyperSpace(), MiRemoveAnyPage(), MM_EMPTY_LIST, MM_ZERO_PAGE_OBJECT, MmFreePageListHead, MmPageLocationList, MmZeroingPageEvent, MmZeroingPageThreadActive, NTSTATUS(), NULL, NUMBER_WAIT_OBJECTS, PAGE_SHIFT, PAGE_SIZE, PO_SYS_IDLE_OBJECT, PoSystemIdleTimer, PoSystemIdleWorker(), Status, _MMPFNLIST::Total, TRUE, _MMPFN::u3, UNLOCK_PFN, WrFreePage, and ZeroedPageList.

00035 : 00036 00037 Implements the NT zeroing page thread. This thread runs 00038 at priority zero and removes a page from the free list, 00039 zeroes it, and places it on the zeroed page list. 00040 00041 Arguments: 00042 00043 StartContext - not used. 00044 00045 Return Value: 00046 00047 None. 00048 00049 Environment: 00050 00051 Kernel mode. 00052 00053 --*/ 00054 00055 { 00056 PVOID EndVa; 00057 KIRQL OldIrql; 00058 PFN_NUMBER PageFrame; 00059 PMMPFN Pfn1; 00060 PVOID StartVa; 00061 PKTHREAD Thread; 00062 PVOID ZeroBase; 00063 PFN_NUMBER NewPage; 00064 PVOID WaitObjects[NUMBER_WAIT_OBJECTS]; 00065 NTSTATUS Status; 00066 00067 // 00068 // Before this becomes the zero page thread, free the kernel 00069 // initialization code. 00070 // 00071 00072 #if !defined(_IA64_) 00073 MiFindInitializationCode (&StartVa, &EndVa); 00074 if (StartVa != NULL) { 00075 MiFreeInitializationCode (StartVa, EndVa); 00076 } 00077 #endif 00078 00079 // 00080 // The following code sets the current thread's base priority to zero 00081 // and then sets its current priority to zero. This ensures that the 00082 // thread always runs at a priority of zero. 00083 // 00084 00085 Thread = KeGetCurrentThread(); 00086 Thread->BasePriority = 0; 00087 KeSetPriorityThread (Thread, 0); 00088 00089 // 00090 // Initialize wait object array for multiple wait 00091 // 00092 00093 WaitObjects[MM_ZERO_PAGE_OBJECT] = &MmZeroingPageEvent; 00094 WaitObjects[PO_SYS_IDLE_OBJECT] = &PoSystemIdleTimer; 00095 00096 // 00097 // Loop forever zeroing pages. 00098 // 00099 00100 do { 00101 00102 // 00103 // Wait until there are at least MmZeroPageMinimum pages 00104 // on the free list. 00105 // 00106 00107 Status = KeWaitForMultipleObjects (NUMBER_WAIT_OBJECTS, 00108 WaitObjects, 00109 WaitAny, 00110 WrFreePage, 00111 KernelMode, 00112 FALSE, 00113 (PLARGE_INTEGER) NULL, 00114 (PKWAIT_BLOCK) NULL 00115 ); 00116 00117 if (Status == PO_SYS_IDLE_OBJECT) { 00118 PoSystemIdleWorker (TRUE); 00119 continue; 00120 } 00121 00122 LOCK_PFN_WITH_TRY (OldIrql); 00123 do { 00124 if ((volatile)MmFreePageListHead.Total == 0) { 00125 00126 // 00127 // No pages on the free list at this time, wait for 00128 // some more. 00129 // 00130 00131 MmZeroingPageThreadActive = FALSE; 00132 UNLOCK_PFN (OldIrql); 00133 break; 00134 00135 } else { 00136 00137 PageFrame = MmFreePageListHead.Flink; 00138 Pfn1 = MI_PFN_ELEMENT(PageFrame); 00139 00140 ASSERT (PageFrame != MM_EMPTY_LIST); 00141 Pfn1 = MI_PFN_ELEMENT(PageFrame); 00142 00143 NewPage = MiRemoveAnyPage (MI_GET_SECONDARY_COLOR (PageFrame, Pfn1)); 00144 if (NewPage != PageFrame) { 00145 00146 // 00147 // Someone has removed a page from the colored lists chain 00148 // without updating the freelist chain. 00149 // 00150 00151 KeBugCheckEx (PFN_LIST_CORRUPT, 00152 0x8F, 00153 NewPage, 00154 PageFrame, 00155 0); 00156 } 00157 00158 // 00159 // Zero the page using the last color used to map the page. 
00160 // 00161 00162 #if defined(_AXP64_) || defined(_X86_) || defined(_IA64_) 00163 00164 ZeroBase = MiMapPageToZeroInHyperSpace (PageFrame); 00165 UNLOCK_PFN (OldIrql); 00166 00167 #if defined(_X86_) 00168 00169 KeZeroPageFromIdleThread(ZeroBase); 00170 00171 #else //X86 00172 00173 RtlZeroMemory (ZeroBase, PAGE_SIZE); 00174 00175 #endif //X86 00176 00177 #else //AXP64||X86||IA64 00178 00179 ZeroBase = (PVOID)(Pfn1->u3.e1.PageColor << PAGE_SHIFT); 00180 UNLOCK_PFN (OldIrql); 00181 HalZeroPage(ZeroBase, ZeroBase, PageFrame); 00182 00183 #endif //AXP64||X86||IA64 00184 00185 LOCK_PFN_WITH_TRY (OldIrql); 00186 MiInsertPageInList (MmPageLocationList[ZeroedPageList], 00187 PageFrame); 00188 } 00189 } while(TRUE); 00190 } while (TRUE); 00191 } }

VOID VerifierFreeTrackedPool (IN PVOID VirtualAddress, IN SIZE_T ChargedBytes, IN LOGICAL CheckType, IN LOGICAL SpecialPool)
 

Definition at line 1976 of file verifier.c.

References _MI_VERIFIER_DRIVER_ENTRY::CurrentNonPagedPoolAllocations, _MI_VERIFIER_DRIVER_ENTRY::CurrentPagedPoolAllocations, FALSE, Header, Index, KeBugCheckEx(), MI_VERIFIER_ENTRY_SIGNATURE, MmIsAddressValid(), MmVerifierData, _MI_VERIFIER_DRIVER_ENTRY::NonPagedBytes, PAGE_ALIGN, PAGE_ALIGNED, PAGE_SIZE, _MI_VERIFIER_DRIVER_ENTRY::PagedBytes, PagedPool, POOL_OVERHEAD, _MI_VERIFIER_DRIVER_ENTRY::Signature, TRUE, VerifierIsTrackingPool, VerifierPoolLock, _MI_VERIFIER_DRIVER_ENTRY::VerifierPoolLock, VerifierPoolMutex, and ViReleasePoolAllocation().

01985 : 01986 01987 Called directly from the pool manager or the memory manager for verifier- 01988 tracked allocations. The call to ExFreePool is already in progress. 01989 01990 Arguments: 01991 01992 VirtualAddress - Supplies the virtual address being freed. 01993 01994 ChargedBytes - Supplies the number of bytes charged to this allocation. 01995 01996 CheckType - Supplies PagedPool or NonPagedPool. 01997 01998 SpecialPool - Supplies TRUE if the allocation is from special pool. 01999 02000 Return Value: 02001 02002 None. 02003 02004 Environment: 02005 02006 Kernel mode. 02007 02008 N.B. 02009 02010 Callers freeing small pool allocations hold no locks or mutexes on entry. 02011 02012 Callers freeing special pool hold no locks or mutexes on entry. 02013 02014 Callers freeing pool of PAGE_SIZE or larger hold the PFN lock (for nonpaged 02015 allocations) or the PagedPool mutex (for paged allocations) on entry. 02016 02017 --*/ 02018 02019 { 02020 KIRQL OldIrql; 02021 ULONG_PTR Index; 02022 PPOOL_HEADER PoolHeader; 02023 PMI_VERIFIER_POOL_HEADER Header; 02024 PMI_VERIFIER_DRIVER_ENTRY Verifier; 02025 02026 if (VerifierIsTrackingPool == FALSE) { 02027 02028 // 02029 // The verifier is not enabled so the only way this routine is being 02030 // called is because the pool header is mangled or the caller specified 02031 // a bad address. Either way it's a bugcheck. 02032 // 02033 02034 KeBugCheckEx (BAD_POOL_CALLER, 02035 0x99, 02036 (ULONG_PTR)VirtualAddress, 02037 0, 02038 0); 02039 } 02040 02041 if (SpecialPool == TRUE) { 02042 02043 // 02044 // Special pool allocation. 02045 // 02046 02047 if (((ULONG_PTR)VirtualAddress & (PAGE_SIZE - 1))) { 02048 PoolHeader = PAGE_ALIGN (VirtualAddress); 02049 Header = (PMI_VERIFIER_POOL_HEADER)(PoolHeader + 1); 02050 } 02051 else { 02052 PoolHeader = (PPOOL_HEADER)((PCHAR)PAGE_ALIGN (VirtualAddress) + PAGE_SIZE - POOL_OVERHEAD); 02053 Header = (PMI_VERIFIER_POOL_HEADER)(PoolHeader - 1); 02054 } 02055 } 02056 else if (PAGE_ALIGNED(VirtualAddress)) { 02057 02058 // 02059 // Large page allocation. 02060 // 02061 02062 Header = (PMI_VERIFIER_POOL_HEADER) ((PCHAR)VirtualAddress + 02063 ChargedBytes - 02064 sizeof(MI_VERIFIER_POOL_HEADER)); 02065 } 02066 else { 02067 ChargedBytes -= POOL_OVERHEAD; 02068 Header = (PMI_VERIFIER_POOL_HEADER) ((PCHAR)VirtualAddress + 02069 ChargedBytes - 02070 sizeof(MI_VERIFIER_POOL_HEADER)); 02071 } 02072 02073 Verifier = Header->Verifier; 02074 02075 // 02076 // Check the pointer now so we can give a more friendly bugcheck 02077 // rather than crashing below on a bad reference. 02078 // 02079 02080 if ((((ULONG_PTR)Verifier & (sizeof(ULONG) - 1)) != 0) || 02081 (!MmIsAddressValid(&Verifier->Signature)) || 02082 (Verifier->Signature != MI_VERIFIER_ENTRY_SIGNATURE)) { 02083 02084 // 02085 // The caller corrupted the saved verifier field. 
02086 // 02087 02088 KeBugCheckEx (DRIVER_VERIFIER_DETECTED_VIOLATION, 02089 0x53, 02090 (ULONG_PTR)VirtualAddress, 02091 (ULONG_PTR)Header, 02092 (ULONG_PTR)Verifier); 02093 } 02094 02095 Index = Header->ListIndex; 02096 02097 ExAcquireSpinLock (&Verifier->VerifierPoolLock, &OldIrql); 02098 02099 ViReleasePoolAllocation (Verifier, 02100 VirtualAddress, 02101 Index, 02102 ChargedBytes); 02103 02104 if (CheckType == PagedPool) { 02105 Verifier->PagedBytes -= ChargedBytes; 02106 Verifier->CurrentPagedPoolAllocations -= 1; 02107 02108 ExReleaseSpinLock (&Verifier->VerifierPoolLock, OldIrql); 02109 02110 ExAcquireFastMutex (&VerifierPoolMutex); 02111 MmVerifierData.PagedBytes -= ChargedBytes; 02112 MmVerifierData.CurrentPagedPoolAllocations -= 1; 02113 ExReleaseFastMutex (&VerifierPoolMutex); 02114 } 02115 else { 02116 Verifier->NonPagedBytes -= ChargedBytes; 02117 Verifier->CurrentNonPagedPoolAllocations -= 1; 02118 ExReleaseSpinLock (&Verifier->VerifierPoolLock, OldIrql); 02119 02120 ExAcquireSpinLock (&VerifierPoolLock, &OldIrql); 02121 MmVerifierData.NonPagedBytes -= ChargedBytes; 02122 MmVerifierData.CurrentNonPagedPoolAllocations -= 1; 02123 ExReleaseSpinLock (&VerifierPoolLock, OldIrql); 02124 } 02125 }


Variable Documentation

BOOLEAN Mm64BitPhysicalAddress
 

Definition at line 79 of file mm.h.

Referenced by MmInitSystem().

MMINFO_COUNTERS MmInfoCounters
 

Definition at line 475 of file mm.h.

Referenced by MiCheckAndSetSystemTrimCriteria(), MiCopyOnWrite(), MiGatherMappedPages(), MiGatherPagefilePages(), MiLocateAndReserveWsle(), MiResolveDemandZeroFault(), MiResolveMappedFileFault(), MiResolvePageFileFault(), MiResolveProtoPteFault(), MiResolveTransitionFault(), MiSessionCopyOnWrite(), MmAccessFault(), and NtQuerySystemInformation().

MMPFNLIST MmModifiedPageListHead
 

Definition at line 1087 of file mm.h.

Referenced by MiEnsureAvailablePageOrWait(), MiFlushAllPages(), MiInsertPageInList(), MiInsertStandbyListAtFront(), MiModifiedPageWriterWorker(), MiObtainFreePages(), MiUnlinkPageFromList(), MmAccessFault(), MmShutdownSystem(), and MmWorkingSetManager().

ULONG MmNumberOfColors
 

Definition at line 393 of file mm.h.

PFN_COUNT MmNumberOfPhysicalPages
 

Definition at line 399 of file mm.h.

Referenced by CcInitializeCacheManager(), ExpWorkerInitialization(), main(), MiAdjustWorkingSetManagerParameters(), MiBuildPagedPool(), MiCheckAndSetSystemTrimCriteria(), MiEnsureAvailablePagesInFreeDescriptor(), MiGetNextPhysicalPage(), MiInitializeSpecialPool(), MiInitMachineDependent(), MmAddPhysicalMemory(), MmInitSystem(), MmRemovePhysicalMemory(), MmSetMemoryPriorityProcess(), MxGetNextPage(), and NtQuerySystemInformation().

PPHYSICAL_MEMORY_DESCRIPTOR MmPhysicalMemoryBlock
 

Definition at line 48 of file mm.h.

Referenced by IopCreateSummaryDump(), IopDeleteNonExistentMemory(), IopInitializeDCB(), IopInitializeResourceMap(), IopInitializeSummaryDump(), IopReadDumpRegistry(), MiCheckForCrashDump(), MiEnsureAvailablePageOrWait(), MiFindContiguousMemory(), MmAddPhysicalMemory(), MmAllocatePagesForMdl(), MmGetPhysicalMemoryRanges(), MmInitSystem(), and MmRemovePhysicalMemory().

ULONG MmProductType
 

Definition at line 455 of file mm.h.

ULONG MmReadClusterSize
 

Definition at line 387 of file mm.h.

Referenced by MmInitSystem(), and PspCreateThread().

POBJECT_TYPE MmSectionObjectType
 

Definition at line 381 of file mm.h.

Referenced by MiLoadSystemImage(), MiSectionInitialization(), MmCreateSection(), MmGetFileNameForSection(), NtAcceptConnectPort(), NtCreateSuperSection(), NtExtendSection(), NtMapViewOfSection(), NtOpenSection(), NtQuerySection(), NtSecureConnectPort(), PsLocateSystemDll(), and PspCreateProcess().

PFN_COUNT MmSizeOfSystemCacheInPages
 

Definition at line 406 of file mm.h.

Referenced by CcInitializeVacbs(), and MmInitSystem().

PVOID MmSpecialPoolEnd
 

Definition at line 1977 of file mm.h.

Referenced by ExAllocatePoolWithQuotaTag(), ExFreePoolSanityChecks(), ExFreePoolWithTag(), ExQueryPoolBlockSize(), ExReturnPoolQuota(), KeBugCheckEx(), MiInitializeSpecialPool(), MiLockCode(), MiProtectSpecialPool(), MiResolveTransitionFault(), MmIsSpecialPoolAddressFree(), MmQuerySpecialPoolBlockSize(), and ViPostPoolAllocation().

PVOID MmSpecialPoolStart
 

Definition at line 1976 of file mm.h.

Referenced by ExAllocatePoolWithQuotaTag(), ExFreePoolSanityChecks(), ExFreePoolWithTag(), ExQueryPoolBlockSize(), ExReturnPoolQuota(), KeBugCheckEx(), MiInitializeSpecialPool(), MiLockCode(), MiResolveTransitionFault(), MmIsSpecialPoolAddressFree(), MmQuerySpecialPoolBlockSize(), and ViPostPoolAllocation().

ULONG MmSpecialPoolTag
 

Definition at line 1975 of file mm.h.

Referenced by ExAllocatePoolWithTag(), MiInitializeSpecialPool(), MiInitializeSpecialPoolCriteria(), MiInitMachineDependent(), and MmInitSystem().

MMSUPPORT MmSystemCacheWs
 

Definition at line 412 of file mm.h.

PFN_NUMBER MmThrottleBottom
 

Definition at line 1090 of file mm.h.

Referenced by MmInitSystem().

PFN_NUMBER MmThrottleTop
 

Definition at line 1089 of file mm.h.

Referenced by MmInitSystem().

ULONG_PTR MmVirtualBias
 

Definition at line 29 of file mm.h.

Referenced by IopCalculateRequiredDumpSpace(), MiInitMachineDependent(), MiMapViewOfImageSection(), MmCleanProcessAddressSpace(), MmCreateProcessAddressSpace(), MmFreeLoaderBlock(), MmInitializeProcessAddressSpace(), MmInitSystem(), and MmSetKernelDumpRange().

KEVENT MmWorkingSetManagerEvent
 

Definition at line 418 of file mm.h.

Referenced by KeBalanceSetManager(), MiEmptyAllWorkingSets(), MiObtainFreePages(), and MmInitSystem().


Generated on Sat May 15 19:44:46 2004 for test by doxygen 1.3.7