Main Page | Class Hierarchy | Class List | File List | Class Members | File Members

fssup.c File Reference

#include "cc.h"

Go to the source code of this file.

Defines

#define BugCheckFileId   (CACHE_BUG_CHECK_FSSUP)
#define me   0x00000001
#define IsSyscacheFile(FO)

Functions

VOID CcUnmapAndPurge (IN PSHARED_CACHE_MAP SharedCacheMap)
VOID CcDeleteMbcb (IN PSHARED_CACHE_MAP SharedCacheMap)
VOID CcPurgeAndClearCacheSection (IN PSHARED_CACHE_MAP SharedCacheMap, IN PLARGE_INTEGER FileOffset)
BOOLEAN CcInitializeCacheManager ()
VOID CcInitializeCacheMap (IN PFILE_OBJECT FileObject, IN PCC_FILE_SIZES FileSizes, IN BOOLEAN PinAccess, IN PCACHE_MANAGER_CALLBACKS Callbacks, IN PVOID LazyWriteContext)
BOOLEAN CcUninitializeCacheMap (IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER TruncateSize OPTIONAL, IN PCACHE_UNINITIALIZE_EVENT UninitializeEvent OPTIONAL)
VOID FASTCALL CcDeleteSharedCacheMap (IN PSHARED_CACHE_MAP SharedCacheMap, IN KIRQL ListIrql, IN ULONG ReleaseFile)
VOID CcSetFileSizes (IN PFILE_OBJECT FileObject, IN PCC_FILE_SIZES FileSizes)
BOOLEAN CcPurgeCacheSection (IN PSECTION_OBJECT_POINTERS SectionObjectPointer, IN PLARGE_INTEGER FileOffset, IN ULONG Length, IN BOOLEAN UninitializeCacheMaps)
VOID CcSetDirtyPageThreshold (IN PFILE_OBJECT FileObject, IN ULONG DirtyPageThreshold)
VOID CcZeroEndOfLastPage (IN PFILE_OBJECT FileObject)
BOOLEAN CcZeroData (IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER StartOffset, IN PLARGE_INTEGER EndOffset, IN BOOLEAN Wait)
PFILE_OBJECT CcGetFileObjectFromSectionPtrs (IN PSECTION_OBJECT_POINTERS SectionObjectPointer)
PFILE_OBJECT CcGetFileObjectFromBcb (IN PVOID Bcb)

Variables

POBJECT_TYPE IoFileObjectType
ULONG MmLargeSystemCache


Define Documentation

#define BugCheckFileId   (CACHE_BUG_CHECK_FSSUP)
 

Definition at line 28 of file fssup.c.

#define IsSyscacheFile( FO )
 

Value:

(((FO) != NULL) &&                                                   \
 (*(PUSHORT)(FO)->FsContext == 0X705) &&                             \
 FlagOn(*(PULONG)((PCHAR)(FO)->FsContext + 0x48), 0x80000000))

Definition at line 40 of file fssup.c.

#define me   0x00000001
 

Definition at line 34 of file fssup.c.


Function Documentation

VOID CcDeleteMbcb ( IN PSHARED_CACHE_MAP  SharedCacheMap  )
 

Definition at line 2626 of file fssup.c.

References _BITMAP_RANGE::Bitmap, _MBCB::BitmapRange2, _MBCB::BitmapRanges, CcAcquireMasterLockAtDpcLevel, CcAcquireVacbLockAtDpcLevel, CcDeallocateBcb(), CcDeallocateVacbLevel(), CcDrainVacbLevelZone(), CcReleaseMasterLockFromDpcLevel, CcReleaseVacbLockFromDpcLevel, CcTotalDirtyPages, _BITMAP_RANGE::DirtyPages, _MBCB::DirtyPages, ExFreePool(), FALSE, _BITMAP_RANGE::Links, MBCB_BITMAP_BLOCK_SIZE, NULL, and TRUE.

Referenced by CcDeleteSharedCacheMap(), and CcSetFileSizes().

02632 : 02633 02634 This routine may be called to reset the Mbcb for a stream to say 02635 there are no dirty pages, and free all auxillary allocation. 02636 02637 Arguments: 02638 02639 SharedCacheMap - Pointer to SharedCacheMap. 02640 02641 Return Value: 02642 02643 None. 02644 02645 --*/ 02646 02647 { 02648 PMBCB Mbcb; 02649 PBITMAP_RANGE BitmapRange; 02650 KIRQL OldIrql; 02651 ULONG DoDrain = FALSE; 02652 02653 ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql ); 02654 02655 Mbcb = SharedCacheMap->Mbcb; 02656 02657 // 02658 // Is there an Mbcb? 02659 // 02660 02661 if (Mbcb != NULL) { 02662 02663 // 02664 // First deduct the dirty pages we are getting rid of. 02665 // 02666 02667 CcAcquireMasterLockAtDpcLevel(); 02668 CcTotalDirtyPages -= Mbcb->DirtyPages; 02669 SharedCacheMap->DirtyPages -= Mbcb->DirtyPages; 02670 CcReleaseMasterLockFromDpcLevel(); 02671 02672 // 02673 // Now loop through all of the ranges. 02674 // 02675 02676 while (!IsListEmpty(&Mbcb->BitmapRanges)) { 02677 02678 // 02679 // Get next range and remove it from the list. 02680 // 02681 02682 BitmapRange = (PBITMAP_RANGE)CONTAINING_RECORD( Mbcb->BitmapRanges.Flink, 02683 BITMAP_RANGE, 02684 Links ); 02685 02686 RemoveEntryList( &BitmapRange->Links ); 02687 02688 // 02689 // If there is a bitmap, and it is not the initial embedded one, then 02690 // delete it. 02691 // 02692 02693 if ((BitmapRange->Bitmap != NULL) && 02694 (BitmapRange->Bitmap != (PULONG)&Mbcb->BitmapRange2)) { 02695 02696 DoDrain = TRUE; 02697 02698 // 02699 // Usually the bitmap is all zeros at this point, but it may not be. 02700 // 02701 02702 if (BitmapRange->DirtyPages != 0) { 02703 RtlZeroMemory( BitmapRange->Bitmap, MBCB_BITMAP_BLOCK_SIZE ); 02704 } 02705 CcAcquireVacbLockAtDpcLevel(); 02706 CcDeallocateVacbLevel( (PVACB *)BitmapRange->Bitmap, FALSE ); 02707 CcReleaseVacbLockFromDpcLevel(); 02708 } 02709 02710 // 02711 // If the range is not one of the initial embedded ranges, then delete it. 
02712 // 02713 02714 if ((BitmapRange < (PBITMAP_RANGE)Mbcb) && 02715 (BitmapRange > (PBITMAP_RANGE)((PCHAR)Mbcb + sizeof(MBCB)))) { 02716 02717 ExFreePool( BitmapRange ); 02718 } 02719 } 02720 02721 // 02722 // Zero the pointer and get out. 02723 // 02724 02725 SharedCacheMap->Mbcb = NULL; 02726 02727 ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 02728 02729 // 02730 // Now delete the Mbcb. 02731 // 02732 02733 CcDeallocateBcb( (PBCB)Mbcb ); 02734 02735 } else { 02736 02737 ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 02738 } 02739 02740 if (DoDrain) { 02741 CcDrainVacbLevelZone(); 02742 } 02743 }

VOID FASTCALL CcDeleteSharedCacheMap ( IN PSHARED_CACHE_MAP  SharedCacheMap,
IN KIRQL  ListIrql,
IN ULONG  ReleaseFile )
 

Definition at line 1517 of file fssup.c.

References ASSERT, _BCB::BaseAddress, _BCB::BcbLinks, _BCB::ByteLength, CACHE_NTC_BCB, CcAcquireMasterLock, CcBcbSpinLock, CcDeallocateBcb(), CcDeleteMbcb(), CcFreeActiveVacb(), CcFreeVirtualAddress(), CcReleaseMasterLock, CcTotalDirtyPages, CcUnlockVacbLevel, CcUnmapAndPurge(), CcWaitOnActiveCount(), DebugTrace, _BCB::Dirty, _CACHE_UNINITIALIZE_EVENT::Event, ExFreePool(), FALSE, _BCB::FileOffset, FsRtlReleaseFile(), GetActiveVacbAtDpcLevel, IsVacbLevelReferenced(), KeSetEvent(), me, _CACHE_UNINITIALIZE_EVENT::Next, _BCB::NodeTypeCode, NULL, ObDereferenceObject, PAGE_SHIFT, _BCB::PinCount, _FILE_OBJECT::SectionObjectPointer, SetFlag, _SECTION_OBJECT_POINTERS::SharedCacheMap, _BCB::Vacb, VACB_SIZE_OF_FIRST_LEVEL, and WRITE_QUEUED.

Referenced by CcInitializeCacheMap(), CcUninitializeCacheMap(), and CcWriteBehind().

01525 : 01526 01527 The specified SharedCacheMap is removed from the global list of 01528 SharedCacheMap's and deleted with all of its related structures. 01529 Other objects which were referenced in CcInitializeCacheMap are 01530 dereferenced here. 01531 01532 NOTE: The CcMasterSpinLock must already be acquired 01533 on entry. It is released on return. 01534 01535 Arguments: 01536 01537 SharedCacheMap - Pointer to Cache Map to delete 01538 01539 ListIrql - priority to restore to when releasing shared cache map list 01540 01541 ReleaseFile - Supplied as nonzero if file was acquired exclusive and 01542 should be released. 01543 01544 ReturnValue: 01545 01546 None. 01547 01548 --*/ 01549 01550 { 01551 LIST_ENTRY LocalList; 01552 PLIST_ENTRY NextEntry; 01553 PFILE_OBJECT FileObject; 01554 PVACB ActiveVacb; 01555 ULONG ActivePage; 01556 ULONG PageIsDirty; 01557 KIRQL OldIrql; 01558 PMBCB Mbcb; 01559 01560 DebugTrace(+1, me, "CcDeleteSharedCacheMap:\n", 0 ); 01561 DebugTrace( 0, me, " SharedCacheMap = %08lx\n", SharedCacheMap ); 01562 01563 // 01564 // Remove it from the global list and clear the pointer to it via 01565 // the File Object. 01566 // 01567 01568 RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks ); 01569 01570 // 01571 // Zero pointer to SharedCacheMap. Once we have cleared the pointer, 01572 // we can/must release the global list to avoid deadlocks. 01573 // 01574 01575 FileObject = SharedCacheMap->FileObject; 01576 01577 FileObject->SectionObjectPointer->SharedCacheMap = (PSHARED_CACHE_MAP)NULL; 01578 SetFlag( SharedCacheMap->Flags, WRITE_QUEUED ); 01579 01580 // 01581 // The OpenCount is 0, but we still need to flush out any dangling 01582 // cache read or writes. 
01583 // 01584 01585 if ((SharedCacheMap->VacbActiveCount != 0) || (SharedCacheMap->NeedToZero != NULL)) { 01586 01587 // 01588 // We will put it in a local list and set a flag 01589 // to keep the Lazy Writer away from it, so that we can wrip it out 01590 // below if someone manages to sneak in and set something dirty, etc. 01591 // If the file system does not synchronize cleanup calls with an 01592 // exclusive on the stream, then this case is possible. 01593 // 01594 01595 InitializeListHead( &LocalList ); 01596 InsertTailList( &LocalList, &SharedCacheMap->SharedCacheMapLinks ); 01597 01598 // 01599 // If there is an active Vacb, then nuke it now (before waiting!). 01600 // 01601 01602 GetActiveVacbAtDpcLevel( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty ); 01603 01604 CcReleaseMasterLock( ListIrql ); 01605 01606 // 01607 // No point in saying the page is dirty (which can cause an allocation 01608 // failure), since we are deleting this SharedCacheMap anyway. 01609 // 01610 01611 CcFreeActiveVacb( SharedCacheMap, ActiveVacb, ActivePage, FALSE ); 01612 01613 while (SharedCacheMap->VacbActiveCount != 0) { 01614 CcWaitOnActiveCount( SharedCacheMap ); 01615 } 01616 01617 // 01618 // Now in case we hit the rare path where someone moved the 01619 // SharedCacheMap again, do a remove again now. It may be 01620 // from our local list or it may be from the dirty list, 01621 // but who cares? The important thing is to remove it in 01622 // the case it was the dirty list, since we will delete it 01623 // below. 01624 // 01625 01626 CcAcquireMasterLock( &ListIrql ); 01627 RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks ); 01628 } 01629 01630 CcReleaseMasterLock( ListIrql ); 01631 01632 // 01633 // If there are Bcbs, then empty the list, asserting that none of them 01634 // can be pinned now if we have gotten this far! 
01635 // 01636 01637 NextEntry = SharedCacheMap->BcbList.Flink; 01638 while (NextEntry != &SharedCacheMap->BcbList) { 01639 01640 PBCB Bcb; 01641 01642 Bcb = (PBCB)CONTAINING_RECORD( NextEntry, 01643 BCB, 01644 BcbLinks ); 01645 NextEntry = Bcb->BcbLinks.Flink; 01646 01647 // 01648 // Skip over the pendaflex entries, only removing true Bcbs 01649 // so that level teardown doesn't need to special case unhooking 01650 // the pendaflex. This has the side benefit of dramatically 01651 // reducing write traffic to memory on teardown of large files. 01652 // 01653 // I really wonder how often we have Bcbs at teardown. This is 01654 // a lot of work that could be avoided otherwise. 01655 // 01656 01657 if (Bcb->NodeTypeCode == CACHE_NTC_BCB) { 01658 01659 ASSERT( Bcb->PinCount == 0 ); 01660 01661 RemoveEntryList( &Bcb->BcbLinks ); 01662 01663 // 01664 // For large metadata streams we unlock the Vacb level when removing. 01665 // We do not need spinlocks since no other thread can be accessing 01666 // this list when we are deleting the SharedCacheMap. 01667 // 01668 01669 CcUnlockVacbLevel( SharedCacheMap, Bcb->FileOffset.QuadPart ); 01670 01671 // 01672 // There is a small window where the data could still be mapped 01673 // if (for example) the Lazy Writer collides with a CcCopyWrite 01674 // in the foreground, and then someone calls CcUninitializeCacheMap 01675 // while the Lazy Writer is active. This is because the Lazy 01676 // Writer biases the pin count. Deal with that here. 
01677 // 01678 01679 if (Bcb->BaseAddress != NULL) { 01680 CcFreeVirtualAddress( Bcb->Vacb ); 01681 } 01682 01683 // 01684 // Debug routines used to remove Bcbs from the global list 01685 // 01686 01687 #if LIST_DBG 01688 01689 { 01690 KIRQL OldIrql; 01691 01692 ExAcquireSpinLock( &CcBcbSpinLock, &OldIrql ); 01693 01694 if (Bcb->CcBcbLinks.Flink != NULL) { 01695 01696 RemoveEntryList( &Bcb->CcBcbLinks ); 01697 CcBcbCount -= 1; 01698 } 01699 01700 ExReleaseSpinLock( &CcBcbSpinLock, OldIrql ); 01701 } 01702 01703 #endif 01704 01705 // 01706 // If the Bcb is dirty, we have to synchronize with the Lazy Writer 01707 // and reduce the total number of dirty. 01708 // 01709 01710 CcAcquireMasterLock( &ListIrql ); 01711 if (Bcb->Dirty) { 01712 01713 SharedCacheMap->DirtyPages -= Bcb->ByteLength >> PAGE_SHIFT; 01714 CcTotalDirtyPages -= Bcb->ByteLength >> PAGE_SHIFT; 01715 } 01716 01717 CcReleaseMasterLock( ListIrql ); 01718 01719 CcDeallocateBcb( Bcb ); 01720 } 01721 } 01722 01723 // 01724 // Call local routine to unmap, and purge if necessary. 01725 // 01726 01727 CcUnmapAndPurge( SharedCacheMap ); 01728 01729 // 01730 // Now release the file now that the purge is done. 01731 // 01732 01733 if (ReleaseFile) { 01734 FsRtlReleaseFile( SharedCacheMap->FileObject ); 01735 } 01736 01737 // 01738 // Dereference our pointer to the Section and FileObject 01739 // (We have to test the Section pointer since CcInitializeCacheMap 01740 // calls this routine for error recovery. Release our global 01741 // resource before dereferencing the FileObject to avoid deadlocks. 01742 // 01743 01744 if (SharedCacheMap->Section != NULL) { 01745 ObDereferenceObject( SharedCacheMap->Section ); 01746 } 01747 ObDereferenceObject( FileObject ); 01748 01749 // 01750 // If there is an Mbcb, deduct any dirty pages and deallocate. 
01751 // 01752 01753 if (SharedCacheMap->Mbcb != NULL) { 01754 CcDeleteMbcb( SharedCacheMap ); 01755 } 01756 01757 // 01758 // If there was an uninitialize event specified for this shared cache 01759 // map, then set it to the signalled state, indicating that we are 01760 // removing the section and deleting the shared cache map. 01761 // 01762 01763 if (SharedCacheMap->UninitializeEvent != NULL) { 01764 PCACHE_UNINITIALIZE_EVENT CUEvent, EventNext; 01765 01766 CUEvent = SharedCacheMap->UninitializeEvent; 01767 while (CUEvent != NULL) { 01768 EventNext = CUEvent->Next; 01769 KeSetEvent(&CUEvent->Event, 0, FALSE); 01770 CUEvent = EventNext; 01771 } 01772 } 01773 01774 // 01775 // Now delete the Vacb vector. 01776 // 01777 01778 if ((SharedCacheMap->Vacbs != &SharedCacheMap->InitialVacbs[0]) 01779 01780 && 01781 01782 (SharedCacheMap->Vacbs != NULL)) { 01783 01784 // 01785 // If there are Vacb levels, then the Vacb Array better be in an empty state. 01786 // 01787 01788 ASSERT((SharedCacheMap->SectionSize.QuadPart <= VACB_SIZE_OF_FIRST_LEVEL) || 01789 !IsVacbLevelReferenced( SharedCacheMap, SharedCacheMap->Vacbs, 1 )); 01790 01791 ExFreePool( SharedCacheMap->Vacbs ); 01792 } 01793 01794 // 01795 // If an event had to be allocated for this SharedCacheMap, 01796 // deallocate it. 01797 // 01798 01799 if ((SharedCacheMap->CreateEvent != NULL) && (SharedCacheMap->CreateEvent != &SharedCacheMap->Event)) { 01800 ExFreePool( SharedCacheMap->CreateEvent ); 01801 } 01802 01803 if ((SharedCacheMap->WaitOnActiveCount != NULL) && (SharedCacheMap->WaitOnActiveCount != &SharedCacheMap->Event)) { 01804 ExFreePool( SharedCacheMap->WaitOnActiveCount ); 01805 } 01806 01807 // 01808 // Deallocate the storeage for the SharedCacheMap. 01809 // 01810 01811 ExFreePool( SharedCacheMap ); 01812 01813 DebugTrace(-1, me, "CcDeleteSharedCacheMap -> VOID\n", 0 ); 01814 01815 return; 01816 01817 }

PFILE_OBJECT CcGetFileObjectFromBcb ( IN PVOID  Bcb  )
 

Definition at line 3709 of file fssup.c.

03722 : 03723 03724 Bcb - A pointer to the pinned Bcb. 03725 03726 Return Value: 03727 03728 Pointer to the File Object, or NULL if the file is not cached or no 03729 longer cached 03730 03731 --*/ 03732 03733 { 03734 return ((PBCB)Bcb)->SharedCacheMap->FileObject; 03735 }

PFILE_OBJECT CcGetFileObjectFromSectionPtrs ( IN PSECTION_OBJECT_POINTERS  SectionObjectPointer  )
 

Definition at line 3658 of file fssup.c.

References CcAcquireMasterLock, CcReleaseMasterLock, and NULL.

03675 : 03676 03677 SectionObjectPointer - A pointer to the Section Object Pointers 03678 structure in the nonpaged Fcb. 03679 03680 Return Value: 03681 03682 Pointer to the File Object, or NULL if the file is not cached or no 03683 longer cached 03684 03685 --*/ 03686 03687 { 03688 KIRQL OldIrql; 03689 PFILE_OBJECT FileObject = NULL; 03690 03691 // 03692 // Serialize with Creation/Deletion of all Shared CacheMaps 03693 // 03694 03695 CcAcquireMasterLock( &OldIrql ); 03696 03697 if (SectionObjectPointer->SharedCacheMap != NULL) { 03698 03699 FileObject = ((PSHARED_CACHE_MAP)SectionObjectPointer->SharedCacheMap)->FileObject; 03700 } 03701 03702 CcReleaseMasterLock( OldIrql ); 03703 03704 return FileObject; 03705 }

BOOLEAN CcInitializeCacheManager ( VOID )
 

Definition at line 69 of file fssup.c.

References _LAZY_WRITER::BcbZone, CcAggressiveZeroCount, CcAggressiveZeroThreshold, CcBcbSpinLock, CcBugCheck, CcCapturedSystemSize, CcCleanSharedCacheMapList, CcDebugTraceLock, CcDeferredWrites, CcDeferredWriteSpinLock, CcDirtyPageTarget, CcDirtyPageThreshold, CcDirtySharedCacheMapList, CcExpressWorkQueue, CcIdleDelayTick, CcIdleWorkerThreadList, CcInitializeVacbs(), CcLazyWriterCursor, CcMasterSpinLock, CcNumberWorkerThreads, CcPostTickWorkQueue, CcRegularWorkQueue, CcScanDpc(), CcTwilightLookasideList, CcWorkerThread(), CcWorkQueueSpinlock, ExAllocatePoolWithTag, ExCriticalWorkerThreads, ExInitializeNPagedLookasideList(), ExInitializeWorkItem, ExInitializeZone(), _SHARED_CACHE_MAP_LIST_CURSOR::Flags, Index, IS_CURSOR, KeInitializeDpc(), KeInitializeSpinLock(), KeInitializeTimer(), KeNumberProcessors, KeQueryTimeIncrement(), KiProcessorBlock, LAZY_WRITER_IDLE_DELAY, LazyWriter, _WORK_QUEUE_ITEM::List, LookasideTwilightList, _MMSUPPORT::MaximumWorkingSetSize, MmIsThisAnNtAsSystem(), MmLargeSystem, MmLargeSystemCache, MmMediumSystem, MmNumberOfPhysicalPages, MmQuerySystemSize(), MmSmallSystem, MmSystemCacheWs, NonPagedPool, NT_SUCCESS, NULL, _LAZY_WRITER::OurProcess, PAGE_SIZE, PsGetCurrentProcess, _LAZY_WRITER::ScanDpc, _LAZY_WRITER::ScanTimer, _SHARED_CACHE_MAP_LIST_CURSOR::SharedCacheMapLinks, TRUE, USHORT, WORK_QUEUE_ENTRY, and _LAZY_WRITER::WorkQueue.

00074 : 00075 00076 This routine must be called during system initialization before the 00077 first call to any file system, to allow the Cache Manager to initialize 00078 its global data structures. This routine has no dependencies on other 00079 system components being initialized. 00080 00081 Arguments: 00082 00083 None 00084 00085 Return Value: 00086 00087 TRUE if initialization was successful 00088 00089 --*/ 00090 00091 { 00092 CLONG i; 00093 ULONG Index; 00094 PNPAGED_LOOKASIDE_LIST Lookaside; 00095 USHORT NumberOfItems; 00096 PKPRCB Prcb; 00097 PWORK_QUEUE_ITEM WorkItem; 00098 00099 #ifdef CCDBG_LOCK 00100 KeInitializeSpinLock( &CcDebugTraceLock ); 00101 #endif 00102 00103 #if DBG 00104 CcBcbCount = 0; 00105 InitializeListHead( &CcBcbList ); 00106 KeInitializeSpinLock( &CcBcbSpinLock ); 00107 #endif 00108 00109 // 00110 // Figure out the timeout clock tick for the lazy writer. 00111 // 00112 00113 CcIdleDelayTick = LAZY_WRITER_IDLE_DELAY / KeQueryTimeIncrement(); 00114 00115 // 00116 // Initialize shared cache map list structures 00117 // 00118 00119 KeInitializeSpinLock( &CcMasterSpinLock ); 00120 InitializeListHead( &CcCleanSharedCacheMapList ); 00121 InitializeListHead( &CcDirtySharedCacheMapList.SharedCacheMapLinks ); 00122 CcDirtySharedCacheMapList.Flags = IS_CURSOR; 00123 InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks, 00124 &CcLazyWriterCursor.SharedCacheMapLinks ); 00125 CcLazyWriterCursor.Flags = IS_CURSOR; 00126 00127 // 00128 // Initialize worker thread structures 00129 // 00130 00131 KeInitializeSpinLock( &CcWorkQueueSpinlock ); 00132 InitializeListHead( &CcIdleWorkerThreadList ); 00133 InitializeListHead( &CcExpressWorkQueue ); 00134 InitializeListHead( &CcRegularWorkQueue ); 00135 InitializeListHead( &CcPostTickWorkQueue ); 00136 00137 // 00138 // Set the number of worker threads based on the system size. 
00139 // 00140 00141 CcCapturedSystemSize = MmQuerySystemSize(); 00142 if (CcNumberWorkerThreads == 0) { 00143 00144 switch (CcCapturedSystemSize) { 00145 case MmSmallSystem: 00146 CcNumberWorkerThreads = ExCriticalWorkerThreads - 1; 00147 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8; 00148 CcAggressiveZeroThreshold = 1; 00149 break; 00150 00151 case MmMediumSystem: 00152 CcNumberWorkerThreads = ExCriticalWorkerThreads - 1; 00153 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4; 00154 CcAggressiveZeroThreshold = 2; 00155 break; 00156 00157 case MmLargeSystem: 00158 CcNumberWorkerThreads = ExCriticalWorkerThreads - 2; 00159 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4 + 00160 MmNumberOfPhysicalPages / 8; 00161 CcAggressiveZeroThreshold = 4; 00162 #if 0 00163 // 00164 // Use more memory if we are a large server. 00165 // 00166 00167 if ((MmLargeSystemCache != 0) && 00168 (CcDirtyPageThreshold < (MmNumberOfPhysicalPages - (0xE00000 / PAGE_SIZE)))) { 00169 00170 CcDirtyPageThreshold = MmNumberOfPhysicalPages - (0xE00000 / PAGE_SIZE); 00171 } 00172 #endif 00173 break; 00174 00175 default: 00176 CcNumberWorkerThreads = 1; 00177 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8; 00178 } 00179 00180 // CcDirtyPageThreshold = (2*1024*1024)/PAGE_SIZE; 00181 00182 if (MmSystemCacheWs.MaximumWorkingSetSize > ((4*1024*1024)/PAGE_SIZE)) { 00183 CcDirtyPageThreshold = MmSystemCacheWs.MaximumWorkingSetSize - 00184 ((2*1024*1024)/PAGE_SIZE); 00185 } 00186 00187 CcDirtyPageTarget = CcDirtyPageThreshold / 2 + 00188 CcDirtyPageThreshold / 4; 00189 } 00190 00191 CcAggressiveZeroCount = 0; 00192 00193 // 00194 // Now allocate and initialize the above number of worker thread 00195 // items. 
00196 // 00197 00198 for (i = 0; i < CcNumberWorkerThreads; i++) { 00199 00200 WorkItem = ExAllocatePoolWithTag( NonPagedPool, sizeof(WORK_QUEUE_ITEM), 'qWcC' ); 00201 00202 if (WorkItem == NULL) { 00203 00204 CcBugCheck( 0, 0, 0 ); 00205 } 00206 00207 // 00208 // Initialize the work queue item and insert in our queue 00209 // of potential worker threads. 00210 // 00211 00212 ExInitializeWorkItem( WorkItem, CcWorkerThread, WorkItem ); 00213 InsertTailList( &CcIdleWorkerThreadList, &WorkItem->List ); 00214 } 00215 00216 // 00217 // Initialize the Lazy Writer thread structure, and start him up. 00218 // 00219 00220 RtlZeroMemory( &LazyWriter, sizeof(LAZY_WRITER) ); 00221 00222 KeInitializeSpinLock( &CcWorkQueueSpinlock ); 00223 InitializeListHead( &LazyWriter.WorkQueue ); 00224 00225 // 00226 // Store process address 00227 // 00228 00229 LazyWriter.OurProcess = PsGetCurrentProcess(); 00230 00231 // 00232 // Initialize the Scan Dpc and Timer. 00233 // 00234 00235 KeInitializeDpc( &LazyWriter.ScanDpc, &CcScanDpc, NULL ); 00236 KeInitializeTimer( &LazyWriter.ScanTimer ); 00237 00238 // 00239 // Now initialize the lookaside list for allocating Work Queue entries. 00240 // 00241 00242 switch ( CcCapturedSystemSize ) { 00243 00244 // 00245 // ~512 bytes 00246 // 00247 00248 case MmSmallSystem : 00249 NumberOfItems = 32; 00250 break; 00251 00252 // 00253 // ~1k bytes 00254 // 00255 00256 case MmMediumSystem : 00257 NumberOfItems = 64; 00258 break; 00259 00260 // 00261 // ~2k bytes 00262 // 00263 00264 case MmLargeSystem : 00265 NumberOfItems = 128; 00266 if (MmIsThisAnNtAsSystem()) { 00267 NumberOfItems += 128; 00268 } 00269 00270 break; 00271 } 00272 00273 ExInitializeNPagedLookasideList( &CcTwilightLookasideList, 00274 NULL, 00275 NULL, 00276 0, 00277 sizeof( WORK_QUEUE_ENTRY ), 00278 'kWcC', 00279 NumberOfItems ); 00280 00281 // 00282 // Initialize the per processor nonpaged lookaside lists and descriptors. 
00283 // 00284 00285 for (Index = 0; Index < (ULONG)KeNumberProcessors; Index += 1) { 00286 Prcb = KiProcessorBlock[Index]; 00287 00288 // 00289 // Initialize the large IRP per processor lookaside pointers. 00290 // 00291 00292 Prcb->PPLookasideList[LookasideTwilightList].L = &CcTwilightLookasideList; 00293 Lookaside = (PNPAGED_LOOKASIDE_LIST)ExAllocatePoolWithTag( NonPagedPool, 00294 sizeof(NPAGED_LOOKASIDE_LIST), 00295 'KWcC'); 00296 00297 if (Lookaside != NULL) { 00298 ExInitializeNPagedLookasideList( Lookaside, 00299 NULL, 00300 NULL, 00301 0, 00302 sizeof( WORK_QUEUE_ENTRY ), 00303 'KWcC', 00304 NumberOfItems ); 00305 00306 } else { 00307 Lookaside = &CcTwilightLookasideList; 00308 } 00309 00310 Prcb->PPLookasideList[LookasideTwilightList].P = Lookaside; 00311 } 00312 00313 // 00314 // Now initialize the Bcb zone 00315 // 00316 00317 { 00318 PVOID InitialSegment; 00319 ULONG InitialSegmentSize; 00320 ULONG RoundedBcbSize = (sizeof(BCB) + 7) & ~7; 00321 00322 switch ( CcCapturedSystemSize ) { 00323 00324 // 00325 // ~1.5k bytes 00326 // 00327 00328 case MmSmallSystem : 00329 InitialSegmentSize = sizeof(ZONE_SEGMENT_HEADER) + RoundedBcbSize * 8; 00330 break; 00331 00332 // 00333 // 1 Page 00334 // 00335 00336 case MmMediumSystem : 00337 InitialSegmentSize = PAGE_SIZE; 00338 break; 00339 00340 // 00341 // 3 Pages 00342 // 00343 00344 case MmLargeSystem : 00345 InitialSegmentSize = 3 * PAGE_SIZE; 00346 break; 00347 } 00348 00349 // 00350 // Allocate the initial allocation for the zone. If we cannot get it, 00351 // something must really be wrong, so we will just bugcheck. 
00352 // 00353 00354 if ((InitialSegment = ExAllocatePoolWithTag( NonPagedPool, 00355 InitialSegmentSize, 00356 'zBcC' )) == NULL) { 00357 00358 CcBugCheck( 0, 0, 0 ); 00359 } 00360 00361 if (!NT_SUCCESS(ExInitializeZone( &LazyWriter.BcbZone, 00362 RoundedBcbSize, 00363 InitialSegment, 00364 InitialSegmentSize ))) { 00365 CcBugCheck( 0, 0, 0 ); 00366 } 00367 } 00368 00369 // 00370 // Initialize the Deferred Write List. 00371 // 00372 00373 KeInitializeSpinLock( &CcDeferredWriteSpinLock ); 00374 InitializeListHead( &CcDeferredWrites ); 00375 00376 // 00377 // Initialize the Vacbs. 00378 // 00379 00380 CcInitializeVacbs(); 00381 00382 return TRUE; 00383 }

VOID CcInitializeCacheMap ( IN PFILE_OBJECT  FileObject,
IN PCC_FILE_SIZES  FileSizes,
IN BOOLEAN  PinAccess,
IN PCACHE_MANAGER_CALLBACKS  Callbacks,
IN PVOID  LazyWriteContext )
 

Definition at line 387 of file fssup.c.

References _SHARED_CACHE_MAP::ActiveVacbSpinLock, _CC_FILE_SIZES::AllocationSize, ASSERT, _SHARED_CACHE_MAP::BcbList, _SHARED_CACHE_MAP::BcbSpinLock, BEING_CREATED, CACHE_NTC_PRIVATE_CACHE_MAP, CACHE_NTC_SHARED_CACHE_MAP, _SHARED_CACHE_MAP::Callbacks, CcAcquireMasterLock, CcCleanSharedCacheMapList, CcCreateVacbArray(), CcDecrementOpenCount, CcDeleteSharedCacheMap(), CcDirtySharedCacheMapList, CcExtendVacbArray(), CcIncrementOpenCount, CcReleaseMasterLock, CcScheduleLazyWriteScan(), ClearFlag, _SHARED_CACHE_MAP::CreateEvent, DebugTrace, DebugTrace2, DEFAULT_CREATE_MODULO, _SHARED_CACHE_MAP::DirtyPages, _SHARED_CACHE_MAP::Event, _CACHE_UNINITIALIZE_EVENT::Event, ExAllocatePoolWithTag, Executive, ExFreePool(), ExRaiseStatus(), FALSE, _SHARED_CACHE_MAP::FileObject, _PRIVATE_CACHE_MAP::FileObject, _SHARED_CACHE_MAP::FileSize, _CC_FILE_SIZES::FileSize, FlagOn, _SHARED_CACHE_MAP::Flags, FO_RANDOM_ACCESS, FO_SEQUENTIAL_ONLY, FSRTL_FLAG2_DO_MODIFIED_WRITE, FsRtlNormalizeNtstatus(), KeInitializeEvent, KeInitializeSpinLock(), KernelMode, KeSetEvent(), KeWaitForSingleObject(), _SHARED_CACHE_MAP::LazyWriteContext, LazyWriter, _SHARED_CACHE_MAP::LocalEvent, me, mm, MmCreateSection(), MmDisableModifiedWriteOfSection(), MmExtendSection(), MODIFIED_WRITE_DISABLED, _CACHE_UNINITIALIZE_EVENT::Next, _SHARED_CACHE_MAP::NodeByteSize, _PRIVATE_CACHE_MAP::NodeByteSize, _SHARED_CACHE_MAP::NodeTypeCode, _PRIVATE_CACHE_MAP::NodeTypeCode, NonPagedPool, NT_SUCCESS, NTSTATUS(), NULL, ObDeleteCapturedInsertInfo(), ObReferenceObject, ONLY_SEQUENTIAL_ONLY_SEEN, _SHARED_CACHE_MAP::OpenCount, _LAZY_WRITER::OtherWork, PAGE_SIZE, PIN_ACCESS, PRIVATE_CACHE_MAP, _SHARED_CACHE_MAP::PrivateCacheMap, _PRIVATE_CACHE_MAP::PrivateLinks, _SHARED_CACHE_MAP::PrivateList, RANDOM_ACCESS_SEEN, _PRIVATE_CACHE_MAP::ReadAheadMask, _PRIVATE_CACHE_MAP::ReadAheadSpinLock, _LAZY_WRITER::ScanActive, _SHARED_CACHE_MAP::Section, _SHARED_CACHE_MAP::SectionSize, SetFlag, SHARED_CACHE_MAP, 
_SHARED_CACHE_MAP::SharedCacheMapLinks, _SHARED_CACHE_MAP_LIST_CURSOR::SharedCacheMapLinks, _SHARED_CACHE_MAP::Status, Status, TRUE, TRUNCATE_REQUIRED, try_return, _SHARED_CACHE_MAP::UninitializeEvent, VACB_MAPPING_GRANULARITY, _SHARED_CACHE_MAP::Vacbs, _SHARED_CACHE_MAP::ValidDataGoal, _SHARED_CACHE_MAP::ValidDataLength, _CC_FILE_SIZES::ValidDataLength, and WRITE_QUEUED.

Referenced by UdfCommonRead(), and UdfCreateInternalStream().

00397 : 00398 00399 This routine is intended to be called by File Systems only. It 00400 initializes the cache maps for data caching. It should be called 00401 every time a file is open or created, and NO_INTERMEDIATE_BUFFERING 00402 was specified as FALSE. 00403 00404 Arguments: 00405 00406 FileObject - A pointer to the newly-created file object. 00407 00408 FileSizes - A pointer to AllocationSize, FileSize and ValidDataLength 00409 for the file. ValidDataLength should contain MAXLONGLONG if 00410 valid data length tracking and callbacks are not desired. 00411 00412 PinAccess - FALSE if file will be used exclusively for Copy and Mdl 00413 access, or TRUE if file will be used for Pin access. 00414 (Files for Pin access are not limited in size as the caller 00415 must access multiple areas of the file at once.) 00416 00417 Callbacks - Structure of callbacks used by the Lazy Writer 00418 00419 LazyWriteContext - Parameter to be passed in to above routine. 00420 00421 Return Value: 00422 00423 None. If an error occurs, this routine will Raise the status. 00424 00425 --*/ 00426 00427 { 00428 KIRQL OldIrql; 00429 PSHARED_CACHE_MAP SharedCacheMap = NULL; 00430 PVOID CacheMapToFree = NULL; 00431 CC_FILE_SIZES LocalSizes; 00432 BOOLEAN WeSetBeingCreated = FALSE; 00433 BOOLEAN SharedListOwned = FALSE; 00434 BOOLEAN MustUninitialize = FALSE; 00435 BOOLEAN WeCreated = FALSE; 00436 00437 DebugTrace(+1, me, "CcInitializeCacheMap:\n", 0 ); 00438 DebugTrace( 0, me, " FileObject = %08lx\n", FileObject ); 00439 DebugTrace( 0, me, " FileSizes = %08lx\n", FileSizes ); 00440 00441 // 00442 // Make a local copy of the passed in file sizes before acquiring 00443 // the spin lock. 00444 // 00445 00446 LocalSizes = *FileSizes; 00447 00448 // 00449 // If no FileSize was given, set to one byte before maximizing below. 
00450 // 00451 00452 if (LocalSizes.AllocationSize.QuadPart == 0) { 00453 LocalSizes.AllocationSize.LowPart += 1; 00454 } 00455 00456 // 00457 // If caller has Write access or will allow write, then round 00458 // size to next create modulo. (***Temp*** there may be too many 00459 // apps that end up allowing shared write, thanks to our Dos heritage, 00460 // to keep that part of the check in.) 00461 // 00462 00463 if (FileObject->WriteAccess /*|| FileObject->SharedWrite */) { 00464 00465 LocalSizes.AllocationSize.QuadPart = LocalSizes.AllocationSize.QuadPart + (LONGLONG)(DEFAULT_CREATE_MODULO - 1); 00466 LocalSizes.AllocationSize.LowPart &= ~(DEFAULT_CREATE_MODULO - 1); 00467 00468 } else { 00469 00470 LocalSizes.AllocationSize.QuadPart = LocalSizes.AllocationSize.QuadPart + (LONGLONG)(VACB_MAPPING_GRANULARITY - 1); 00471 LocalSizes.AllocationSize.LowPart &= ~(VACB_MAPPING_GRANULARITY - 1); 00472 } 00473 00474 // 00475 // Do the allocate of the SharedCacheMap, based on an unsafe test, 00476 // while not holding a spinlock. Allocation failures look like we 00477 // never decided to allocate one here! 00478 // 00479 00480 if (FileObject->SectionObjectPointer->SharedCacheMap == NULL) { 00481 CacheMapToFree = ExAllocatePoolWithTag( NonPagedPool, sizeof(SHARED_CACHE_MAP), 'cScC' ); 00482 } 00483 00484 // 00485 // Serialize Creation/Deletion of all Shared CacheMaps 00486 // 00487 00488 CcAcquireMasterLock( &OldIrql ); 00489 SharedListOwned = TRUE; 00490 00491 // 00492 // Insure release of our global resource 00493 // 00494 00495 try { 00496 00497 // 00498 // Check for second initialization of same file object 00499 // 00500 00501 if (FileObject->PrivateCacheMap != NULL) { 00502 00503 DebugTrace( 0, 0, "CacheMap already initialized\n", 0 ); 00504 try_return( NOTHING ); 00505 } 00506 00507 // 00508 // Get current Shared Cache Map pointer indirectly off of the file object. 00509 // (The actual pointer is typically in a file system data structure, such 00510 // as an Fcb.) 
00511 // 00512 00513 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap; 00514 00515 // 00516 // If there is no SharedCacheMap, then we must create a section and 00517 // the SharedCacheMap structure. 00518 // 00519 00520 if (SharedCacheMap == NULL) { 00521 00522 // 00523 // After successfully creating the section, allocate the SharedCacheMap. 00524 // 00525 00526 WeCreated = TRUE; 00527 00528 if (CacheMapToFree == NULL) { 00529 CacheMapToFree = (PSHARED_CACHE_MAP)ExAllocatePoolWithTag( NonPagedPool, 00530 sizeof(SHARED_CACHE_MAP), 00531 'cScC' ); 00532 } 00533 00534 SharedCacheMap = CacheMapToFree; 00535 CacheMapToFree = NULL; 00536 00537 if (SharedCacheMap == NULL) { 00538 00539 DebugTrace( 0, 0, "Failed to allocate SharedCacheMap\n", 0 ); 00540 00541 CcReleaseMasterLock( OldIrql ); 00542 SharedListOwned = FALSE; 00543 00544 ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES ); 00545 } 00546 00547 // 00548 // Zero the SharedCacheMap and fill in the nonzero portions later. 00549 // 00550 00551 RtlZeroMemory( SharedCacheMap, sizeof(SHARED_CACHE_MAP) ); 00552 00553 #if DANLO 00554 SharedCacheMap->OpenCountLog.Size = sizeof(SharedCacheMap->OpenCountLog.Log)/sizeof(CC_LOG_ENTRY); 00555 #endif 00556 00557 // 00558 // Now initialize the Shared Cache Map. 00559 // 00560 00561 SharedCacheMap->NodeTypeCode = CACHE_NTC_SHARED_CACHE_MAP; 00562 SharedCacheMap->NodeByteSize = sizeof(SHARED_CACHE_MAP); 00563 SharedCacheMap->FileObject = FileObject; 00564 SharedCacheMap->FileSize = LocalSizes.FileSize; 00565 SharedCacheMap->ValidDataLength = LocalSizes.ValidDataLength; 00566 SharedCacheMap->ValidDataGoal = LocalSizes.ValidDataLength; 00567 // SharedCacheMap->Section set below 00568 00569 // 00570 // Initialize the spin locks. 
00571 // 00572 00573 KeInitializeSpinLock( &SharedCacheMap->ActiveVacbSpinLock ); 00574 KeInitializeSpinLock( &SharedCacheMap->BcbSpinLock ); 00575 00576 if (PinAccess) { 00577 SetFlag(SharedCacheMap->Flags, PIN_ACCESS); 00578 } 00579 00580 // 00581 // Initialize our allocation hint for the local event. 00582 // 00583 00584 SharedCacheMap->LocalEvent = &SharedCacheMap->Event; 00585 00586 // 00587 // If this file has FO_SEQUENTIAL_ONLY set, then remember that 00588 // in the SharedCacheMap. 00589 // 00590 00591 if (FlagOn(FileObject->Flags, FO_SEQUENTIAL_ONLY)) { 00592 SetFlag(SharedCacheMap->Flags, ONLY_SEQUENTIAL_ONLY_SEEN); 00593 } 00594 00595 // 00596 // Do the round-robin allocation of the spinlock for the shared 00597 // cache map. Note the manipulation of the next 00598 // counter is safe, since we have the CcMasterSpinLock 00599 // exclusive. 00600 // 00601 00602 InitializeListHead( &SharedCacheMap->BcbList ); 00603 SharedCacheMap->Callbacks = Callbacks; 00604 SharedCacheMap->LazyWriteContext = LazyWriteContext; 00605 00606 // 00607 // Initialize listhead for all PrivateCacheMaps 00608 // 00609 00610 InitializeListHead( &SharedCacheMap->PrivateList ); 00611 00612 // 00613 // Insert the new Shared Cache Map in the global list 00614 // 00615 00616 InsertTailList( &CcCleanSharedCacheMapList, 00617 &SharedCacheMap->SharedCacheMapLinks ); 00618 00619 // 00620 // Finally, store the pointer to the Shared Cache Map back 00621 // via the indirect pointer in the File Object. 00622 // 00623 00624 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap; 00625 00626 // 00627 // We must reference this file object so that it cannot go away 00628 // until we do CcUninitializeCacheMap below. Note we cannot 00629 // find or rely on the FileObject that Memory Management has, 00630 // although normally it will be this same one anyway. 
00631 // 00632 00633 ObReferenceObject ( FileObject ); 00634 00635 } else { 00636 00637 // 00638 // If this file has FO_SEQUENTIAL_ONLY clear, then remember that 00639 // in the SharedCacheMap. 00640 // 00641 00642 if (!FlagOn(FileObject->Flags, FO_SEQUENTIAL_ONLY)) { 00643 ClearFlag(SharedCacheMap->Flags, ONLY_SEQUENTIAL_ONLY_SEEN); 00644 } 00645 } 00646 00647 // 00648 // If this file is opened for random access, remember this in 00649 // the SharedCacheMap. 00650 // 00651 00652 if (FlagOn(FileObject->Flags, FO_RANDOM_ACCESS)) { 00653 SetFlag(SharedCacheMap->Flags, RANDOM_ACCESS_SEEN); 00654 } 00655 00656 // 00657 // Make sure that no one is trying to lazy delete it in the case 00658 // that the Cache Map was already there. 00659 // 00660 00661 ClearFlag(SharedCacheMap->Flags, TRUNCATE_REQUIRED); 00662 00663 // 00664 // In case there has been a CcUnmapAndPurge call, we check here if we 00665 // if we need to recreate the section and map it. 00666 // 00667 00668 if ((SharedCacheMap->Vacbs == NULL) && 00669 !FlagOn(SharedCacheMap->Flags, BEING_CREATED)) { 00670 00671 // 00672 // Increment the OpenCount on the CacheMap. 00673 // 00674 00675 CcIncrementOpenCount( SharedCacheMap, 'onnI' ); 00676 MustUninitialize = TRUE; 00677 00678 // 00679 // We still want anyone else to wait. 00680 // 00681 00682 SetFlag(SharedCacheMap->Flags, BEING_CREATED); 00683 WeSetBeingCreated = TRUE; 00684 00685 // 00686 // If there is a create event, then this must be the path where we 00687 // we were only unmapped. We will just clear it here again in case 00688 // someone needs to wait again this time too. 00689 // 00690 00691 if (SharedCacheMap->CreateEvent != NULL) { 00692 00693 KeInitializeEvent( SharedCacheMap->CreateEvent, 00694 NotificationEvent, 00695 FALSE ); 00696 } 00697 00698 // 00699 // Release global resource 00700 // 00701 00702 CcReleaseMasterLock( OldIrql ); 00703 SharedListOwned = FALSE; 00704 00705 // 00706 // We have to test this, because the section may only be unmapped. 
00707 // 00708 00709 if (SharedCacheMap->Section == NULL) { 00710 00711 LARGE_INTEGER LargeZero = {0,0}; 00712 00713 // 00714 // Call MM to create a section for this file, for the calculated 00715 // section size. Note that we have the choice in this service to 00716 // pass in a FileHandle or a FileObject pointer, but not both. 00717 // Naturally we want to pass in the handle. 00718 // 00719 00720 DebugTrace( 0, mm, "MmCreateSection:\n", 0 ); 00721 DebugTrace2(0, mm, " MaximumSize = %08lx, %08lx\n", 00722 LocalSizes.AllocationSize.LowPart, 00723 LocalSizes.AllocationSize.HighPart ); 00724 DebugTrace( 0, mm, " FileObject = %08lx\n", FileObject ); 00725 00726 SharedCacheMap->Status = MmCreateSection( &SharedCacheMap->Section, 00727 SECTION_MAP_READ 00728 | SECTION_MAP_WRITE 00729 | SECTION_QUERY, 00730 NULL, 00731 &LocalSizes.AllocationSize, 00732 PAGE_READWRITE, 00733 SEC_COMMIT, 00734 NULL, 00735 FileObject ); 00736 00737 DebugTrace( 0, mm, " <Section = %08lx\n", SharedCacheMap->Section ); 00738 00739 if (!NT_SUCCESS( SharedCacheMap->Status )){ 00740 DebugTrace( 0, 0, "Error from MmCreateSection = %08lx\n", 00741 SharedCacheMap->Status ); 00742 00743 SharedCacheMap->Section = NULL; 00744 ExRaiseStatus( FsRtlNormalizeNtstatus( SharedCacheMap->Status, 00745 STATUS_UNEXPECTED_MM_CREATE_ERR )); 00746 } 00747 00748 ObDeleteCapturedInsertInfo(SharedCacheMap->Section); 00749 00750 // 00751 // If this is a stream file object, then no user can map it, 00752 // and we should keep the modified page writer out of it. 
00753 // 00754 00755 if (!FlagOn(((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->Flags2, 00756 FSRTL_FLAG2_DO_MODIFIED_WRITE) && 00757 (FileObject->FsContext2 == NULL)) { 00758 00759 BOOLEAN Disabled; 00760 00761 Disabled = MmDisableModifiedWriteOfSection( FileObject->SectionObjectPointer ); 00762 CcAcquireMasterLock( &OldIrql ); 00763 SetFlag(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED); 00764 CcReleaseMasterLock( OldIrql ); 00765 00766 //**** ASSERT( Disabled ); 00767 } 00768 00769 // 00770 // Create the Vacb array. 00771 // 00772 00773 CcCreateVacbArray( SharedCacheMap, LocalSizes.AllocationSize ); 00774 } 00775 00776 // 00777 // If the section already exists, we still have to call MM to 00778 // extend, in case it is not large enough. 00779 // 00780 00781 else { 00782 00783 if ( LocalSizes.AllocationSize.QuadPart > SharedCacheMap->SectionSize.QuadPart ) { 00784 00785 NTSTATUS Status; 00786 00787 DebugTrace( 0, mm, "MmExtendSection:\n", 0 ); 00788 DebugTrace( 0, mm, " Section = %08lx\n", SharedCacheMap->Section ); 00789 DebugTrace2(0, mm, " Size = %08lx, %08lx\n", 00790 LocalSizes.AllocationSize.LowPart, 00791 LocalSizes.AllocationSize.HighPart ); 00792 00793 Status = MmExtendSection( SharedCacheMap->Section, 00794 &LocalSizes.AllocationSize, 00795 TRUE ); 00796 00797 if (!NT_SUCCESS(Status)) { 00798 00799 DebugTrace( 0, 0, "Error from MmExtendSection, Status = %08lx\n", 00800 Status ); 00801 00802 ExRaiseStatus( FsRtlNormalizeNtstatus( Status, 00803 STATUS_UNEXPECTED_MM_EXTEND_ERR )); 00804 } 00805 } 00806 00807 // 00808 // Extend the Vacb array. 00809 // 00810 00811 CcExtendVacbArray( SharedCacheMap, LocalSizes.AllocationSize ); 00812 } 00813 00814 // 00815 // Now show that we are all done and resume any waiters. 
00816 // 00817 00818 CcAcquireMasterLock( &OldIrql ); 00819 ClearFlag(SharedCacheMap->Flags, BEING_CREATED); 00820 WeSetBeingCreated = FALSE; 00821 if (SharedCacheMap->CreateEvent != NULL) { 00822 KeSetEvent( SharedCacheMap->CreateEvent, 0, FALSE ); 00823 } 00824 CcReleaseMasterLock( OldIrql ); 00825 } 00826 00827 // 00828 // Else if the section is already there, we make sure it is large 00829 // enough by calling CcExtendCacheSection. 00830 // 00831 00832 else { 00833 00834 // 00835 // If the SharedCacheMap is currently being created we have 00836 // to optionally create and wait on an event for it. Note that 00837 // the only safe time to delete the event is in 00838 // CcUninitializeCacheMap, because we otherwise have no way of 00839 // knowing when everyone has reached the KeWaitForSingleObject. 00840 // 00841 00842 if (FlagOn(SharedCacheMap->Flags, BEING_CREATED)) { 00843 00844 if (SharedCacheMap->CreateEvent == NULL) { 00845 00846 // 00847 // If the local event is not being used then we can grab it. 00848 // (Should be quite rare that it is in use.) 00849 // 00850 00851 SharedCacheMap->CreateEvent = InterlockedExchangePointer( &SharedCacheMap->LocalEvent, NULL ); 00852 00853 if (SharedCacheMap->CreateEvent == NULL) { 00854 00855 SharedCacheMap->CreateEvent = (PKEVENT)ExAllocatePoolWithTag( NonPagedPool, 00856 sizeof(KEVENT), 00857 'vEcC' ); 00858 } 00859 00860 if (SharedCacheMap->CreateEvent == NULL) { 00861 DebugTrace( 0, 0, "Failed to allocate CreateEvent\n", 0 ); 00862 00863 CcReleaseMasterLock( OldIrql ); 00864 SharedListOwned = FALSE; 00865 00866 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES); 00867 } 00868 00869 KeInitializeEvent( SharedCacheMap->CreateEvent, 00870 NotificationEvent, 00871 FALSE ); 00872 } 00873 00874 // 00875 // Increment the OpenCount on the CacheMap. 
00876 // 00877 00878 CcIncrementOpenCount( SharedCacheMap, 'ecnI' ); 00879 MustUninitialize = TRUE; 00880 00881 // 00882 // Release global resource before waiting 00883 // 00884 00885 CcReleaseMasterLock( OldIrql ); 00886 SharedListOwned = FALSE; 00887 00888 DebugTrace( 0, 0, "Waiting on CreateEvent\n", 0 ); 00889 00890 KeWaitForSingleObject( SharedCacheMap->CreateEvent, 00891 Executive, 00892 KernelMode, 00893 FALSE, 00894 (PLARGE_INTEGER)NULL); 00895 00896 // 00897 // If the real creator got an error, then we must bomb 00898 // out too. 00899 // 00900 00901 if (!NT_SUCCESS(SharedCacheMap->Status)) { 00902 ExRaiseStatus( FsRtlNormalizeNtstatus( SharedCacheMap->Status, 00903 STATUS_UNEXPECTED_MM_CREATE_ERR )); 00904 } 00905 } 00906 else { 00907 00908 PCACHE_UNINITIALIZE_EVENT CUEvent, EventNext; 00909 00910 // 00911 // Increment the OpenCount on the CacheMap. 00912 // 00913 00914 CcIncrementOpenCount( SharedCacheMap, 'esnI' ); 00915 MustUninitialize = TRUE; 00916 00917 // 00918 // If there is a process waiting on an uninitialize on this 00919 // cache map to complete, let the thread that is waiting go, 00920 // since the uninitialize is now complete. 00921 // 00922 CUEvent = SharedCacheMap->UninitializeEvent; 00923 00924 while (CUEvent != NULL) { 00925 EventNext = CUEvent->Next; 00926 KeSetEvent(&CUEvent->Event, 0, FALSE); 00927 CUEvent = EventNext; 00928 } 00929 00930 SharedCacheMap->UninitializeEvent = NULL; 00931 00932 // 00933 // Release global resource 00934 // 00935 00936 CcReleaseMasterLock( OldIrql ); 00937 SharedListOwned = FALSE; 00938 } 00939 } 00940 00941 { 00942 PPRIVATE_CACHE_MAP PrivateCacheMap; 00943 00944 // 00945 // Now allocate (if local one already in use) and initialize 00946 // the Private Cache Map. 00947 // 00948 00949 PrivateCacheMap = &SharedCacheMap->PrivateCacheMap; 00950 00951 // 00952 // See if we should allocate a PrivateCacheMap while not holding 00953 // a spinlock. 
00954 // 00955 00956 if (CacheMapToFree != NULL) { 00957 ExFreePool( CacheMapToFree ); 00958 CacheMapToFree = NULL; 00959 } 00960 00961 if (PrivateCacheMap->NodeTypeCode != 0) { 00962 CacheMapToFree = ExAllocatePoolWithTag( NonPagedPool, sizeof(PRIVATE_CACHE_MAP), 'cPcC' ); 00963 } 00964 00965 // 00966 // Insert the new PrivateCacheMap in the list off the SharedCacheMap. 00967 // 00968 00969 CcAcquireMasterLock( &OldIrql ); 00970 SharedListOwned = TRUE; 00971 00972 // 00973 // Now make sure there is still no PrivateCacheMap, and if so just get out. 00974 // 00975 00976 if (FileObject->PrivateCacheMap == NULL) { 00977 00978 // 00979 // Is the local one already in use? 00980 // 00981 00982 if (PrivateCacheMap->NodeTypeCode != 0) { 00983 00984 // 00985 // Use the one allocated above, if there is one, else go to pool now. 00986 // 00987 00988 if (CacheMapToFree == NULL) { 00989 CacheMapToFree = 00990 (PPRIVATE_CACHE_MAP)ExAllocatePoolWithTag( NonPagedPool, 00991 sizeof(PRIVATE_CACHE_MAP), 00992 'cPcC' ); 00993 } 00994 PrivateCacheMap = CacheMapToFree; 00995 CacheMapToFree = NULL; 00996 } 00997 00998 if (PrivateCacheMap == NULL) { 00999 01000 DebugTrace( 0, 0, "Failed to allocate PrivateCacheMap\n", 0 ); 01001 01002 CcReleaseMasterLock( OldIrql ); 01003 SharedListOwned = FALSE; 01004 01005 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES); 01006 } 01007 01008 RtlZeroMemory( PrivateCacheMap, sizeof(PRIVATE_CACHE_MAP) ); 01009 01010 PrivateCacheMap->NodeTypeCode = CACHE_NTC_PRIVATE_CACHE_MAP; 01011 PrivateCacheMap->NodeByteSize = sizeof(PRIVATE_CACHE_MAP); 01012 PrivateCacheMap->FileObject = FileObject; 01013 PrivateCacheMap->ReadAheadMask = PAGE_SIZE - 1; 01014 01015 // 01016 // Initialize the spin lock. 
01017 // 01018 01019 KeInitializeSpinLock( &PrivateCacheMap->ReadAheadSpinLock ); 01020 01021 InsertTailList( &SharedCacheMap->PrivateList, &PrivateCacheMap->PrivateLinks ); 01022 01023 FileObject->PrivateCacheMap = PrivateCacheMap; 01024 01025 } else { 01026 01027 // 01028 // We raced with another initializer for the same fileobject and must 01029 // drop our (to this point speculative) opencount. 01030 // 01031 01032 ASSERT( SharedCacheMap->OpenCount > 1 ); 01033 01034 CcDecrementOpenCount( SharedCacheMap, 'rpnI' ); 01035 SharedCacheMap = NULL; 01036 } 01037 } 01038 01039 MustUninitialize = FALSE; 01040 try_exit: NOTHING; 01041 } 01042 finally { 01043 01044 // 01045 // See if we got an error and must uninitialize the SharedCacheMap 01046 // 01047 01048 if (MustUninitialize) { 01049 01050 if (!SharedListOwned) { 01051 CcAcquireMasterLock( &OldIrql ); 01052 } 01053 if (WeSetBeingCreated) { 01054 if (SharedCacheMap->CreateEvent != NULL) { 01055 KeSetEvent( SharedCacheMap->CreateEvent, 0, FALSE ); 01056 } 01057 ClearFlag(SharedCacheMap->Flags, BEING_CREATED); 01058 } 01059 01060 // 01061 // Now release our open count. 01062 // 01063 01064 CcDecrementOpenCount( SharedCacheMap, 'umnI' ); 01065 01066 if ((SharedCacheMap->OpenCount == 0) && 01067 !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) && 01068 (SharedCacheMap->DirtyPages == 0)) { 01069 01070 // 01071 // On PinAccess it is safe and necessary to eliminate 01072 // the structure immediately. 01073 // 01074 01075 if (PinAccess) { 01076 01077 CcDeleteSharedCacheMap( SharedCacheMap, OldIrql, FALSE ); 01078 01079 // 01080 // If it is not PinAccess, we must lazy delete, because 01081 // we could get into a deadlock trying to acquire the 01082 // stream exclusive when we dereference the file object. 01083 // 01084 01085 } else { 01086 01087 // 01088 // Move it to the dirty list so the lazy write scan will 01089 // see it. 
01090 // 01091 01092 RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks ); 01093 InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks, 01094 &SharedCacheMap->SharedCacheMapLinks ); 01095 01096 // 01097 // Make sure the Lazy Writer will wake up, because we 01098 // want him to delete this SharedCacheMap. 01099 // 01100 01101 LazyWriter.OtherWork = TRUE; 01102 if (!LazyWriter.ScanActive) { 01103 CcScheduleLazyWriteScan(); 01104 } 01105 01106 CcReleaseMasterLock( OldIrql ); 01107 } 01108 01109 } else { 01110 01111 CcReleaseMasterLock( OldIrql ); 01112 } 01113 01114 SharedListOwned = FALSE; 01115 01116 // 01117 // If we did not create this SharedCacheMap, then there is a 01118 // possibility that it is in the dirty list. Once we are sure 01119 // we have the spinlock, just make sure it is in the clean list 01120 // if there are no dirty bytes and the open count is nonzero. 01121 // (The latter test is almost guaranteed, of course, but we check 01122 // it to be safe.) 01123 // 01124 01125 } else if (!WeCreated && 01126 (SharedCacheMap != NULL)) { 01127 01128 if (!SharedListOwned) { 01129 01130 CcAcquireMasterLock( &OldIrql ); 01131 SharedListOwned = TRUE; 01132 } 01133 01134 if ((SharedCacheMap->DirtyPages == 0) && 01135 (SharedCacheMap->OpenCount != 0)) { 01136 01137 RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks ); 01138 InsertTailList( &CcCleanSharedCacheMapList, 01139 &SharedCacheMap->SharedCacheMapLinks ); 01140 } 01141 } 01142 01143 // 01144 // Release global resource 01145 // 01146 01147 if (SharedListOwned) { 01148 CcReleaseMasterLock( OldIrql ); 01149 } 01150 01151 if (CacheMapToFree != NULL) { 01152 ExFreePool(CacheMapToFree); 01153 } 01154 01155 } 01156 01157 DebugTrace(-1, me, "CcInitializeCacheMap -> VOID\n", 0 ); 01158 01159 return; 01160 }

VOID
CcPurgeAndClearCacheSection (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN PLARGE_INTEGER FileOffset
    )

/*++

Routine Description:

    This routine calls CcPurgeCacheSection after zeroing the end of any
    partial page at the start of the range.  If the stream is not currently
    mapped by the cache (no section or no Vacb array), it flushes that page
    before the purge instead of zeroing it in memory.

Arguments:

    SharedCacheMap - Supplies the Shared Cache Map for the stream being
        purged.

    FileOffset - Offset from which the stream should be purged - rounded
        down to a page boundary by the purge itself.  Must not be NULL;
        it is dereferenced unconditionally below.

Return Value:

    None.

--*/

{
    ULONG TempLength, Length;
    LARGE_INTEGER LocalFileOffset;
    IO_STATUS_BLOCK IoStatus;
    PVOID TempVa;
    PVACB Vacb;

    //
    // If the purge does not start on a page boundary, then we have to see
    // if we need to save any user data in the first (partial) page before
    // purging.
    //

    if ((FileOffset->LowPart & (PAGE_SIZE - 1)) != 0) {

        //
        // Switch to LocalFileOffset.  We do it this way because we
        // still pass it on as an optional parameter.
        //

        LocalFileOffset = *FileOffset;
        FileOffset = &LocalFileOffset;

        //
        // If the file is cached, then we can actually zero the data to
        // be purged in memory, and not purge those pages.  This is a huge
        // savings, because sometimes the flushes in the other case cause
        // us to kill lots of stack, time and I/O doing CcZeroData in
        // especially large user-mapped files.
        //

        if ((SharedCacheMap->Section != NULL) &&
            (SharedCacheMap->Vacbs != NULL)) {

            //
            // First zero the tail of the first page we are keeping, if it
            // has data, and adjust FileOffset to allow the page to stay.
            //

            TempLength = PAGE_SIZE - (FileOffset->LowPart & (PAGE_SIZE - 1));

            TempVa = CcGetVirtualAddress( SharedCacheMap, *FileOffset, &Vacb, &Length );

            try {

                RtlZeroMemory( TempVa, TempLength );

                //
                // Only mark the page dirty for the Lazy Writer when we are
                // not beyond ValidDataGoal.  Otherwise CcSetDirtyInMask
                // would arbitrarily smash up ValidDataGoal (causing a
                // potential invalid CcSetValidData call), and if the Lazy
                // Writer wrote the last page ahead of another flush through
                // MM, the file system would never see a write from MM and
                // would not include the last page in ValidDataLength on
                // disk.
                //

                if (FileOffset->QuadPart <= SharedCacheMap->ValidDataGoal.QuadPart) {

                    //
                    // Make sure the Lazy Writer writes it.
                    //

                    CcSetDirtyInMask( SharedCacheMap, FileOffset, TempLength );

                //
                // Otherwise, we are mapped, so make sure at least that Mm
                // knows the page is dirty since we zeroed it.
                //

                } else {

                    MmSetAddressRangeModified( TempVa, 1 );
                }

                FileOffset->QuadPart += (LONGLONG)TempLength;

            //
            // If we get any kind of error, like failing to read the page
            // from the network, just charge on.  Note that we only read it
            // in order to zero it and avoid the flush below, so if we
            // cannot read it there is really no stale data problem.
            //

            } except(EXCEPTION_EXECUTE_HANDLER) {

                NOTHING;
            }

            CcFreeVirtualAddress( Vacb );

        } else {

            //
            // First flush the first page we are keeping, if it has data,
            // before we throw it away.
            //

            MmFlushSection( SharedCacheMap->FileObject->SectionObjectPointer, FileOffset, 1, &IoStatus, FALSE );
        }
    }

    CcPurgeCacheSection( SharedCacheMap->FileObject->SectionObjectPointer,
                         FileOffset,
                         0,
                         FALSE );
}

BOOLEAN
CcPurgeCacheSection (
    IN PSECTION_OBJECT_POINTERS SectionObjectPointer,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN UninitializeCacheMaps
    )

/*++

Routine Description:

    Forces a purge of the described range of the cache section, even when
    the stream is cached.  If a user has the file mapped, the purge will
    *not* take effect, and this must be considered part of normal
    application interaction.  The purpose of the purge is to throw away
    potentially nonzero data so it will be read in again and presumably
    zeroed; this is not a security issue, just an effort not to confuse
    the application with stale nonzero data.  A user-mapped view forces
    us to hang on to stale data, and we cannot help that.

    Intended to be called whenever previously written data is being
    truncated from the file and the file is not being deleted.

    The file must be acquired exclusive in order to call this routine.

Arguments:

    SectionObjectPointer - A pointer to the Section Object Pointers
        structure in the nonpaged Fcb.

    FileOffset - Offset from which file should be purged - rounded down
        to page boundary.  If NULL, purge the entire file.

    Length - Length of the byte range to purge, starting at FileOffset.
        Ignored if FileOffset is NULL.  If FileOffset is specified and
        Length is 0, purge from FileOffset to the end of the file.

    UninitializeCacheMaps - If TRUE, uninitialize all private cache maps
        before purging the data.

Return Value:

    FALSE - if the section was not successfully purged
    TRUE - if the section was successfully purged

--*/

{
    KIRQL Irql;
    PSHARED_CACHE_MAP SharedCacheMap;
    PPRIVATE_CACHE_MAP PrivateMap;
    ULONG ActivePageNumber;
    ULONG ActivePageDirty;
    BOOLEAN Purged = TRUE;
    PVACB ActiveVacb = NULL;

    DebugTrace(+1, me, "CcPurgeCacheSection:\n", 0 );
    DebugTrace( 0, mm, " SectionObjectPointer = %08lx\n", SectionObjectPointer );
    DebugTrace2(0, me, " FileOffset = %08lx, %08lx\n",
                ARGUMENT_PRESENT(FileOffset) ? FileOffset->LowPart
                                             : 0,
                ARGUMENT_PRESENT(FileOffset) ? FileOffset->HighPart
                                             : 0 );
    DebugTrace( 0, me, " Length = %08lx\n", Length );

    //
    // The RtlZeroMemory paths below depend on actually having something to
    // purge after zeroing whenever cache maps are being uninitialized.
    //

    ASSERT(!UninitializeCacheMaps || (Length == 0) || (Length >= PAGE_SIZE * 2));

    //
    // Creation/deletion of Shared Cache Maps is serialized by the master
    // lock; take it while we look up and pin the map.
    //

    CcAcquireMasterLock( &Irql );

    SharedCacheMap = SectionObjectPointer->SharedCacheMap;

    //
    // Take out an open count so the SharedCacheMap cannot go away while we
    // are calling Mm, and capture any active Vacb now (before waiting!)
    // so it can be nuked below.
    //

    if (SharedCacheMap != NULL) {

        CcIncrementOpenCount( SharedCacheMap, 'scPS' );

        GetActiveVacbAtDpcLevel( SharedCacheMap, ActiveVacb, ActivePageNumber, ActivePageDirty );
    }

    CcReleaseMasterLock( Irql );

    if (ActiveVacb != NULL) {

        CcFreeActiveVacb( SharedCacheMap, ActiveVacb, ActivePageNumber, ActivePageDirty );
    }

    //
    // try-finally guarantees the open count is dropped on the way out.
    //

    try {

        if (SharedCacheMap != NULL) {

            //
            // If requested, tear down every private cache map so no one is
            // currently caching the file.
            //

            if (UninitializeCacheMaps) {

                while (!IsListEmpty( &SharedCacheMap->PrivateList )) {

                    PrivateMap = CONTAINING_RECORD( SharedCacheMap->PrivateList.Flink,
                                                    PRIVATE_CACHE_MAP,
                                                    PrivateLinks );

                    CcUninitializeCacheMap( PrivateMap->FileObject, NULL, NULL );
                }
            }

            //
            // Unmap the range here.  We must loop and wait for dangling
            // cache reads or writes, because the lazy writer can sneak in
            // with CcGetVirtualAddressIfMapped and we are not synchronized
            // with it.
            //

            while ((SharedCacheMap->Vacbs != NULL) &&
                   !CcUnmapVacbArray( SharedCacheMap, FileOffset, Length, FALSE )) {

                CcWaitOnActiveCount( SharedCacheMap );
            }
        }

        //
        // Purge failures are extremely rare when no user-mapped sections
        // exist, but our own mapping can produce one if the file is being
        // lazy deleted from a previous open.  For that case loop until the
        // purge succeeds so we are not left with old user file data.
        // Length is invariant in this loop, but we must keep rechecking
        // that truncation is still allowed in case a user maps the file
        // during a delay.
        //

        while (!(Purged = MmPurgeSection( SectionObjectPointer,
                                          FileOffset,
                                          Length,
                                          (BOOLEAN)((SharedCacheMap != NULL) &&
                                                    ARGUMENT_PRESENT(FileOffset)))) &&
               (Length == 0) &&
               MmCanFileBeTruncated( SectionObjectPointer, FileOffset )) {

            (VOID)KeDelayExecutionThread( KernelMode, FALSE, &CcCollisionDelay );
        }

    } finally {

        //
        // Drop the open count taken above, if we took one.
        //

        if (SharedCacheMap != NULL) {

            CcAcquireMasterLock( &Irql );

            CcDecrementOpenCount( SharedCacheMap, 'scPF' );

            //
            // If the map is now idle and clean, move it to the dirty list
            // and poke the Lazy Writer so he will delete it.
            //

            if ((SharedCacheMap->OpenCount == 0) &&
                !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) &&
                (SharedCacheMap->DirtyPages == 0)) {

                RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );
                InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks,
                                &SharedCacheMap->SharedCacheMapLinks );

                LazyWriter.OtherWork = TRUE;
                if (!LazyWriter.ScanActive) {
                    CcScheduleLazyWriteScan();
                }
            }

            CcReleaseMasterLock( Irql );
        }
    }

    DebugTrace(-1, me, "CcPurgeCacheSection -> %02lx\n", Purged );

    return Purged;
}

VOID
CcSetDirtyPageThreshold (
    IN PFILE_OBJECT FileObject,
    IN ULONG DirtyPageThreshold
    )

/*++

Routine Description:

    This routine may be called to set a dirty page threshold for this
    stream.  The write throttling will kick in whenever the file system
    attempts to exceed the dirty page threshold for this file.

Arguments:

    FileObject - Supplies file object for the stream

    DirtyPageThreshold - Supplies the dirty page threshold for this stream,
        or 0 for no threshold.

Return Value:

    None

Environment:

    The caller must guarantee exclusive access to the FsRtl header flags,
    for example, by calling this routine once during create of the structure
    containing the header.  Then it would call the routine again when actually
    caching the stream.

--*/

{
    PSHARED_CACHE_MAP SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    //
    // The threshold can only be recorded if the stream currently has a
    // shared cache map; otherwise only the FsRtl header flag is set below.
    //

    if (SharedCacheMap != NULL) {

        SharedCacheMap->DirtyPageThreshold = DirtyPageThreshold;
    }

    //
    // Test the flag before setting, in case the caller is no longer properly
    // synchronized.
    //

    if (!FlagOn(((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->Flags,
                FSRTL_FLAG_LIMIT_MODIFIED_PAGES)) {

        SetFlag(((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->Flags,
                FSRTL_FLAG_LIMIT_MODIFIED_PAGES);
    }
}

VOID
CcSetFileSizes (
    IN PFILE_OBJECT FileObject,
    IN PCC_FILE_SIZES FileSizes
    )

/*++

Routine Description:

    This routine must be called whenever a file has been extended to reflect
    this extension in the cache maps and underlying section.  Calling this
    routine has a benign effect if the current size of the section is
    already greater than or equal to the new AllocationSize.

    This routine must also be called whenever the FileSize for a file changes
    to reflect these changes in the Cache Manager.

    This routine seems rather large, but in the normal case it only acquires
    a spinlock, updates some fields, and exits.  Less often it will either
    extend the section, or truncate/purge the file, but it would be unexpected
    to do both.  On the other hand, the idea of this routine is that it does
    "everything" required when AllocationSize or FileSize change.

Arguments:

    FileObject - A file object for which CcInitializeCacheMap has been
                 previously called.

    FileSizes - A pointer to AllocationSize, FileSize and ValidDataLength
                for the file.  AllocationSize is ignored if it is not larger
                than the current section size (i.e., it is ignored unless it
                has grown).  ValidDataLength is not used.

Return Value:

    None

--*/

{
    LARGE_INTEGER NewSectionSize;
    LARGE_INTEGER NewFileSize;
    LARGE_INTEGER NewValidDataLength;
    IO_STATUS_BLOCK IoStatus;
    PSHARED_CACHE_MAP SharedCacheMap;
    NTSTATUS Status;
    KIRQL OldIrql;
    PVACB ActiveVacb;
    ULONG ActivePage;
    ULONG PageIsDirty;

    DebugTrace(+1, me, "CcSetFileSizes:\n", 0 );
    DebugTrace( 0, me, "    FileObject = %08lx\n", FileObject );
    DebugTrace( 0, me, "    FileSizes = %08lx\n", FileSizes );

    //
    //  Make a local copy of the new file size and section size.
    //

    NewSectionSize = FileSizes->AllocationSize;
    NewFileSize = FileSizes->FileSize;
    NewValidDataLength = FileSizes->ValidDataLength;

    //
    //  Serialize Creation/Deletion of all Shared CacheMaps
    //

    CcAcquireMasterLock( &OldIrql );

    //
    //  Get pointer to SharedCacheMap via File Object.
    //

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    //
    //  If the file is not cached, just get out.
    //

    if ((SharedCacheMap == NULL) || (SharedCacheMap->Section == NULL)) {

        CcReleaseMasterLock( OldIrql );

        //
        //  Let's try to purge the file in case this is a truncate.  In the
        //  vast majority of cases when there is no shared cache map, there
        //  is no data section either, so this call will eventually be
        //  no-oped in Mm.
        //

        //
        //  First flush the first page we are keeping, if it has data, before
        //  we throw it away.  (A nonzero low-order page offset means the new
        //  EOF lands partway through a page that must survive the purge.)
        //

        if (NewFileSize.LowPart & (PAGE_SIZE - 1)) {
            MmFlushSection( FileObject->SectionObjectPointer, &NewFileSize, 1, &IoStatus, FALSE );
        }

        //
        //  Length 0 means purge from NewFileSize to the end of the section.
        //

        CcPurgeCacheSection( FileObject->SectionObjectPointer,
                             &NewFileSize,
                             0,
                             FALSE );

        DebugTrace(-1, me, "CcSetFileSizes -> VOID\n", 0 );

        return;
    }

    //
    //  Make call a Noop if file is not mapped, or section already big enough.
    //

    if ( NewSectionSize.QuadPart > SharedCacheMap->SectionSize.QuadPart ) {

        //
        //  Increment open count to make sure the SharedCacheMap stays around,
        //  then release the spinlock so that we can call Mm.
        //  (Tags '1fSS'/'1fSF' bracket this first Set/Finally reference pair.)
        //

        CcIncrementOpenCount( SharedCacheMap, '1fSS' );
        CcReleaseMasterLock( OldIrql );

        //
        //  Round new section size up to a multiple of DEFAULT_EXTEND_MODULO
        //  so repeated small extensions do not each grow the section.
        //

        NewSectionSize.QuadPart = NewSectionSize.QuadPart + (LONGLONG)(DEFAULT_EXTEND_MODULO - 1);
        NewSectionSize.LowPart &= ~(DEFAULT_EXTEND_MODULO - 1);

        //
        //  Use try-finally to make sure we get the open count decremented.
        //

        try {

            //
            //  Call MM to extend the section.
            //

            DebugTrace( 0, mm, "MmExtendSection:\n", 0 );
            DebugTrace( 0, mm, "    Section = %08lx\n", SharedCacheMap->Section );
            DebugTrace2(0, mm, "    Size = %08lx, %08lx\n",
                        NewSectionSize.LowPart, NewSectionSize.HighPart );

            Status = MmExtendSection( SharedCacheMap->Section, &NewSectionSize, TRUE );

            if (!NT_SUCCESS(Status)) {

                DebugTrace( 0, 0, "Error from MmExtendSection, Status = %08lx\n",
                            Status );

                //
                //  Normalize the Mm status into one the file system expects.
                //

                ExRaiseStatus( FsRtlNormalizeNtstatus( Status,
                                                       STATUS_UNEXPECTED_MM_EXTEND_ERR ));
            }

            //
            //  Extend the Vacb array.
            //

            CcExtendVacbArray( SharedCacheMap, NewSectionSize );

        } finally {

            //
            //  Serialize again to decrement the open count.
            //

            CcAcquireMasterLock( &OldIrql );

            CcDecrementOpenCount( SharedCacheMap, '1fSF' );

            if ((SharedCacheMap->OpenCount == 0) &&
                !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) &&
                (SharedCacheMap->DirtyPages == 0)) {

                //
                //  Move to the dirty list.
                //

                RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );
                InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks,
                                &SharedCacheMap->SharedCacheMapLinks );

                //
                //  Make sure the Lazy Writer will wake up, because we
                //  want him to delete this SharedCacheMap.
                //

                LazyWriter.OtherWork = TRUE;
                if (!LazyWriter.ScanActive) {
                    CcScheduleLazyWriteScan();
                }
            }

            CcReleaseMasterLock( OldIrql );
        }

        //
        //  It is now very unlikely that we have any more work to do, but just
        //  in case we reacquire the spinlock and check again if we are cached.
        //

        CcAcquireMasterLock( &OldIrql );

        //
        //  Get pointer to SharedCacheMap via File Object.
        //

        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        //
        //  If the file is not cached, just get out.
        //

        if (SharedCacheMap == NULL) {

            CcReleaseMasterLock( OldIrql );

            DebugTrace(-1, me, "CcSetFileSizes -> VOID\n", 0 );

            return;
        }
    }

    //
    //  If we are shrinking either of these two sizes, then we must free the
    //  active page, since it may be locked.
    //

    CcIncrementOpenCount( SharedCacheMap, '2fSS' );

    try {

        if ( ( NewFileSize.QuadPart < SharedCacheMap->ValidDataGoal.QuadPart ) ||
             ( NewFileSize.QuadPart < SharedCacheMap->FileSize.QuadPart )) {

            GetActiveVacbAtDpcLevel( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );

            if ((ActiveVacb != NULL) || (SharedCacheMap->NeedToZero != NULL)) {

                //
                //  Must drop the master lock before CcFreeActiveVacb, then
                //  reacquire it for the field updates below.
                //

                CcReleaseMasterLock( OldIrql );

                CcFreeActiveVacb( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );

                //
                //  Serialize again to reduce ValidDataLength.  It cannot change
                //  because the caller must have the file exclusive.
                //

                CcAcquireMasterLock( &OldIrql );
            }
        }

        //
        //  If the section did not grow, see if the file system supports ValidDataLength,
        //  then update the valid data length in the file system.
        //  (MAXLONGLONG is the sentinel for "no ValidDataLength support".)
        //

        if ( SharedCacheMap->ValidDataLength.QuadPart != MAXLONGLONG ) {

            if ( NewFileSize.QuadPart < SharedCacheMap->ValidDataLength.QuadPart ) {
                SharedCacheMap->ValidDataLength = NewFileSize;
            }

            //
            //  Update our notion of ValidDataGoal (how far the file has been written
            //  in the cache) with caller's ValidDataLength.  (Our ValidDataLength controls
            //  when we issue ValidDataLength callbacks.)  *** For now play it safe by
            //  only growing here, which is the historical problem at hand, as with
            //  compressed and uncompressed stream caches.
            //

            if (NewValidDataLength.QuadPart > SharedCacheMap->ValidDataGoal.QuadPart) {
                SharedCacheMap->ValidDataGoal = NewValidDataLength;
            }
        }

        //
        //  On truncate, be nice guys and actually purge away user data from
        //  the cache.  However, the PinAccess check is important to avoid deadlocks
        //  in Ntfs.
        //
        //  It is also important to check the Vacb Active count.  The caller
        //  must have the file exclusive, therefore, no one else can be actively
        //  doing anything in the file.  Normally the Active count will be zero
        //  (like in a normal call from Set File Info), and we can go ahead and truncate.
        //  However, if the active count is nonzero, chances are this very thread has
        //  something pinned or mapped, and we will deadlock if we try to purge and
        //  wait for the count to go zero.  A rare case of this which deadlocked DaveC
        //  on Christmas Day of 1992, is where Ntfs was trying to convert an attribute
        //  from resident to nonresident - which is a good example of a case where the
        //  purge was not needed.
        //

        if ( (NewFileSize.QuadPart < SharedCacheMap->FileSize.QuadPart ) &&
             !FlagOn(SharedCacheMap->Flags, PIN_ACCESS) &&
             (SharedCacheMap->VacbActiveCount == 0)) {

            //
            //  Release the spinlock so that we can call Mm.
            //

            CcReleaseMasterLock( OldIrql );

            //
            //  If we are actually truncating to zero (a size which has particular
            //  meaning to the Lazy Writer scan!), then we must reset the Mbcb if
            //  there is one, so that we do not keep dirty pages around forever.
            //

            if ((NewFileSize.QuadPart == 0) && (SharedCacheMap->Mbcb != NULL)) {
                CcDeleteMbcb( SharedCacheMap );
            }

            CcPurgeAndClearCacheSection( SharedCacheMap, &NewFileSize );

            //
            //  Serialize again to decrement the open count.
            //

            CcAcquireMasterLock( &OldIrql );
        }

    } finally {

        //
        //  We should only be raising without owning the spinlock, so on
        //  abnormal termination reacquire it before touching shared state.
        //

        if (AbnormalTermination()) {

            CcAcquireMasterLock( &OldIrql );
        }

        CcDecrementOpenCount( SharedCacheMap, '2fSF' );

        SharedCacheMap->FileSize = NewFileSize;

        if ((SharedCacheMap->OpenCount == 0) &&
            !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) &&
            (SharedCacheMap->DirtyPages == 0)) {

            //
            //  Move to the dirty list.
            //

            RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );
            InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks,
                            &SharedCacheMap->SharedCacheMapLinks );

            //
            //  Make sure the Lazy Writer will wake up, because we
            //  want him to delete this SharedCacheMap.
            //

            LazyWriter.OtherWork = TRUE;
            if (!LazyWriter.ScanActive) {
                CcScheduleLazyWriteScan();
            }
        }

        CcReleaseMasterLock( OldIrql );
    }

    DebugTrace(-1, me, "CcSetFileSizes -> VOID\n", 0 );

    return;
}

BOOLEAN
CcUninitializeCacheMap (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER TruncateSize OPTIONAL,
    IN PCACHE_UNINITIALIZE_EVENT UninitializeEvent OPTIONAL
    )

/*++

Routine Description:

    This routine uninitializes the previously initialized Shared and Private
    Cache Maps.  This routine is only intended to be called by File Systems.
    It should be called when the File System receives a cleanup call on the
    File Object.

    A File System which supports data caching must always call this routine
    whenever it closes a file, whether the caller opened the file with
    NO_INTERMEDIATE_BUFFERING as FALSE or not.  This is because the final
    cleanup of a file related to truncation or deletion of the file, can
    only occur on the last close, whether the last closer cached the file
    or not.  When CcUninitializeCacheMap is called on a file object for which
    CcInitializeCacheMap was never called, the call has a benign effect
    iff no one has truncated or deleted the file; otherwise the necessary
    cleanup relating to the truncate or close is performed.

    In summary, CcUninitializeCacheMap does the following:

        If the caller had Write or Delete access, the cache is flushed.
        (This could change with lazy writing.)

        If a Cache Map was initialized on this File Object, it is
        uninitialized (unmap any views, delete section, and delete
        Cache Map structures).

        On the last Cleanup, if the file has been deleted, the
        Section is forced closed.  If the file has been truncated, then
        the truncated pages are purged from the cache.

Arguments:

    FileObject - File Object which was previously supplied to
                 CcInitializeCacheMap.

    TruncateSize - If specified, the file was truncated to the specified
                   size, and the cache should be purged accordingly.

    UninitializeEvent - If specified, then the provided event
                        will be set to the signalled state when the actual flush is
                        completed.  This is only of interest to file systems that
                        require that they be notified when a cache flush operation
                        has completed.  Due to network protocol restrictions, it
                        is critical that network file systems know exactly when
                        a cache flush operation completes, by specifying this
                        event, they can be notified when the cache section is
                        finally purged if the section is "lazy-deleted".

ReturnValue:

    FALSE if Section was not closed.
    TRUE if Section was closed.

--*/

{
    KIRQL OldIrql;
    PSHARED_CACHE_MAP SharedCacheMap;
    ULONG ActivePage;
    ULONG PageIsDirty;
    PVACB ActiveVacb = NULL;
    BOOLEAN SectionClosed = FALSE;
    BOOLEAN SharedListAcquired = FALSE;
    PPRIVATE_CACHE_MAP PrivateCacheMap;

    DebugTrace(+1, me, "CcUninitializeCacheMap:\n", 0 );
    DebugTrace( 0, me, "    FileObject = %08lx\n", FileObject );
    DebugTrace( 0, me, "    &TruncateSize = %08lx\n", TruncateSize );

    //
    //  Ensure release of resources
    //

    try {

        //
        //  Serialize Creation/Deletion of all Shared CacheMaps
        //

        CcAcquireMasterLock( &OldIrql );
        SharedListAcquired = TRUE;

        //
        //  Get pointer to SharedCacheMap via File Object.
        //

        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
        PrivateCacheMap = FileObject->PrivateCacheMap;

        //
        //  Decrement Open Count on SharedCacheMap, if we did a cached open.
        //  Also unmap PrivateCacheMap if it is mapped and deallocate it.
        //

        if (PrivateCacheMap != NULL) {

            ASSERT( PrivateCacheMap->FileObject == FileObject );

            CcDecrementOpenCount( SharedCacheMap, 'ninU' );

            //
            //  Remove PrivateCacheMap from list in SharedCacheMap.
            //

            RemoveEntryList( &PrivateCacheMap->PrivateLinks );

            //
            //  Free local or allocated PrivateCacheMap.  If it is the one
            //  embedded in the SharedCacheMap, just mark it free (NULL it so
            //  the finally clause below will not ExFreePool it).
            //

            if (PrivateCacheMap == &SharedCacheMap->PrivateCacheMap) {
                PrivateCacheMap->NodeTypeCode = 0;
                PrivateCacheMap = NULL;
            }

            FileObject->PrivateCacheMap = (PPRIVATE_CACHE_MAP)NULL;
        }

        //
        //  Now if we have a SharedCacheMap whose Open Count went to 0, we
        //  have some additional cleanup.
        //

        if (SharedCacheMap != NULL) {

            //
            //  If a Truncate Size was specified, then remember that we want to
            //  truncate the FileSize and purge the unneeded pages when OpenCount
            //  goes to 0.
            //

            if (ARGUMENT_PRESENT(TruncateSize)) {

                if ( (TruncateSize->QuadPart == 0) && (SharedCacheMap->FileSize.QuadPart != 0) ) {

                    SetFlag(SharedCacheMap->Flags, TRUNCATE_REQUIRED);

                } else

                //
                //  If this is the last guy, I can drop the file size down
                //  now.
                //

                if (IsListEmpty(&SharedCacheMap->PrivateList)) {
                    SharedCacheMap->FileSize = *TruncateSize;
                }
            }

            //
            //  If other file objects are still using this SharedCacheMap,
            //  then we are done now.
            //

            if (SharedCacheMap->OpenCount != 0) {

                DebugTrace(-1, me, "SharedCacheMap OpenCount != 0\n", 0);

                //
                //  If the caller specified an event to be set when
                //  the cache uninitialize is completed, set the event
                //  now, because the uninitialize is complete for this file.
                //  (Note, we make him wait if he is the last guy.)
                //

                if (ARGUMENT_PRESENT(UninitializeEvent)) {

                    if (!IsListEmpty(&SharedCacheMap->PrivateList)) {
                        KeSetEvent(&UninitializeEvent->Event, 0, FALSE);
                    } else {
                        UninitializeEvent->Next = SharedCacheMap->UninitializeEvent;
                        SharedCacheMap->UninitializeEvent = UninitializeEvent;
                    }
                }

                try_return( SectionClosed = FALSE );
            }

            //
            //  The private cache map list better be empty!
            //

            ASSERT(IsListEmpty(&SharedCacheMap->PrivateList));

            //
            //  Set the "uninitialize complete" in the shared cache map
            //  so that CcDeleteSharedCacheMap will delete it.
            //

            if (ARGUMENT_PRESENT(UninitializeEvent)) {
                UninitializeEvent->Next = SharedCacheMap->UninitializeEvent;
                SharedCacheMap->UninitializeEvent = UninitializeEvent;
            }

            //
            //  We are in the process of deleting this cache map.  If the
            //  Lazy Writer is active or the Bcb list is not empty or the Lazy
            //  Writer will hit this SharedCacheMap because we are purging
            //  the file to 0, then get out and let the Lazy Writer clean
            //  up.
            //

            if ((!FlagOn(SharedCacheMap->Flags, PIN_ACCESS) &&
                 !ARGUMENT_PRESENT(UninitializeEvent))

                    ||

                FlagOn(SharedCacheMap->Flags, WRITE_QUEUED)

                    ||

                (SharedCacheMap->DirtyPages != 0)) {

                //
                //  Move it to the dirty list so the lazy write scan will
                //  see it.  (If WRITE_QUEUED is set it is already there.)
                //

                if (!FlagOn(SharedCacheMap->Flags, WRITE_QUEUED)) {
                    RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );
                    InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks,
                                    &SharedCacheMap->SharedCacheMapLinks );
                }

                //
                //  Make sure the Lazy Writer will wake up, because we
                //  want him to delete this SharedCacheMap.
                //

                LazyWriter.OtherWork = TRUE;
                if (!LazyWriter.ScanActive) {
                    CcScheduleLazyWriteScan();
                }

                //
                //  Get the active Vacb if we are going to lazy delete, to
                //  free it for someone who can use it.
                //

                GetActiveVacbAtDpcLevel( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );

                DebugTrace(-1, me, "SharedCacheMap has Bcbs and not purging to 0\n", 0);

                try_return( SectionClosed = FALSE );
            }

            //
            //  Now we can delete the SharedCacheMap.  If there are any Bcbs,
            //  then we must be truncating to 0, and they will also be deleted.
            //  On return the Shared Cache Map List Spinlock will be released.
            //

            CcDeleteSharedCacheMap( SharedCacheMap, OldIrql, FALSE );

            SharedListAcquired = FALSE;

            try_return( SectionClosed = TRUE );
        }

        //
        //  No Shared Cache Map.  To make the file go away, we still need to
        //  purge the section, if one exists.  (And we still need to release
        //  our global list first to avoid deadlocks.)
        //

        else {
            if (ARGUMENT_PRESENT(TruncateSize) &&
                ( TruncateSize->QuadPart == 0 ) &&
                (*(PCHAR *)FileObject->SectionObjectPointer != NULL)) {

                CcReleaseMasterLock( OldIrql );
                SharedListAcquired = FALSE;

                DebugTrace( 0, mm, "MmPurgeSection:\n", 0 );
                DebugTrace( 0, mm, "    SectionObjectPointer = %08lx\n",
                            FileObject->SectionObjectPointer );
                DebugTrace2(0, mm, "    Offset = %08lx\n",
                            TruncateSize->LowPart,
                            TruncateSize->HighPart );

                //
                //  0 Length means to purge from the TruncateSize on.
                //

                CcPurgeCacheSection( FileObject->SectionObjectPointer,
                                     TruncateSize,
                                     0,
                                     FALSE );
            }

            //
            //  If the caller specified an event to be set when
            //  the cache uninitialize is completed, set the event
            //  now, because the uninitialize is complete for this file.
            //

            if (ARGUMENT_PRESENT(UninitializeEvent)) {
                KeSetEvent(&UninitializeEvent->Event, 0, FALSE);
            }

        }

    try_exit: NOTHING;
    }
    finally {

        //
        //  Release global resources
        //

        if (SharedListAcquired) {
            CcReleaseMasterLock( OldIrql );
        }

        //
        //  Free the active vacb, if we found one.
        //

        if (ActiveVacb != NULL) {

            CcFreeActiveVacb( ActiveVacb->SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );
        }

        //
        //  Free PrivateCacheMap now that we no longer have the spinlock.
        //  (Only non-NULL here if it was a separately allocated one.)
        //

        if (PrivateCacheMap != NULL) {
            ExFreePool( PrivateCacheMap );
        }
    }

    //
    //  NOTE(review): the trace string below retains the historical
    //  "CcUnitializeCacheMap" misspelling; left as-is since it is a
    //  runtime string that debugging tools may match on.
    //

    DebugTrace(-1, me, "CcUnitializeCacheMap -> %02lx\n", SectionClosed );

    return SectionClosed;

}

VOID
CcUnmapAndPurge(
    IN PSHARED_CACHE_MAP SharedCacheMap
    )

/*++

Routine Description:

    This routine may be called to unmap and purge a section, causing Memory
    Management to throw the pages out and reset his notion of file size.

Arguments:

    SharedCacheMap - Pointer to SharedCacheMap of section to purge.

Return Value:

    None.

--*/

{
    PFILE_OBJECT FileObject;

    FileObject = SharedCacheMap->FileObject;

    //
    //  Unmap all Vacbs
    //

    if (SharedCacheMap->Vacbs != NULL) {
        (VOID)CcUnmapVacbArray( SharedCacheMap, NULL, 0, FALSE );
    }

    //
    //  Now that the file is unmapped, we can purge the truncated
    //  pages from memory, if TRUNCATE_REQUIRED.  Note that since the
    //  entire section is being purged (FileSize == NULL), the purge
    //  and subsequent delete of the SharedCacheMap should drop
    //  all references on the section and file object clearing the
    //  way for the Close Call and actual file delete to occur
    //  immediately.
    //

    if (FlagOn(SharedCacheMap->Flags, TRUNCATE_REQUIRED)) {

        DebugTrace( 0, mm, "MmPurgeSection:\n", 0 );
        DebugTrace( 0, mm, "    SectionObjectPointer = %08lx\n",
                    FileObject->SectionObjectPointer );
        DebugTrace2(0, mm, "    Offset = %08lx\n",
                    SharedCacheMap->FileSize.LowPart,
                    SharedCacheMap->FileSize.HighPart );

        //
        //  NULL FileOffset / 0 Length means purge the entire section.
        //

        CcPurgeCacheSection( FileObject->SectionObjectPointer,
                             NULL,
                             0,
                             FALSE );
    }
}

BOOLEAN CcZeroData IN PFILE_OBJECT  FileObject,
IN PLARGE_INTEGER  StartOffset,
IN PLARGE_INTEGER  EndOffset,
IN BOOLEAN  Wait
 

Definition at line 2952 of file fssup.c.

References ASSERT, _MDL::ByteCount, CcAggressiveZeroCount, CcAggressiveZeroThreshold, CcPinFileData(), CcSetDirtyPinnedData(), CcUnpinFileData(), COMPUTE_PAGES_SPANNED, DebugTrace, Event(), ExAllocatePoolWithTag, Executive, ExFreePool(), ExRaiseStatus(), FALSE, FlagOn, FO_WRITE_THROUGH, IoAllocateMdl(), IoFreeMdl(), IoGetRelatedDeviceObject(), IoReadAccess, IoSynchronousPageWrite(), KeInitializeEvent, KernelMode, KeSweepDcache(), KeWaitForSingleObject(), LowPagePriority, _MDL::MappedSystemVa, MAX_ZERO_TRANSFER, MAX_ZEROS_IN_CACHE, MDL_PAGES_LOCKED, MDL_SOURCE_IS_NONPAGED_POOL, _MDL::MdlFlags, me, MIN_ZERO_TRANSFER, MmAvailablePages, MmBuildMdlForNonPagedPool(), MmDisablePageFaultClustering, MmEnablePageFaultClustering, MmGetMdlPfnArray, MmGetSystemAddressForMdl, MmGetSystemAddressForMdlSafe, MmProbeAndLockPages(), MmSetAddressRangeModified(), MmUnlockPages(), MmUnmapLockedPages(), NonPagedPoolCacheAligned, NT_SUCCESS, NTSTATUS(), NULL, PAGE_SIZE, _DEVICE_OBJECT::SectorSize, Status, TRUE, try_return, and UNPIN.

Referenced by FsRtlCopyWrite(), and FsRtlPrepareMdlWriteDev().

02961 : 02962 02963 This routine attempts to zero the specified file data and deliver the 02964 correct I/O status. 02965 02966 If the caller does not want to block (such as for disk I/O), then 02967 Wait should be supplied as FALSE. If Wait was supplied as FALSE and 02968 it is currently impossible to zero all of the requested data without 02969 blocking, then this routine will return FALSE. However, if the 02970 required space is immediately accessible in the cache and no blocking is 02971 required, this routine zeros the data and returns TRUE. 02972 02973 If the caller supplies Wait as TRUE, then this routine is guaranteed 02974 to zero the data and return TRUE. If the correct space is immediately 02975 accessible in the cache, then no blocking will occur. Otherwise, 02976 the necessary work will be initiated to read and/or free cache data, 02977 and the caller will be blocked until the data can be received. 02978 02979 File system Fsd's should typically supply Wait = TRUE if they are 02980 processing a synchronous I/O requests, or Wait = FALSE if they are 02981 processing an asynchronous request. 02982 02983 File system threads should supply Wait = TRUE. 02984 02985 IMPORTANT NOTE: File systems which call this routine must be prepared 02986 to handle a special form of a write call where the Mdl is already 02987 supplied. Namely, if Irp->MdlAddress is supplied, the file system 02988 must check the low order bit of Irp->MdlAddress->ByteOffset. If it 02989 is set, that means that the Irp was generated in this routine and 02990 the file system must do two things: 02991 02992 Decrement Irp->MdlAddress->ByteOffset and Irp->UserBuffer 02993 02994 Clear Irp->MdlAddress immediately prior to completing the 02995 request, as this routine expects to reuse the Mdl and 02996 ultimately deallocate the Mdl itself. 02997 02998 Arguments: 02999 03000 FileObject - pointer to the FileObject for which a range of bytes 03001 is to be zeroed. 
This FileObject may either be for a cached file or a noncached file.
    If the file is not cached, then WriteThrough must be TRUE and
    StartOffset and EndOffset must be on sector boundaries.

    StartOffset - Start offset in file to be zeroed.

    EndOffset - End offset in file to be zeroed.

    Wait - FALSE if caller may not block, TRUE otherwise (see description
           above)

Return Value:

    FALSE - if Wait was supplied as FALSE and the data was not zeroed.

    TRUE - if the data has been zeroed.

Raises:

    STATUS_INSUFFICIENT_RESOURCES - If a pool allocation failure occurs.
        This can only occur if Wait was specified as TRUE.  (If Wait is
        specified as FALSE, and an allocation failure occurs, this
        routine simply returns FALSE.)

--*/

{
    PSHARED_CACHE_MAP SharedCacheMap;
    PVOID CacheBuffer;
    LARGE_INTEGER FOffset;
    LARGE_INTEGER ToGo;
    ULONG ZeroBytes, ZeroTransfer;
    ULONG SectorMask;
    ULONG i;
    BOOLEAN WriteThrough;
    BOOLEAN AggressiveZero = FALSE;
    ULONG SavedState = 0;
    ULONG MaxZerosInCache = MAX_ZEROS_IN_CACHE;

    //
    //  Number of pages of zeros physically allocated for the on-disk
    //  path.  The PFNs of these pages are replicated through the Mdl
    //  below so the same physical zero pages are written repeatedly.
    //

    ULONG NumberOfColors = 1;

    PBCB Bcb = NULL;
    PCHAR Zeros = NULL;
    PMDL ZeroMdl = NULL;
    ULONG MaxBytesMappedInMdl = 0;
    BOOLEAN Result = TRUE;

    PPFN_NUMBER Page;
    ULONG SavedByteCount;
    LARGE_INTEGER SizeLeft;

    DebugTrace(+1, me, "CcZeroData\n", 0 );

    //
    //  Treat a file object with no private cache map exactly like a
    //  write-through file:  both force the zeros to disk synchronously.
    //

    WriteThrough = (BOOLEAN)(((FileObject->Flags & FO_WRITE_THROUGH) != 0) ||
                             (FileObject->PrivateCacheMap == NULL));

    //
    //  If the FileObject is WriteThrough but the caller cannot block,
    //  we cannot perform the synchronous on-disk zeroing, so just get out.
    //

    if (WriteThrough && !Wait) {

        DebugTrace(-1, me, "CcZeroData->FALSE (WriteThrough && !Wait)\n", 0 );

        return FALSE;
    }

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    //
    //  SectorSize is a power of two, so this mask selects the
    //  byte-offset-within-sector bits.
    //

    SectorMask = IoGetRelatedDeviceObject(FileObject)->SectorSize - 1;

    FOffset = *StartOffset;

    //
    //  Calculate how much to zero this time.
    //

    ToGo.QuadPart = EndOffset->QuadPart - FOffset.QuadPart;

    //
    //  This magic number is what the fastpaths throttle on, and they will present
    //  non-sector aligned zeroing requests.  As long as we will always handle them
    //  on the cached path, we are OK.
    //
    //  If we will not make the cached path, the request must be aligned.
    //

    ASSERT( ToGo.QuadPart <= 0x2000 ||
            ((ToGo.LowPart & SectorMask) == 0 &&
             (FOffset.LowPart & SectorMask) == 0));

    //
    //  We will only do zeroing in the cache if the caller is using a
    //  cached file object, and did not specify WriteThrough.  We are
    //  willing to zero some data in the cache if our total is not too
    //  much, or there is sufficient available pages.
    //

    if (((ToGo.QuadPart <= 0x2000) ||
         (MmAvailablePages >= ((MAX_ZEROS_IN_CACHE / PAGE_SIZE) * 4))) && !WriteThrough) {

        try {

            while (MaxZerosInCache != 0) {

                ULONG ReceivedLength;
                LARGE_INTEGER BeyondLastByte;

                if ( ToGo.QuadPart > (LONGLONG)MaxZerosInCache ) {

                    //
                    //  If Wait == FALSE, then there is no point in getting started,
                    //  because we would have to start all over again zeroing with
                    //  Wait == TRUE, since we would fall out of this loop and
                    //  start synchronously writing pages to disk.
                    //

                    if (!Wait) {

                        DebugTrace(-1, me, "CcZeroData -> FALSE\n", 0 );

                        try_return( Result = FALSE );
                    }
                }
                else {
                    MaxZerosInCache = ToGo.LowPart;
                }

                //
                //  Call local routine to Map or Access the file data, then zero the data,
                //  then call another local routine to free the data.  If we cannot map
                //  the data because of a Wait condition, return FALSE.
                //
                //  Note that this call may result in an exception, however, if it
                //  does no Bcb is returned and this routine has absolutely no
                //  cleanup to perform.  Therefore, we do not have a try-finally
                //  and we allow the possibility that we will simply be unwound
                //  without notice.
                //

                if (!CcPinFileData( FileObject,
                                    &FOffset,
                                    MaxZerosInCache,
                                    FALSE,
                                    TRUE,
                                    Wait,
                                    &Bcb,
                                    &CacheBuffer,
                                    &BeyondLastByte )) {

                    DebugTrace(-1, me, "CcZeroData -> FALSE\n", 0 );

                    try_return( Result = FALSE );
                }

                //
                //  Calculate how much data is described by Bcb starting at our desired
                //  file offset.  If it is more than we need, we will zero the whole thing
                //  anyway.
                //

                ReceivedLength = (ULONG)(BeyondLastByte.QuadPart - FOffset.QuadPart );

                //
                //  Now attempt to allocate an Mdl to describe the mapped data.
                //

                ZeroMdl = IoAllocateMdl( CacheBuffer,
                                         ReceivedLength,
                                         FALSE,
                                         FALSE,
                                         NULL );

                if (ZeroMdl == NULL) {

                    ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
                }

                //
                //  It is necessary to probe and lock the pages, or else
                //  the pages may not still be in memory when we do the
                //  MmSetAddressRangeModified for the dirty Bcb.
                //
                //  Page fault clustering is disabled around the probe so we
                //  fault in only the pages we are about to zero; SavedState
                //  is cleared afterward so the finally clause below knows
                //  clustering has already been restored.
                //

                MmDisablePageFaultClustering(&SavedState);
                MmProbeAndLockPages( ZeroMdl, KernelMode, IoReadAccess );
                MmEnablePageFaultClustering(SavedState);
                SavedState = 0;

                //
                //  Assume we did not get all the data we wanted, and set FOffset
                //  to the end of the returned data, and advance buffer pointer.
                //

                FOffset = BeyondLastByte;

                //
                //  Figure out how many bytes we are allowed to zero in the cache.
                //  Note it is possible we have zeroed a little more than our maximum,
                //  because we hit an existing Bcb that extended beyond the range.
                //

                if (MaxZerosInCache <= ReceivedLength) {
                    MaxZerosInCache = 0;
                }
                else {
                    MaxZerosInCache -= ReceivedLength;
                }

                //
                //  Now set the Bcb dirty.  We have to explicitly set the address
                //  range modified here, because that work otherwise gets deferred
                //  to the Lazy Writer.
                //

                MmSetAddressRangeModified( CacheBuffer, ReceivedLength );
                CcSetDirtyPinnedData( Bcb, NULL );

                //
                //  Unmap the data now
                //

                CcUnpinFileData( Bcb, FALSE, UNPIN );
                Bcb = NULL;

                //
                //  Unlock and free the Mdl (we only loop back if we crossed
                //  a 256KB boundary).
                //

                MmUnlockPages( ZeroMdl );
                IoFreeMdl( ZeroMdl );
                ZeroMdl = NULL;
            }

        try_exit: NOTHING;
        } finally {

            //
            //  If we were unwound between the disable and enable calls
            //  above, restore page fault clustering now.
            //

            if (SavedState != 0) {
                MmEnablePageFaultClustering(SavedState);
            }

            //
            //  Clean up only necessary in abnormal termination.
            //

            if (Bcb != NULL) {

                CcUnpinFileData( Bcb, FALSE, UNPIN );
            }

            //
            //  Since the last thing in the above loop which can
            //  fail is the MmProbeAndLockPages, we only need to
            //  free the Mdl here.
            //

            if (ZeroMdl != NULL) {

                IoFreeMdl( ZeroMdl );
            }
        }

        //
        //  If hit a wait condition above, return it now.
        //

        if (!Result) {
            return FALSE;
        }

        //
        //  If we finished, get out now.
        //

        if ( FOffset.QuadPart >= EndOffset->QuadPart ) {
            return TRUE;
        }
    }

    //
    //  We either get here because we decided above not to zero anything in
    //  the cache directly, or else we zeroed up to our maximum and still
    //  have some left to zero direct to the file on disk.  In either case,
    //  we will now zero from FOffset to *EndOffset, and then flush this
    //  range in case the file is cached/mapped, and there are modified
    //  changes in memory.
    //

    //
    //  Round FOffset and EndOffset up to sector boundaries, since
    //  we will be doing disk I/O, and calculate size left.
    //

    ASSERT( (FOffset.LowPart & SectorMask) == 0 );

    FOffset.QuadPart += (LONGLONG)SectorMask;
    FOffset.LowPart &= ~SectorMask;
    SizeLeft.QuadPart = EndOffset->QuadPart + (LONGLONG)SectorMask;
    SizeLeft.LowPart &= ~SectorMask;
    SizeLeft.QuadPart -= FOffset.QuadPart;

    ASSERT( (FOffset.LowPart & SectorMask) == 0 );
    ASSERT( (SizeLeft.LowPart & SectorMask) == 0 );

    if (SizeLeft.QuadPart == 0) {
        return TRUE;
    }

    //
    //  try-finally to guarantee cleanup.
    //

    try {

        //
        //  Allocate a page to hold the zeros we will write, and
        //  zero it.
        //

        ZeroBytes = NumberOfColors * PAGE_SIZE;

        if (SizeLeft.HighPart == 0 && SizeLeft.LowPart < ZeroBytes) {
            ZeroBytes = SizeLeft.LowPart;
        }

        Zeros = (PCHAR)ExAllocatePoolWithTag( NonPagedPoolCacheAligned, ZeroBytes, 'eZcC' );

        if (Zeros != NULL) {

            //
            //  Allocate and initialize an Mdl to describe the zeros
            //  we need to transfer.  Allocate to cover the maximum
            //  size required, and we will use and reuse it in the
            //  loop below, initialized correctly.
            //

            if (SizeLeft.HighPart == 0 && SizeLeft.LowPart < MAX_ZERO_TRANSFER) {

                ZeroTransfer = SizeLeft.LowPart;

            } else {

                //
                //  See how aggressive we can afford to be.  The global
                //  count/threshold pair throttles how many concurrent
                //  callers may use the large transfer size.
                //

                if (InterlockedIncrement( &CcAggressiveZeroCount ) <= CcAggressiveZeroThreshold) {
                    AggressiveZero = TRUE;
                    ZeroTransfer = MAX_ZERO_TRANSFER;
                } else {
                    InterlockedDecrement( &CcAggressiveZeroCount );
                    ZeroTransfer = MIN_ZERO_TRANSFER;
                }
            }

            //
            //  Since the maximum zero may start at a very aggressive level, fall back
            //  until we really have to give up.  Since filter drivers, filesystems and
            //  even storage drivers may need to map this Mdl, we have to pre-map it
            //  into system space so that we know enough PTEs are available.  We also
            //  need to throttle our consumption of virtual addresses based on the size
            //  of the system and the number of parallel instances of this work outstanding.
            //  This may be a bit of overkill, but since running out of PTEs is a fatal
            //  event for the rest of the system, try to help out while still being fast.
            //

            while (TRUE) {

                //
                //  Spin down trying to get an MDL which can describe our operation.
                //

                while (TRUE) {

                    ZeroMdl = IoAllocateMdl( Zeros, ZeroTransfer, FALSE, FALSE, NULL );

                    //
                    //  Throttle ourselves to what we've physically allocated.  Note that
                    //  we could have started with an odd multiple of this number.  If we
                    //  tried for exactly that size and failed, we're toast.
                    //

                    if (ZeroMdl || ZeroTransfer == ZeroBytes) {

                        break;
                    }

                    Fall_Back:

                    //
                    //  Fallback by half and round down to a sector multiple.
                    //

                    ZeroTransfer /= 2;
                    ZeroTransfer &= ~SectorMask;
                    if (ZeroTransfer < ZeroBytes) {
                        ZeroTransfer = ZeroBytes;
                    }

                    ASSERT( (ZeroTransfer & SectorMask) == 0 && ZeroTransfer != 0);
                }

                if (ZeroMdl == NULL) {

                    ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
                }

                //
                //  If we have throttled all the way down, stop and just build a
                //  simple MDL describing our previous allocation.
                //

                if (ZeroTransfer == ZeroBytes) {

                    MmBuildMdlForNonPagedPool( ZeroMdl );
                    break;
                }

                //
                //  Now we will temporarily lock the allocated pages
                //  only, and then replicate the page frame numbers through
                //  the entire Mdl to keep writing the same pages of zeros.
                //
                //  It would be nice if Mm exported a way for us to not have
                //  to pull the Mdl apart and rebuild it ourselves, but this
                //  is so bizarre a purpose as to be tolerable.
                //

                SavedByteCount = ZeroMdl->ByteCount;
                ZeroMdl->ByteCount = ZeroBytes;
                MmBuildMdlForNonPagedPool( ZeroMdl );

                ZeroMdl->MdlFlags &= ~MDL_SOURCE_IS_NONPAGED_POOL;
                ZeroMdl->MdlFlags |= MDL_PAGES_LOCKED;
                ZeroMdl->MappedSystemVa = NULL;
                ZeroMdl->ByteCount = SavedByteCount;
                Page = MmGetMdlPfnArray( ZeroMdl );
                for (i = NumberOfColors;
                     i < (COMPUTE_PAGES_SPANNED( 0, SavedByteCount ));
                     i++) {

                    *(Page + i) = *(Page + i - NumberOfColors);
                }

                if (MmGetSystemAddressForMdlSafe( ZeroMdl, LowPagePriority ) == NULL) {

                    //
                    //  Blow away this Mdl and trim for the retry.  Since it didn't
                    //  get mapped, there is nothing fancy to do.
                    //

                    IoFreeMdl( ZeroMdl );
                    goto Fall_Back;
                }

                break;
            }

        //
        //  We failed to allocate the space we wanted, so we will go to
        //  half of a page and limp along.
        //

        } else {

            //
            //  Of course, if we have a device which has large sectors, that defines
            //  the lower limit of our attempt.
            //

            if (IoGetRelatedDeviceObject(FileObject)->SectorSize < PAGE_SIZE / 2) {

                ZeroBytes = PAGE_SIZE / 2;
                Zeros = (PCHAR)ExAllocatePoolWithTag( NonPagedPoolCacheAligned, ZeroBytes, 'eZcC' );
            }

            //
            //  If we cannot get even that much, then let's write a sector at a time.
            //

            if (Zeros == NULL) {

                ZeroBytes = IoGetRelatedDeviceObject(FileObject)->SectorSize;
                Zeros = (PCHAR)ExAllocatePoolWithTag( NonPagedPoolCacheAligned, ZeroBytes, 'eZcC' );

                //
                //  If we cannot get even the minimum, we have to give up.
                //

                if (Zeros == NULL) {
                    ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
                }
            }

            //
            //  Allocate and initialize an Mdl to describe the zeros
            //  we need to transfer.  Allocate to cover the maximum
            //  size required, and we will use and reuse it in the
            //  loop below, initialized correctly.
            //

            ZeroTransfer = ZeroBytes;
            ZeroMdl = IoAllocateMdl( Zeros, ZeroBytes, FALSE, FALSE, NULL );

            ASSERT( (ZeroTransfer & SectorMask) == 0 );

            if (ZeroMdl == NULL) {
                ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
            }

            //
            //  Now we will lock and map the allocated pages.
            //

            MmBuildMdlForNonPagedPool( ZeroMdl );

            ASSERT( ZeroMdl->MappedSystemVa == Zeros );
        }

#ifdef MIPS
#ifdef MIPS_PREFILL
        RtlFillMemory( Zeros, ZeroBytes, 0xDD );
        KeSweepDcache( TRUE );
#endif
#endif

        //
        //  Zero the buffer now.
        //

        RtlZeroMemory( Zeros, ZeroBytes );

        //
        //  We have a mapped and zeroed range backed by an MDL to use.  Note the
        //  size we have for cleanup, since we will possibly wind this down
        //  over the operation.
        //

        ASSERT( MmGetSystemAddressForMdl(ZeroMdl) );
        MaxBytesMappedInMdl = ZeroMdl->ByteCount;

        //
        //  Now loop to write buffers full of zeros through to the file
        //  until we reach the starting Vbn for the transfer.
        //

        ASSERT( ZeroTransfer != 0 &&
                (ZeroTransfer & SectorMask) == 0 &&
                (SizeLeft.LowPart & SectorMask) == 0 );

        while ( SizeLeft.QuadPart != 0 ) {

            IO_STATUS_BLOCK IoStatus;
            NTSTATUS Status;
            KEVENT Event;

            //
            //  See if we really need to write that many zeros, and
            //  trim the size back if not.
            //

            if ( (LONGLONG)ZeroTransfer > SizeLeft.QuadPart ) {

                ZeroTransfer = SizeLeft.LowPart;
            }

            //
            //  (Re)initialize the kernel event to FALSE.
            //

            KeInitializeEvent( &Event, NotificationEvent, FALSE );

            //
            //  Initiate and wait for the synchronous transfer.
            //

            ZeroMdl->ByteCount = ZeroTransfer;

            Status = IoSynchronousPageWrite( FileObject,
                                             ZeroMdl,
                                             &FOffset,
                                             &Event,
                                             &IoStatus );

            //
            //  If pending is returned (which is a successful status),
            //  we must wait for the request to complete.
            //

            if (Status == STATUS_PENDING) {
                KeWaitForSingleObject( &Event,
                                       Executive,
                                       KernelMode,
                                       FALSE,
                                       (PLARGE_INTEGER)NULL);
            }


            //
            //  If we got an error back in Status, then the Iosb
            //  was not written, so we will just copy the status
            //  there, then test the final status after that.
            //

            if (!NT_SUCCESS(Status)) {
                ExRaiseStatus( Status );
            }

            if (!NT_SUCCESS(IoStatus.Status)) {
                ExRaiseStatus( IoStatus.Status );
            }

            //
            //  If we succeeded, then update where we are at by how much
            //  we wrote, and loop back to see if there is more.
            //

            FOffset.QuadPart = FOffset.QuadPart + (LONGLONG)ZeroTransfer;
            SizeLeft.QuadPart = SizeLeft.QuadPart - (LONGLONG)ZeroTransfer;
        }
    }
    finally{

        //
        //  Clean up anything from zeroing pages on a noncached
        //  write.
        //

        if (ZeroMdl != NULL) {

            //
            //  If we mapped a hand-built (replicated-PFN) Mdl above, we
            //  must restore its full byte count before unmapping so the
            //  entire mapped range is released.
            //

            if ((MaxBytesMappedInMdl != 0) &&
                !FlagOn(ZeroMdl->MdlFlags, MDL_SOURCE_IS_NONPAGED_POOL)) {
                ZeroMdl->ByteCount = MaxBytesMappedInMdl;
                MmUnmapLockedPages (ZeroMdl->MappedSystemVa, ZeroMdl);
            }

            IoFreeMdl( ZeroMdl );
        }

        //
        //  Give back our slot in the aggressive-zeroing throttle.
        //

        if (AggressiveZero) {
            InterlockedDecrement( &CcAggressiveZeroCount );
        }

        if (Zeros != NULL) {
            ExFreePool( Zeros );
        }

        DebugTrace(-1, me, "CcZeroData -> TRUE\n", 0 );
    }

    return TRUE;
}

VOID CcZeroEndOfLastPage ( IN PFILE_OBJECT FileObject )
 

Definition at line 2804 of file fssup.c.

References _SHARED_CACHE_MAP::ActiveVacb, ASSERT, CcAcquireMasterLock, CcDecrementOpenCount, CcDirtySharedCacheMapList, CcFlushCache(), CcFreeActiveVacb(), CcIncrementOpenCount, CcPurgeCacheSection(), CcReleaseMasterLock, CcScheduleLazyWriteScan(), _SHARED_CACHE_MAP::DirtyPages, FALSE, FlagOn, _SHARED_CACHE_MAP::Flags, FSRTL_FLAG2_PURGE_WHEN_MAPPED, FSRTL_FLAG_ADVANCED_HEADER, FSRTL_FLAG_USER_MAPPED_FILE, FsRtlAcquireFileExclusive(), FsRtlReleaseFile(), GetActiveVacbAtDpcLevel, LazyWriter, _SHARED_CACHE_MAP::NeedToZero, NULL, _SHARED_CACHE_MAP::OpenCount, _LAZY_WRITER::OtherWork, _LAZY_WRITER::ScanActive, SetFlag, _SHARED_CACHE_MAP::SharedCacheMapLinks, _SHARED_CACHE_MAP_LIST_CURSOR::SharedCacheMapLinks, TRUE, and WRITE_QUEUED.

Referenced by MiCreateImageFileMap(), and NtCreateSection().

:

    This routine is only called by Mm before mapping a user view to
    a section.  If there is an uninitialized page at the end of the
    file, we zero it by freeing that page.

Parameters:

    FileObject - File object for section to be mapped

Return Value:

    None
--*/

{
    PSHARED_CACHE_MAP SharedCacheMap;
    ULONG ActivePage;
    ULONG PageIsDirty;
    KIRQL OldIrql;
    PVOID NeedToZero = NULL;
    PVACB ActiveVacb = NULL;
    IO_STATUS_BLOCK Iosb;
    BOOLEAN PurgeResult;

    //
    //  See if we have an active Vacb, that we need to free.
    //
    //  Note the lock order here:  the file is acquired exclusive first,
    //  then the cache manager master lock is taken to examine the
    //  shared cache map.
    //

    FsRtlAcquireFileExclusive( FileObject );
    CcAcquireMasterLock( &OldIrql );
    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    if (SharedCacheMap != NULL) {

        //
        //  See if there is an active vacb.  Taking a reference on the
        //  shared cache map here keeps it alive until we decrement the
        //  open count at the bottom of this routine.
        //

        if ((SharedCacheMap->ActiveVacb != NULL) || ((NeedToZero = SharedCacheMap->NeedToZero) != NULL)) {

            CcIncrementOpenCount( SharedCacheMap, 'peZS' );
            GetActiveVacbAtDpcLevel( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );
        }
    }

    CcReleaseMasterLock( OldIrql );

    //
    //  Remember in FsRtl header if there is a user section.
    //  If this is an advanced header then also acquire the mutex to access
    //  this field.
    //

    if (FlagOn( ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->Flags,
                FSRTL_FLAG_ADVANCED_HEADER )) {

        ExAcquireFastMutex( ((PFSRTL_ADVANCED_FCB_HEADER)FileObject->FsContext)->FastMutex );

        SetFlag( ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->Flags,
                 FSRTL_FLAG_USER_MAPPED_FILE );

        ExReleaseFastMutex( ((PFSRTL_ADVANCED_FCB_HEADER)FileObject->FsContext)->FastMutex );

    } else {

        SetFlag( ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->Flags,
                 FSRTL_FLAG_USER_MAPPED_FILE );
    }

    //
    //  Free the active vacb now so we don't deadlock if we have to purge
    //

    if ((ActiveVacb != NULL) || (NeedToZero != NULL)) {
        CcFreeActiveVacb( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );
    }

    //
    //  If the file system asked for a purge when the file gets mapped,
    //  flush any dirty data and purge the cache section now, while we
    //  still hold the file exclusive.
    //

    if (FlagOn( ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->Flags2, FSRTL_FLAG2_PURGE_WHEN_MAPPED )) {

        if (FileObject->SectionObjectPointer->SharedCacheMap) {
            ASSERT( ((PSHARED_CACHE_MAP)(FileObject->SectionObjectPointer->SharedCacheMap))->VacbActiveCount == 0 );
        }

        CcFlushCache( FileObject->SectionObjectPointer, NULL, 0, &Iosb );
        PurgeResult = CcPurgeCacheSection( FileObject->SectionObjectPointer, NULL, 0, FALSE );

        if (FileObject->SectionObjectPointer->SharedCacheMap) {
            ASSERT( ((PSHARED_CACHE_MAP)(FileObject->SectionObjectPointer->SharedCacheMap))->VacbActiveCount == 0 );
        }
    }

    FsRtlReleaseFile( FileObject );

    //
    //  If the file is cached and we have a Vacb to free, we need to
    //  use the lazy writer callback to synchronize so no one will be
    //  extending valid data.
    //

    if ((ActiveVacb != NULL) || (NeedToZero != NULL)) {

        //
        //  Serialize again to decrement the open count.
        //

        CcAcquireMasterLock( &OldIrql );

        CcDecrementOpenCount( SharedCacheMap, 'peZF' );

        if ((SharedCacheMap->OpenCount == 0) &&
            !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) &&
            (SharedCacheMap->DirtyPages == 0)) {

            //
            //  Move to the dirty list.
            //

            RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );
            InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks,
                            &SharedCacheMap->SharedCacheMapLinks );

            //
            //  Make sure the Lazy Writer will wake up, because we
            //  want him to delete this SharedCacheMap.
            //

            LazyWriter.OtherWork = TRUE;
            if (!LazyWriter.ScanActive) {
                CcScheduleLazyWriteScan();
            }
        }

        CcReleaseMasterLock( OldIrql );
    }
}


Variable Documentation

POBJECT_TYPE IoFileObjectType
 

Definition at line 44 of file fssup.c.

Referenced by BuildQueryDirectoryIrp(), IoAttachDevice(), IoCheckDesiredAccess(), IoCreateStreamFileObject(), IoCreateStreamFileObjectLite(), IoGetDeviceObjectPointer(), IopCompleteDumpInitialization(), IopConfigureCrashDump(), IopCreateObjectTypes(), IopGetDumpStack(), IopInvalidateVolumesForDevice(), IopMarkBootPartition(), IopOpenLinkOrRenameTarget(), IopParseDevice(), IopSetEaOrQuotaInformationFile(), IopTrackLink(), IopXxxControlFile(), MmCreateSection(), NtCancelIoFile(), NtCreatePagingFile(), NtFlushBuffersFile(), NtLockFile(), NtNotifyChangeDirectoryFile(), NtQueryEaFile(), NtQueryInformationFile(), NtQueryQuotaInformationFile(), NtQueryVolumeInformationFile(), NtReadFile(), NtReadFileScatter(), NtSetEaFile(), NtSetInformationFile(), NtSetVolumeInformationFile(), NtUnlockFile(), NtWriteFile(), NtWriteFileGather(), ObGetObjectInformation(), ObpLookupObjectName(), UdfInvalidateVolumes(), and VdmQueryDirectoryFile().

ULONG MmLargeSystemCache
 

Definition at line 45 of file fssup.c.

Referenced by CcInitializeCacheManager(), MiInitMachineDependent(), and MmInitSystem().


Generated on Sat May 15 19:43:47 2004 for test by doxygen 1.3.7