Main Page | Class Hierarchy | Class List | File List | Class Members | File Members

cache.h File Reference

Go to the source code of this file.

Classes

struct  _PUBLIC_BCB
struct  _CC_FILE_SIZES
struct  _CACHE_MANAGER_CALLBACKS
struct  _CACHE_UNINITIALIZE_EVENT

Defines

#define VACB_MAPPING_GRANULARITY   (0x40000)
#define VACB_OFFSET_SHIFT   (18)
#define CcIsFileCached(FO)
#define CcGetFileSizePointer(FO)
#define CcCopyWriteWontFlush(FO, FOFF, LEN)   ((LEN) <= 0X10000)
#define CcReadAhead(FO, FOFF, LEN)
#define PIN_WAIT   (1)
#define PIN_EXCLUSIVE   (2)
#define PIN_NO_READ   (4)
#define PIN_IF_BCB   (8)

Typedefs

typedef _PUBLIC_BCB PUBLIC_BCB
typedef _PUBLIC_BCB * PPUBLIC_BCB
typedef _CC_FILE_SIZES CC_FILE_SIZES
typedef _CC_FILE_SIZES * PCC_FILE_SIZES
typedef BOOLEAN(* PACQUIRE_FOR_LAZY_WRITE )(IN PVOID Context, IN BOOLEAN Wait)
typedef VOID(* PRELEASE_FROM_LAZY_WRITE )(IN PVOID Context)
typedef BOOLEAN(* PACQUIRE_FOR_READ_AHEAD )(IN PVOID Context, IN BOOLEAN Wait)
typedef VOID(* PRELEASE_FROM_READ_AHEAD )(IN PVOID Context)
typedef _CACHE_MANAGER_CALLBACKS CACHE_MANAGER_CALLBACKS
typedef _CACHE_MANAGER_CALLBACKS * PCACHE_MANAGER_CALLBACKS
typedef _CACHE_UNINITIALIZE_EVENT CACHE_UNINITIALIZE_EVENT
typedef _CACHE_UNINITIALIZE_EVENT * PCACHE_UNINITIALIZE_EVENT
typedef VOID(* PDIRTY_PAGE_ROUTINE )(IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER FileOffset, IN ULONG Length, IN PLARGE_INTEGER OldestLsn, IN PLARGE_INTEGER NewestLsn, IN PVOID Context1, IN PVOID Context2)
typedef VOID(* PFLUSH_TO_LSN )(IN PVOID LogHandle, IN LARGE_INTEGER Lsn)
typedef VOID(* PCC_POST_DEFERRED_WRITE )(IN PVOID Context1, IN PVOID Context2)

Functions

NTKERNELAPI BOOLEAN CcInitializeCacheManager ()
NTKERNELAPI VOID CcInitializeCacheMap (IN PFILE_OBJECT FileObject, IN PCC_FILE_SIZES FileSizes, IN BOOLEAN PinAccess, IN PCACHE_MANAGER_CALLBACKS Callbacks, IN PVOID LazyWriteContext)
NTKERNELAPI BOOLEAN CcUninitializeCacheMap (IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER TruncateSize OPTIONAL, IN PCACHE_UNINITIALIZE_EVENT UninitializeCompleteEvent OPTIONAL)
NTKERNELAPI VOID CcSetFileSizes (IN PFILE_OBJECT FileObject, IN PCC_FILE_SIZES FileSizes)
NTKERNELAPI BOOLEAN CcPurgeCacheSection (IN PSECTION_OBJECT_POINTERS SectionObjectPointer, IN PLARGE_INTEGER FileOffset OPTIONAL, IN ULONG Length, IN BOOLEAN UninitializeCacheMaps)
NTKERNELAPI VOID CcSetDirtyPageThreshold (IN PFILE_OBJECT FileObject, IN ULONG DirtyPageThreshold)
NTKERNELAPI VOID CcFlushCache (IN PSECTION_OBJECT_POINTERS SectionObjectPointer, IN PLARGE_INTEGER FileOffset OPTIONAL, IN ULONG Length, OUT PIO_STATUS_BLOCK IoStatus OPTIONAL)
NTKERNELAPI LARGE_INTEGER CcGetFlushedValidData (IN PSECTION_OBJECT_POINTERS SectionObjectPointer, IN BOOLEAN BcbListHeld)
NTKERNELAPI VOID CcZeroEndOfLastPage (IN PFILE_OBJECT FileObject)
NTKERNELAPI BOOLEAN CcZeroData (IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER StartOffset, IN PLARGE_INTEGER EndOffset, IN BOOLEAN Wait)
NTKERNELAPI PVOID CcRemapBcb (IN PVOID Bcb)
NTKERNELAPI VOID CcRepinBcb (IN PVOID Bcb)
NTKERNELAPI VOID CcUnpinRepinnedBcb (IN PVOID Bcb, IN BOOLEAN WriteThrough, OUT PIO_STATUS_BLOCK IoStatus)
NTKERNELAPI PFILE_OBJECT CcGetFileObjectFromSectionPtrs (IN PSECTION_OBJECT_POINTERS SectionObjectPointer)
NTKERNELAPI PFILE_OBJECT CcGetFileObjectFromBcb (IN PVOID Bcb)
NTKERNELAPI BOOLEAN CcCanIWrite (IN PFILE_OBJECT FileObject, IN ULONG BytesToWrite, IN BOOLEAN Wait, IN BOOLEAN Retrying)
NTKERNELAPI VOID CcDeferWrite (IN PFILE_OBJECT FileObject, IN PCC_POST_DEFERRED_WRITE PostRoutine, IN PVOID Context1, IN PVOID Context2, IN ULONG BytesToWrite, IN BOOLEAN Retrying)
NTKERNELAPI BOOLEAN CcCopyRead (IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER FileOffset, IN ULONG Length, IN BOOLEAN Wait, OUT PVOID Buffer, OUT PIO_STATUS_BLOCK IoStatus)
NTKERNELAPI VOID CcFastCopyRead (IN PFILE_OBJECT FileObject, IN ULONG FileOffset, IN ULONG Length, IN ULONG PageCount, OUT PVOID Buffer, OUT PIO_STATUS_BLOCK IoStatus)
NTKERNELAPI BOOLEAN CcCopyWrite (IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER FileOffset, IN ULONG Length, IN BOOLEAN Wait, IN PVOID Buffer)
NTKERNELAPI VOID CcFastCopyWrite (IN PFILE_OBJECT FileObject, IN ULONG FileOffset, IN ULONG Length, IN PVOID Buffer)
NTKERNELAPI VOID CcMdlRead (IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER FileOffset, IN ULONG Length, OUT PMDL *MdlChain, OUT PIO_STATUS_BLOCK IoStatus)
NTKERNELAPI VOID CcMdlReadComplete (IN PFILE_OBJECT FileObject, IN PMDL MdlChain)
NTKERNELAPI VOID CcMdlReadComplete2 (IN PFILE_OBJECT FileObject, IN PMDL MdlChain)
NTKERNELAPI VOID CcPrepareMdlWrite (IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER FileOffset, IN ULONG Length, OUT PMDL *MdlChain, OUT PIO_STATUS_BLOCK IoStatus)
NTKERNELAPI VOID CcMdlWriteComplete (IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER FileOffset, IN PMDL MdlChain)
NTKERNELAPI VOID CcMdlWriteComplete2 (IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER FileOffset, IN PMDL MdlChain)
NTKERNELAPI VOID CcScheduleReadAhead (IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER FileOffset, IN ULONG Length)
NTSTATUS CcWaitForCurrentLazyWriterActivity ()
NTKERNELAPI VOID CcSetReadAheadGranularity (IN PFILE_OBJECT FileObject, IN ULONG Granularity)
NTKERNELAPI BOOLEAN CcPinRead (IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER FileOffset, IN ULONG Length, IN ULONG Flags, OUT PVOID *Bcb, OUT PVOID *Buffer)
NTKERNELAPI BOOLEAN CcMapData (IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER FileOffset, IN ULONG Length, IN BOOLEAN Wait, OUT PVOID *Bcb, OUT PVOID *Buffer)
NTKERNELAPI BOOLEAN CcPinMappedData (IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER FileOffset, IN ULONG Length, IN ULONG Flags, IN OUT PVOID *Bcb)
NTKERNELAPI BOOLEAN CcPreparePinWrite (IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER FileOffset, IN ULONG Length, IN BOOLEAN Zero, IN ULONG Flags, OUT PVOID *Bcb, OUT PVOID *Buffer)
NTKERNELAPI VOID CcSetDirtyPinnedData (IN PVOID BcbVoid, IN PLARGE_INTEGER Lsn OPTIONAL)
NTKERNELAPI VOID CcUnpinData (IN PVOID Bcb)
NTKERNELAPI VOID CcSetBcbOwnerPointer (IN PVOID Bcb, IN PVOID OwnerPointer)
NTKERNELAPI VOID CcUnpinDataForThread (IN PVOID Bcb, IN ERESOURCE_THREAD ResourceThreadId)
NTKERNELAPI VOID CcSetAdditionalCacheAttributes (IN PFILE_OBJECT FileObject, IN BOOLEAN DisableReadAhead, IN BOOLEAN DisableWriteBehind)
NTKERNELAPI VOID CcSetLogHandleForFile (IN PFILE_OBJECT FileObject, IN PVOID LogHandle, IN PFLUSH_TO_LSN FlushToLsnRoutine)
NTKERNELAPI LARGE_INTEGER CcGetDirtyPages (IN PVOID LogHandle, IN PDIRTY_PAGE_ROUTINE DirtyPageRoutine, IN PVOID Context1, IN PVOID Context2)
NTKERNELAPI BOOLEAN CcIsThereDirtyData (IN PVPB Vpb)
NTKERNELAPI LARGE_INTEGER CcGetLsnForFileObject (IN PFILE_OBJECT FileObject, OUT PLARGE_INTEGER OldestLsn OPTIONAL)

Variables

ULONG CcThrowAway
ULONG CcFastReadNoWait
ULONG CcFastReadWait
ULONG CcFastReadResourceMiss
ULONG CcFastReadNotPossible
ULONG CcFastMdlReadNoWait
ULONG CcFastMdlReadWait
ULONG CcFastMdlReadResourceMiss
ULONG CcFastMdlReadNotPossible
ULONG CcMapDataNoWait
ULONG CcMapDataWait
ULONG CcMapDataNoWaitMiss
ULONG CcMapDataWaitMiss
ULONG CcPinMappedDataCount
ULONG CcPinReadNoWait
ULONG CcPinReadWait
ULONG CcPinReadNoWaitMiss
ULONG CcPinReadWaitMiss
ULONG CcCopyReadNoWait
ULONG CcCopyReadWait
ULONG CcCopyReadNoWaitMiss
ULONG CcCopyReadWaitMiss
ULONG CcMdlReadNoWait
ULONG CcMdlReadWait
ULONG CcMdlReadNoWaitMiss
ULONG CcMdlReadWaitMiss
ULONG CcReadAheadIos
ULONG CcLazyWriteIos
ULONG CcLazyWritePages
ULONG CcDataFlushes
ULONG CcDataPages
PULONG CcMissCounter


Define Documentation

#define CcCopyWriteWontFlush FO,
FOFF,
LEN   )     ((LEN) <= 0X10000)
 

Definition at line 375 of file cache.h.

Referenced by FsRtlCopyWrite().

#define CcGetFileSizePointer FO   ) 
 

Value:

( \ ((PLARGE_INTEGER)((FO)->SectionObjectPointer->SharedCacheMap) + 1) \ )

Definition at line 277 of file cache.h.

Referenced by FsRtlCopyWrite(), and FsRtlPrepareMdlWriteDev().

#define CcIsFileCached FO   ) 
 

Value:

( \ ((FO)->SectionObjectPointer != NULL) && \ (((PSECTION_OBJECT_POINTERS)(FO)->SectionObjectPointer)->SharedCacheMap != NULL) \ )

Definition at line 173 of file cache.h.

Referenced by IoSynchronousPageWrite().

#define CcReadAhead FO,
FOFF,
LEN   ) 
 

Value:

{ \ if ((LEN) >= 256) { \ CcScheduleReadAhead((FO),(FOFF),(LEN)); \ } \ }

Definition at line 531 of file cache.h.

#define PIN_EXCLUSIVE   (2)
 

Definition at line 590 of file cache.h.

Referenced by CcPinFileData().

#define PIN_IF_BCB   (8)
 

Definition at line 609 of file cache.h.

Referenced by CcPinFileData().

#define PIN_NO_READ   (4)
 

Definition at line 600 of file cache.h.

Referenced by CcPinFileData().

#define PIN_WAIT   (1)
 

Definition at line 582 of file cache.h.

Referenced by CcPinFileData(), CcPinMappedData(), and CcPinRead().

#define VACB_MAPPING_GRANULARITY   (0x40000)
 

Definition at line 30 of file cache.h.

Referenced by CcCopyRead(), CcCopyWrite(), CcCreateVacbArray(), CcFastCopyRead(), CcFastCopyWrite(), CcFreeActiveVacb(), CcGetVacbMiss(), CcGetVirtualAddress(), CcGetVirtualAddressIfMapped(), CcInitializeCacheMap(), CcPinFileData(), CcUnmapVacbArray(), UdfLookupDirEntryPostProcessing(), and UdfLookupInitialDirEntry().

#define VACB_OFFSET_SHIFT   (18)
 

Definition at line 31 of file cache.h.

Referenced by CcAdjustVacbLevelLockCount(), CcCopyRead(), CcCreateVacbArray(), CcExtendVacbArray(), CcFastCopyRead(), CcGetBcbListHeadLargeOffset(), CcGetVacbLargeOffset(), CcInitializeVacbs(), CcSetVacbLargeOffset(), and SetVacb().


Typedef Documentation

typedef struct _CACHE_MANAGER_CALLBACKS CACHE_MANAGER_CALLBACKS
 

typedef struct _CACHE_UNINITIALIZE_EVENT CACHE_UNINITIALIZE_EVENT
 

typedef struct _CC_FILE_SIZES CC_FILE_SIZES
 

typedef BOOLEAN(* PACQUIRE_FOR_LAZY_WRITE)(IN PVOID Context, IN BOOLEAN Wait)
 

Definition at line 90 of file cache.h.

typedef BOOLEAN(* PACQUIRE_FOR_READ_AHEAD)(IN PVOID Context, IN BOOLEAN Wait)
 

Definition at line 109 of file cache.h.

typedef struct _CACHE_MANAGER_CALLBACKS * PCACHE_MANAGER_CALLBACKS
 

typedef struct _CACHE_UNINITIALIZE_EVENT * PCACHE_UNINITIALIZE_EVENT
 

typedef struct _CC_FILE_SIZES * PCC_FILE_SIZES
 

Referenced by UdfCommonRead(), and UdfLookupMetaVsnOfExtent().

typedef VOID(* PCC_POST_DEFERRED_WRITE)(IN PVOID Context1, IN PVOID Context2)
 

Definition at line 387 of file cache.h.

typedef VOID(* PDIRTY_PAGE_ROUTINE)(IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER FileOffset, IN ULONG Length, IN PLARGE_INTEGER OldestLsn, IN PLARGE_INTEGER NewestLsn, IN PVOID Context1, IN PVOID Context2)
 

Definition at line 148 of file cache.h.

typedef VOID(* PFLUSH_TO_LSN)(IN PVOID LogHandle, IN LARGE_INTEGER Lsn)
 

Definition at line 163 of file cache.h.

typedef struct _PUBLIC_BCB * PPUBLIC_BCB
 

typedef VOID(* PRELEASE_FROM_LAZY_WRITE)(IN PVOID Context)
 

Definition at line 100 of file cache.h.

typedef VOID(* PRELEASE_FROM_READ_AHEAD)(IN PVOID Context)
 

Definition at line 119 of file cache.h.

typedef struct _PUBLIC_BCB PUBLIC_BCB
 


Function Documentation

NTKERNELAPI BOOLEAN CcCanIWrite IN PFILE_OBJECT  FileObject,
IN ULONG  BytesToWrite,
IN BOOLEAN  Wait,
IN BOOLEAN  Retrying
 

Referenced by CcLazyWriteScan(), CcPostDeferredWrites(), FsRtlCopyWrite(), and FsRtlPrepareMdlWriteDev().

NTKERNELAPI BOOLEAN CcCopyRead IN PFILE_OBJECT  FileObject,
IN PLARGE_INTEGER  FileOffset,
IN ULONG  Length,
IN BOOLEAN  Wait,
OUT PVOID  Buffer,
OUT PIO_STATUS_BLOCK  IoStatus
 

Definition at line 31 of file copysup.c.

References ASSERT, _VACB::BaseAddress, _PRIVATE_CACHE_MAP::BeyondLastByte1, _PRIVATE_CACHE_MAP::BeyondLastByte2, Buffer, CcCopyReadExceptionFilter(), CcCopyReadNoWait, CcCopyReadNoWaitMiss, CcCopyReadWait, CcCopyReadWaitMiss, CcFreeActiveVacb(), CcFreeVirtualAddress(), CcGetVirtualAddress(), CcMissCounter, CcPinFileData(), CcScheduleReadAhead(), CcThrowAway, CcUnpinFileData(), COMPUTE_PAGES_SPANNED, DebugTrace, ExRaiseStatus(), FALSE, _PRIVATE_CACHE_MAP::FileOffset1, _PRIVATE_CACHE_MAP::FileOffset2, _SHARED_CACHE_MAP::FileSize, FlagOn, FO_RANDOM_ACCESS, FsRtlNormalizeNtstatus(), GetActiveVacb, HOT_STATISTIC, me, MmCheckCachedPageState(), MmResetPageFaultReadAhead, MmSavePageFaultReadAhead, MmSetPageFaultReadAhead, _SHARED_CACHE_MAP::NeedToZero, NTSTATUS(), NULL, PAGE_SHIFT, PsGetCurrentThread, _PRIVATE_CACHE_MAP::ReadAheadEnabled, _PRIVATE_CACHE_MAP::ReadAheadLength, ROUND_TO_PAGES, SetActiveVacb, Status, TRUE, UNPIN, VACB_MAPPING_GRANULARITY, and VACB_OFFSET_SHIFT.

Referenced by FsRtlCopyRead(), and UdfCommonRead().

00042 : 00043 00044 This routine attempts to copy the specified file data from the cache 00045 into the output buffer, and deliver the correct I/O status. It is *not* 00046 safe to call this routine from Dpc level. 00047 00048 If the caller does not want to block (such as for disk I/O), then 00049 Wait should be supplied as FALSE. If Wait was supplied as FALSE and 00050 it is currently impossible to supply all of the requested data without 00051 blocking, then this routine will return FALSE. However, if the 00052 data is immediately accessible in the cache and no blocking is 00053 required, this routine copies the data and returns TRUE. 00054 00055 If the caller supplies Wait as TRUE, then this routine is guaranteed 00056 to copy the data and return TRUE. If the data is immediately 00057 accessible in the cache, then no blocking will occur. Otherwise, 00058 the the data transfer from the file into the cache will be initiated, 00059 and the caller will be blocked until the data can be returned. 00060 00061 File system Fsd's should typically supply Wait = TRUE if they are 00062 processing a synchronous I/O requests, or Wait = FALSE if they are 00063 processing an asynchronous request. 00064 00065 File system or Server Fsp threads should supply Wait = TRUE. 00066 00067 Arguments: 00068 00069 FileObject - Pointer to the file object for a file which was 00070 opened with NO_INTERMEDIATE_BUFFERING clear, i.e., for 00071 which CcInitializeCacheMap was called by the file system. 00072 00073 FileOffset - Byte offset in file for desired data. 00074 00075 Length - Length of desired data in bytes. 00076 00077 Wait - FALSE if caller may not block, TRUE otherwise (see description 00078 above) 00079 00080 Buffer - Pointer to output buffer to which data should be copied. 00081 00082 IoStatus - Pointer to standard I/O status block to receive the status 00083 for the transfer. (STATUS_SUCCESS guaranteed for cache 00084 hits, otherwise the actual I/O status is returned.) 
00085 00086 Note that even if FALSE is returned, the IoStatus.Information 00087 field will return the count of any bytes successfully 00088 transferred before a blocking condition occured. The caller 00089 may either choose to ignore this information, or resume 00090 the copy later accounting for bytes transferred. 00091 00092 Return Value: 00093 00094 FALSE - if Wait was supplied as FALSE and the data was not delivered 00095 00096 TRUE - if the data is being delivered 00097 00098 --*/ 00099 00100 { 00101 PSHARED_CACHE_MAP SharedCacheMap; 00102 PPRIVATE_CACHE_MAP PrivateCacheMap; 00103 PVOID CacheBuffer; 00104 LARGE_INTEGER FOffset; 00105 PVACB Vacb; 00106 PBCB Bcb; 00107 PVACB ActiveVacb; 00108 ULONG ActivePage; 00109 ULONG PageIsDirty; 00110 ULONG SavedState; 00111 ULONG PagesToGo; 00112 ULONG MoveLength; 00113 ULONG LengthToGo; 00114 KIRQL OldIrql; 00115 NTSTATUS Status; 00116 ULONG OriginalLength = Length; 00117 ULONG PageCount = COMPUTE_PAGES_SPANNED((ULongToPtr(FileOffset->LowPart)), Length); 00118 PETHREAD Thread = PsGetCurrentThread(); 00119 ULONG GotAMiss = 0; 00120 00121 DebugTrace(+1, me, "CcCopyRead\n", 0 ); 00122 00123 MmSavePageFaultReadAhead( Thread, &SavedState ); 00124 00125 // 00126 // Get pointer to shared and private cache maps 00127 // 00128 00129 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap; 00130 PrivateCacheMap = FileObject->PrivateCacheMap; 00131 00132 // 00133 // Check for read past file size, the caller must filter this case out. 00134 // 00135 00136 ASSERT( ( FileOffset->QuadPart + (LONGLONG)Length) <= SharedCacheMap->FileSize.QuadPart ); 00137 00138 // 00139 // If read ahead is enabled, then do the read ahead here so it 00140 // overlaps with the copy (otherwise we will do it below). 00141 // Note that we are assuming that we will not get ahead of our 00142 // current transfer - if read ahead is working it should either 00143 // already be in memory or else underway. 
00144 // 00145 00146 if (PrivateCacheMap->ReadAheadEnabled && (PrivateCacheMap->ReadAheadLength[1] == 0)) { 00147 CcScheduleReadAhead( FileObject, FileOffset, Length ); 00148 } 00149 00150 FOffset = *FileOffset; 00151 00152 // 00153 // Increment performance counters 00154 // 00155 00156 if (Wait) { 00157 HOT_STATISTIC(CcCopyReadWait) += 1; 00158 00159 // 00160 // This is not an exact solution, but when IoPageRead gets a miss, 00161 // it cannot tell whether it was CcCopyRead or CcMdlRead, but since 00162 // the miss should occur very soon, by loading the pointer here 00163 // probably the right counter will get incremented, and in any case, 00164 // we hope the errrors average out! 00165 // 00166 00167 CcMissCounter = &CcCopyReadWaitMiss; 00168 00169 } else { 00170 HOT_STATISTIC(CcCopyReadNoWait) += 1; 00171 } 00172 00173 // 00174 // See if we have an active Vacb, that we can just copy to. 00175 // 00176 00177 GetActiveVacb( SharedCacheMap, OldIrql, ActiveVacb, ActivePage, PageIsDirty ); 00178 00179 if (ActiveVacb != NULL) { 00180 00181 if ((ULONG)(FOffset.QuadPart >> VACB_OFFSET_SHIFT) == (ActivePage >> (VACB_OFFSET_SHIFT - PAGE_SHIFT))) { 00182 00183 ULONG LengthToCopy = VACB_MAPPING_GRANULARITY - (FOffset.LowPart & (VACB_MAPPING_GRANULARITY - 1)); 00184 00185 if (SharedCacheMap->NeedToZero != NULL) { 00186 CcFreeActiveVacb( SharedCacheMap, NULL, 0, FALSE ); 00187 } 00188 00189 // 00190 // Get the starting point in the view. 00191 // 00192 00193 CacheBuffer = (PVOID)((PCHAR)ActiveVacb->BaseAddress + 00194 (FOffset.LowPart & (VACB_MAPPING_GRANULARITY - 1))); 00195 00196 // 00197 // Reduce LengthToCopy if it is greater than our caller's length. 00198 // 00199 00200 if (LengthToCopy > Length) { 00201 LengthToCopy = Length; 00202 } 00203 00204 // 00205 // Like the logic for the normal case below, we want to spin around 00206 // making sure Mm only reads the pages we will need. 
00207 // 00208 00209 PagesToGo = COMPUTE_PAGES_SPANNED( CacheBuffer, 00210 LengthToCopy ) - 1; 00211 00212 // 00213 // Copy the data to the user buffer. 00214 // 00215 00216 try { 00217 00218 if (PagesToGo != 0) { 00219 00220 LengthToGo = LengthToCopy; 00221 00222 while (LengthToGo != 0) { 00223 00224 MoveLength = (ULONG)((PCHAR)(ROUND_TO_PAGES(((PCHAR)CacheBuffer + 1))) - 00225 (PCHAR)CacheBuffer); 00226 00227 if (MoveLength > LengthToGo) { 00228 MoveLength = LengthToGo; 00229 } 00230 00231 // 00232 // Here's hoping that it is cheaper to call Mm to see if 00233 // the page is valid. If not let Mm know how many pages 00234 // we are after before doing the move. 00235 // 00236 00237 MmSetPageFaultReadAhead( Thread, PagesToGo ); 00238 GotAMiss |= !MmCheckCachedPageState( CacheBuffer, FALSE ); 00239 00240 RtlCopyBytes( Buffer, CacheBuffer, MoveLength ); 00241 00242 PagesToGo -= 1; 00243 00244 LengthToGo -= MoveLength; 00245 Buffer = (PCHAR)Buffer + MoveLength; 00246 CacheBuffer = (PCHAR)CacheBuffer + MoveLength; 00247 } 00248 00249 // 00250 // Handle the read here that stays on a single page. 00251 // 00252 00253 } else { 00254 00255 // 00256 // Here's hoping that it is cheaper to call Mm to see if 00257 // the page is valid. If not let Mm know how many pages 00258 // we are after before doing the move. 00259 // 00260 00261 MmSetPageFaultReadAhead( Thread, 0 ); 00262 GotAMiss |= !MmCheckCachedPageState( CacheBuffer, FALSE ); 00263 00264 RtlCopyBytes( Buffer, CacheBuffer, LengthToCopy ); 00265 00266 Buffer = (PCHAR)Buffer + LengthToCopy; 00267 } 00268 00269 } except( CcCopyReadExceptionFilter( GetExceptionInformation(), 00270 &Status ) ) { 00271 00272 MmResetPageFaultReadAhead( Thread, SavedState ); 00273 00274 SetActiveVacb( SharedCacheMap, OldIrql, ActiveVacb, ActivePage, PageIsDirty ); 00275 00276 // 00277 // If we got an access violation, then the user buffer went 00278 // away. Otherwise we must have gotten an I/O error trying 00279 // to bring the data in. 
00280 // 00281 00282 if (Status == STATUS_ACCESS_VIOLATION) { 00283 ExRaiseStatus( STATUS_INVALID_USER_BUFFER ); 00284 } 00285 else { 00286 ExRaiseStatus( FsRtlNormalizeNtstatus( Status, 00287 STATUS_UNEXPECTED_IO_ERROR )); 00288 } 00289 } 00290 00291 // 00292 // Now adjust FOffset and Length by what we copied. 00293 // 00294 00295 FOffset.QuadPart = FOffset.QuadPart + (LONGLONG)LengthToCopy; 00296 Length -= LengthToCopy; 00297 00298 } 00299 00300 // 00301 // If that was all the data, then remember the Vacb 00302 // 00303 00304 if (Length == 0) { 00305 00306 SetActiveVacb( SharedCacheMap, OldIrql, ActiveVacb, ActivePage, PageIsDirty ); 00307 00308 // 00309 // Otherwise we must free it because we will map other vacbs below. 00310 // 00311 00312 } else { 00313 00314 CcFreeActiveVacb( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty ); 00315 } 00316 } 00317 00318 // 00319 // Not all of the transfer will come back at once, so we have to loop 00320 // until the entire transfer is complete. 00321 // 00322 00323 while (Length != 0) { 00324 00325 ULONG ReceivedLength; 00326 LARGE_INTEGER BeyondLastByte; 00327 00328 // 00329 // Call local routine to Map or Access the file data, then move the data, 00330 // then call another local routine to free the data. If we cannot map 00331 // the data because of a Wait condition, return FALSE. 00332 // 00333 // Note that this call may result in an exception, however, if it 00334 // does no Bcb is returned and this routine has absolutely no 00335 // cleanup to perform. Therefore, we do not have a try-finally 00336 // and we allow the possibility that we will simply be unwound 00337 // without notice. 
00338 // 00339 00340 if (Wait) { 00341 00342 CacheBuffer = CcGetVirtualAddress( SharedCacheMap, 00343 FOffset, 00344 &Vacb, 00345 &ReceivedLength ); 00346 00347 BeyondLastByte.QuadPart = FOffset.QuadPart + (LONGLONG)ReceivedLength; 00348 00349 } else if (!CcPinFileData( FileObject, 00350 &FOffset, 00351 Length, 00352 TRUE, 00353 FALSE, 00354 FALSE, 00355 &Bcb, 00356 &CacheBuffer, 00357 &BeyondLastByte )) { 00358 00359 DebugTrace(-1, me, "CcCopyRead -> FALSE\n", 0 ); 00360 00361 HOT_STATISTIC(CcCopyReadNoWaitMiss) += 1; 00362 00363 // 00364 // Enable ReadAhead if we missed. 00365 // 00366 00367 PrivateCacheMap->ReadAheadEnabled = TRUE; 00368 00369 return FALSE; 00370 00371 } else { 00372 00373 // 00374 // Calculate how much data is described by Bcb starting at our desired 00375 // file offset. 00376 // 00377 00378 ReceivedLength = (ULONG)(BeyondLastByte.QuadPart - FOffset.QuadPart); 00379 } 00380 00381 // 00382 // If we got more than we need, make sure to only transfer 00383 // the right amount. 00384 // 00385 00386 if (ReceivedLength > Length) { 00387 ReceivedLength = Length; 00388 } 00389 00390 // 00391 // It is possible for the user buffer to become no longer accessible 00392 // since it was last checked by the I/O system. If we fail to access 00393 // the buffer we must raise a status that the caller's exception 00394 // filter considers as "expected". Also we unmap the Bcb here, since 00395 // we otherwise would have no other reason to put a try-finally around 00396 // this loop. 00397 // 00398 00399 try { 00400 00401 PagesToGo = COMPUTE_PAGES_SPANNED( CacheBuffer, 00402 ReceivedLength ) - 1; 00403 00404 // 00405 // We know exactly how much we want to read here, and we do not 00406 // want to read any more in case the caller is doing random access. 00407 // Our read ahead logic takes care of detecting sequential reads, 00408 // and tends to do large asynchronous read aheads. So far we have 00409 // only mapped the data and we have not forced any in. 
What we 00410 // do now is get into a loop where we copy a page at a time and 00411 // just prior to each move, we tell MM how many additional pages 00412 // we would like to have read in, in the event that we take a 00413 // fault. With this strategy, for cache hits we never make a single 00414 // expensive call to MM to guarantee that the data is in, yet if we 00415 // do take a fault, we are guaranteed to only take one fault because 00416 // we will read all of the data in for the rest of the transfer. 00417 // 00418 // We test first for the multiple page case, to keep the small 00419 // reads faster. 00420 // 00421 00422 if (PagesToGo != 0) { 00423 00424 LengthToGo = ReceivedLength; 00425 00426 while (LengthToGo != 0) { 00427 00428 MoveLength = (ULONG)((PCHAR)(ROUND_TO_PAGES(((PCHAR)CacheBuffer + 1))) - 00429 (PCHAR)CacheBuffer); 00430 00431 if (MoveLength > LengthToGo) { 00432 MoveLength = LengthToGo; 00433 } 00434 00435 // 00436 // Here's hoping that it is cheaper to call Mm to see if 00437 // the page is valid. If not let Mm know how many pages 00438 // we are after before doing the move. 00439 // 00440 00441 MmSetPageFaultReadAhead( Thread, PagesToGo ); 00442 GotAMiss |= !MmCheckCachedPageState( CacheBuffer, FALSE ); 00443 00444 RtlCopyBytes( Buffer, CacheBuffer, MoveLength ); 00445 00446 PagesToGo -= 1; 00447 00448 LengthToGo -= MoveLength; 00449 Buffer = (PCHAR)Buffer + MoveLength; 00450 CacheBuffer = (PCHAR)CacheBuffer + MoveLength; 00451 } 00452 00453 // 00454 // Handle the read here that stays on a single page. 00455 // 00456 00457 } else { 00458 00459 // 00460 // Here's hoping that it is cheaper to call Mm to see if 00461 // the page is valid. If not let Mm know how many pages 00462 // we are after before doing the move. 
00463 // 00464 00465 MmSetPageFaultReadAhead( Thread, 0 ); 00466 GotAMiss |= !MmCheckCachedPageState( CacheBuffer, FALSE ); 00467 00468 RtlCopyBytes( Buffer, CacheBuffer, ReceivedLength ); 00469 00470 Buffer = (PCHAR)Buffer + ReceivedLength; 00471 } 00472 00473 } 00474 except( CcCopyReadExceptionFilter( GetExceptionInformation(), 00475 &Status ) ) { 00476 00477 CcMissCounter = &CcThrowAway; 00478 00479 // 00480 // If we get an exception, then we have to renable page fault 00481 // clustering and unmap on the way out. 00482 // 00483 00484 MmResetPageFaultReadAhead( Thread, SavedState ); 00485 00486 00487 if (Wait) { 00488 CcFreeVirtualAddress( Vacb ); 00489 } else { 00490 CcUnpinFileData( Bcb, TRUE, UNPIN ); 00491 } 00492 00493 // 00494 // If we got an access violation, then the user buffer went 00495 // away. Otherwise we must have gotten an I/O error trying 00496 // to bring the data in. 00497 // 00498 00499 if (Status == STATUS_ACCESS_VIOLATION) { 00500 ExRaiseStatus( STATUS_INVALID_USER_BUFFER ); 00501 } 00502 else { 00503 ExRaiseStatus( FsRtlNormalizeNtstatus( Status, 00504 STATUS_UNEXPECTED_IO_ERROR )); 00505 } 00506 } 00507 00508 // 00509 // Update number of bytes transferred. 00510 // 00511 00512 Length -= ReceivedLength; 00513 00514 // 00515 // Unmap the data now, and calculate length left to transfer. 00516 // 00517 00518 if (Wait) { 00519 00520 // 00521 // If there is more to go, just free this vacb. 00522 // 00523 00524 if (Length != 0) { 00525 00526 CcFreeVirtualAddress( Vacb ); 00527 00528 // 00529 // Otherwise save it for the next time through. 00530 // 00531 00532 } else { 00533 00534 SetActiveVacb( SharedCacheMap, OldIrql, Vacb, (ULONG)(FOffset.QuadPart >> PAGE_SHIFT), 0 ); 00535 break; 00536 } 00537 00538 } else { 00539 CcUnpinFileData( Bcb, TRUE, UNPIN ); 00540 } 00541 00542 // 00543 // Assume we did not get all the data we wanted, and set FOffset 00544 // to the end of the returned data. 
00545 // 00546 00547 FOffset = BeyondLastByte; 00548 } 00549 00550 MmResetPageFaultReadAhead( Thread, SavedState ); 00551 00552 CcMissCounter = &CcThrowAway; 00553 00554 // 00555 // Now enable read ahead if it looks like we got any misses, and do 00556 // the first one. 00557 // 00558 00559 if (GotAMiss && 00560 !FlagOn( FileObject->Flags, FO_RANDOM_ACCESS ) && 00561 !PrivateCacheMap->ReadAheadEnabled) { 00562 00563 PrivateCacheMap->ReadAheadEnabled = TRUE; 00564 CcScheduleReadAhead( FileObject, FileOffset, OriginalLength ); 00565 } 00566 00567 // 00568 // Now that we have described our desired read ahead, let's 00569 // shift the read history down. 00570 // 00571 00572 PrivateCacheMap->FileOffset1 = PrivateCacheMap->FileOffset2; 00573 PrivateCacheMap->BeyondLastByte1 = PrivateCacheMap->BeyondLastByte2; 00574 PrivateCacheMap->FileOffset2 = *FileOffset; 00575 PrivateCacheMap->BeyondLastByte2.QuadPart = 00576 FileOffset->QuadPart + (LONGLONG)OriginalLength; 00577 00578 IoStatus->Status = STATUS_SUCCESS; 00579 IoStatus->Information = OriginalLength; 00580 00581 DebugTrace(-1, me, "CcCopyRead -> TRUE\n", 0 ); 00582 00583 return TRUE; 00584 }

NTKERNELAPI BOOLEAN CcCopyWrite IN PFILE_OBJECT  FileObject,
IN PLARGE_INTEGER  FileOffset,
IN ULONG  Length,
IN BOOLEAN  Wait,
IN PVOID  Buffer
 

Definition at line 1075 of file copysup.c.

References ACTIVE_PAGE_IS_DIRTY, _SHARED_CACHE_MAP::ActiveVacbSpinLock, _VACB::BaseAddress, BooleanFlagOn, Buffer, CcCopyReadExceptionFilter(), CcFreeActiveVacb(), CcMapAndCopy(), CcPinFileData(), CcSetDirtyPinnedData(), CcUnpinFileData(), DebugTrace, ExRaiseStatus(), FALSE, FlagOn, FO_WRITE_THROUGH, FSRTL_FLAG_ADVANCED_HEADER, FsRtlNormalizeNtstatus(), GetActiveVacb, me, _SHARED_CACHE_MAP::NeedToZero, _SHARED_CACHE_MAP::NeedToZeroVacb, NTSTATUS(), NULL, PAGE_SHIFT, PAGE_SIZE, PFSRTL_ADVANCED_FCB_HEADER, SetActiveVacb, Status, TRUE, UNPIN, VACB_MAPPING_GRANULARITY, ZERO_FIRST_PAGE, ZERO_LAST_PAGE, and ZERO_MIDDLE_PAGES.

Referenced by FsRtlCopyWrite().

01085 : 01086 01087 This routine attempts to copy the specified file data from the specified 01088 buffer into the Cache, and deliver the correct I/O status. It is *not* 01089 safe to call this routine from Dpc level. 01090 01091 If the caller does not want to block (such as for disk I/O), then 01092 Wait should be supplied as FALSE. If Wait was supplied as FALSE and 01093 it is currently impossible to receive all of the requested data without 01094 blocking, then this routine will return FALSE. However, if the 01095 correct space is immediately accessible in the cache and no blocking is 01096 required, this routine copies the data and returns TRUE. 01097 01098 If the caller supplies Wait as TRUE, then this routine is guaranteed 01099 to copy the data and return TRUE. If the correct space is immediately 01100 accessible in the cache, then no blocking will occur. Otherwise, 01101 the necessary work will be initiated to read and/or free cache data, 01102 and the caller will be blocked until the data can be received. 01103 01104 File system Fsd's should typically supply Wait = TRUE if they are 01105 processing a synchronous I/O requests, or Wait = FALSE if they are 01106 processing an asynchronous request. 01107 01108 File system or Server Fsp threads should supply Wait = TRUE. 01109 01110 Arguments: 01111 01112 FileObject - Pointer to the file object for a file which was 01113 opened with NO_INTERMEDIATE_BUFFERING clear, i.e., for 01114 which CcInitializeCacheMap was called by the file system. 01115 01116 FileOffset - Byte offset in file to receive the data. 01117 01118 Length - Length of data in bytes. 01119 01120 Wait - FALSE if caller may not block, TRUE otherwise (see description 01121 above) 01122 01123 Buffer - Pointer to input buffer from which data should be copied. 01124 01125 Return Value: 01126 01127 FALSE - if Wait was supplied as FALSE and the data was not copied. 01128 01129 TRUE - if the data has been copied. 
01130 01131 Raises: 01132 01133 STATUS_INSUFFICIENT_RESOURCES - If a pool allocation failure occurs. 01134 This can only occur if Wait was specified as TRUE. (If Wait is 01135 specified as FALSE, and an allocation failure occurs, this 01136 routine simply returns FALSE.) 01137 01138 --*/ 01139 01140 { 01141 PSHARED_CACHE_MAP SharedCacheMap; 01142 PFSRTL_ADVANCED_FCB_HEADER FcbHeader; 01143 PVACB ActiveVacb; 01144 ULONG ActivePage; 01145 PVOID ActiveAddress; 01146 ULONG PageIsDirty; 01147 KIRQL OldIrql; 01148 NTSTATUS Status; 01149 PVOID CacheBuffer; 01150 LARGE_INTEGER FOffset; 01151 PBCB Bcb; 01152 ULONG ZeroFlags; 01153 LARGE_INTEGER Temp; 01154 01155 DebugTrace(+1, me, "CcCopyWrite\n", 0 ); 01156 01157 // 01158 // If the caller specified Wait == FALSE, but the FileObject is WriteThrough, 01159 // then we need to just get out. 01160 // 01161 01162 if ((FileObject->Flags & FO_WRITE_THROUGH) && !Wait) { 01163 01164 DebugTrace(-1, me, "CcCopyWrite->FALSE (WriteThrough && !Wait)\n", 0 ); 01165 01166 return FALSE; 01167 } 01168 01169 // 01170 // Get pointer to shared cache map 01171 // 01172 01173 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap; 01174 FOffset = *FileOffset; 01175 01176 // 01177 // See if we have an active Vacb, that we can just copy to. 01178 // 01179 01180 GetActiveVacb( SharedCacheMap, OldIrql, ActiveVacb, ActivePage, PageIsDirty ); 01181 01182 if (ActiveVacb != NULL) { 01183 01184 // 01185 // See if the request starts in the ActivePage. WriteThrough requests must 01186 // go the longer route through CcMapAndCopy, where WriteThrough flushes are 01187 // implemented. 01188 // 01189 01190 if (((ULONG)(FOffset.QuadPart >> PAGE_SHIFT) == ActivePage) && (Length != 0) && 01191 !FlagOn( FileObject->Flags, FO_WRITE_THROUGH )) { 01192 01193 ULONG LengthToCopy = PAGE_SIZE - (FOffset.LowPart & (PAGE_SIZE - 1)); 01194 01195 // 01196 // Reduce LengthToCopy if it is greater than our caller's length. 
01197 // 01198 01199 if (LengthToCopy > Length) { 01200 LengthToCopy = Length; 01201 } 01202 01203 // 01204 // Copy the data to the user buffer. 01205 // 01206 01207 try { 01208 01209 // 01210 // If we are copying to a page that is locked down, then 01211 // we have to do it under our spinlock, and update the 01212 // NeedToZero field. 01213 // 01214 01215 OldIrql = 0xFF; 01216 01217 CacheBuffer = (PVOID)((PCHAR)ActiveVacb->BaseAddress + 01218 (FOffset.LowPart & (VACB_MAPPING_GRANULARITY - 1))); 01219 01220 if (SharedCacheMap->NeedToZero != NULL) { 01221 01222 // 01223 // The FastLock may not write our "flag". 01224 // 01225 01226 OldIrql = 0; 01227 01228 ExAcquireFastLock( &SharedCacheMap->ActiveVacbSpinLock, &OldIrql ); 01229 01230 // 01231 // Note that the NeedToZero could be cleared, since we 01232 // tested it without the spinlock. 01233 // 01234 01235 ActiveAddress = SharedCacheMap->NeedToZero; 01236 if ((ActiveAddress != NULL) && 01237 (ActiveVacb == SharedCacheMap->NeedToZeroVacb) && 01238 (((PCHAR)CacheBuffer + LengthToCopy) > (PCHAR)ActiveAddress)) { 01239 01240 // 01241 // If we are skipping some bytes in the page, then we need 01242 // to zero them. 01243 // 01244 01245 if ((PCHAR)CacheBuffer > (PCHAR)ActiveAddress) { 01246 01247 RtlZeroMemory( ActiveAddress, (PCHAR)CacheBuffer - (PCHAR)ActiveAddress ); 01248 } 01249 SharedCacheMap->NeedToZero = (PVOID)((PCHAR)CacheBuffer + LengthToCopy); 01250 } 01251 01252 ExReleaseFastLock( &SharedCacheMap->ActiveVacbSpinLock, OldIrql ); 01253 } 01254 01255 RtlCopyBytes( CacheBuffer, Buffer, LengthToCopy ); 01256 01257 } except( CcCopyReadExceptionFilter( GetExceptionInformation(), 01258 &Status ) ) { 01259 01260 // 01261 // If we failed to overwrite the uninitialized data, 01262 // zero it now (we cannot safely restore NeedToZero). 
01263 // 01264 01265 if (OldIrql != 0xFF) { 01266 RtlZeroBytes( CacheBuffer, LengthToCopy ); 01267 } 01268 01269 SetActiveVacb( SharedCacheMap, OldIrql, ActiveVacb, ActivePage, ACTIVE_PAGE_IS_DIRTY ); 01270 01271 // 01272 // If we got an access violation, then the user buffer went 01273 // away. Otherwise we must have gotten an I/O error trying 01274 // to bring the data in. 01275 // 01276 01277 if (Status == STATUS_ACCESS_VIOLATION) { 01278 ExRaiseStatus( STATUS_INVALID_USER_BUFFER ); 01279 } 01280 else { 01281 ExRaiseStatus( FsRtlNormalizeNtstatus( Status, 01282 STATUS_UNEXPECTED_IO_ERROR )); 01283 } 01284 } 01285 01286 // 01287 // Now adjust FOffset and Length by what we copied. 01288 // 01289 01290 Buffer = (PVOID)((PCHAR)Buffer + LengthToCopy); 01291 FOffset.QuadPart = FOffset.QuadPart + (LONGLONG)LengthToCopy; 01292 Length -= LengthToCopy; 01293 01294 // 01295 // If that was all the data, then get outski... 01296 // 01297 01298 if (Length == 0) { 01299 01300 SetActiveVacb( SharedCacheMap, OldIrql, ActiveVacb, ActivePage, ACTIVE_PAGE_IS_DIRTY ); 01301 return TRUE; 01302 } 01303 01304 // 01305 // Remember that the page is dirty now. 01306 // 01307 01308 PageIsDirty |= ACTIVE_PAGE_IS_DIRTY; 01309 } 01310 01311 CcFreeActiveVacb( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty ); 01312 01313 // 01314 // Else someone else could have the active page, and may want to zero 01315 // the range we plan to write! 01316 // 01317 01318 } else if (SharedCacheMap->NeedToZero != NULL) { 01319 01320 CcFreeActiveVacb( SharedCacheMap, NULL, 0, FALSE ); 01321 } 01322 01323 // 01324 // At this point we can calculate the ZeroFlags. 01325 // 01326 01327 // 01328 // We can always zero middle pages, if any. 
01329 // 01330 01331 ZeroFlags = ZERO_MIDDLE_PAGES; 01332 01333 if (((FOffset.LowPart & (PAGE_SIZE - 1)) == 0) && 01334 (Length >= PAGE_SIZE)) { 01335 ZeroFlags |= ZERO_FIRST_PAGE; 01336 } 01337 01338 if (((FOffset.LowPart + Length) & (PAGE_SIZE - 1)) == 0) { 01339 ZeroFlags |= ZERO_LAST_PAGE; 01340 } 01341 01342 Temp = FOffset; 01343 Temp.LowPart &= ~(PAGE_SIZE -1); 01344 01345 // 01346 // If there is an advanced header, then we can acquire the FastMutex to 01347 // make capturing ValidDataLength atomic. Currently our other file systems 01348 // are either RO or do not really support 64-bits. 01349 // 01350 01351 FcbHeader = (PFSRTL_ADVANCED_FCB_HEADER)FileObject->FsContext; 01352 if (FlagOn(FcbHeader->Flags, FSRTL_FLAG_ADVANCED_HEADER)) { 01353 ExAcquireFastMutex( FcbHeader->FastMutex ); 01354 Temp.QuadPart = ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->ValidDataLength.QuadPart - 01355 Temp.QuadPart; 01356 ExReleaseFastMutex( FcbHeader->FastMutex ); 01357 } else { 01358 Temp.QuadPart = ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->ValidDataLength.QuadPart - 01359 Temp.QuadPart; 01360 } 01361 01362 if (Temp.QuadPart <= 0) { 01363 ZeroFlags |= ZERO_FIRST_PAGE | ZERO_MIDDLE_PAGES | ZERO_LAST_PAGE; 01364 } else if ((Temp.HighPart == 0) && (Temp.LowPart <= PAGE_SIZE)) { 01365 ZeroFlags |= ZERO_MIDDLE_PAGES | ZERO_LAST_PAGE; 01366 } 01367 01368 // 01369 // Call a routine to map and copy the data in Mm and get out. 01370 // 01371 01372 if (Wait) { 01373 01374 CcMapAndCopy( SharedCacheMap, 01375 Buffer, 01376 &FOffset, 01377 Length, 01378 ZeroFlags, 01379 BooleanFlagOn( FileObject->Flags, FO_WRITE_THROUGH )); 01380 01381 return TRUE; 01382 } 01383 01384 // 01385 // The rest of this routine is the Wait == FALSE case. 01386 // 01387 // Not all of the transfer will come back at once, so we have to loop 01388 // until the entire transfer is complete. 
01389 // 01390 01391 while (Length != 0) { 01392 01393 ULONG ReceivedLength; 01394 LARGE_INTEGER BeyondLastByte; 01395 01396 if (!CcPinFileData( FileObject, 01397 &FOffset, 01398 Length, 01399 FALSE, 01400 TRUE, 01401 FALSE, 01402 &Bcb, 01403 &CacheBuffer, 01404 &BeyondLastByte )) { 01405 01406 DebugTrace(-1, me, "CcCopyWrite -> FALSE\n", 0 ); 01407 01408 return FALSE; 01409 01410 } else { 01411 01412 // 01413 // Calculate how much data is described by Bcb starting at our desired 01414 // file offset. 01415 // 01416 01417 ReceivedLength = (ULONG)(BeyondLastByte.QuadPart - FOffset.QuadPart); 01418 01419 // 01420 // If we got more than we need, make sure to only transfer 01421 // the right amount. 01422 // 01423 01424 if (ReceivedLength > Length) { 01425 ReceivedLength = Length; 01426 } 01427 } 01428 01429 // 01430 // It is possible for the user buffer to become no longer accessible 01431 // since it was last checked by the I/O system. If we fail to access 01432 // the buffer we must raise a status that the caller's exception 01433 // filter considers as "expected". Also we unmap the Bcb here, since 01434 // we otherwise would have no other reason to put a try-finally around 01435 // this loop. 01436 // 01437 01438 try { 01439 01440 RtlCopyBytes( CacheBuffer, Buffer, ReceivedLength ); 01441 01442 CcSetDirtyPinnedData( Bcb, NULL ); 01443 CcUnpinFileData( Bcb, FALSE, UNPIN ); 01444 } 01445 except( CcCopyReadExceptionFilter( GetExceptionInformation(), 01446 &Status ) ) { 01447 01448 CcUnpinFileData( Bcb, TRUE, UNPIN ); 01449 01450 // 01451 // If we got an access violation, then the user buffer went 01452 // away. Otherwise we must have gotten an I/O error trying 01453 // to bring the data in. 
01454 // 01455 01456 if (Status == STATUS_ACCESS_VIOLATION) { 01457 ExRaiseStatus( STATUS_INVALID_USER_BUFFER ); 01458 } 01459 else { 01460 01461 ExRaiseStatus(FsRtlNormalizeNtstatus( Status, STATUS_UNEXPECTED_IO_ERROR )); 01462 } 01463 } 01464 01465 // 01466 // Assume we did not get all the data we wanted, and set FOffset 01467 // to the end of the returned data and adjust the Buffer and Length. 01468 // 01469 01470 FOffset = BeyondLastByte; 01471 Buffer = (PCHAR)Buffer + ReceivedLength; 01472 Length -= ReceivedLength; 01473 } 01474 01475 DebugTrace(-1, me, "CcCopyWrite -> TRUE\n", 0 ); 01476 01477 return TRUE; 01478 }

NTKERNELAPI
VOID
CcDeferWrite (
    IN PFILE_OBJECT FileObject,
    IN PCC_POST_DEFERRED_WRITE PostRoutine,
    IN PVOID Context1,
    IN PVOID Context2,
    IN ULONG BytesToWrite,
    IN BOOLEAN Retrying
    )

/*++

Routine Description:

    This routine may be called to have the Cache Manager defer posting
    of a write until the Lazy Writer makes some progress writing, or
    there are more available pages.  A file system would normally call
    this routine after receiving FALSE from CcCanIWrite, and preparing
    the request to be posted.

Arguments:

    FileObject - for the file to be written

    PostRoutine - Address of the PostRoutine that the Cache Manager can
                  call to post the request when conditions are right.  Note
                  that it is possible that this routine will be called
                  immediately from this routine.

    Context1 - First context parameter for the post routine.

    Context2 - Second context parameter for the post routine.

    BytesToWrite - Number of bytes that the request is trying to write
                   to the cache.

    Retrying - Supplied as FALSE if the request is being posted for the
               first time, TRUE otherwise.

Return Value:

    None

--*/

{
    PDEFERRED_WRITE DeferredWrite;
    KIRQL OldIrql;

    //
    //  Attempt to allocate a deferred write block, and if we do not get
    //  one, just post the request immediately rather than gobbling up
    //  must-succeed pool.  (Posting now is the safe degradation: the
    //  caller's routine simply runs without waiting for the Lazy Writer.)
    //

    DeferredWrite = ExAllocatePoolWithTag( NonPagedPool, sizeof(DEFERRED_WRITE), 'wDcC' );

    if (DeferredWrite == NULL) {
        (*PostRoutine)( Context1, Context2 );
        return;
    }

    //
    //  Fill in the block.  Event is NULL because this is an asynchronous
    //  post-style deferral (no waiter to signal).
    //

    DeferredWrite->NodeTypeCode = CACHE_NTC_DEFERRED_WRITE;
    DeferredWrite->NodeByteSize = sizeof(DEFERRED_WRITE);
    DeferredWrite->FileObject = FileObject;
    DeferredWrite->BytesToWrite = BytesToWrite;
    DeferredWrite->Event = NULL;
    DeferredWrite->PostRoutine = PostRoutine;
    DeferredWrite->Context1 = Context1;
    DeferredWrite->Context2 = Context2;
    DeferredWrite->LimitModifiedPages = BooleanFlagOn(((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->Flags,
                                                      FSRTL_FLAG_LIMIT_MODIFIED_PAGES);

    //
    //  Now insert at the appropriate end of the list:  a retried request
    //  goes at the head so it is serviced before first-time requests.
    //

    if (Retrying) {
        ExInterlockedInsertHeadList( &CcDeferredWrites,
                                     &DeferredWrite->DeferredWriteLinks,
                                     &CcDeferredWriteSpinLock );
    } else {
        ExInterlockedInsertTailList( &CcDeferredWrites,
                                     &DeferredWrite->DeferredWriteLinks,
                                     &CcDeferredWriteSpinLock );
    }

    //
    //  Now since we really didn't synchronize anything but the insertion,
    //  we call the post routine to make sure that in some weird case we
    //  do not leave anyone hanging with no dirty bytes for the Lazy Writer.
    //

    CcPostDeferredWrites();

    //
    //  Schedule the lazy writer in case the reason we're blocking
    //  is that we're waiting for Mm (or some other external flag)
    //  to lower and let this write happen.  He will be the one to
    //  keep coming back and checking if this can proceed, even if
    //  there are no cache manager pages to write.
    //

    CcAcquireMasterLock( &OldIrql );

    if (!LazyWriter.ScanActive) {
        CcScheduleLazyWriteScan();
    }

    CcReleaseMasterLock( OldIrql );
}

NTKERNELAPI
VOID
CcFastCopyRead (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN ULONG PageCount,
    OUT PVOID Buffer,
    OUT PIO_STATUS_BLOCK IoStatus
    )

/*++

Routine Description:

    This routine attempts to copy the specified file data from the cache
    into the output buffer, and deliver the correct I/O status.

    This is a faster version of CcCopyRead which only supports 32-bit file
    offsets and synchronicity (Wait = TRUE).

Arguments:

    FileObject - Pointer to the file object for a file which was
                 opened with NO_INTERMEDIATE_BUFFERING clear, i.e., for
                 which CcInitializeCacheMap was called by the file system.

    FileOffset - Byte offset in file for desired data.

    Length - Length of desired data in bytes.

    PageCount - Number of pages spanned by the read.

    Buffer - Pointer to output buffer to which data should be copied.

    IoStatus - Pointer to standard I/O status block to receive the status
               for the transfer.  (STATUS_SUCCESS guaranteed for cache
               hits, otherwise the actual I/O status is returned.)

Return Value:

    None

--*/

{
    PSHARED_CACHE_MAP SharedCacheMap;
    PPRIVATE_CACHE_MAP PrivateCacheMap;
    PVOID CacheBuffer;
    LARGE_INTEGER FOffset;
    PVACB Vacb;
    PVACB ActiveVacb;
    ULONG ActivePage;
    ULONG PageIsDirty;
    ULONG SavedState;
    ULONG PagesToGo;
    ULONG MoveLength;
    ULONG LengthToGo;
    KIRQL OldIrql;
    NTSTATUS Status;
    LARGE_INTEGER OriginalOffset;
    ULONG OriginalLength = Length;
    PETHREAD Thread = PsGetCurrentThread();
    ULONG GotAMiss = 0;

    DebugTrace(+1, me, "CcFastCopyRead\n", 0 );

    //
    //  Save the current page fault read ahead state so it can be restored
    //  on every exit path (including the exception paths below).
    //

    MmSavePageFaultReadAhead( Thread, &SavedState );

    //
    //  Get pointer to shared and private cache maps
    //

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    PrivateCacheMap = FileObject->PrivateCacheMap;

    //
    //  Check for read past file size, the caller must filter this case out.
    //

    ASSERT( (FileOffset + Length) <= SharedCacheMap->FileSize.LowPart );

    //
    //  If read ahead is enabled, then do the read ahead here so it
    //  overlaps with the copy (otherwise we will do it below).
    //  Note that we are assuming that we will not get ahead of our
    //  current transfer - if read ahead is working it should either
    //  already be in memory or else underway.
    //

    OriginalOffset.LowPart = FileOffset;
    OriginalOffset.HighPart = 0;

    if (PrivateCacheMap->ReadAheadEnabled && (PrivateCacheMap->ReadAheadLength[1] == 0)) {
        CcScheduleReadAhead( FileObject, &OriginalOffset, Length );
    }

    //
    //  This is not an exact solution, but when IoPageRead gets a miss,
    //  it cannot tell whether it was CcCopyRead or CcMdlRead, but since
    //  the miss should occur very soon, by loading the pointer here
    //  probably the right counter will get incremented, and in any case,
    //  we hope the errors average out!
    //

    CcMissCounter = &CcCopyReadWaitMiss;

    //
    //  Increment performance counters
    //

    HOT_STATISTIC(CcCopyReadWait) += 1;

    //
    //  See if we have an active Vacb, that we can just copy from.
    //

    GetActiveVacb( SharedCacheMap, OldIrql, ActiveVacb, ActivePage, PageIsDirty );

    if (ActiveVacb != NULL) {

        //
        //  Only take the fast path if the read starts in the view that the
        //  active page belongs to (compare at VACB granularity).
        //

        if ((FileOffset >> VACB_OFFSET_SHIFT) == (ActivePage >> (VACB_OFFSET_SHIFT - PAGE_SHIFT))) {

            ULONG LengthToCopy = VACB_MAPPING_GRANULARITY - (FileOffset & (VACB_MAPPING_GRANULARITY - 1));

            //
            //  Someone may be depending on us to zero the tail of the
            //  active page; free it first so that zeroing happens before
            //  we read.
            //

            if (SharedCacheMap->NeedToZero != NULL) {
                CcFreeActiveVacb( SharedCacheMap, NULL, 0, FALSE );
            }

            //
            //  Get the starting point in the view.
            //

            CacheBuffer = (PVOID)((PCHAR)ActiveVacb->BaseAddress +
                                  (FileOffset & (VACB_MAPPING_GRANULARITY - 1)));

            //
            //  Reduce LengthToCopy if it is greater than our caller's length.
            //

            if (LengthToCopy > Length) {
                LengthToCopy = Length;
            }

            //
            //  Like the logic for the normal case below, we want to spin around
            //  making sure Mm only reads the pages we will need.
            //

            PagesToGo = COMPUTE_PAGES_SPANNED( CacheBuffer,
                                               LengthToCopy ) - 1;

            //
            //  Copy the data to the user buffer.
            //

            try {

                if (PagesToGo != 0) {

                    LengthToGo = LengthToCopy;

                    while (LengthToGo != 0) {

                        //
                        //  MoveLength is the number of bytes up to the next
                        //  page boundary (at least 1 byte).
                        //

                        MoveLength = (ULONG)((PCHAR)(ROUND_TO_PAGES(((PCHAR)CacheBuffer + 1))) -
                                             (PCHAR)CacheBuffer);

                        if (MoveLength > LengthToGo) {
                            MoveLength = LengthToGo;
                        }

                        //
                        //  Here's hoping that it is cheaper to call Mm to see if
                        //  the page is valid.  If not let Mm know how many pages
                        //  we are after before doing the move.
                        //

                        MmSetPageFaultReadAhead( Thread, PagesToGo );
                        GotAMiss |= !MmCheckCachedPageState( CacheBuffer, FALSE );

                        RtlCopyBytes( Buffer, CacheBuffer, MoveLength );

                        PagesToGo -= 1;

                        LengthToGo -= MoveLength;
                        Buffer = (PCHAR)Buffer + MoveLength;
                        CacheBuffer = (PCHAR)CacheBuffer + MoveLength;
                    }

                //
                //  Handle the read here that stays on a single page.
                //

                } else {

                    //
                    //  Here's hoping that it is cheaper to call Mm to see if
                    //  the page is valid.  If not let Mm know how many pages
                    //  we are after before doing the move.
                    //

                    MmSetPageFaultReadAhead( Thread, 0 );
                    GotAMiss |= !MmCheckCachedPageState( CacheBuffer, FALSE );

                    RtlCopyBytes( Buffer, CacheBuffer, LengthToCopy );

                    Buffer = (PCHAR)Buffer + LengthToCopy;
                }

            } except( CcCopyReadExceptionFilter( GetExceptionInformation(),
                                                 &Status ) ) {

                //
                //  Restore fault clustering and put the active Vacb back
                //  before re-raising.
                //

                MmResetPageFaultReadAhead( Thread, SavedState );

                SetActiveVacb( SharedCacheMap, OldIrql, ActiveVacb, ActivePage, PageIsDirty );

                //
                //  If we got an access violation, then the user buffer went
                //  away.  Otherwise we must have gotten an I/O error trying
                //  to bring the data in.
                //

                if (Status == STATUS_ACCESS_VIOLATION) {
                    ExRaiseStatus( STATUS_INVALID_USER_BUFFER );
                }
                else {
                    ExRaiseStatus( FsRtlNormalizeNtstatus( Status,
                                                           STATUS_UNEXPECTED_IO_ERROR ));
                }
            }

            //
            //  Now adjust FileOffset and Length by what we copied.
            //

            FileOffset += LengthToCopy;
            Length -= LengthToCopy;
        }

        //
        //  If that was all the data, then remember the Vacb
        //

        if (Length == 0) {

            SetActiveVacb( SharedCacheMap, OldIrql, ActiveVacb, ActivePage, PageIsDirty );

        //
        //  Otherwise we must free it because we will map other vacbs below.
        //

        } else {

            CcFreeActiveVacb( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );
        }
    }

    //
    //  Not all of the transfer will come back at once, so we have to loop
    //  until the entire transfer is complete.
    //

    FOffset.HighPart = 0;
    FOffset.LowPart = FileOffset;

    while (Length != 0) {

        ULONG ReceivedLength;
        ULONG BeyondLastByte;

        //
        //  Call local routine to Map or Access the file data, then move the data,
        //  then call another local routine to free the data.  If we cannot map
        //  the data because of a Wait condition, return FALSE.
        //
        //  Note that this call may result in an exception, however, if it
        //  does no Bcb is returned and this routine has absolutely no
        //  cleanup to perform.  Therefore, we do not have a try-finally
        //  and we allow the possibility that we will simply be unwound
        //  without notice.
        //

        CacheBuffer = CcGetVirtualAddress( SharedCacheMap,
                                           FOffset,
                                           &Vacb,
                                           &ReceivedLength );

        BeyondLastByte = FOffset.LowPart + ReceivedLength;

        //
        //  If we got more than we need, make sure to only transfer
        //  the right amount.
        //

        if (ReceivedLength > Length) {
            ReceivedLength = Length;
        }

        //
        //  It is possible for the user buffer to become no longer accessible
        //  since it was last checked by the I/O system.  If we fail to access
        //  the buffer we must raise a status that the caller's exception
        //  filter considers as "expected".  Also we unmap the Bcb here, since
        //  we otherwise would have no other reason to put a try-finally around
        //  this loop.
        //

        try {

            PagesToGo = COMPUTE_PAGES_SPANNED( CacheBuffer,
                                               ReceivedLength ) - 1;

            //
            //  We know exactly how much we want to read here, and we do not
            //  want to read any more in case the caller is doing random access.
            //  Our read ahead logic takes care of detecting sequential reads,
            //  and tends to do large asynchronous read aheads.  So far we have
            //  only mapped the data and we have not forced any in.  What we
            //  do now is get into a loop where we copy a page at a time and
            //  just prior to each move, we tell MM how many additional pages
            //  we would like to have read in, in the event that we take a
            //  fault.  With this strategy, for cache hits we never make a single
            //  expensive call to MM to guarantee that the data is in, yet if we
            //  do take a fault, we are guaranteed to only take one fault because
            //  we will read all of the data in for the rest of the transfer.
            //
            //  We test first for the multiple page case, to keep the small
            //  reads faster.
            //

            if (PagesToGo != 0) {

                LengthToGo = ReceivedLength;

                while (LengthToGo != 0) {

                    MoveLength = (ULONG)((PCHAR)(ROUND_TO_PAGES(((PCHAR)CacheBuffer + 1))) -
                                         (PCHAR)CacheBuffer);

                    if (MoveLength > LengthToGo) {
                        MoveLength = LengthToGo;
                    }

                    //
                    //  Here's hoping that it is cheaper to call Mm to see if
                    //  the page is valid.  If not let Mm know how many pages
                    //  we are after before doing the move.
                    //

                    MmSetPageFaultReadAhead( Thread, PagesToGo );
                    GotAMiss |= !MmCheckCachedPageState( CacheBuffer, FALSE );

                    RtlCopyBytes( Buffer, CacheBuffer, MoveLength );

                    PagesToGo -= 1;

                    LengthToGo -= MoveLength;
                    Buffer = (PCHAR)Buffer + MoveLength;
                    CacheBuffer = (PCHAR)CacheBuffer + MoveLength;
                }

            //
            //  Handle the read here that stays on a single page.
            //

            } else {

                //
                //  Here's hoping that it is cheaper to call Mm to see if
                //  the page is valid.  If not let Mm know how many pages
                //  we are after before doing the move.
                //

                MmSetPageFaultReadAhead( Thread, 0 );
                GotAMiss |= !MmCheckCachedPageState( CacheBuffer, FALSE );

                RtlCopyBytes( Buffer, CacheBuffer, ReceivedLength );

                Buffer = (PCHAR)Buffer + ReceivedLength;
            }
        }
        except( CcCopyReadExceptionFilter( GetExceptionInformation(),
                                           &Status ) ) {

            CcMissCounter = &CcThrowAway;

            //
            //  If we get an exception, then we have to re-enable page fault
            //  clustering and unmap on the way out.
            //

            MmResetPageFaultReadAhead( Thread, SavedState );

            CcFreeVirtualAddress( Vacb );

            //
            //  If we got an access violation, then the user buffer went
            //  away.  Otherwise we must have gotten an I/O error trying
            //  to bring the data in.
            //

            if (Status == STATUS_ACCESS_VIOLATION) {
                ExRaiseStatus( STATUS_INVALID_USER_BUFFER );
            }
            else {
                ExRaiseStatus( FsRtlNormalizeNtstatus( Status,
                                                       STATUS_UNEXPECTED_IO_ERROR ));
            }
        }

        //
        //  Update number of bytes transferred.
        //

        Length -= ReceivedLength;

        //
        //  Unmap the data now, and calculate length left to transfer.
        //

        if (Length != 0) {

            //
            //  If there is more to go, just free this vacb.
            //

            CcFreeVirtualAddress( Vacb );

        } else {

            //
            //  Otherwise save it for the next time through.
            //

            SetActiveVacb( SharedCacheMap, OldIrql, Vacb, (FOffset.LowPart >> PAGE_SHIFT), 0 );
            break;
        }

        //
        //  Assume we did not get all the data we wanted, and set FOffset
        //  to the end of the returned data.
        //

        FOffset.LowPart = BeyondLastByte;
    }

    MmResetPageFaultReadAhead( Thread, SavedState );

    CcMissCounter = &CcThrowAway;

    //
    //  Now enable read ahead if it looks like we got any misses, and do
    //  the first one.
    //

    if (GotAMiss &&
        !FlagOn( FileObject->Flags, FO_RANDOM_ACCESS ) &&
        !PrivateCacheMap->ReadAheadEnabled) {

        PrivateCacheMap->ReadAheadEnabled = TRUE;
        CcScheduleReadAhead( FileObject, &OriginalOffset, OriginalLength );
    }

    //
    //  Now that we have described our desired read ahead, let's
    //  shift the read history down.
    //

    PrivateCacheMap->FileOffset1.LowPart = PrivateCacheMap->FileOffset2.LowPart;
    PrivateCacheMap->BeyondLastByte1.LowPart = PrivateCacheMap->BeyondLastByte2.LowPart;
    PrivateCacheMap->FileOffset2.LowPart = OriginalOffset.LowPart;
    PrivateCacheMap->BeyondLastByte2.LowPart = OriginalOffset.LowPart + OriginalLength;

    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = OriginalLength;

    DebugTrace(-1, me, "CcFastCopyRead -> VOID\n", 0 );
}

NTKERNELAPI
VOID
CcFastCopyWrite (
    IN PFILE_OBJECT FileObject,
    IN ULONG FileOffset,
    IN ULONG Length,
    IN PVOID Buffer
    )

/*++

Routine Description:

    This routine attempts to copy the specified file data from the specified
    buffer into the Cache, and deliver the correct I/O status.

    This is a faster version of CcCopyWrite which only supports 32-bit file
    offsets and synchronicity (Wait = TRUE) and no Write Through.

Arguments:

    FileObject - Pointer to the file object for a file which was
                 opened with NO_INTERMEDIATE_BUFFERING clear, i.e., for
                 which CcInitializeCacheMap was called by the file system.

    FileOffset - Byte offset in file to receive the data.

    Length - Length of data in bytes.

    Buffer - Pointer to input buffer from which data should be copied.

Return Value:

    None

Raises:

    STATUS_INSUFFICIENT_RESOURCES - If a pool allocation failure occurs.

--*/

{
    PSHARED_CACHE_MAP SharedCacheMap;
    PVOID CacheBuffer;
    PVACB ActiveVacb;
    ULONG ActivePage;
    PVOID ActiveAddress;
    ULONG PageIsDirty;
    KIRQL OldIrql;
    NTSTATUS Status;
    ULONG ZeroFlags;
    ULONG ValidDataLength;
    LARGE_INTEGER FOffset;

    DebugTrace(+1, me, "CcFastCopyWrite\n", 0 );

    //
    //  Get pointer to shared cache map and a copy of valid data length
    //

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    //
    //  See if we have an active Vacb, that we can just copy to.
    //

    GetActiveVacb( SharedCacheMap, OldIrql, ActiveVacb, ActivePage, PageIsDirty );

    if (ActiveVacb != NULL) {

        //
        //  See if the request starts in the ActivePage.  WriteThrough requests must
        //  go the longer route through CcMapAndCopy, where WriteThrough flushes are
        //  implemented.
        //

        if (((FileOffset >> PAGE_SHIFT) == ActivePage) && (Length != 0) &&
            !FlagOn( FileObject->Flags, FO_WRITE_THROUGH )) {

            ULONG LengthToCopy = PAGE_SIZE - (FileOffset & (PAGE_SIZE - 1));

            //
            //  Reduce LengthToCopy if it is greater than our caller's length.
            //

            if (LengthToCopy > Length) {
                LengthToCopy = Length;
            }

            //
            //  Copy the data to the user buffer.
            //

            try {

                //
                //  If we are copying to a page that is locked down, then
                //  we have to do it under our spinlock, and update the
                //  NeedToZero field.  OldIrql == 0xFF marks "lock was never
                //  taken" for the exception handler below.
                //

                OldIrql = 0xFF;

                CacheBuffer = (PVOID)((PCHAR)ActiveVacb->BaseAddress +
                                      (FileOffset & (VACB_MAPPING_GRANULARITY - 1)));

                if (SharedCacheMap->NeedToZero != NULL) {

                    //
                    //  The FastLock may not write our "flag".
                    //

                    OldIrql = 0;

                    ExAcquireFastLock( &SharedCacheMap->ActiveVacbSpinLock, &OldIrql );

                    //
                    //  Note that the NeedToZero could be cleared, since we
                    //  tested it without the spinlock.
                    //

                    ActiveAddress = SharedCacheMap->NeedToZero;
                    if ((ActiveAddress != NULL) &&
                        (ActiveVacb == SharedCacheMap->NeedToZeroVacb) &&
                        (((PCHAR)CacheBuffer + LengthToCopy) > (PCHAR)ActiveAddress)) {

                        //
                        //  If we are skipping some bytes in the page, then we need
                        //  to zero them.
                        //

                        if ((PCHAR)CacheBuffer > (PCHAR)ActiveAddress) {

                            RtlZeroMemory( ActiveAddress, (PCHAR)CacheBuffer - (PCHAR)ActiveAddress );
                        }
                        SharedCacheMap->NeedToZero = (PVOID)((PCHAR)CacheBuffer + LengthToCopy);
                    }

                    ExReleaseFastLock( &SharedCacheMap->ActiveVacbSpinLock, OldIrql );
                }

                RtlCopyBytes( CacheBuffer, Buffer, LengthToCopy );

            } except( CcCopyReadExceptionFilter( GetExceptionInformation(),
                                                 &Status ) ) {

                //
                //  If we failed to overwrite the uninitialized data,
                //  zero it now (we cannot safely restore NeedToZero).
                //

                if (OldIrql != 0xFF) {
                    RtlZeroBytes( CacheBuffer, LengthToCopy );
                }

                SetActiveVacb( SharedCacheMap, OldIrql, ActiveVacb, ActivePage, ACTIVE_PAGE_IS_DIRTY );

                //
                //  If we got an access violation, then the user buffer went
                //  away.  Otherwise we must have gotten an I/O error trying
                //  to bring the data in.
                //

                if (Status == STATUS_ACCESS_VIOLATION) {
                    ExRaiseStatus( STATUS_INVALID_USER_BUFFER );
                }
                else {
                    ExRaiseStatus( FsRtlNormalizeNtstatus( Status,
                                                           STATUS_UNEXPECTED_IO_ERROR ));
                }
            }

            //
            //  Now adjust FileOffset and Length by what we copied.
            //

            Buffer = (PVOID)((PCHAR)Buffer + LengthToCopy);
            FileOffset += LengthToCopy;
            Length -= LengthToCopy;

            //
            //  If that was all the data, then get outski...
            //

            if (Length == 0) {

                SetActiveVacb( SharedCacheMap, OldIrql, ActiveVacb, ActivePage, ACTIVE_PAGE_IS_DIRTY );
                return;
            }

            //
            //  Remember that the page is dirty now.
            //

            PageIsDirty |= ACTIVE_PAGE_IS_DIRTY;
        }

        CcFreeActiveVacb( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );

    //
    //  Else someone else could have the active page, and may want to zero
    //  the range we plan to write!
    //

    } else if (SharedCacheMap->NeedToZero != NULL) {

        CcFreeActiveVacb( SharedCacheMap, NULL, 0, FALSE );
    }

    //
    //  Set up for call to CcMapAndCopy
    //

    FOffset.LowPart = FileOffset;
    FOffset.HighPart = 0;

    ValidDataLength = ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->ValidDataLength.LowPart;

    ASSERT((ValidDataLength == MAXULONG) ||
           (((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->ValidDataLength.HighPart == 0));

    //
    //  At this point we can calculate the ReadOnly flag for
    //  the purposes of whether to use the Bcb resource, and
    //  we can calculate the ZeroFlags.
    //

    //
    //  We can always zero middle pages, if any.
    //

    ZeroFlags = ZERO_MIDDLE_PAGES;

    if (((FileOffset & (PAGE_SIZE - 1)) == 0) &&
        (Length >= PAGE_SIZE)) {
        ZeroFlags |= ZERO_FIRST_PAGE;
    }

    if (((FileOffset + Length) & (PAGE_SIZE - 1)) == 0) {
        ZeroFlags |= ZERO_LAST_PAGE;
    }

    //
    //  Pages wholly beyond ValidDataLength contain no data the reader
    //  could care about, so they may be zeroed rather than read.
    //

    if ((FileOffset & ~(PAGE_SIZE - 1)) >= ValidDataLength) {
        ZeroFlags |= ZERO_FIRST_PAGE | ZERO_MIDDLE_PAGES | ZERO_LAST_PAGE;
    } else if (((FileOffset & ~(PAGE_SIZE - 1)) + PAGE_SIZE) >= ValidDataLength) {
        ZeroFlags |= ZERO_MIDDLE_PAGES | ZERO_LAST_PAGE;
    }

    //
    //  Call a routine to map and copy the data in Mm and get out.
    //

    CcMapAndCopy( SharedCacheMap,
                  Buffer,
                  &FOffset,
                  Length,
                  ZeroFlags,
                  BooleanFlagOn( FileObject->Flags, FO_WRITE_THROUGH ));

    DebugTrace(-1, me, "CcFastCopyWrite -> VOID\n", 0 );
}

NTKERNELAPI VOID CcFlushCache IN PSECTION_OBJECT_POINTERS  SectionObjectPointer,
IN PLARGE_INTEGER FileOffset  OPTIONAL,
IN ULONG  Length,
OUT PIO_STATUS_BLOCK IoStatus  OPTIONAL
 

Definition at line 4411 of file cachesub.c.

References _SHARED_CACHE_MAP::ActivePage, _SHARED_CACHE_MAP::ActiveVacb, CC_REQUEUE, CcAcquireByteRangeForWrite(), CcAcquireMasterLock, CcDecrementOpenCount, CcDeferredWrites, CcDirtySharedCacheMapList, CcExceptionFilter(), CcFreeActiveVacb(), CcFreeVirtualAddress(), CcGetVirtualAddressIfMapped(), CcIdleDelayTick, CcIncrementOpenCount, CcLazyWriteHotSpots, CcLazyWriteIos, CcLazyWritePages, CcNoDelay, CcPostDeferredWrites(), CcReleaseByteRangeFromWrite(), CcReleaseMasterLock, CcScheduleLazyWriteScan(), DebugTrace, DebugTrace2, _SHARED_CACHE_MAP::DirtyPages, FALSE, _SHARED_CACHE_MAP::FileObject, _SHARED_CACHE_MAP::FileSize, FlagOn, _SHARED_CACHE_MAP::Flags, _FILE_OBJECT::FsContext, FSRTL_FLAG_USER_MAPPED_FILE, GetActiveVacbAtDpcLevel, KeQueryTickCount(), LAZY_WRITE_OCCURRED, _SHARED_CACHE_MAP::LazyWritePassCount, LazyWriter, me, mm, MmFlushSection(), MmSetAddressRangeModified(), MODIFIED_WRITE_DISABLED, _SHARED_CACHE_MAP::NeedToZero, _SHARED_CACHE_MAP::NeedToZeroPage, NT_SUCCESS, NTSTATUS(), NULL, Offset, _SHARED_CACHE_MAP::OpenCount, _LAZY_WRITER::OtherWork, PAGE_SHIFT, PAGE_SIZE, _SHARED_CACHE_MAP::PagesToWrite, PFSRTL_COMMON_FCB_HEADER, PIN_ACCESS, RetryError, _LAZY_WRITER::ScanActive, _FILE_OBJECT::SectionObjectPointer, SetFlag, _SHARED_CACHE_MAP_LIST_CURSOR::SharedCacheMapLinks, _SHARED_CACHE_MAP::SharedCacheMapLinks, Status, TRUE, _SHARED_CACHE_MAP::ValidDataGoal, _SHARED_CACHE_MAP::ValidDataLength, and WRITE_QUEUED.

Referenced by CcWriteBehind(), CcZeroEndOfLastPage(), LfsFlushLfcb(), LfsFlushLogPage(), and MiFlushDataSection().

04420 : 04421 04422 This routine may be called to flush dirty data from the cache to the 04423 cached file on disk. Any byte range within the file may be flushed, 04424 or the entire file may be flushed by omitting the FileOffset parameter. 04425 04426 This routine does not take a Wait parameter; the caller should assume 04427 that it will always block. 04428 04429 Arguments: 04430 04431 SectionObjectPointer - A pointer to the Section Object Pointers 04432 structure in the nonpaged Fcb. 04433 04434 FileOffset - If this parameter is supplied (not NULL), then only the 04435 byte range specified by FileOffset and Length are flushed. 04436 If &CcNoDelay is specified, then this signifies the call 04437 from the Lazy Writer, and the lazy write scan should resume 04438 as normal from the last spot where it left off in the file. 04439 04440 Length - Defines the length of the byte range to flush, starting at 04441 FileOffset. This parameter is ignored if FileOffset is 04442 specified as NULL. 04443 04444 IoStatus - The I/O status resulting from the flush operation. 04445 04446 Return Value: 04447 04448 None. 04449 04450 --*/ 04451 04452 { 04453 LARGE_INTEGER NextFileOffset, TargetOffset; 04454 ULONG NextLength; 04455 PBCB FirstBcb; 04456 KIRQL OldIrql; 04457 PSHARED_CACHE_MAP SharedCacheMap; 04458 IO_STATUS_BLOCK TrashStatus; 04459 PVOID TempVa; 04460 ULONG RemainingLength, TempLength; 04461 NTSTATUS PopupStatus; 04462 BOOLEAN HotSpot; 04463 ULONG BytesWritten = 0; 04464 BOOLEAN PopupRequired = FALSE; 04465 BOOLEAN VerifyRequired = FALSE; 04466 BOOLEAN IsLazyWriter = FALSE; 04467 BOOLEAN FreeActiveVacb = FALSE; 04468 PVACB ActiveVacb = NULL; 04469 NTSTATUS Status = STATUS_SUCCESS; 04470 LARGE_INTEGER EndTick, CurrentTick; 04471 04472 DebugTrace(+1, me, "CcFlushCache:\n", 0 ); 04473 DebugTrace( 0, mm, " SectionObjectPointer = %08lx\n", SectionObjectPointer ); 04474 DebugTrace2(0, me, " FileOffset = %08lx, %08lx\n", 04475 ARGUMENT_PRESENT(FileOffset) ? 
FileOffset->LowPart 04476 : 0, 04477 ARGUMENT_PRESENT(FileOffset) ? FileOffset->HighPart 04478 : 0 ); 04479 DebugTrace( 0, me, " Length = %08lx\n", Length ); 04480 04481 // 04482 // If IoStatus passed a Null pointer, set up to through status away. 04483 // 04484 04485 if (!ARGUMENT_PRESENT(IoStatus)) { 04486 IoStatus = &TrashStatus; 04487 } 04488 IoStatus->Status = STATUS_SUCCESS; 04489 IoStatus->Information = 0; 04490 04491 // 04492 // See if this is the Lazy Writer. Since he wants to use this common 04493 // routine, which is also a public routine callable by file systems, 04494 // the Lazy Writer shows his call by specifying CcNoDelay as the file offset! 04495 // 04496 // Also, in case we do not write anything because we see only HotSpot(s), 04497 // initialize the Status to indicate a retryable error, so CcWorkerThread 04498 // knows we did not make any progress. Of course any actual flush will 04499 // overwrite this code. 04500 // 04501 04502 if (FileOffset == &CcNoDelay) { 04503 IoStatus->Status = STATUS_VERIFY_REQUIRED; 04504 IsLazyWriter = TRUE; 04505 FileOffset = NULL; 04506 } 04507 04508 // 04509 // If there is nothing to do, return here. 04510 // 04511 04512 if (ARGUMENT_PRESENT(FileOffset) && (Length == 0)) { 04513 04514 DebugTrace(-1, me, "CcFlushCache -> VOID\n", 0 ); 04515 return; 04516 } 04517 04518 // 04519 // See if the file is cached. 04520 // 04521 04522 CcAcquireMasterLock( &OldIrql ); 04523 04524 SharedCacheMap = SectionObjectPointer->SharedCacheMap; 04525 04526 if (SharedCacheMap != NULL) { 04527 04528 // 04529 // Increment the open count to keep it from going away. 
04530 // 04531 04532 CcIncrementOpenCount( SharedCacheMap, 'fcCS' ); 04533 04534 if ((SharedCacheMap->NeedToZero != NULL) || (SharedCacheMap->ActiveVacb != NULL)) { 04535 04536 ULONG FirstPage = 0; 04537 ULONG LastPage = MAXULONG; 04538 04539 if (ARGUMENT_PRESENT(FileOffset)) { 04540 04541 FirstPage = (ULONG)(FileOffset->QuadPart >> PAGE_SHIFT); 04542 LastPage = (ULONG)((FileOffset->QuadPart + Length - 1) >> PAGE_SHIFT); 04543 } 04544 04545 // 04546 // Make sure we do not flush the active page without zeroing any 04547 // uninitialized data. Also, it is very important to free the active 04548 // page if it is the one to be flushed, so that we get the dirty 04549 // bit out to the Pfn. 04550 // 04551 04552 if (((((LONGLONG)LastPage + 1) << PAGE_SHIFT) > SharedCacheMap->ValidDataGoal.QuadPart) || 04553 04554 ((SharedCacheMap->NeedToZero != NULL) && 04555 (FirstPage <= SharedCacheMap->NeedToZeroPage) && 04556 (LastPage >= SharedCacheMap->NeedToZeroPage)) || 04557 04558 ((SharedCacheMap->ActiveVacb != NULL) && 04559 (FirstPage <= SharedCacheMap->ActivePage) && 04560 (LastPage >= SharedCacheMap->ActivePage))) { 04561 04562 GetActiveVacbAtDpcLevel( SharedCacheMap, ActiveVacb, RemainingLength, TempLength ); 04563 FreeActiveVacb = TRUE; 04564 } 04565 } 04566 } 04567 04568 CcReleaseMasterLock( OldIrql ); 04569 04570 if (FreeActiveVacb) { 04571 CcFreeActiveVacb( SharedCacheMap, ActiveVacb, RemainingLength, TempLength ); 04572 } 04573 04574 // 04575 // If there is a user-mapped file, then we perform the "service" of 04576 // flushing even data not written via the file system. Note that this 04577 // is pretty important for folks provoking the flush/purge of a coherency 04578 // operation. 04579 // 04580 // It is critical this happen before we examine our own hints. 
In the course 04581 // of this flush it is possible valid data length will be advanced by the 04582 // underlying filesystem, with CcZero'ing behind - which will cause us to 04583 // make some dirty zeroes in the cache. Syscache bug! Note how coherency 04584 // flushing works ... 04585 // 04586 04587 if ((SharedCacheMap == NULL) 04588 04589 || 04590 04591 FlagOn(((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->Flags, 04592 FSRTL_FLAG_USER_MAPPED_FILE) && !IsLazyWriter) { 04593 04594 // 04595 // Call MM to flush the section through our view. 04596 // 04597 04598 DebugTrace( 0, mm, "MmFlushSection:\n", 0 ); 04599 DebugTrace( 0, mm, " SectionObjectPointer = %08lx\n", SectionObjectPointer ); 04600 DebugTrace2(0, me, " FileOffset = %08lx, %08lx\n", 04601 ARGUMENT_PRESENT(FileOffset) ? FileOffset->LowPart 04602 : 0, 04603 ARGUMENT_PRESENT(FileOffset) ? FileOffset->HighPart 04604 : 0 ); 04605 DebugTrace( 0, mm, " RegionSize = %08lx\n", Length ); 04606 04607 try { 04608 04609 Status = MmFlushSection( SectionObjectPointer, 04610 FileOffset, 04611 Length, 04612 IoStatus, 04613 TRUE ); 04614 04615 } except( CcExceptionFilter( IoStatus->Status = GetExceptionCode() )) { 04616 04617 KdPrint(("CACHE MANAGER: MmFlushSection raised %08lx\n", IoStatus->Status)); 04618 } 04619 04620 if ((!NT_SUCCESS(IoStatus->Status)) && !RetryError(IoStatus->Status)) { 04621 04622 PopupRequired = TRUE; 04623 PopupStatus = IoStatus->Status; 04624 } 04625 04626 DebugTrace2(0, mm, " <IoStatus = %08lx, %08lx\n", 04627 IoStatus->Status, IoStatus->Information ); 04628 } 04629 04630 // 04631 // Scan for dirty pages if there is a shared cache map. 04632 // 04633 04634 if (SharedCacheMap != NULL) { 04635 04636 // 04637 // If FileOffset was not specified then set to flush entire region 04638 // and set valid data length to the goal so that we will not get 04639 // any more call backs. 
04640 // 04641 04642 if (!IsLazyWriter && !ARGUMENT_PRESENT(FileOffset)) { 04643 04644 SharedCacheMap->ValidDataLength = SharedCacheMap->ValidDataGoal; 04645 } 04646 04647 // 04648 // If this is an explicit flush, initialize our offset to scan for. 04649 // 04650 04651 if (ARGUMENT_PRESENT(FileOffset)) { 04652 TargetOffset = *FileOffset; 04653 } 04654 04655 // 04656 // Assume we want to pass the explicit flush flag in Length. 04657 // But overwrite it if a length really was specified. On 04658 // subsequent loops, NextLength will have some nonzero value. 04659 // 04660 04661 NextLength = 1; 04662 if (Length != 0) { 04663 NextLength = Length; 04664 } 04665 04666 // 04667 // Now calculate the tick that will signal the expiration of a 04668 // lazy writer tick interval. 04669 // 04670 04671 if (IsLazyWriter) { 04672 04673 KeQueryTickCount( &EndTick ); 04674 EndTick.QuadPart += CcIdleDelayTick; 04675 } 04676 04677 // 04678 // Loop as long as we find buffers to flush for this 04679 // SharedCacheMap, and we are not trying to delete the guy. 04680 // 04681 04682 while (((SharedCacheMap->PagesToWrite != 0) || !IsLazyWriter) 04683 04684 && 04685 ((SharedCacheMap->FileSize.QuadPart != 0) || 04686 FlagOn(SharedCacheMap->Flags, PIN_ACCESS)) 04687 04688 && 04689 04690 !VerifyRequired 04691 04692 && 04693 04694 CcAcquireByteRangeForWrite ( SharedCacheMap, 04695 IsLazyWriter ? NULL : (ARGUMENT_PRESENT(FileOffset) ? 04696 &TargetOffset : NULL), 04697 IsLazyWriter ? 0: NextLength, 04698 &NextFileOffset, 04699 &NextLength, 04700 &FirstBcb )) { 04701 04702 // 04703 // Assume this range is not a hot spot. 04704 // 04705 04706 HotSpot = FALSE; 04707 04708 // 04709 // We defer calling Mm to set address range modified until here, to take 04710 // overhead out of the main line path, and to reduce the number of TBIS 04711 // on a multiprocessor. 04712 // 04713 04714 RemainingLength = NextLength; 04715 04716 do { 04717 04718 // 04719 // See if the next file offset is mapped. 
(If not, the dirty bit 04720 // was propagated on the unmap.) 04721 // 04722 04723 if ((TempVa = CcGetVirtualAddressIfMapped( SharedCacheMap, 04724 NextFileOffset.QuadPart + NextLength - RemainingLength, 04725 &ActiveVacb, 04726 &TempLength)) != NULL) { 04727 04728 // 04729 // Reduce TempLength to RemainingLength if necessary, and 04730 // call MM. 04731 // 04732 04733 if (TempLength > RemainingLength) { 04734 TempLength = RemainingLength; 04735 } 04736 04737 // 04738 // Clear the Dirty bit (if set) in the PTE and set the 04739 // Pfn modified. Assume if the Pte was dirty, that this may 04740 // be a hot spot. Do not do hot spots for metadata, and unless 04741 // they are within ValidDataLength as reported to the file system 04742 // via CcSetValidData. 04743 // 04744 04745 HotSpot = (BOOLEAN)((MmSetAddressRangeModified(TempVa, TempLength) || HotSpot) && 04746 ((NextFileOffset.QuadPart + NextLength) < 04747 (SharedCacheMap->ValidDataLength.QuadPart)) && 04748 ((SharedCacheMap->LazyWritePassCount & 0xF) != 0) && IsLazyWriter) && 04749 !FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED); 04750 04751 CcFreeVirtualAddress( ActiveVacb ); 04752 04753 } else { 04754 04755 // 04756 // Reduce TempLength to RemainingLength if necessary. 04757 // 04758 04759 if (TempLength > RemainingLength) { 04760 TempLength = RemainingLength; 04761 } 04762 } 04763 04764 // 04765 // Reduce RemainingLength by what we processed. 04766 // 04767 04768 RemainingLength -= TempLength; 04769 04770 // 04771 // Loop until done. 04772 // 04773 04774 } while (RemainingLength != 0); 04775 04776 CcLazyWriteHotSpots += HotSpot; 04777 04778 // 04779 // Now flush, now flush if we do not think it is a hot spot. 
04780 // 04781 04782 if (!HotSpot) { 04783 04784 MmFlushSection( SharedCacheMap->FileObject->SectionObjectPointer, 04785 &NextFileOffset, 04786 NextLength, 04787 IoStatus, 04788 !IsLazyWriter ); 04789 04790 if (NT_SUCCESS(IoStatus->Status)) { 04791 04792 if (!FlagOn(SharedCacheMap->Flags, LAZY_WRITE_OCCURRED)) { 04793 04794 CcAcquireMasterLock( &OldIrql ); 04795 SetFlag(SharedCacheMap->Flags, LAZY_WRITE_OCCURRED); 04796 CcReleaseMasterLock( OldIrql ); 04797 } 04798 04799 // 04800 // Increment performance counters 04801 // 04802 04803 if (IsLazyWriter) { 04804 04805 CcLazyWriteIos += 1; 04806 CcLazyWritePages += (NextLength + PAGE_SIZE - 1) >> PAGE_SHIFT; 04807 } 04808 04809 } else { 04810 04811 LARGE_INTEGER Offset = NextFileOffset; 04812 ULONG RetryLength = NextLength; 04813 04814 DebugTrace2( 0, 0, "I/O Error on Cache Flush: %08lx, %08lx\n", 04815 IoStatus->Status, IoStatus->Information ); 04816 04817 if (RetryError(IoStatus->Status)) { 04818 04819 VerifyRequired = TRUE; 04820 04821 // 04822 // Loop to write each page individually, starting with one 04823 // more try on the page that got the error, in case that page 04824 // or any page beyond it can be successfully written 04825 // individually. Note that Offset and RetryLength are 04826 // guaranteed to be in integral pages, but the Information 04827 // field from the failed request is not. 04828 // 04829 // We ignore errors now, and give it one last shot, before 04830 // setting the pages clean (see below). 
04831 // 04832 04833 } else { 04834 04835 do { 04836 04837 DebugTrace2( 0, 0, "Trying page at offset %08lx, %08lx\n", 04838 Offset.LowPart, Offset.HighPart ); 04839 04840 MmFlushSection ( SharedCacheMap->FileObject->SectionObjectPointer, 04841 &Offset, 04842 PAGE_SIZE, 04843 IoStatus, 04844 !IsLazyWriter ); 04845 04846 DebugTrace2( 0, 0, "I/O status = %08lx, %08lx\n", 04847 IoStatus->Status, IoStatus->Information ); 04848 04849 if (NT_SUCCESS(IoStatus->Status)) { 04850 CcAcquireMasterLock( &OldIrql ); 04851 SetFlag(SharedCacheMap->Flags, LAZY_WRITE_OCCURRED); 04852 CcReleaseMasterLock( OldIrql ); 04853 } 04854 04855 if ((!NT_SUCCESS(IoStatus->Status)) && !RetryError(IoStatus->Status)) { 04856 04857 PopupRequired = TRUE; 04858 PopupStatus = IoStatus->Status; 04859 } 04860 04861 VerifyRequired = VerifyRequired || RetryError(IoStatus->Status); 04862 04863 Offset.QuadPart = Offset.QuadPart + (LONGLONG)PAGE_SIZE; 04864 RetryLength -= PAGE_SIZE; 04865 04866 } while(RetryLength > 0); 04867 } 04868 } 04869 } 04870 04871 // 04872 // Now release the Bcb resources and set them clean. Note we do not check 04873 // here for errors, and just returned in the I/O status. Errors on writes 04874 // are rare to begin with. Nonetheless, our strategy is to rely on 04875 // one or more of the following (depending on the file system) to prevent 04876 // errors from getting to us. 04877 // 04878 // - Retries and/or other forms of error recovery in the disk driver 04879 // - Mirroring driver 04880 // - Hot fixing in the noncached path of the file system 04881 // 04882 // In the unexpected case that a write error does get through, we 04883 // *currently* just set the Bcbs clean anyway, rather than let 04884 // Bcbs and pages accumulate which cannot be written. Note we did 04885 // a popup above to at least notify the guy. 04886 // 04887 // Set the pages dirty again if we either saw a HotSpot or got 04888 // verify required. 
04889 // 04890 04891 CcReleaseByteRangeFromWrite ( SharedCacheMap, 04892 &NextFileOffset, 04893 NextLength, 04894 FirstBcb, 04895 (BOOLEAN)(HotSpot || VerifyRequired) ); 04896 04897 // 04898 // See if there is any deferred writes we should post. 04899 // 04900 04901 BytesWritten += NextLength; 04902 if ((BytesWritten >= 0x40000) && !IsListEmpty(&CcDeferredWrites)) { 04903 CcPostDeferredWrites(); 04904 BytesWritten = 0; 04905 } 04906 04907 // 04908 // If we're the lazy writer and have spent more than the active tick 04909 // length in this loop, break out for a requeue so we share the 04910 // file resources. 04911 // 04912 04913 if (IsLazyWriter) { 04914 04915 KeQueryTickCount( &CurrentTick ); 04916 04917 // 04918 // Note that CcIdleDelay is a relative (negative) timestamp. 04919 // 04920 04921 if (CurrentTick.QuadPart > EndTick.QuadPart) { 04922 04923 IoStatus->Information = CC_REQUEUE; 04924 break; 04925 } 04926 } 04927 04928 // 04929 // Now for explicit flushes, we should advance our range. 04930 // 04931 04932 if (ARGUMENT_PRESENT(FileOffset)) { 04933 04934 NextFileOffset.QuadPart += NextLength; 04935 04936 // 04937 // Done yet? 04938 // 04939 04940 if ((FileOffset->QuadPart + Length) <= NextFileOffset.QuadPart) { 04941 break; 04942 } 04943 04944 // 04945 // Calculate new target range 04946 // 04947 04948 NextLength = (ULONG)((FileOffset->QuadPart + Length) - NextFileOffset.QuadPart); 04949 TargetOffset = NextFileOffset; 04950 } 04951 } 04952 } 04953 04954 // 04955 // See if there are any deferred writes we should post if 04956 // we escaped the loop without checking after a series of 04957 // flushes. 04958 // 04959 04960 if (BytesWritten != 0 && !IsListEmpty(&CcDeferredWrites)) { 04961 04962 CcPostDeferredWrites(); 04963 } 04964 04965 // 04966 // Now we can get rid of the open count, and clean up as required. 04967 // 04968 04969 if (SharedCacheMap != NULL) { 04970 04971 // 04972 // Serialize again to decrement the open count. 
04973 // 04974 04975 CcAcquireMasterLock( &OldIrql ); 04976 04977 CcDecrementOpenCount( SharedCacheMap, 'fcCF' ); 04978 04979 if ((SharedCacheMap->OpenCount == 0) && 04980 !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) && 04981 (SharedCacheMap->DirtyPages == 0)) { 04982 04983 // 04984 // Move to the dirty list. 04985 // 04986 04987 RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks ); 04988 InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks, 04989 &SharedCacheMap->SharedCacheMapLinks ); 04990 04991 // 04992 // Make sure the Lazy Writer will wake up, because we 04993 // want him to delete this SharedCacheMap. 04994 // 04995 04996 LazyWriter.OtherWork = TRUE; 04997 if (!LazyWriter.ScanActive) { 04998 CcScheduleLazyWriteScan(); 04999 } 05000 } 05001 05002 CcReleaseMasterLock( OldIrql ); 05003 } 05004 05005 // 05006 // Make sure and return the first error to our caller. In the 05007 // case of the Lazy Writer, a popup will be issued. 05008 // 05009 05010 if (PopupRequired) { 05011 IoStatus->Status = PopupStatus; 05012 } 05013 05014 // 05015 // Let the Lazy writer know if we did anything, so he can 05016 05017 DebugTrace(-1, me, "CcFlushCache -> VOID\n", 0 ); 05018 05019 return; 05020 }

NTKERNELAPI LARGE_INTEGER CcGetDirtyPages IN PVOID  LogHandle,
IN PDIRTY_PAGE_ROUTINE  DirtyPageRoutine,
IN PVOID  Context1,
IN PVOID  Context2
 

Definition at line 142 of file logsup.c.

References _BCB::BcbLinks, _SHARED_CACHE_MAP::BcbList, _SHARED_CACHE_MAP::BcbSpinLock, _BCB::ByteLength, CACHE_NTC_BCB, CcAcquireMasterLock, CcDecrementOpenCount, CcDirtySharedCacheMapList, CcIncrementOpenCount, CcReleaseMasterLock, CcUnpinFileData(), Context1, Context2, _BCB::Dirty, _SHARED_CACHE_MAP::DirtyPages, _SHARED_CACHE_MAP::FileObject, _BCB::FileOffset, FlagOn, _SHARED_CACHE_MAP::Flags, IS_CURSOR, _SHARED_CACHE_MAP::LogHandle, _BCB::NewestLsn, _BCB::NodeTypeCode, NTSTATUS(), NULL, _BCB::OldestLsn, _BCB::PinCount, _SHARED_CACHE_MAP::SharedCacheMapLinks, _SHARED_CACHE_MAP_LIST_CURSOR::SharedCacheMapLinks, TRUE, UNPIN, and UNREF.

00151 : 00152 00153 This routine may be called to return all of the dirty pages in all files 00154 for a given log handle. Each page is returned by an individual call to 00155 the Dirty Page Routine. The Dirty Page Routine is defined by a prototype 00156 in ntos\inc\cache.h. 00157 00158 Arguments: 00159 00160 LogHandle - Log Handle which must match the log handle previously stored 00161 for all files which are to be returned. 00162 00163 DirtyPageRoutine -- The routine to call as each dirty page for this log 00164 handle is found. 00165 00166 Context1 - First context parameter to be passed to the Dirty Page Routine. 00167 00168 Context2 - Second context parameter to be passed to the Dirty Page Routine. 00169 00170 Return Value: 00171 00172 LARGE_INTEGER - Oldest Lsn found of all the dirty pages, or 0 if no dirty pages 00173 00174 --*/ 00175 00176 { 00177 PSHARED_CACHE_MAP SharedCacheMap; 00178 PBCB Bcb, BcbToUnpin = NULL; 00179 KIRQL OldIrql; 00180 NTSTATUS ExceptionStatus; 00181 LARGE_INTEGER SavedFileOffset, SavedOldestLsn, SavedNewestLsn; 00182 ULONG SavedByteLength; 00183 LARGE_INTEGER OldestLsn = {0,0}; 00184 00185 // 00186 // Synchronize with changes to the SharedCacheMap list. 00187 // 00188 00189 CcAcquireMasterLock( &OldIrql ); 00190 00191 SharedCacheMap = CONTAINING_RECORD( CcDirtySharedCacheMapList.SharedCacheMapLinks.Flink, 00192 SHARED_CACHE_MAP, 00193 SharedCacheMapLinks ); 00194 00195 // 00196 // Use try/finally for cleanup. The only spot where we can raise is out of the 00197 // filesystem callback, but we have the exception handler out here so we aren't 00198 // constantly setting/unsetting it.
00199 // 00200 00201 try { 00202 00203 while (&SharedCacheMap->SharedCacheMapLinks != &CcDirtySharedCacheMapList.SharedCacheMapLinks) { 00204 00205 // 00206 // Skip over cursors, SharedCacheMaps for other LogHandles, and ones with 00207 // no dirty pages 00208 // 00209 00210 if (!FlagOn(SharedCacheMap->Flags, IS_CURSOR) && (SharedCacheMap->LogHandle == LogHandle) && 00211 (SharedCacheMap->DirtyPages != 0)) { 00212 00213 // 00214 // This SharedCacheMap should stick around for a while in the dirty list. 00215 // 00216 00217 CcIncrementOpenCount( SharedCacheMap, 'pdGS' ); 00218 SharedCacheMap->DirtyPages += 1; 00219 CcReleaseMasterLock( OldIrql ); 00220 00221 // 00222 // Set our initial resume point and point to first Bcb in List. 00223 // 00224 00225 ExAcquireFastLock( &SharedCacheMap->BcbSpinLock, &OldIrql ); 00226 Bcb = CONTAINING_RECORD( SharedCacheMap->BcbList.Flink, BCB, BcbLinks ); 00227 00228 // 00229 // Scan to the end of the Bcb list. 00230 // 00231 00232 while (&Bcb->BcbLinks != &SharedCacheMap->BcbList) { 00233 00234 // 00235 // If the Bcb is dirty, then capture the inputs for the 00236 // callback routine so we can call without holding a spinlock. 00237 // 00238 00239 if ((Bcb->NodeTypeCode == CACHE_NTC_BCB) && Bcb->Dirty) { 00240 00241 SavedFileOffset = Bcb->FileOffset; 00242 SavedByteLength = Bcb->ByteLength; 00243 SavedOldestLsn = Bcb->OldestLsn; 00244 SavedNewestLsn = Bcb->NewestLsn; 00245 00246 // 00247 // Increment PinCount so the Bcb sticks around 00248 // 00249 00250 Bcb->PinCount += 1; 00251 00252 ExReleaseFastLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 00253 00254 // 00255 // Any Bcb to unpin from a previous loop? 00256 // 00257 00258 if (BcbToUnpin != NULL) { 00259 CcUnpinFileData( BcbToUnpin, TRUE, UNREF ); 00260 BcbToUnpin = NULL; 00261 } 00262 00263 // 00264 // Call the file system. This callback may raise status. 
00265 // 00266 00267 (*DirtyPageRoutine)( SharedCacheMap->FileObject, 00268 &SavedFileOffset, 00269 SavedByteLength, 00270 &SavedOldestLsn, 00271 &SavedNewestLsn, 00272 Context1, 00273 Context2 ); 00274 00275 // 00276 // Possibly update OldestLsn 00277 // 00278 00279 if ((SavedOldestLsn.QuadPart != 0) && 00280 ((OldestLsn.QuadPart == 0) || (SavedOldestLsn.QuadPart < OldestLsn.QuadPart ))) { 00281 OldestLsn = SavedOldestLsn; 00282 } 00283 00284 // 00285 // Now reacquire the spinlock and scan from the resume point 00286 // point to the next Bcb to return in the descending list. 00287 // 00288 00289 ExAcquireFastLock( &SharedCacheMap->BcbSpinLock, &OldIrql ); 00290 00291 // 00292 // Normally the Bcb can stay around a while, but if not, 00293 // we will just remember it for the next time we do not 00294 // have the spin lock. We cannot unpin it now, because 00295 // we would lose our place in the list. 00296 // 00297 00298 if (Bcb->PinCount > 1) { 00299 Bcb->PinCount -= 1; 00300 } else { 00301 BcbToUnpin = Bcb; 00302 } 00303 } 00304 00305 Bcb = CONTAINING_RECORD( Bcb->BcbLinks.Flink, BCB, BcbLinks ); 00306 } 00307 ExReleaseFastLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 00308 00309 // 00310 // We need to unpin any Bcb we are holding before moving on to 00311 // the next SharedCacheMap, or else CcDeleteSharedCacheMap will 00312 // also delete this Bcb. 00313 // 00314 00315 if (BcbToUnpin != NULL) { 00316 00317 CcUnpinFileData( BcbToUnpin, TRUE, UNREF ); 00318 BcbToUnpin = NULL; 00319 } 00320 00321 CcAcquireMasterLock( &OldIrql ); 00322 00323 // 00324 // Now release the SharedCacheMap, leaving it in the dirty list. 00325 // 00326 00327 CcDecrementOpenCount( SharedCacheMap, 'pdGF' ); 00328 SharedCacheMap->DirtyPages -= 1; 00329 } 00330 00331 // 00332 // Now loop back for the next cache map. 
00333 // 00334 00335 SharedCacheMap = 00336 CONTAINING_RECORD( SharedCacheMap->SharedCacheMapLinks.Flink, 00337 SHARED_CACHE_MAP, 00338 SharedCacheMapLinks ); 00339 } 00340 00341 CcReleaseMasterLock( OldIrql ); 00342 00343 } finally { 00344 00345 // 00346 // Drop the Bcb if we are being ejected. We are guaranteed that the 00347 // only raise is from the callback, at which point we have an incremented 00348 // pincount. 00349 // 00350 00351 if (AbnormalTermination()) { 00352 00353 CcUnpinFileData( Bcb, TRUE, UNPIN ); 00354 } 00355 } 00356 00357 return OldestLsn; 00358 }

NTKERNELAPI PFILE_OBJECT CcGetFileObjectFromBcb IN PVOID  Bcb  ) 
 

Definition at line 3709 of file fssup.c.

03722 : 03723 03724 Bcb - A pointer to the pinned Bcb. 03725 03726 Return Value: 03727 03728 Pointer to the File Object, or NULL if the file is not cached or no 03729 longer cached 03730 03731 --*/ 03732 03733 { 03734 return ((PBCB)Bcb)->SharedCacheMap->FileObject; 03735 }

NTKERNELAPI PFILE_OBJECT CcGetFileObjectFromSectionPtrs IN PSECTION_OBJECT_POINTERS  SectionObjectPointer  ) 
 

Definition at line 3658 of file fssup.c.

References CcAcquireMasterLock, CcReleaseMasterLock, and NULL.

03675 : 03676 03677 SectionObjectPointer - A pointer to the Section Object Pointers 03678 structure in the nonpaged Fcb. 03679 03680 Return Value: 03681 03682 Pointer to the File Object, or NULL if the file is not cached or no 03683 longer cached 03684 03685 --*/ 03686 03687 { 03688 KIRQL OldIrql; 03689 PFILE_OBJECT FileObject = NULL; 03690 03691 // 03692 // Serialize with Creation/Deletion of all Shared CacheMaps 03693 // 03694 03695 CcAcquireMasterLock( &OldIrql ); 03696 03697 if (SectionObjectPointer->SharedCacheMap != NULL) { 03698 03699 FileObject = ((PSHARED_CACHE_MAP)SectionObjectPointer->SharedCacheMap)->FileObject; 03700 } 03701 03702 CcReleaseMasterLock( OldIrql ); 03703 03704 return FileObject; 03705 }

NTKERNELAPI LARGE_INTEGER CcGetFlushedValidData IN PSECTION_OBJECT_POINTERS  SectionObjectPointer,
IN BOOLEAN  BcbListHeld
 

Definition at line 4232 of file cachesub.c.

References ASSERT, _BITMAP_RANGE::BasePage, _BCB::BcbLinks, _SHARED_CACHE_MAP::BcbList, _SHARED_CACHE_MAP::BcbSpinLock, CACHE_NTC_BCB, CcAcquireMasterLock, CcAcquireMasterLockAtDpcLevel, CcDecrementOpenCount, CcDirtySharedCacheMapList, CcFindBitmapRangeToClean(), CcIncrementOpenCount, CcReleaseMasterLock, CcScheduleLazyWriteScan(), _BCB::Dirty, _MBCB::DirtyPages, _SHARED_CACHE_MAP::DirtyPages, _BCB::FileOffset, _BITMAP_RANGE::FirstDirtyPage, FlagOn, _SHARED_CACHE_MAP::Flags, LazyWriter, _SHARED_CACHE_MAP::Mbcb, _BCB::NodeTypeCode, NULL, _SHARED_CACHE_MAP::OpenCount, _LAZY_WRITER::OtherWork, PAGE_SHIFT, _LAZY_WRITER::ScanActive, _SHARED_CACHE_MAP_LIST_CURSOR::SharedCacheMapLinks, _SHARED_CACHE_MAP::SharedCacheMapLinks, TRUE, _SHARED_CACHE_MAP::ValidDataGoal, and WRITE_QUEUED.

Referenced by CcWriteBehind().

04239 : 04240 04241 This routine may be called by a file system to find out how far the Cache Manager 04242 has flushed in the stream. More accurately, this routine returns the FileOffset 04243 of the lowest dirty page currently in the file. 04244 04245 NOTE that even though the routine takes SectionObjectPointer, the caller must insure 04246 that the stream is cached and stays cached for the duration of this routine, much like 04247 for the copy routines, etc. 04248 04249 Arguments: 04250 04251 SectionObjectPointer - A pointer to the Section Object Pointers 04252 structure in the nonpaged Fcb. 04253 04254 CcInternalCaller - must be TRUE if the caller is coming from Cc, FALSE otherwise. 04255 TRUE implies the need for self-synchronization. 04256 04257 Return Value: 04258 04259 The derived number for flushed ValidData, or MAXLONGLONG in the quad part if 04260 the Section is not cached. (Naturally the caller can guarantee that this case 04261 does not occur, and internal callers do.) 04262 04263 --*/ 04264 04265 { 04266 PSHARED_CACHE_MAP SharedCacheMap; 04267 KIRQL OldIrql; 04268 LARGE_INTEGER NewValidDataLength; 04269 04270 // 04271 // External callers may be unsynchronized with this shared cache map 04272 // perhaps going away underneath this call. NTFS and his 04273 // pair of streams for compression-on-the-wire is a good example of 04274 // someone who may be synchronized in one stream but needs to peek at 04275 // the other.
04276 // 04277 04278 if (!CcInternalCaller) { 04279 04280 CcAcquireMasterLock( &OldIrql ); 04281 04282 SharedCacheMap = SectionObjectPointer->SharedCacheMap; 04283 04284 if (SharedCacheMap == NULL) { 04285 CcReleaseMasterLock( OldIrql ); 04286 NewValidDataLength.QuadPart = MAXLONGLONG; 04287 return NewValidDataLength; 04288 } 04289 04290 CcIncrementOpenCount( SharedCacheMap, 'dfGS' ); 04291 CcReleaseMasterLock( OldIrql ); 04292 ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql ); 04293 04294 } else { 04295 04296 SharedCacheMap = SectionObjectPointer->SharedCacheMap; 04297 } 04298 04299 ASSERT( SharedCacheMap != NULL ); 04300 04301 // 04302 // If the file is entirely clean, then we wish to return 04303 // the new ValidDataLength as equal to ValidDataGoal. 04304 // 04305 04306 NewValidDataLength = SharedCacheMap->ValidDataGoal; 04307 04308 // 04309 // If there may be dirty pages we will look at the last Bcb in the 04310 // descending-order Bcb list, and see if it describes data beyond 04311 // ValidDataGoal. 04312 // 04313 // It is important to note that since we use DirtyPages as a faux 04314 // reference count over some short windows (+1, -1) the simple 04315 // fact it is nonzero does *not* mean the file is dirty. 04316 // 04317 // (This test is logically too conservative. For example, the last Bcb 04318 // may not even be dirty (in which case we should look at its 04319 // predecessor), or we may have earlier written valid data to this 04320 // byte range (which also means if we knew this we could look at 04321 // the predessor). This simply means that the Lazy Writer may not 04322 // successfully get ValidDataLength updated in a file being randomly 04323 // accessed until the level of file access dies down, or at the latest 04324 // until the file is closed. However, security will never be 04325 // compromised.) 
04326 // 04327 04328 if (SharedCacheMap->DirtyPages) { 04329 04330 PBITMAP_RANGE BitmapRange; 04331 PBCB LastBcb; 04332 PMBCB Mbcb = SharedCacheMap->Mbcb; 04333 04334 if ((Mbcb != NULL) && (Mbcb->DirtyPages != 0)) { 04335 04336 BitmapRange = CcFindBitmapRangeToClean( Mbcb, 0 ); 04337 04338 ASSERT(BitmapRange->FirstDirtyPage != MAXULONG); 04339 04340 NewValidDataLength.QuadPart = (BitmapRange->BasePage + BitmapRange->FirstDirtyPage) 04341 << PAGE_SHIFT; 04342 } 04343 04344 LastBcb = CONTAINING_RECORD( SharedCacheMap->BcbList.Flink, 04345 BCB, 04346 BcbLinks ); 04347 04348 while (&LastBcb->BcbLinks != &SharedCacheMap->BcbList) { 04349 04350 if ((LastBcb->NodeTypeCode == CACHE_NTC_BCB) && LastBcb->Dirty) { 04351 break; 04352 } 04353 04354 LastBcb = CONTAINING_RECORD( LastBcb->BcbLinks.Flink, 04355 BCB, 04356 BcbLinks ); 04357 } 04358 04359 // 04360 // Check the Base of the last entry. 04361 // 04362 04363 if ((&LastBcb->BcbLinks != &SharedCacheMap->BcbList) && 04364 (LastBcb->FileOffset.QuadPart < NewValidDataLength.QuadPart )) { 04365 04366 NewValidDataLength = LastBcb->FileOffset; 04367 } 04368 } 04369 04370 if (!CcInternalCaller) { 04371 04372 // 04373 // Remove our reference. 04374 // 04375 04376 CcAcquireMasterLockAtDpcLevel(); 04377 CcDecrementOpenCount( SharedCacheMap, 'dfGF' ); 04378 04379 if ((SharedCacheMap->OpenCount == 0) && 04380 !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) && 04381 (SharedCacheMap->DirtyPages == 0)) { 04382 04383 // 04384 // Move to the dirty list. 04385 // 04386 04387 RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks ); 04388 InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks, 04389 &SharedCacheMap->SharedCacheMapLinks ); 04390 04391 // 04392 // Make sure the Lazy Writer will wake up, because we 04393 // want him to delete this SharedCacheMap. 
04394 // 04395 04396 LazyWriter.OtherWork = TRUE; 04397 if (!LazyWriter.ScanActive) { 04398 CcScheduleLazyWriteScan(); 04399 } 04400 } 04401 04402 ExReleaseSpinLockFromDpcLevel( &SharedCacheMap->BcbSpinLock ); 04403 CcReleaseMasterLock( OldIrql ); 04404 } 04405 04406 return NewValidDataLength; 04407 }

NTKERNELAPI LARGE_INTEGER CcGetLsnForFileObject ( IN PFILE_OBJECT FileObject,
OUT PLARGE_INTEGER OldestLsn OPTIONAL )
 

Definition at line 450 of file logsup.c.

References _BCB::BcbLinks, _SHARED_CACHE_MAP::BcbList, _SHARED_CACHE_MAP::BcbSpinLock, CACHE_NTC_BCB, _BCB::Dirty, _BCB::NewestLsn, _BCB::NodeTypeCode, NULL, and _BCB::OldestLsn.

00457 : 00458 00459 This routine returns the oldest and newest LSNs for a file object. 00460 00461 Arguments: 00462 00463 FileObject - File for which the log handle should be stored. 00464 00465 OldestLsn - pointer to location to store oldest LSN for file object. 00466 00467 Return Value: 00468 00469 The newest LSN for the file object. 00470 00471 --*/ 00472 00473 { 00474 PBCB Bcb; 00475 KIRQL OldIrql; 00476 LARGE_INTEGER Oldest, Newest; 00477 PSHARED_CACHE_MAP SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap; 00478 00479 // 00480 // initialize lsn variables 00481 // 00482 00483 Oldest.LowPart = 0; 00484 Oldest.HighPart = 0; 00485 Newest.LowPart = 0; 00486 Newest.HighPart = 0; 00487 00488 if(SharedCacheMap == NULL) { 00489 return Oldest; 00490 } 00491 00492 ExAcquireFastLock(&SharedCacheMap->BcbSpinLock, &OldIrql); 00493 00494 // 00495 // Now point to first Bcb in List, and loop through it. 00496 // 00497 00498 Bcb = CONTAINING_RECORD( SharedCacheMap->BcbList.Flink, BCB, BcbLinks ); 00499 00500 while (&Bcb->BcbLinks != &SharedCacheMap->BcbList) { 00501 00502 // 00503 // If the Bcb is dirty then capture the oldest and newest lsn 00504 // 00505 00506 00507 if ((Bcb->NodeTypeCode == CACHE_NTC_BCB) && Bcb->Dirty) { 00508 00509 LARGE_INTEGER BcbLsn, BcbNewest; 00510 00511 BcbLsn = Bcb->OldestLsn; 00512 BcbNewest = Bcb->NewestLsn; 00513 00514 if ((BcbLsn.QuadPart != 0) && 00515 ((Oldest.QuadPart == 0) || 00516 (BcbLsn.QuadPart < Oldest.QuadPart))) { 00517 00518 Oldest = BcbLsn; 00519 } 00520 00521 if ((BcbLsn.QuadPart != 0) && (BcbNewest.QuadPart > Newest.QuadPart)) { 00522 00523 Newest = BcbNewest; 00524 } 00525 } 00526 00527 00528 Bcb = CONTAINING_RECORD( Bcb->BcbLinks.Flink, BCB, BcbLinks ); 00529 } 00530 00531 // 00532 // Now release the spin lock for this Bcb list and generate a callback 00533 // if we got something. 
00534 // 00535 00536 ExReleaseFastLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 00537 00538 if (ARGUMENT_PRESENT(OldestLsn)) { 00539 00540 *OldestLsn = Oldest; 00541 } 00542 00543 return Newest; 00544 }

NTKERNELAPI BOOLEAN CcInitializeCacheManager ( )
 

Definition at line 69 of file fssup.c.

References _LAZY_WRITER::BcbZone, CcAggressiveZeroCount, CcAggressiveZeroThreshold, CcBcbSpinLock, CcBugCheck, CcCapturedSystemSize, CcCleanSharedCacheMapList, CcDebugTraceLock, CcDeferredWrites, CcDeferredWriteSpinLock, CcDirtyPageTarget, CcDirtyPageThreshold, CcDirtySharedCacheMapList, CcExpressWorkQueue, CcIdleDelayTick, CcIdleWorkerThreadList, CcInitializeVacbs(), CcLazyWriterCursor, CcMasterSpinLock, CcNumberWorkerThreads, CcPostTickWorkQueue, CcRegularWorkQueue, CcScanDpc(), CcTwilightLookasideList, CcWorkerThread(), CcWorkQueueSpinlock, ExAllocatePoolWithTag, ExCriticalWorkerThreads, ExInitializeNPagedLookasideList(), ExInitializeWorkItem, ExInitializeZone(), _SHARED_CACHE_MAP_LIST_CURSOR::Flags, Index, IS_CURSOR, KeInitializeDpc(), KeInitializeSpinLock(), KeInitializeTimer(), KeNumberProcessors, KeQueryTimeIncrement(), KiProcessorBlock, LAZY_WRITER_IDLE_DELAY, LazyWriter, _WORK_QUEUE_ITEM::List, LookasideTwilightList, _MMSUPPORT::MaximumWorkingSetSize, MmIsThisAnNtAsSystem(), MmLargeSystem, MmLargeSystemCache, MmMediumSystem, MmNumberOfPhysicalPages, MmQuerySystemSize(), MmSmallSystem, MmSystemCacheWs, NonPagedPool, NT_SUCCESS, NULL, _LAZY_WRITER::OurProcess, PAGE_SIZE, PsGetCurrentProcess, _LAZY_WRITER::ScanDpc, _LAZY_WRITER::ScanTimer, _SHARED_CACHE_MAP_LIST_CURSOR::SharedCacheMapLinks, TRUE, USHORT, WORK_QUEUE_ENTRY, and _LAZY_WRITER::WorkQueue.

00074 : 00075 00076 This routine must be called during system initialization before the 00077 first call to any file system, to allow the Cache Manager to initialize 00078 its global data structures. This routine has no dependencies on other 00079 system components being initialized. 00080 00081 Arguments: 00082 00083 None 00084 00085 Return Value: 00086 00087 TRUE if initialization was successful 00088 00089 --*/ 00090 00091 { 00092 CLONG i; 00093 ULONG Index; 00094 PNPAGED_LOOKASIDE_LIST Lookaside; 00095 USHORT NumberOfItems; 00096 PKPRCB Prcb; 00097 PWORK_QUEUE_ITEM WorkItem; 00098 00099 #ifdef CCDBG_LOCK 00100 KeInitializeSpinLock( &CcDebugTraceLock ); 00101 #endif 00102 00103 #if DBG 00104 CcBcbCount = 0; 00105 InitializeListHead( &CcBcbList ); 00106 KeInitializeSpinLock( &CcBcbSpinLock ); 00107 #endif 00108 00109 // 00110 // Figure out the timeout clock tick for the lazy writer. 00111 // 00112 00113 CcIdleDelayTick = LAZY_WRITER_IDLE_DELAY / KeQueryTimeIncrement(); 00114 00115 // 00116 // Initialize shared cache map list structures 00117 // 00118 00119 KeInitializeSpinLock( &CcMasterSpinLock ); 00120 InitializeListHead( &CcCleanSharedCacheMapList ); 00121 InitializeListHead( &CcDirtySharedCacheMapList.SharedCacheMapLinks ); 00122 CcDirtySharedCacheMapList.Flags = IS_CURSOR; 00123 InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks, 00124 &CcLazyWriterCursor.SharedCacheMapLinks ); 00125 CcLazyWriterCursor.Flags = IS_CURSOR; 00126 00127 // 00128 // Initialize worker thread structures 00129 // 00130 00131 KeInitializeSpinLock( &CcWorkQueueSpinlock ); 00132 InitializeListHead( &CcIdleWorkerThreadList ); 00133 InitializeListHead( &CcExpressWorkQueue ); 00134 InitializeListHead( &CcRegularWorkQueue ); 00135 InitializeListHead( &CcPostTickWorkQueue ); 00136 00137 // 00138 // Set the number of worker threads based on the system size. 
00139 // 00140 00141 CcCapturedSystemSize = MmQuerySystemSize(); 00142 if (CcNumberWorkerThreads == 0) { 00143 00144 switch (CcCapturedSystemSize) { 00145 case MmSmallSystem: 00146 CcNumberWorkerThreads = ExCriticalWorkerThreads - 1; 00147 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8; 00148 CcAggressiveZeroThreshold = 1; 00149 break; 00150 00151 case MmMediumSystem: 00152 CcNumberWorkerThreads = ExCriticalWorkerThreads - 1; 00153 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4; 00154 CcAggressiveZeroThreshold = 2; 00155 break; 00156 00157 case MmLargeSystem: 00158 CcNumberWorkerThreads = ExCriticalWorkerThreads - 2; 00159 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 4 + 00160 MmNumberOfPhysicalPages / 8; 00161 CcAggressiveZeroThreshold = 4; 00162 #if 0 00163 // 00164 // Use more memory if we are a large server. 00165 // 00166 00167 if ((MmLargeSystemCache != 0) && 00168 (CcDirtyPageThreshold < (MmNumberOfPhysicalPages - (0xE00000 / PAGE_SIZE)))) { 00169 00170 CcDirtyPageThreshold = MmNumberOfPhysicalPages - (0xE00000 / PAGE_SIZE); 00171 } 00172 #endif 00173 break; 00174 00175 default: 00176 CcNumberWorkerThreads = 1; 00177 CcDirtyPageThreshold = MmNumberOfPhysicalPages / 8; 00178 } 00179 00180 // CcDirtyPageThreshold = (2*1024*1024)/PAGE_SIZE; 00181 00182 if (MmSystemCacheWs.MaximumWorkingSetSize > ((4*1024*1024)/PAGE_SIZE)) { 00183 CcDirtyPageThreshold = MmSystemCacheWs.MaximumWorkingSetSize - 00184 ((2*1024*1024)/PAGE_SIZE); 00185 } 00186 00187 CcDirtyPageTarget = CcDirtyPageThreshold / 2 + 00188 CcDirtyPageThreshold / 4; 00189 } 00190 00191 CcAggressiveZeroCount = 0; 00192 00193 // 00194 // Now allocate and initialize the above number of worker thread 00195 // items. 
00196 // 00197 00198 for (i = 0; i < CcNumberWorkerThreads; i++) { 00199 00200 WorkItem = ExAllocatePoolWithTag( NonPagedPool, sizeof(WORK_QUEUE_ITEM), 'qWcC' ); 00201 00202 if (WorkItem == NULL) { 00203 00204 CcBugCheck( 0, 0, 0 ); 00205 } 00206 00207 // 00208 // Initialize the work queue item and insert in our queue 00209 // of potential worker threads. 00210 // 00211 00212 ExInitializeWorkItem( WorkItem, CcWorkerThread, WorkItem ); 00213 InsertTailList( &CcIdleWorkerThreadList, &WorkItem->List ); 00214 } 00215 00216 // 00217 // Initialize the Lazy Writer thread structure, and start him up. 00218 // 00219 00220 RtlZeroMemory( &LazyWriter, sizeof(LAZY_WRITER) ); 00221 00222 KeInitializeSpinLock( &CcWorkQueueSpinlock ); 00223 InitializeListHead( &LazyWriter.WorkQueue ); 00224 00225 // 00226 // Store process address 00227 // 00228 00229 LazyWriter.OurProcess = PsGetCurrentProcess(); 00230 00231 // 00232 // Initialize the Scan Dpc and Timer. 00233 // 00234 00235 KeInitializeDpc( &LazyWriter.ScanDpc, &CcScanDpc, NULL ); 00236 KeInitializeTimer( &LazyWriter.ScanTimer ); 00237 00238 // 00239 // Now initialize the lookaside list for allocating Work Queue entries. 00240 // 00241 00242 switch ( CcCapturedSystemSize ) { 00243 00244 // 00245 // ~512 bytes 00246 // 00247 00248 case MmSmallSystem : 00249 NumberOfItems = 32; 00250 break; 00251 00252 // 00253 // ~1k bytes 00254 // 00255 00256 case MmMediumSystem : 00257 NumberOfItems = 64; 00258 break; 00259 00260 // 00261 // ~2k bytes 00262 // 00263 00264 case MmLargeSystem : 00265 NumberOfItems = 128; 00266 if (MmIsThisAnNtAsSystem()) { 00267 NumberOfItems += 128; 00268 } 00269 00270 break; 00271 } 00272 00273 ExInitializeNPagedLookasideList( &CcTwilightLookasideList, 00274 NULL, 00275 NULL, 00276 0, 00277 sizeof( WORK_QUEUE_ENTRY ), 00278 'kWcC', 00279 NumberOfItems ); 00280 00281 // 00282 // Initialize the per processor nonpaged lookaside lists and descriptors. 
00283 // 00284 00285 for (Index = 0; Index < (ULONG)KeNumberProcessors; Index += 1) { 00286 Prcb = KiProcessorBlock[Index]; 00287 00288 // 00289 // Initialize the large IRP per processor lookaside pointers. 00290 // 00291 00292 Prcb->PPLookasideList[LookasideTwilightList].L = &CcTwilightLookasideList; 00293 Lookaside = (PNPAGED_LOOKASIDE_LIST)ExAllocatePoolWithTag( NonPagedPool, 00294 sizeof(NPAGED_LOOKASIDE_LIST), 00295 'KWcC'); 00296 00297 if (Lookaside != NULL) { 00298 ExInitializeNPagedLookasideList( Lookaside, 00299 NULL, 00300 NULL, 00301 0, 00302 sizeof( WORK_QUEUE_ENTRY ), 00303 'KWcC', 00304 NumberOfItems ); 00305 00306 } else { 00307 Lookaside = &CcTwilightLookasideList; 00308 } 00309 00310 Prcb->PPLookasideList[LookasideTwilightList].P = Lookaside; 00311 } 00312 00313 // 00314 // Now initialize the Bcb zone 00315 // 00316 00317 { 00318 PVOID InitialSegment; 00319 ULONG InitialSegmentSize; 00320 ULONG RoundedBcbSize = (sizeof(BCB) + 7) & ~7; 00321 00322 switch ( CcCapturedSystemSize ) { 00323 00324 // 00325 // ~1.5k bytes 00326 // 00327 00328 case MmSmallSystem : 00329 InitialSegmentSize = sizeof(ZONE_SEGMENT_HEADER) + RoundedBcbSize * 8; 00330 break; 00331 00332 // 00333 // 1 Page 00334 // 00335 00336 case MmMediumSystem : 00337 InitialSegmentSize = PAGE_SIZE; 00338 break; 00339 00340 // 00341 // 3 Pages 00342 // 00343 00344 case MmLargeSystem : 00345 InitialSegmentSize = 3 * PAGE_SIZE; 00346 break; 00347 } 00348 00349 // 00350 // Allocate the initial allocation for the zone. If we cannot get it, 00351 // something must really be wrong, so we will just bugcheck. 
00352 // 00353 00354 if ((InitialSegment = ExAllocatePoolWithTag( NonPagedPool, 00355 InitialSegmentSize, 00356 'zBcC' )) == NULL) { 00357 00358 CcBugCheck( 0, 0, 0 ); 00359 } 00360 00361 if (!NT_SUCCESS(ExInitializeZone( &LazyWriter.BcbZone, 00362 RoundedBcbSize, 00363 InitialSegment, 00364 InitialSegmentSize ))) { 00365 CcBugCheck( 0, 0, 0 ); 00366 } 00367 } 00368 00369 // 00370 // Initialize the Deferred Write List. 00371 // 00372 00373 KeInitializeSpinLock( &CcDeferredWriteSpinLock ); 00374 InitializeListHead( &CcDeferredWrites ); 00375 00376 // 00377 // Initialize the Vacbs. 00378 // 00379 00380 CcInitializeVacbs(); 00381 00382 return TRUE; 00383 }

NTKERNELAPI VOID CcInitializeCacheMap ( IN PFILE_OBJECT FileObject,
IN PCC_FILE_SIZES FileSizes,
IN BOOLEAN PinAccess,
IN PCACHE_MANAGER_CALLBACKS Callbacks,
IN PVOID LazyWriteContext )
 

Definition at line 387 of file fssup.c.

References _SHARED_CACHE_MAP::ActiveVacbSpinLock, _CC_FILE_SIZES::AllocationSize, ASSERT, _SHARED_CACHE_MAP::BcbList, _SHARED_CACHE_MAP::BcbSpinLock, BEING_CREATED, CACHE_NTC_PRIVATE_CACHE_MAP, CACHE_NTC_SHARED_CACHE_MAP, _SHARED_CACHE_MAP::Callbacks, CcAcquireMasterLock, CcCleanSharedCacheMapList, CcCreateVacbArray(), CcDecrementOpenCount, CcDeleteSharedCacheMap(), CcDirtySharedCacheMapList, CcExtendVacbArray(), CcIncrementOpenCount, CcReleaseMasterLock, CcScheduleLazyWriteScan(), ClearFlag, _SHARED_CACHE_MAP::CreateEvent, DebugTrace, DebugTrace2, DEFAULT_CREATE_MODULO, _SHARED_CACHE_MAP::DirtyPages, _CACHE_UNINITIALIZE_EVENT::Event, _SHARED_CACHE_MAP::Event, ExAllocatePoolWithTag, Executive, ExFreePool(), ExRaiseStatus(), FALSE, _PRIVATE_CACHE_MAP::FileObject, _SHARED_CACHE_MAP::FileObject, _CC_FILE_SIZES::FileSize, _SHARED_CACHE_MAP::FileSize, FlagOn, _SHARED_CACHE_MAP::Flags, FO_RANDOM_ACCESS, FO_SEQUENTIAL_ONLY, FSRTL_FLAG2_DO_MODIFIED_WRITE, FsRtlNormalizeNtstatus(), KeInitializeEvent, KeInitializeSpinLock(), KernelMode, KeSetEvent(), KeWaitForSingleObject(), _SHARED_CACHE_MAP::LazyWriteContext, LazyWriter, _SHARED_CACHE_MAP::LocalEvent, me, mm, MmCreateSection(), MmDisableModifiedWriteOfSection(), MmExtendSection(), MODIFIED_WRITE_DISABLED, _CACHE_UNINITIALIZE_EVENT::Next, _PRIVATE_CACHE_MAP::NodeByteSize, _SHARED_CACHE_MAP::NodeByteSize, _PRIVATE_CACHE_MAP::NodeTypeCode, _SHARED_CACHE_MAP::NodeTypeCode, NonPagedPool, NT_SUCCESS, NTSTATUS(), NULL, ObDeleteCapturedInsertInfo(), ObReferenceObject, ONLY_SEQUENTIAL_ONLY_SEEN, _SHARED_CACHE_MAP::OpenCount, _LAZY_WRITER::OtherWork, PAGE_SIZE, PIN_ACCESS, PRIVATE_CACHE_MAP, _SHARED_CACHE_MAP::PrivateCacheMap, _PRIVATE_CACHE_MAP::PrivateLinks, _SHARED_CACHE_MAP::PrivateList, RANDOM_ACCESS_SEEN, _PRIVATE_CACHE_MAP::ReadAheadMask, _PRIVATE_CACHE_MAP::ReadAheadSpinLock, _LAZY_WRITER::ScanActive, _SHARED_CACHE_MAP::Section, _SHARED_CACHE_MAP::SectionSize, SetFlag, SHARED_CACHE_MAP, 
_SHARED_CACHE_MAP_LIST_CURSOR::SharedCacheMapLinks, _SHARED_CACHE_MAP::SharedCacheMapLinks, Status, _SHARED_CACHE_MAP::Status, TRUE, TRUNCATE_REQUIRED, try_return, _SHARED_CACHE_MAP::UninitializeEvent, VACB_MAPPING_GRANULARITY, _SHARED_CACHE_MAP::Vacbs, _SHARED_CACHE_MAP::ValidDataGoal, _CC_FILE_SIZES::ValidDataLength, _SHARED_CACHE_MAP::ValidDataLength, and WRITE_QUEUED.

Referenced by UdfCommonRead(), and UdfCreateInternalStream().

00397 : 00398 00399 This routine is intended to be called by File Systems only. It 00400 initializes the cache maps for data caching. It should be called 00401 every time a file is open or created, and NO_INTERMEDIATE_BUFFERING 00402 was specified as FALSE. 00403 00404 Arguments: 00405 00406 FileObject - A pointer to the newly-created file object. 00407 00408 FileSizes - A pointer to AllocationSize, FileSize and ValidDataLength 00409 for the file. ValidDataLength should contain MAXLONGLONG if 00410 valid data length tracking and callbacks are not desired. 00411 00412 PinAccess - FALSE if file will be used exclusively for Copy and Mdl 00413 access, or TRUE if file will be used for Pin access. 00414 (Files for Pin access are not limited in size as the caller 00415 must access multiple areas of the file at once.) 00416 00417 Callbacks - Structure of callbacks used by the Lazy Writer 00418 00419 LazyWriteContext - Parameter to be passed in to above routine. 00420 00421 Return Value: 00422 00423 None. If an error occurs, this routine will Raise the status. 00424 00425 --*/ 00426 00427 { 00428 KIRQL OldIrql; 00429 PSHARED_CACHE_MAP SharedCacheMap = NULL; 00430 PVOID CacheMapToFree = NULL; 00431 CC_FILE_SIZES LocalSizes; 00432 BOOLEAN WeSetBeingCreated = FALSE; 00433 BOOLEAN SharedListOwned = FALSE; 00434 BOOLEAN MustUninitialize = FALSE; 00435 BOOLEAN WeCreated = FALSE; 00436 00437 DebugTrace(+1, me, "CcInitializeCacheMap:\n", 0 ); 00438 DebugTrace( 0, me, " FileObject = %08lx\n", FileObject ); 00439 DebugTrace( 0, me, " FileSizes = %08lx\n", FileSizes ); 00440 00441 // 00442 // Make a local copy of the passed in file sizes before acquiring 00443 // the spin lock. 00444 // 00445 00446 LocalSizes = *FileSizes; 00447 00448 // 00449 // If no FileSize was given, set to one byte before maximizing below. 
00450 // 00451 00452 if (LocalSizes.AllocationSize.QuadPart == 0) { 00453 LocalSizes.AllocationSize.LowPart += 1; 00454 } 00455 00456 // 00457 // If caller has Write access or will allow write, then round 00458 // size to next create modulo. (***Temp*** there may be too many 00459 // apps that end up allowing shared write, thanks to our Dos heritage, 00460 // to keep that part of the check in.) 00461 // 00462 00463 if (FileObject->WriteAccess /*|| FileObject->SharedWrite */) { 00464 00465 LocalSizes.AllocationSize.QuadPart = LocalSizes.AllocationSize.QuadPart + (LONGLONG)(DEFAULT_CREATE_MODULO - 1); 00466 LocalSizes.AllocationSize.LowPart &= ~(DEFAULT_CREATE_MODULO - 1); 00467 00468 } else { 00469 00470 LocalSizes.AllocationSize.QuadPart = LocalSizes.AllocationSize.QuadPart + (LONGLONG)(VACB_MAPPING_GRANULARITY - 1); 00471 LocalSizes.AllocationSize.LowPart &= ~(VACB_MAPPING_GRANULARITY - 1); 00472 } 00473 00474 // 00475 // Do the allocate of the SharedCacheMap, based on an unsafe test, 00476 // while not holding a spinlock. Allocation failures look like we 00477 // never decided to allocate one here! 00478 // 00479 00480 if (FileObject->SectionObjectPointer->SharedCacheMap == NULL) { 00481 CacheMapToFree = ExAllocatePoolWithTag( NonPagedPool, sizeof(SHARED_CACHE_MAP), 'cScC' ); 00482 } 00483 00484 // 00485 // Serialize Creation/Deletion of all Shared CacheMaps 00486 // 00487 00488 CcAcquireMasterLock( &OldIrql ); 00489 SharedListOwned = TRUE; 00490 00491 // 00492 // Insure release of our global resource 00493 // 00494 00495 try { 00496 00497 // 00498 // Check for second initialization of same file object 00499 // 00500 00501 if (FileObject->PrivateCacheMap != NULL) { 00502 00503 DebugTrace( 0, 0, "CacheMap already initialized\n", 0 ); 00504 try_return( NOTHING ); 00505 } 00506 00507 // 00508 // Get current Shared Cache Map pointer indirectly off of the file object. 00509 // (The actual pointer is typically in a file system data structure, such 00510 // as an Fcb.) 
00511 // 00512 00513 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap; 00514 00515 // 00516 // If there is no SharedCacheMap, then we must create a section and 00517 // the SharedCacheMap structure. 00518 // 00519 00520 if (SharedCacheMap == NULL) { 00521 00522 // 00523 // After successfully creating the section, allocate the SharedCacheMap. 00524 // 00525 00526 WeCreated = TRUE; 00527 00528 if (CacheMapToFree == NULL) { 00529 CacheMapToFree = (PSHARED_CACHE_MAP)ExAllocatePoolWithTag( NonPagedPool, 00530 sizeof(SHARED_CACHE_MAP), 00531 'cScC' ); 00532 } 00533 00534 SharedCacheMap = CacheMapToFree; 00535 CacheMapToFree = NULL; 00536 00537 if (SharedCacheMap == NULL) { 00538 00539 DebugTrace( 0, 0, "Failed to allocate SharedCacheMap\n", 0 ); 00540 00541 CcReleaseMasterLock( OldIrql ); 00542 SharedListOwned = FALSE; 00543 00544 ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES ); 00545 } 00546 00547 // 00548 // Zero the SharedCacheMap and fill in the nonzero portions later. 00549 // 00550 00551 RtlZeroMemory( SharedCacheMap, sizeof(SHARED_CACHE_MAP) ); 00552 00553 #if DANLO 00554 SharedCacheMap->OpenCountLog.Size = sizeof(SharedCacheMap->OpenCountLog.Log)/sizeof(CC_LOG_ENTRY); 00555 #endif 00556 00557 // 00558 // Now initialize the Shared Cache Map. 00559 // 00560 00561 SharedCacheMap->NodeTypeCode = CACHE_NTC_SHARED_CACHE_MAP; 00562 SharedCacheMap->NodeByteSize = sizeof(SHARED_CACHE_MAP); 00563 SharedCacheMap->FileObject = FileObject; 00564 SharedCacheMap->FileSize = LocalSizes.FileSize; 00565 SharedCacheMap->ValidDataLength = LocalSizes.ValidDataLength; 00566 SharedCacheMap->ValidDataGoal = LocalSizes.ValidDataLength; 00567 // SharedCacheMap->Section set below 00568 00569 // 00570 // Initialize the spin locks. 
00571 // 00572 00573 KeInitializeSpinLock( &SharedCacheMap->ActiveVacbSpinLock ); 00574 KeInitializeSpinLock( &SharedCacheMap->BcbSpinLock ); 00575 00576 if (PinAccess) { 00577 SetFlag(SharedCacheMap->Flags, PIN_ACCESS); 00578 } 00579 00580 // 00581 // Initialize our allocation hint for the local event. 00582 // 00583 00584 SharedCacheMap->LocalEvent = &SharedCacheMap->Event; 00585 00586 // 00587 // If this file has FO_SEQUENTIAL_ONLY set, then remember that 00588 // in the SharedCacheMap. 00589 // 00590 00591 if (FlagOn(FileObject->Flags, FO_SEQUENTIAL_ONLY)) { 00592 SetFlag(SharedCacheMap->Flags, ONLY_SEQUENTIAL_ONLY_SEEN); 00593 } 00594 00595 // 00596 // Do the round-robin allocation of the spinlock for the shared 00597 // cache map. Note the manipulation of the next 00598 // counter is safe, since we have the CcMasterSpinLock 00599 // exclusive. 00600 // 00601 00602 InitializeListHead( &SharedCacheMap->BcbList ); 00603 SharedCacheMap->Callbacks = Callbacks; 00604 SharedCacheMap->LazyWriteContext = LazyWriteContext; 00605 00606 // 00607 // Initialize listhead for all PrivateCacheMaps 00608 // 00609 00610 InitializeListHead( &SharedCacheMap->PrivateList ); 00611 00612 // 00613 // Insert the new Shared Cache Map in the global list 00614 // 00615 00616 InsertTailList( &CcCleanSharedCacheMapList, 00617 &SharedCacheMap->SharedCacheMapLinks ); 00618 00619 // 00620 // Finally, store the pointer to the Shared Cache Map back 00621 // via the indirect pointer in the File Object. 00622 // 00623 00624 FileObject->SectionObjectPointer->SharedCacheMap = SharedCacheMap; 00625 00626 // 00627 // We must reference this file object so that it cannot go away 00628 // until we do CcUninitializeCacheMap below. Note we cannot 00629 // find or rely on the FileObject that Memory Management has, 00630 // although normally it will be this same one anyway. 
00631 // 00632 00633 ObReferenceObject ( FileObject ); 00634 00635 } else { 00636 00637 // 00638 // If this file has FO_SEQUENTIAL_ONLY clear, then remember that 00639 // in the SharedCacheMap. 00640 // 00641 00642 if (!FlagOn(FileObject->Flags, FO_SEQUENTIAL_ONLY)) { 00643 ClearFlag(SharedCacheMap->Flags, ONLY_SEQUENTIAL_ONLY_SEEN); 00644 } 00645 } 00646 00647 // 00648 // If this file is opened for random access, remember this in 00649 // the SharedCacheMap. 00650 // 00651 00652 if (FlagOn(FileObject->Flags, FO_RANDOM_ACCESS)) { 00653 SetFlag(SharedCacheMap->Flags, RANDOM_ACCESS_SEEN); 00654 } 00655 00656 // 00657 // Make sure that no one is trying to lazy delete it in the case 00658 // that the Cache Map was already there. 00659 // 00660 00661 ClearFlag(SharedCacheMap->Flags, TRUNCATE_REQUIRED); 00662 00663 // 00664 // In case there has been a CcUnmapAndPurge call, we check here if we 00665 // if we need to recreate the section and map it. 00666 // 00667 00668 if ((SharedCacheMap->Vacbs == NULL) && 00669 !FlagOn(SharedCacheMap->Flags, BEING_CREATED)) { 00670 00671 // 00672 // Increment the OpenCount on the CacheMap. 00673 // 00674 00675 CcIncrementOpenCount( SharedCacheMap, 'onnI' ); 00676 MustUninitialize = TRUE; 00677 00678 // 00679 // We still want anyone else to wait. 00680 // 00681 00682 SetFlag(SharedCacheMap->Flags, BEING_CREATED); 00683 WeSetBeingCreated = TRUE; 00684 00685 // 00686 // If there is a create event, then this must be the path where we 00687 // we were only unmapped. We will just clear it here again in case 00688 // someone needs to wait again this time too. 00689 // 00690 00691 if (SharedCacheMap->CreateEvent != NULL) { 00692 00693 KeInitializeEvent( SharedCacheMap->CreateEvent, 00694 NotificationEvent, 00695 FALSE ); 00696 } 00697 00698 // 00699 // Release global resource 00700 // 00701 00702 CcReleaseMasterLock( OldIrql ); 00703 SharedListOwned = FALSE; 00704 00705 // 00706 // We have to test this, because the section may only be unmapped. 
00707 // 00708 00709 if (SharedCacheMap->Section == NULL) { 00710 00711 LARGE_INTEGER LargeZero = {0,0}; 00712 00713 // 00714 // Call MM to create a section for this file, for the calculated 00715 // section size. Note that we have the choice in this service to 00716 // pass in a FileHandle or a FileObject pointer, but not both. 00717 // Naturally we want to pass in the handle. 00718 // 00719 00720 DebugTrace( 0, mm, "MmCreateSection:\n", 0 ); 00721 DebugTrace2(0, mm, " MaximumSize = %08lx, %08lx\n", 00722 LocalSizes.AllocationSize.LowPart, 00723 LocalSizes.AllocationSize.HighPart ); 00724 DebugTrace( 0, mm, " FileObject = %08lx\n", FileObject ); 00725 00726 SharedCacheMap->Status = MmCreateSection( &SharedCacheMap->Section, 00727 SECTION_MAP_READ 00728 | SECTION_MAP_WRITE 00729 | SECTION_QUERY, 00730 NULL, 00731 &LocalSizes.AllocationSize, 00732 PAGE_READWRITE, 00733 SEC_COMMIT, 00734 NULL, 00735 FileObject ); 00736 00737 DebugTrace( 0, mm, " <Section = %08lx\n", SharedCacheMap->Section ); 00738 00739 if (!NT_SUCCESS( SharedCacheMap->Status )){ 00740 DebugTrace( 0, 0, "Error from MmCreateSection = %08lx\n", 00741 SharedCacheMap->Status ); 00742 00743 SharedCacheMap->Section = NULL; 00744 ExRaiseStatus( FsRtlNormalizeNtstatus( SharedCacheMap->Status, 00745 STATUS_UNEXPECTED_MM_CREATE_ERR )); 00746 } 00747 00748 ObDeleteCapturedInsertInfo(SharedCacheMap->Section); 00749 00750 // 00751 // If this is a stream file object, then no user can map it, 00752 // and we should keep the modified page writer out of it. 
00753 // 00754 00755 if (!FlagOn(((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->Flags2, 00756 FSRTL_FLAG2_DO_MODIFIED_WRITE) && 00757 (FileObject->FsContext2 == NULL)) { 00758 00759 BOOLEAN Disabled; 00760 00761 Disabled = MmDisableModifiedWriteOfSection( FileObject->SectionObjectPointer ); 00762 CcAcquireMasterLock( &OldIrql ); 00763 SetFlag(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED); 00764 CcReleaseMasterLock( OldIrql ); 00765 00766 //**** ASSERT( Disabled ); 00767 } 00768 00769 // 00770 // Create the Vacb array. 00771 // 00772 00773 CcCreateVacbArray( SharedCacheMap, LocalSizes.AllocationSize ); 00774 } 00775 00776 // 00777 // If the section already exists, we still have to call MM to 00778 // extend, in case it is not large enough. 00779 // 00780 00781 else { 00782 00783 if ( LocalSizes.AllocationSize.QuadPart > SharedCacheMap->SectionSize.QuadPart ) { 00784 00785 NTSTATUS Status; 00786 00787 DebugTrace( 0, mm, "MmExtendSection:\n", 0 ); 00788 DebugTrace( 0, mm, " Section = %08lx\n", SharedCacheMap->Section ); 00789 DebugTrace2(0, mm, " Size = %08lx, %08lx\n", 00790 LocalSizes.AllocationSize.LowPart, 00791 LocalSizes.AllocationSize.HighPart ); 00792 00793 Status = MmExtendSection( SharedCacheMap->Section, 00794 &LocalSizes.AllocationSize, 00795 TRUE ); 00796 00797 if (!NT_SUCCESS(Status)) { 00798 00799 DebugTrace( 0, 0, "Error from MmExtendSection, Status = %08lx\n", 00800 Status ); 00801 00802 ExRaiseStatus( FsRtlNormalizeNtstatus( Status, 00803 STATUS_UNEXPECTED_MM_EXTEND_ERR )); 00804 } 00805 } 00806 00807 // 00808 // Extend the Vacb array. 00809 // 00810 00811 CcExtendVacbArray( SharedCacheMap, LocalSizes.AllocationSize ); 00812 } 00813 00814 // 00815 // Now show that we are all done and resume any waiters. 
00816 // 00817 00818 CcAcquireMasterLock( &OldIrql ); 00819 ClearFlag(SharedCacheMap->Flags, BEING_CREATED); 00820 WeSetBeingCreated = FALSE; 00821 if (SharedCacheMap->CreateEvent != NULL) { 00822 KeSetEvent( SharedCacheMap->CreateEvent, 0, FALSE ); 00823 } 00824 CcReleaseMasterLock( OldIrql ); 00825 } 00826 00827 // 00828 // Else if the section is already there, we make sure it is large 00829 // enough by calling CcExtendCacheSection. 00830 // 00831 00832 else { 00833 00834 // 00835 // If the SharedCacheMap is currently being created we have 00836 // to optionally create and wait on an event for it. Note that 00837 // the only safe time to delete the event is in 00838 // CcUninitializeCacheMap, because we otherwise have no way of 00839 // knowing when everyone has reached the KeWaitForSingleObject. 00840 // 00841 00842 if (FlagOn(SharedCacheMap->Flags, BEING_CREATED)) { 00843 00844 if (SharedCacheMap->CreateEvent == NULL) { 00845 00846 // 00847 // If the local event is not being used then we can grab it. 00848 // (Should be quite rare that it is in use.) 00849 // 00850 00851 SharedCacheMap->CreateEvent = InterlockedExchangePointer( &SharedCacheMap->LocalEvent, NULL ); 00852 00853 if (SharedCacheMap->CreateEvent == NULL) { 00854 00855 SharedCacheMap->CreateEvent = (PKEVENT)ExAllocatePoolWithTag( NonPagedPool, 00856 sizeof(KEVENT), 00857 'vEcC' ); 00858 } 00859 00860 if (SharedCacheMap->CreateEvent == NULL) { 00861 DebugTrace( 0, 0, "Failed to allocate CreateEvent\n", 0 ); 00862 00863 CcReleaseMasterLock( OldIrql ); 00864 SharedListOwned = FALSE; 00865 00866 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES); 00867 } 00868 00869 KeInitializeEvent( SharedCacheMap->CreateEvent, 00870 NotificationEvent, 00871 FALSE ); 00872 } 00873 00874 // 00875 // Increment the OpenCount on the CacheMap. 
00876 // 00877 00878 CcIncrementOpenCount( SharedCacheMap, 'ecnI' ); 00879 MustUninitialize = TRUE; 00880 00881 // 00882 // Release global resource before waiting 00883 // 00884 00885 CcReleaseMasterLock( OldIrql ); 00886 SharedListOwned = FALSE; 00887 00888 DebugTrace( 0, 0, "Waiting on CreateEvent\n", 0 ); 00889 00890 KeWaitForSingleObject( SharedCacheMap->CreateEvent, 00891 Executive, 00892 KernelMode, 00893 FALSE, 00894 (PLARGE_INTEGER)NULL); 00895 00896 // 00897 // If the real creator got an error, then we must bomb 00898 // out too. 00899 // 00900 00901 if (!NT_SUCCESS(SharedCacheMap->Status)) { 00902 ExRaiseStatus( FsRtlNormalizeNtstatus( SharedCacheMap->Status, 00903 STATUS_UNEXPECTED_MM_CREATE_ERR )); 00904 } 00905 } 00906 else { 00907 00908 PCACHE_UNINITIALIZE_EVENT CUEvent, EventNext; 00909 00910 // 00911 // Increment the OpenCount on the CacheMap. 00912 // 00913 00914 CcIncrementOpenCount( SharedCacheMap, 'esnI' ); 00915 MustUninitialize = TRUE; 00916 00917 // 00918 // If there is a process waiting on an uninitialize on this 00919 // cache map to complete, let the thread that is waiting go, 00920 // since the uninitialize is now complete. 00921 // 00922 CUEvent = SharedCacheMap->UninitializeEvent; 00923 00924 while (CUEvent != NULL) { 00925 EventNext = CUEvent->Next; 00926 KeSetEvent(&CUEvent->Event, 0, FALSE); 00927 CUEvent = EventNext; 00928 } 00929 00930 SharedCacheMap->UninitializeEvent = NULL; 00931 00932 // 00933 // Release global resource 00934 // 00935 00936 CcReleaseMasterLock( OldIrql ); 00937 SharedListOwned = FALSE; 00938 } 00939 } 00940 00941 { 00942 PPRIVATE_CACHE_MAP PrivateCacheMap; 00943 00944 // 00945 // Now allocate (if local one already in use) and initialize 00946 // the Private Cache Map. 00947 // 00948 00949 PrivateCacheMap = &SharedCacheMap->PrivateCacheMap; 00950 00951 // 00952 // See if we should allocate a PrivateCacheMap while not holding 00953 // a spinlock. 
00954 // 00955 00956 if (CacheMapToFree != NULL) { 00957 ExFreePool( CacheMapToFree ); 00958 CacheMapToFree = NULL; 00959 } 00960 00961 if (PrivateCacheMap->NodeTypeCode != 0) { 00962 CacheMapToFree = ExAllocatePoolWithTag( NonPagedPool, sizeof(PRIVATE_CACHE_MAP), 'cPcC' ); 00963 } 00964 00965 // 00966 // Insert the new PrivateCacheMap in the list off the SharedCacheMap. 00967 // 00968 00969 CcAcquireMasterLock( &OldIrql ); 00970 SharedListOwned = TRUE; 00971 00972 // 00973 // Now make sure there is still no PrivateCacheMap, and if so just get out. 00974 // 00975 00976 if (FileObject->PrivateCacheMap == NULL) { 00977 00978 // 00979 // Is the local one already in use? 00980 // 00981 00982 if (PrivateCacheMap->NodeTypeCode != 0) { 00983 00984 // 00985 // Use the one allocated above, if there is one, else go to pool now. 00986 // 00987 00988 if (CacheMapToFree == NULL) { 00989 CacheMapToFree = 00990 (PPRIVATE_CACHE_MAP)ExAllocatePoolWithTag( NonPagedPool, 00991 sizeof(PRIVATE_CACHE_MAP), 00992 'cPcC' ); 00993 } 00994 PrivateCacheMap = CacheMapToFree; 00995 CacheMapToFree = NULL; 00996 } 00997 00998 if (PrivateCacheMap == NULL) { 00999 01000 DebugTrace( 0, 0, "Failed to allocate PrivateCacheMap\n", 0 ); 01001 01002 CcReleaseMasterLock( OldIrql ); 01003 SharedListOwned = FALSE; 01004 01005 ExRaiseStatus(STATUS_INSUFFICIENT_RESOURCES); 01006 } 01007 01008 RtlZeroMemory( PrivateCacheMap, sizeof(PRIVATE_CACHE_MAP) ); 01009 01010 PrivateCacheMap->NodeTypeCode = CACHE_NTC_PRIVATE_CACHE_MAP; 01011 PrivateCacheMap->NodeByteSize = sizeof(PRIVATE_CACHE_MAP); 01012 PrivateCacheMap->FileObject = FileObject; 01013 PrivateCacheMap->ReadAheadMask = PAGE_SIZE - 1; 01014 01015 // 01016 // Initialize the spin lock. 
01017 // 01018 01019 KeInitializeSpinLock( &PrivateCacheMap->ReadAheadSpinLock ); 01020 01021 InsertTailList( &SharedCacheMap->PrivateList, &PrivateCacheMap->PrivateLinks ); 01022 01023 FileObject->PrivateCacheMap = PrivateCacheMap; 01024 01025 } else { 01026 01027 // 01028 // We raced with another initializer for the same fileobject and must 01029 // drop our (to this point speculative) opencount. 01030 // 01031 01032 ASSERT( SharedCacheMap->OpenCount > 1 ); 01033 01034 CcDecrementOpenCount( SharedCacheMap, 'rpnI' ); 01035 SharedCacheMap = NULL; 01036 } 01037 } 01038 01039 MustUninitialize = FALSE; 01040 try_exit: NOTHING; 01041 } 01042 finally { 01043 01044 // 01045 // See if we got an error and must uninitialize the SharedCacheMap 01046 // 01047 01048 if (MustUninitialize) { 01049 01050 if (!SharedListOwned) { 01051 CcAcquireMasterLock( &OldIrql ); 01052 } 01053 if (WeSetBeingCreated) { 01054 if (SharedCacheMap->CreateEvent != NULL) { 01055 KeSetEvent( SharedCacheMap->CreateEvent, 0, FALSE ); 01056 } 01057 ClearFlag(SharedCacheMap->Flags, BEING_CREATED); 01058 } 01059 01060 // 01061 // Now release our open count. 01062 // 01063 01064 CcDecrementOpenCount( SharedCacheMap, 'umnI' ); 01065 01066 if ((SharedCacheMap->OpenCount == 0) && 01067 !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) && 01068 (SharedCacheMap->DirtyPages == 0)) { 01069 01070 // 01071 // On PinAccess it is safe and necessary to eliminate 01072 // the structure immediately. 01073 // 01074 01075 if (PinAccess) { 01076 01077 CcDeleteSharedCacheMap( SharedCacheMap, OldIrql, FALSE ); 01078 01079 // 01080 // If it is not PinAccess, we must lazy delete, because 01081 // we could get into a deadlock trying to acquire the 01082 // stream exclusive when we dereference the file object. 01083 // 01084 01085 } else { 01086 01087 // 01088 // Move it to the dirty list so the lazy write scan will 01089 // see it. 
01090 // 01091 01092 RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks ); 01093 InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks, 01094 &SharedCacheMap->SharedCacheMapLinks ); 01095 01096 // 01097 // Make sure the Lazy Writer will wake up, because we 01098 // want him to delete this SharedCacheMap. 01099 // 01100 01101 LazyWriter.OtherWork = TRUE; 01102 if (!LazyWriter.ScanActive) { 01103 CcScheduleLazyWriteScan(); 01104 } 01105 01106 CcReleaseMasterLock( OldIrql ); 01107 } 01108 01109 } else { 01110 01111 CcReleaseMasterLock( OldIrql ); 01112 } 01113 01114 SharedListOwned = FALSE; 01115 01116 // 01117 // If we did not create this SharedCacheMap, then there is a 01118 // possibility that it is in the dirty list. Once we are sure 01119 // we have the spinlock, just make sure it is in the clean list 01120 // if there are no dirty bytes and the open count is nonzero. 01121 // (The latter test is almost guaranteed, of course, but we check 01122 // it to be safe.) 01123 // 01124 01125 } else if (!WeCreated && 01126 (SharedCacheMap != NULL)) { 01127 01128 if (!SharedListOwned) { 01129 01130 CcAcquireMasterLock( &OldIrql ); 01131 SharedListOwned = TRUE; 01132 } 01133 01134 if ((SharedCacheMap->DirtyPages == 0) && 01135 (SharedCacheMap->OpenCount != 0)) { 01136 01137 RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks ); 01138 InsertTailList( &CcCleanSharedCacheMapList, 01139 &SharedCacheMap->SharedCacheMapLinks ); 01140 } 01141 } 01142 01143 // 01144 // Release global resource 01145 // 01146 01147 if (SharedListOwned) { 01148 CcReleaseMasterLock( OldIrql ); 01149 } 01150 01151 if (CacheMapToFree != NULL) { 01152 ExFreePool(CacheMapToFree); 01153 } 01154 01155 } 01156 01157 DebugTrace(-1, me, "CcInitializeCacheMap -> VOID\n", 0 ); 01158 01159 return; 01160 }

//
//  CcIsThereDirtyData - definition at line 362 of file logsup.c.
//
//  Returns TRUE if any non-temporary cached stream on the given volume
//  (Vpb) still has unwritten dirty pages; used to decide whether a
//  dismount must wait for the lazy writer.
//

NTKERNELAPI
BOOLEAN
CcIsThereDirtyData (
    IN PVPB Vpb
    )

/*++

Routine Description:

    This routine returns TRUE if the specified Vcb has any unwritten dirty
    data in the cache.

Arguments:

    Vpb - specifies Vpb to check for

Return Value:

    FALSE - if the Vpb has no dirty data
    TRUE - if the Vpb has dirty data

--*/

{
    PSHARED_CACHE_MAP SharedCacheMap;
    KIRQL OldIrql;
    ULONG LoopsWithLockHeld = 0;

    //
    //  Synchronize with changes to the SharedCacheMap list.
    //

    CcAcquireMasterLock( &OldIrql );

    SharedCacheMap = CONTAINING_RECORD( CcDirtySharedCacheMapList.SharedCacheMapLinks.Flink,
                                        SHARED_CACHE_MAP,
                                        SharedCacheMapLinks );

    while (&SharedCacheMap->SharedCacheMapLinks != &CcDirtySharedCacheMapList.SharedCacheMapLinks) {

        //
        //  Look at this one if the Vpb matches and if there is dirty data.
        //  For what it's worth, don't worry about dirty data in temporary files,
        //  as that should not concern the caller if it wants to dismount.
        //
        //  The IS_CURSOR test skips the list-cursor placeholders that share
        //  this list with real shared cache maps.
        //

        if (!FlagOn(SharedCacheMap->Flags, IS_CURSOR) &&
            (SharedCacheMap->FileObject->Vpb == Vpb) &&
            (SharedCacheMap->DirtyPages != 0) &&
            !FlagOn(SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE)) {

            CcReleaseMasterLock( OldIrql );
            return TRUE;
        }

        //
        //  Make sure we occasionally drop the lock.  Set WRITE_QUEUED
        //  to keep the guy from going away, and increment DirtyPages to
        //  keep it in this list, so our list position stays valid across
        //  the release/reacquire.
        //

        if ((++LoopsWithLockHeld >= 20) &&
            !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED | IS_CURSOR)) {

            SetFlag( (volatile ULONG) SharedCacheMap->Flags, WRITE_QUEUED);
            (volatile ULONG) SharedCacheMap->DirtyPages += 1;
            CcReleaseMasterLock( OldIrql );
            LoopsWithLockHeld = 0;
            CcAcquireMasterLock( &OldIrql );
            ClearFlag( (volatile ULONG) SharedCacheMap->Flags, WRITE_QUEUED);
            (volatile ULONG) SharedCacheMap->DirtyPages -= 1;
        }

        //
        //  Now loop back for the next cache map.
        //

        SharedCacheMap =
            CONTAINING_RECORD( SharedCacheMap->SharedCacheMapLinks.Flink,
                               SHARED_CACHE_MAP,
                               SharedCacheMapLinks );
    }

    CcReleaseMasterLock( OldIrql );

    return FALSE;
}

//
//  CcMapData - definition at line 52 of file pinsup.c.
//
//  Referenced by LfsFlushLfcb, LfsPinOrMapData, UdfLookupDirEntryPostProcessing,
//  UdfLookupInitialDirEntry, UdfLookupPsnOfExtent, UdfMapMetadataView and
//  UdfUpdateVcbPhase0.
//

NTKERNELAPI
BOOLEAN
CcMapData (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Wait,
    OUT PVOID *Bcb,
    OUT PVOID *Buffer
    )

/*++

Routine Description:

    This routine attempts to map the specified file data in the cache.
    A pointer is returned to the desired data in the cache.

    If the caller does not want to block on this call, then
    Wait should be supplied as FALSE.  If Wait was supplied as FALSE and
    it is currently impossible to supply the requested data without
    blocking, then this routine will return FALSE.  However, if the
    data is immediately accessible in the cache and no blocking is
    required, this routine returns TRUE with a pointer to the data.

    Note that a call to this routine with Wait supplied as TRUE is
    considerably faster than a call with Wait supplied as FALSE, because
    in the Wait TRUE case we only have to make sure the data is mapped
    in order to return.

    It is illegal to modify data that is only mapped, and can in fact lead
    to serious problems.  It is impossible to check for this in all cases,
    however CcSetDirtyPinnedData may implement some Assertions to check for
    this.  If the caller wishes to modify data that it has only mapped, then
    it must *first* call CcPinMappedData.

    In any case, the caller MUST subsequently call CcUnpinData.
    Naturally if CcPinRead or CcPreparePinWrite were called multiple
    times for the same data, CcUnpinData must be called the same number
    of times.

    The returned Buffer pointer is valid until the data is unpinned, at
    which point it is invalid to use the pointer further.  This buffer
    pointer will remain valid if CcPinMappedData is called.

    Note that under some circumstances (like Wait supplied as FALSE or more
    than a page is requested), this routine may actually pin the data, however
    it is not necessary, and in fact not correct, for the caller to be
    concerned about this.

Arguments:

    FileObject - Pointer to the file object for a file which was
                 opened with NO_INTERMEDIATE_BUFFERING clear, i.e., for
                 which CcInitializeCacheMap was called by the file system.

    FileOffset - Byte offset in file for desired data.

    Length - Length of desired data in bytes.

    Wait - FALSE if caller may not block, TRUE otherwise (see description
           above)

    Bcb - On the first call this returns a pointer to a Bcb
          parameter which must be supplied as input on all subsequent
          calls, for this buffer

    Buffer - Returns pointer to desired data, valid until the buffer is
             unpinned or freed.  This pointer will remain valid if
             CcPinMappedData is called.

Return Value:

    FALSE - if Wait was supplied as FALSE and the data was not delivered

    TRUE - if the data is being delivered

--*/

{
    PSHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER BeyondLastByte;
    ULONG ReceivedLength;
    ULONG SavedState;
    volatile UCHAR ch;
    ULONG PageCount = COMPUTE_PAGES_SPANNED((ULongToPtr(FileOffset->LowPart)), Length);
    PETHREAD Thread = PsGetCurrentThread();

    DebugTrace(+1, me, "CcMapData\n", 0 );

    MmSavePageFaultReadAhead( Thread, &SavedState );

    //
    //  Increment performance counters
    //

    if (Wait) {

        CcMapDataWait += 1;

        //
        //  Initialize the indirect pointer to our miss counter.
        //

        CcMissCounter = &CcMapDataWaitMiss;

    } else {
        CcMapDataNoWait += 1;
    }

    //
    //  Get pointer to SharedCacheMap.
    //
    //  NOTE(review): this reads the second PVOID slot of the
    //  SectionObjectPointer by offset rather than by member name -
    //  presumably the SharedCacheMap member; confirm against the
    //  SECTION_OBJECT_POINTERS declaration.
    //

    SharedCacheMap = *(PSHARED_CACHE_MAP *)((PCHAR)FileObject->SectionObjectPointer
                                            + sizeof(PVOID));

    //
    //  Call local routine to Map or Access the file data.  If we cannot map
    //  the data because of a Wait condition, return FALSE.
    //

    if (Wait) {

        *Buffer = CcGetVirtualAddress( SharedCacheMap,
                                       *FileOffset,
                                       (PVACB *)Bcb,
                                       &ReceivedLength );

        ASSERT( ReceivedLength >= Length );

    } else if (!CcPinFileData( FileObject,
                               FileOffset,
                               Length,
                               TRUE,
                               FALSE,
                               Wait,
                               (PBCB *)Bcb,
                               Buffer,
                               &BeyondLastByte )) {

        DebugTrace(-1, me, "CcMapData -> FALSE\n", 0 );

        CcMapDataNoWaitMiss += 1;

        return FALSE;

    } else {

        ASSERT( (BeyondLastByte.QuadPart - FileOffset->QuadPart) >= Length );

#if LIST_DBG
        {
            KIRQL OldIrql;
            PBCB BcbTemp = (PBCB)*Bcb;

            ExAcquireSpinLock( &CcBcbSpinLock, &OldIrql );

            if (BcbTemp->CcBcbLinks.Flink == NULL) {

                InsertTailList( &CcBcbList, &BcbTemp->CcBcbLinks );
                CcBcbCount += 1;
                ExReleaseSpinLock( &CcBcbSpinLock, OldIrql );
                SetCallersAddress( BcbTemp );

            } else {
                ExReleaseSpinLock( &CcBcbSpinLock, OldIrql );
            }

        }
#endif

    }

    //
    //  Now let's just sit here and take the miss(es) like a man (and count
    //  them).  Touching one byte per page forces each page in now, with a
    //  fault-ahead hint covering the remaining pages.
    //

    try {

        //
        //  Loop to touch each page
        //

        BeyondLastByte.LowPart = 0;

        while (PageCount != 0) {

            MmSetPageFaultReadAhead( Thread, PageCount - 1 );

            ch = *((volatile UCHAR *)(*Buffer) + BeyondLastByte.LowPart);

            BeyondLastByte.LowPart += PAGE_SIZE;
            PageCount -= 1;
        }

    } finally {

        MmResetPageFaultReadAhead( Thread, SavedState );

        //
        //  On an exception, drop our map/pin reference before propagating.
        //

        if (AbnormalTermination() && (*Bcb != NULL)) {
            CcUnpinFileData( (PBCB)*Bcb, TRUE, UNPIN );
            *Bcb = NULL;
        }
    }

    CcMissCounter = &CcThrowAway;

    //
    //  Increment the pointer as a reminder that it is read only.
    //

    *(PCHAR *)Bcb += 1;

    DebugTrace(-1, me, "CcMapData -> TRUE\n", 0 );

    return TRUE;
}

//
//  CcMdlRead - definition at line 30 of file mdlsup.c.
//
//  Referenced by FsRtlMdlReadDev and UdfCommonRead.
//
//  NOTE(review): the summary table shows "OUT PMDL MdlChain", but the body
//  consistently dereferences it (*MdlChain), so the parameter is a pointer
//  to the chain head as declared below - the '*' was lost in rendering.
//

NTKERNELAPI
VOID
CcMdlRead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    OUT PMDL *MdlChain,
    OUT PIO_STATUS_BLOCK IoStatus
    )

/*++

Routine Description:

    This routine attempts to lock the specified file data in the cache
    and return a description of it in an Mdl along with the correct
    I/O status.  It is *not* safe to call this routine from Dpc level.

    This routine is synchronous, and raises on errors.

    As each call returns, the pages described by the Mdl are
    locked in memory, but not mapped in system space.  If the caller
    needs the pages mapped in system space, then it must map them.

    Note that each call is a "single shot" which should be followed by
    a call to CcMdlReadComplete.  To resume an Mdl-based transfer, the
    caller must form one or more subsequent calls to CcMdlRead with
    appropriately adjusted parameters.

Arguments:

    FileObject - Pointer to the file object for a file which was
                 opened with NO_INTERMEDIATE_BUFFERING clear, i.e., for
                 which CcInitializeCacheMap was called by the file system.

    FileOffset - Byte offset in file for desired data.

    Length - Length of desired data in bytes.

    MdlChain - On output it returns a pointer to an Mdl chain describing
               the desired data.  Note that even if FALSE is returned,
               one or more Mdls may have been allocated, as may be
               ascertained by the IoStatus.Information field (see below).

    IoStatus - Pointer to standard I/O status block to receive the status
               for the transfer.  (STATUS_SUCCESS guaranteed for cache
               hits, otherwise the actual I/O status is returned.)  The
               I/O Information Field indicates how many bytes have been
               successfully locked down in the Mdl Chain.

Return Value:

    None

Raises:

    STATUS_INSUFFICIENT_RESOURCES - If a pool allocation failure occurs.

--*/

{
    PSHARED_CACHE_MAP SharedCacheMap;
    PPRIVATE_CACHE_MAP PrivateCacheMap;
    PVOID CacheBuffer;
    LARGE_INTEGER FOffset;
    PMDL Mdl = NULL;
    PMDL MdlTemp;
    PETHREAD Thread = PsGetCurrentThread();
    ULONG SavedState = 0;
    ULONG OriginalLength = Length;
    ULONG Information = 0;
    PVACB Vacb = NULL;
    ULONG SavedMissCounter = 0;

    KIRQL OldIrql;
    ULONG ActivePage;
    ULONG PageIsDirty;
    PVACB ActiveVacb = NULL;

    DebugTrace(+1, me, "CcMdlRead\n", 0 );
    DebugTrace( 0, me, "    FileObject = %08lx\n", FileObject );
    DebugTrace2(0, me, "    FileOffset = %08lx, %08lx\n", FileOffset->LowPart,
                FileOffset->HighPart );
    DebugTrace( 0, me, "    Length = %08lx\n", Length );

    //
    //  Save the current readahead hints.
    //

    MmSavePageFaultReadAhead( Thread, &SavedState );

    //
    //  Get pointer to SharedCacheMap.
    //

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;
    PrivateCacheMap = FileObject->PrivateCacheMap;

    //
    //  See if we have an active Vacb, that we need to free.
    //

    GetActiveVacb( SharedCacheMap, OldIrql, ActiveVacb, ActivePage, PageIsDirty );

    //
    //  If there is an end of a page to be zeroed, then free that page now,
    //  so we don't send Greg the uninitialized data...
    //

    if ((ActiveVacb != NULL) || (SharedCacheMap->NeedToZero != NULL)) {

        CcFreeActiveVacb( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );
    }

    //
    //  If read ahead is enabled, then do the read ahead here so it
    //  overlaps with the copy (otherwise we will do it below).
    //  Note that we are assuming that we will not get ahead of our
    //  current transfer - if read ahead is working it should either
    //  already be in memory or else underway.
    //

    if (PrivateCacheMap->ReadAheadEnabled && (PrivateCacheMap->ReadAheadLength[1] == 0)) {
        CcScheduleReadAhead( FileObject, FileOffset, Length );
    }

    //
    //  Increment performance counters
    //

    CcMdlReadWait += 1;

    //
    //  This is not an exact solution, but when IoPageRead gets a miss,
    //  it cannot tell whether it was CcCopyRead or CcMdlRead, but since
    //  the miss should occur very soon, by loading the pointer here
    //  probably the right counter will get incremented, and in any case,
    //  we hope the errors average out!
    //

    CcMissCounter = &CcMdlReadWaitMiss;

    FOffset = *FileOffset;

    //
    //  Check for read past file size, the caller must filter this case out.
    //

    ASSERT( ( FOffset.QuadPart + (LONGLONG)Length ) <= SharedCacheMap->FileSize.QuadPart );

    //
    //  Put try-finally around the loop to deal with any exceptions
    //

    try {

        //
        //  Not all of the transfer will come back at once, so we have to loop
        //  until the entire transfer is complete.
        //

        while (Length != 0) {

            ULONG ReceivedLength;
            LARGE_INTEGER BeyondLastByte;

            //
            //  Map the data and read it in (if necessary) with the
            //  MmProbeAndLockPages call below.
            //

            CacheBuffer = CcGetVirtualAddress( SharedCacheMap,
                                               FOffset,
                                               &Vacb,
                                               &ReceivedLength );

            if (ReceivedLength > Length) {
                ReceivedLength = Length;
            }

            BeyondLastByte.QuadPart = FOffset.QuadPart + (LONGLONG)ReceivedLength;

            //
            //  Now attempt to allocate an Mdl to describe the mapped data.
            //

            DebugTrace( 0, mm, "IoAllocateMdl:\n", 0 );
            DebugTrace( 0, mm, "    BaseAddress = %08lx\n", CacheBuffer );
            DebugTrace( 0, mm, "    Length = %08lx\n", ReceivedLength );

            Mdl = IoAllocateMdl( CacheBuffer,
                                 ReceivedLength,
                                 FALSE,
                                 FALSE,
                                 NULL );

            DebugTrace( 0, mm, "    <Mdl = %08lx\n", Mdl );

            if (Mdl == NULL) {
                DebugTrace( 0, 0, "Failed to allocate Mdl\n", 0 );

                ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
            }

            DebugTrace( 0, mm, "MmProbeAndLockPages:\n", 0 );
            DebugTrace( 0, mm, "    Mdl = %08lx\n", Mdl );

            //
            //  Set to see if the miss counter changes in order to
            //  detect when we should turn on read ahead.
            //

            SavedMissCounter += CcMdlReadWaitMiss;

            MmSetPageFaultReadAhead( Thread, COMPUTE_PAGES_SPANNED( CacheBuffer, ReceivedLength ) - 1);
            MmProbeAndLockPages( Mdl, KernelMode, IoReadAccess );

            SavedMissCounter -= CcMdlReadWaitMiss;

            //
            //  Unmap the data now, now that the pages are locked down.
            //

            CcFreeVirtualAddress( Vacb );
            Vacb = NULL;

            //
            //  Now link the Mdl into the caller's chain
            //
            //  NOTE(review): CONTAINING_RECORD( *MdlChain, MDL, Next ) is
            //  equivalent to *MdlChain only if Next is the first member of
            //  MDL - confirm against the MDL declaration.
            //

            if ( *MdlChain == NULL ) {
                *MdlChain = Mdl;
            } else {
                MdlTemp = CONTAINING_RECORD( *MdlChain, MDL, Next );
                while (MdlTemp->Next != NULL) {
                    MdlTemp = MdlTemp->Next;
                }
                MdlTemp->Next = Mdl;
            }
            Mdl = NULL;

            //
            //  Assume we did not get all the data we wanted, and set FOffset
            //  to the end of the returned data.
            //

            FOffset = BeyondLastByte;

            //
            //  Update number of bytes transferred.
            //

            Information += ReceivedLength;

            //
            //  Calculate length left to transfer.
            //

            Length -= ReceivedLength;
        }
    }
    finally {

        CcMissCounter = &CcThrowAway;

        //
        //  Restore the readahead hints.
        //

        MmResetPageFaultReadAhead( Thread, SavedState );

        if (AbnormalTermination()) {

            //
            //  We may have failed to allocate an Mdl while still having
            //  data mapped.
            //

            if (Vacb != NULL) {
                CcFreeVirtualAddress( Vacb );
            }

            if (Mdl != NULL) {
                IoFreeMdl( Mdl );
            }

            //
            //  Otherwise loop to deallocate the Mdls
            //

            while (*MdlChain != NULL) {
                MdlTemp = (*MdlChain)->Next;

                DebugTrace( 0, mm, "MmUnlockPages/IoFreeMdl:\n", 0 );
                DebugTrace( 0, mm, "    Mdl = %08lx\n", *MdlChain );

                MmUnlockPages( *MdlChain );
                IoFreeMdl( *MdlChain );

                *MdlChain = MdlTemp;
            }

            DebugTrace(-1, me, "CcMdlRead -> Unwinding\n", 0 );

        }
        else {

            //
            //  Now enable read ahead if it looks like we got any misses, and do
            //  the first one.
            //

            if (!FlagOn( FileObject->Flags, FO_RANDOM_ACCESS ) &&
                !PrivateCacheMap->ReadAheadEnabled &&
                (SavedMissCounter != 0)) {

                PrivateCacheMap->ReadAheadEnabled = TRUE;
                CcScheduleReadAhead( FileObject, FileOffset, OriginalLength );
            }

            //
            //  Now that we have described our desired read ahead, let's
            //  shift the read history down.
            //

            PrivateCacheMap->FileOffset1 = PrivateCacheMap->FileOffset2;
            PrivateCacheMap->BeyondLastByte1 = PrivateCacheMap->BeyondLastByte2;
            PrivateCacheMap->FileOffset2 = *FileOffset;
            PrivateCacheMap->BeyondLastByte2.QuadPart =
                FileOffset->QuadPart + (LONGLONG)OriginalLength;

            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = Information;
        }
    }

    DebugTrace( 0, me, "    <MdlChain = %08lx\n", *MdlChain );
    DebugTrace2(0, me, "    <IoStatus = %08lx, %08lx\n", IoStatus->Status,
                IoStatus->Information );
    DebugTrace(-1, me, "CcMdlRead -> VOID\n", 0 );

    return;
}

//
//  CcMdlReadComplete - definition at line 381 of file mdlsup.c.
//  Referenced by UdfCompleteMdl.
//

NTKERNELAPI
VOID
CcMdlReadComplete (
    IN PFILE_OBJECT FileObject,
    IN PMDL MdlChain
    )

/*++

Routine Description:

    Completes an Mdl read previously begun with CcMdlRead.  The completion
    is first offered to the file system's fast-I/O MdlReadComplete handler,
    if one is registered; only when the fast path declines (or none exists)
    does the common worker CcMdlReadComplete2 run.

Arguments:

    FileObject - File object for which CcMdlRead was called.

    MdlChain - Mdl chain returned by the corresponding CcMdlRead.

Return Value:

    None

--*/

{
    PDEVICE_OBJECT RelatedDevice;
    PFAST_IO_DISPATCH Dispatch;
    BOOLEAN CompletedByFastIo = FALSE;

    RelatedDevice = IoGetRelatedDeviceObject( FileObject );
    Dispatch = RelatedDevice->DriverObject->FastIoDispatch;

    //
    //  Only consult the MdlReadComplete entry if the dispatch table is
    //  present and large enough to contain it (the size test is against
    //  the offset of MdlWriteComplete - presumably a later member of
    //  FAST_IO_DISPATCH; confirm against the structure declaration).
    //

    if ((Dispatch != NULL) &&
        (Dispatch->SizeOfFastIoDispatch > FIELD_OFFSET(FAST_IO_DISPATCH, MdlWriteComplete)) &&
        (Dispatch->MdlReadComplete != NULL)) {

        CompletedByFastIo = Dispatch->MdlReadComplete( FileObject, MdlChain, RelatedDevice );
    }

    //
    //  Fall back to the common completion path.
    //

    if (!CompletedByFastIo) {
        CcMdlReadComplete2( FileObject, MdlChain );
    }
}

//
//  CcMdlReadComplete2 - definition at line 406 of file mdlsup.c.
//  Referenced by CcMdlReadComplete and FsRtlMdlReadCompleteDev.
//

NTKERNELAPI
VOID
CcMdlReadComplete2 (
    IN PFILE_OBJECT FileObject,
    IN PMDL MdlChain
    )

/*++

Routine Description:

    This routine must be called at IPL0 after a call to CcMdlRead.  The
    caller simply supplies the MdlChain returned by CcMdlRead; every Mdl
    in the chain has its pages unlocked and is then freed.

Arguments:

    FileObject - Pointer to the file object for a file which was
                 opened with NO_INTERMEDIATE_BUFFERING clear, i.e., for
                 which CcInitializeCacheMap was called by the file system.

    MdlChain - same as returned from corresponding call to CcMdlRead.

Return Value:

    None.

--*/

{
    PMDL Current;
    PMDL Successor;

    DebugTrace(+1, me, "CcMdlReadComplete\n", 0 );
    DebugTrace( 0, me, "    FileObject = %08lx\n", FileObject );
    DebugTrace( 0, me, "    MdlChain = %08lx\n", MdlChain );

    //
    //  Walk the chain, capturing each successor before the node is freed.
    //

    for (Current = MdlChain; Current != NULL; Current = Successor) {

        Successor = Current->Next;

        DebugTrace( 0, mm, "MmUnlockPages/IoFreeMdl:\n", 0 );
        DebugTrace( 0, mm, "    Mdl = %08lx\n", Current );

        MmUnlockPages( Current );

        IoFreeMdl( Current );
    }

    DebugTrace(-1, me, "CcMdlReadComplete -> VOID\n", 0 );
}

//
//  CcMdlWriteComplete - definition at line 812 of file mdlsup.c.
//

NTKERNELAPI
VOID
CcMdlWriteComplete (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN PMDL MdlChain
    )

/*++

Routine Description:

    Completes an Mdl write previously prepared with CcPrepareMdlWrite.
    The completion is first offered to the file system's fast-I/O
    MdlWriteComplete handler, if one is registered; only when the fast
    path declines (or none exists) does the common worker
    CcMdlWriteComplete2 run.

Arguments:

    FileObject - File object for which the Mdl write was prepared.

    FileOffset - Original file offset of the transfer.

    MdlChain - Mdl chain returned by the corresponding CcPrepareMdlWrite.

Return Value:

    None

--*/

{
    PDEVICE_OBJECT RelatedDevice;
    PFAST_IO_DISPATCH Dispatch;
    BOOLEAN CompletedByFastIo = FALSE;

    RelatedDevice = IoGetRelatedDeviceObject( FileObject );
    Dispatch = RelatedDevice->DriverObject->FastIoDispatch;

    //
    //  Only consult the MdlWriteComplete entry if the dispatch table is
    //  present and large enough to contain it.
    //

    if ((Dispatch != NULL) &&
        (Dispatch->SizeOfFastIoDispatch > FIELD_OFFSET(FAST_IO_DISPATCH, MdlWriteComplete)) &&
        (Dispatch->MdlWriteComplete != NULL)) {

        CompletedByFastIo = Dispatch->MdlWriteComplete( FileObject, FileOffset, MdlChain, RelatedDevice );
    }

    //
    //  Fall back to the common completion path.
    //

    if (!CompletedByFastIo) {
        CcMdlWriteComplete2( FileObject, FileOffset, MdlChain );
    }
}

//
//  CcMdlWriteComplete2 - definition at line 838 of file mdlsup.c.
//  Referenced by CcMdlWriteComplete and FsRtlMdlWriteCompleteDev.
//

NTKERNELAPI
VOID
CcMdlWriteComplete2 (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN PMDL MdlChain
    )

/*++

Routine Description:

    This routine must be called at IPL0 after a call to CcPrepareMdlWrite.
    The caller supplies the ActualLength of data that it actually wrote
    into the buffer, which may be less than or equal to the Length specified
    in CcPrepareMdlWrite.

    This call does the following:

        Makes sure the data up to ActualLength eventually gets written.
        If WriteThrough is FALSE, the data will not be written immediately.
        If WriteThrough is TRUE, then the data is written synchronously.

        Unmaps the pages (if mapped), unlocks them and deletes the MdlChain

Arguments:

    FileObject - Pointer to the file object for a file which was
                 opened with NO_INTERMEDIATE_BUFFERING clear, i.e., for
                 which CcInitializeCacheMap was called by the file system.

    FileOffset - Original file offset read above.

    MdlChain - same as returned from corresponding call to CcPrepareMdlWrite.

Return Value:

    None

Raises:

    An I/O error status (normalized via FsRtlNormalizeNtstatus) if a
    write-through flush failed.

--*/

{
    PMDL MdlNext;
    PSHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER FOffset;
    IO_STATUS_BLOCK IoStatus;
    KIRQL OldIrql;
    NTSTATUS StatusToRaise = STATUS_SUCCESS;

    DebugTrace(+1, me, "CcMdlWriteComplete\n", 0 );
    DebugTrace( 0, me, "    FileObject = %08lx\n", FileObject );
    DebugTrace( 0, me, "    MdlChain = %08lx\n", MdlChain );

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    //
    //  Deallocate the Mdls
    //

    FOffset.QuadPart = *(LONGLONG UNALIGNED *)FileOffset;
    while (MdlChain != NULL) {

        MdlNext = MdlChain->Next;

        DebugTrace( 0, mm, "MmUnlockPages/IoFreeMdl:\n", 0 );
        DebugTrace( 0, mm, "    Mdl = %08lx\n", MdlChain );

        //
        //  Now clear the dirty bits in the Pte and set them in the
        //  Pfn.
        //

        MmUnlockPages( MdlChain );

        //
        //  Extract the File Offset for this part of the transfer, and
        //  either flush synchronously (write-through) or mark the range
        //  dirty for the lazy writer.
        //

        if (FlagOn(FileObject->Flags, FO_WRITE_THROUGH)) {

            MmFlushSection ( FileObject->SectionObjectPointer,
                             &FOffset,
                             MdlChain->ByteCount,
                             &IoStatus,
                             TRUE );

            //
            //  If we got an I/O error, remember it.
            //

            if (!NT_SUCCESS(IoStatus.Status)) {
                StatusToRaise = IoStatus.Status;
            }

        } else {

            //
            //  Ignore the only exception (allocation error), and console
            //  ourselves for having tried.
            //

            CcSetDirtyInMask( SharedCacheMap, &FOffset, MdlChain->ByteCount );
        }

        FOffset.QuadPart = FOffset.QuadPart + (LONGLONG)(MdlChain->ByteCount);

        IoFreeMdl( MdlChain );

        MdlChain = MdlNext;
    }

    //
    //  Now release our open count.
    //

    CcAcquireMasterLock( &OldIrql );

    CcDecrementOpenCount( SharedCacheMap, 'ldmC' );

    if ((SharedCacheMap->OpenCount == 0) &&
        !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) &&
        (SharedCacheMap->DirtyPages == 0)) {

        //
        //  Move to the dirty list.
        //

        RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );
        InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks,
                        &SharedCacheMap->SharedCacheMapLinks );

        //
        //  Make sure the Lazy Writer will wake up, because we
        //  want him to delete this SharedCacheMap.
        //

        LazyWriter.OtherWork = TRUE;
        if (!LazyWriter.ScanActive) {
            CcScheduleLazyWriteScan();
        }
    }

    CcReleaseMasterLock( OldIrql );

    //
    //  If we got an I/O error, raise it now.
    //
    //  FIX: the original code called FsRtlNormalizeNtstatus and discarded
    //  its result, so a write-through flush failure was silently dropped
    //  even though this VOID routine has no other way to report it.  Raise
    //  the normalized status, matching the error convention used by the
    //  other Mdl routines (e.g. CcMdlRead).
    //

    if (!NT_SUCCESS(StatusToRaise)) {
        ExRaiseStatus( FsRtlNormalizeNtstatus( StatusToRaise,
                                               STATUS_UNEXPECTED_IO_ERROR ));
    }

    DebugTrace(-1, me, "CcMdlWriteComplete -> TRUE\n", 0 );

    return;
}

// Doxygen: definition at line 271 of pinsup.c.

NTKERNELAPI
BOOLEAN
CcPinMappedData (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN ULONG Flags,
    IN OUT PVOID *Bcb
    )

/*++

Routine Description:

    This routine attempts to pin data that was previously only mapped.
    If the routine determines that in fact it was necessary to actually
    pin the data when CcMapData was called, then this routine does not
    have to do anything.

    If the caller does not want to block on this call, then Wait should
    be supplied as FALSE.  If Wait was supplied as FALSE and it is
    currently impossible to supply the requested data without blocking,
    then this routine will return FALSE.  However, if the data is
    immediately accessible in the cache and no blocking is required,
    this routine returns TRUE with a pointer to the data.

    If the data is not returned in the first call, the caller may request
    the data later with Wait = TRUE.  It is not required that the caller
    request the data later.

    If the caller subsequently modifies the data, it should call
    CcSetDirtyPinnedData.

    In any case, the caller MUST subsequently call CcUnpinData.
    Naturally if CcPinRead or CcPreparePinWrite were called multiple
    times for the same data, CcUnpinData must be called the same number
    of times.

    Note there are no performance counters in this routine, as the misses
    will almost always occur on the map above, and there will seldom be a
    miss on this conversion.

Arguments:

    FileObject - Pointer to the file object for a file which was opened
        with NO_INTERMEDIATE_BUFFERING clear, i.e., for which
        CcInitializeCacheMap was called by the file system.

    FileOffset - Byte offset in file for desired data.

    Length - Length of desired data in bytes.

    Flags - (PIN_WAIT, PIN_EXCLUSIVE, PIN_NO_READ, etc. as defined in
        cache.h)  If the caller specifies PIN_NO_READ and PIN_EXCLUSIVE,
        then he must guarantee that no one else will be attempting to map
        the view, if he wants to guarantee that the Bcb is not mapped
        (view may be purged).  If the caller specifies PIN_NO_READ without
        PIN_EXCLUSIVE, the data may or may not be mapped in the return Bcb.

    Bcb - On the first call this returns a pointer to a Bcb parameter
        which must be supplied as input on all subsequent calls, for
        this buffer.

Return Value:

    FALSE - if Wait was not set and the data was not delivered

    TRUE - if the data is being delivered

--*/

{
    PVOID Buffer;
    LARGE_INTEGER BeyondLastByte;
    PSHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER LocalFileOffset = *FileOffset;
    POBCB MyBcb = NULL;
    PBCB *CurrentBcbPtr = (PBCB *)&MyBcb;
    BOOLEAN Result = FALSE;

    DebugTrace(+1, me, "CcPinMappedData\n", 0 );

    //
    //  If the Bcb is no longer ReadOnly, then just return.
    //

    if ((*(PULONG)Bcb & 1) == 0) {
        return TRUE;
    }

    //
    //  Remove the Read Only flag.
    //

    *(PCHAR *)Bcb -= 1;

    //
    //  Get pointer to SharedCacheMap.
    //

    SharedCacheMap = *(PSHARED_CACHE_MAP *)((PCHAR)FileObject->SectionObjectPointer
                                            + sizeof(PVOID));

    //
    //  We only count the calls to this routine, since they are almost
    //  guaranteed to be hits.
    //

    CcPinMappedDataCount += 1;

    //
    //  Guarantee we will put the flag back if required.
    //

    try {

        if (((PBCB)*Bcb)->NodeTypeCode != CACHE_NTC_BCB) {

            //
            //  Form loop to handle occasional overlapped Bcb case.
            //

            do {

                //
                //  If we have already been through the loop, then adjust
                //  our file offset and length from the last time.
                //

                if (MyBcb != NULL) {

                    //
                    //  If this is the second time through the loop, then it
                    //  is time to handle the overlap case and allocate an OBCB.
                    //

                    if (CurrentBcbPtr == (PBCB *)&MyBcb) {

                        MyBcb = CcAllocateObcb( FileOffset, Length, (PBCB)MyBcb );

                        //
                        //  Set CurrentBcbPtr to point at the first entry in
                        //  the vector (which is already filled in), before
                        //  advancing it below.
                        //

                        CurrentBcbPtr = &MyBcb->Bcbs[0];
                    }

                    Length -= (ULONG)(BeyondLastByte.QuadPart - LocalFileOffset.QuadPart);
                    LocalFileOffset.QuadPart = BeyondLastByte.QuadPart;
                    CurrentBcbPtr += 1;
                }

                //
                //  Call local routine to Map or Access the file data.  If we
                //  cannot map the data because of a Wait condition, return FALSE.
                //

                if (!CcPinFileData( FileObject,
                                    &LocalFileOffset,
                                    Length,
                                    (BOOLEAN)!FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED),
                                    FALSE,
                                    Flags,
                                    CurrentBcbPtr,
                                    &Buffer,
                                    &BeyondLastByte )) {

                    try_return( Result = FALSE );
                }

                //
                //  Continue looping if we did not get everything.
                //

            } while ((BeyondLastByte.QuadPart - LocalFileOffset.QuadPart) < Length);

            //
            //  Free the Vacb before going on.
            //

            CcFreeVirtualAddress( (PVACB)*Bcb );

            *Bcb = MyBcb;

            //
            //  Debug routines used to insert and remove Bcbs from the
            //  global list.
            //

#if LIST_DBG
            {
                KIRQL OldIrql;
                PBCB BcbTemp = (PBCB)*Bcb;

                ExAcquireSpinLock( &CcBcbSpinLock, &OldIrql );

                if (BcbTemp->CcBcbLinks.Flink == NULL) {

                    InsertTailList( &CcBcbList, &BcbTemp->CcBcbLinks );
                    CcBcbCount += 1;
                    ExReleaseSpinLock( &CcBcbSpinLock, OldIrql );
                    SetCallersAddress( BcbTemp );

                } else {
                    ExReleaseSpinLock( &CcBcbSpinLock, OldIrql );
                }
            }
#endif
        }

        //
        //  If he really has a Bcb, all we have to do is acquire it shared
        //  since he is no longer ReadOnly.
        //

        else {

            if (!ExAcquireSharedStarveExclusive( &((PBCB)*Bcb)->Resource,
                                                 BooleanFlagOn(Flags, PIN_WAIT) )) {

                try_return( Result = FALSE );
            }
        }

        Result = TRUE;

    try_exit: NOTHING;
    }
    finally {

        if (!Result) {

            //
            //  Put the Read Only flag back.
            //

            *(PCHAR *)Bcb += 1;

            //
            //  We may have gotten partway through.
            //

            if (MyBcb != NULL) {
                CcUnpinData( MyBcb );
            }
        }

        DebugTrace(-1, me, "CcPinMappedData -> %02lx\n", Result );
    }

    return Result;
}

// Doxygen: definition at line 524 of pinsup.c.  Referenced by LfsPinOrMapData().

NTKERNELAPI
BOOLEAN
CcPinRead (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN ULONG Flags,
    OUT PVOID *Bcb,
    OUT PVOID *Buffer
    )

/*++

Routine Description:

    This routine attempts to pin the specified file data in the cache.
    A pointer is returned to the desired data in the cache.  This routine
    is intended for File System support and is not intended to be called
    from Dpc level.

    If the caller does not want to block on this call, then Wait should
    be supplied as FALSE.  If Wait was supplied as FALSE and it is
    currently impossible to supply the requested data without blocking,
    then this routine will return FALSE.  However, if the data is
    immediately accessible in the cache and no blocking is required,
    this routine returns TRUE with a pointer to the data.

    If the data is not returned in the first call, the caller may request
    the data later with Wait = TRUE.  It is not required that the caller
    request the data later.

    If the caller subsequently modifies the data, it should call
    CcSetDirtyPinnedData.

    In any case, the caller MUST subsequently call CcUnpinData.
    Naturally if CcPinRead or CcPreparePinWrite were called multiple
    times for the same data, CcUnpinData must be called the same number
    of times.

    The returned Buffer pointer is valid until the data is unpinned, at
    which point it is invalid to use the pointer further.

Arguments:

    FileObject - Pointer to the file object for a file which was opened
        with NO_INTERMEDIATE_BUFFERING clear, i.e., for which
        CcInitializeCacheMap was called by the file system.

    FileOffset - Byte offset in file for desired data.

    Length - Length of desired data in bytes.

    Flags - (PIN_WAIT, PIN_EXCLUSIVE, PIN_NO_READ, etc. as defined in
        cache.h)  If the caller specifies PIN_NO_READ and PIN_EXCLUSIVE,
        then he must guarantee that no one else will be attempting to map
        the view, if he wants to guarantee that the Bcb is not mapped
        (view may be purged).  If the caller specifies PIN_NO_READ without
        PIN_EXCLUSIVE, the data may or may not be mapped in the return Bcb.

    Bcb - On the first call this returns a pointer to a Bcb parameter
        which must be supplied as input on all subsequent calls, for
        this buffer.

    Buffer - Returns pointer to desired data, valid until the buffer is
        unpinned or freed.

Return Value:

    FALSE - if Wait was not set and the data was not delivered

    TRUE - if the data is being delivered

--*/

{
    PSHARED_CACHE_MAP SharedCacheMap;
    PVOID LocalBuffer;
    LARGE_INTEGER BeyondLastByte;
    LARGE_INTEGER LocalFileOffset = *FileOffset;
    POBCB MyBcb = NULL;
    PBCB *CurrentBcbPtr = (PBCB *)&MyBcb;
    BOOLEAN Result = FALSE;

    DebugTrace(+1, me, "CcPinRead\n", 0 );

    //
    //  Increment performance counters.
    //

    if (FlagOn(Flags, PIN_WAIT)) {

        CcPinReadWait += 1;

        //
        //  Initialize the indirect pointer to our miss counter.
        //

        CcMissCounter = &CcPinReadWaitMiss;

    } else {
        CcPinReadNoWait += 1;
    }

    //
    //  Get pointer to SharedCacheMap.
    //

    SharedCacheMap = *(PSHARED_CACHE_MAP *)((PCHAR)FileObject->SectionObjectPointer
                                            + sizeof(PVOID));

    try {

        //
        //  Form loop to handle occasional overlapped Bcb case.
        //

        do {

            //
            //  If we have already been through the loop, then adjust
            //  our file offset and length from the last time.
            //

            if (MyBcb != NULL) {

                //
                //  If this is the second time through the loop, then it is
                //  time to handle the overlap case and allocate an OBCB.
                //

                if (CurrentBcbPtr == (PBCB *)&MyBcb) {

                    MyBcb = CcAllocateObcb( FileOffset, Length, (PBCB)MyBcb );

                    //
                    //  Set CurrentBcbPtr to point at the first entry in
                    //  the vector (which is already filled in), before
                    //  advancing it below.
                    //

                    CurrentBcbPtr = &MyBcb->Bcbs[0];

                    //
                    //  Also on second time through, return starting Buffer.
                    //

                    *Buffer = LocalBuffer;
                }

                Length -= (ULONG)(BeyondLastByte.QuadPart - LocalFileOffset.QuadPart);
                LocalFileOffset.QuadPart = BeyondLastByte.QuadPart;
                CurrentBcbPtr += 1;
            }

            //
            //  Call local routine to Map or Access the file data.  If we
            //  cannot map the data because of a Wait condition, return FALSE.
            //

            if (!CcPinFileData( FileObject,
                                &LocalFileOffset,
                                Length,
                                (BOOLEAN)!FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED),
                                FALSE,
                                Flags,
                                CurrentBcbPtr,
                                &LocalBuffer,
                                &BeyondLastByte )) {

                CcPinReadNoWaitMiss += 1;

                try_return( Result = FALSE );
            }

            //
            //  Continue looping if we did not get everything.
            //

        } while ((BeyondLastByte.QuadPart - LocalFileOffset.QuadPart) < Length);

        *Bcb = MyBcb;

        //
        //  Debug routines used to insert and remove Bcbs from the
        //  global list.
        //

#if LIST_DBG
        {
            KIRQL OldIrql;
            PBCB BcbTemp = (PBCB)*Bcb;

            ExAcquireSpinLock( &CcBcbSpinLock, &OldIrql );

            if (BcbTemp->CcBcbLinks.Flink == NULL) {

                InsertTailList( &CcBcbList, &BcbTemp->CcBcbLinks );
                CcBcbCount += 1;
                ExReleaseSpinLock( &CcBcbSpinLock, OldIrql );
                SetCallersAddress( BcbTemp );

            } else {
                ExReleaseSpinLock( &CcBcbSpinLock, OldIrql );
            }
        }
#endif

        //
        //  In the normal (nonoverlapping) case we return the
        //  correct buffer address here.
        //

        if (CurrentBcbPtr == (PBCB *)&MyBcb) {
            *Buffer = LocalBuffer;
        }

        Result = TRUE;

    try_exit: NOTHING;
    }
    finally {

        CcMissCounter = &CcThrowAway;

        if (!Result) {

            //
            //  We may have gotten partway through.
            //

            if (MyBcb != NULL) {
                CcUnpinData( MyBcb );
            }
        }

        DebugTrace(-1, me, "CcPinRead -> %02lx\n", Result );
    }

    return Result;
}

// Doxygen: definition at line 466 of mdlsup.c.
// Referenced by FsRtlPrepareMdlWriteDev().
//
// NOTE(review): the Doxygen signature rendered the fourth parameter as
// "OUT PMDL MdlChain", but the body dereferences it (*MdlChain) throughout,
// so the actual parameter is OUT PMDL *MdlChain — restored here.

NTKERNELAPI
VOID
CcPrepareMdlWrite (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    OUT PMDL *MdlChain,
    OUT PIO_STATUS_BLOCK IoStatus
    )

/*++

Routine Description:

    This routine attempts to lock the specified file data in the cache
    and return a description of it in an Mdl along with the correct
    I/O status.  Pages to be completely overwritten may be satisfied
    with empty pages.  It is *not* safe to call this routine from Dpc
    level.

    This call is synchronous and raises on error.

    When this call returns, the caller may immediately begin to transfer
    data into the buffers via the Mdl.

    When the call returns with TRUE, the pages described by the Mdl are
    locked in memory, but not mapped in system space.  If the caller
    needs the pages mapped in system space, then it must map them.  On
    the subsequent call to CcMdlWriteComplete the pages will be unmapped
    if they were mapped, and in any case unlocked and the Mdl deallocated.

Arguments:

    FileObject - Pointer to the file object for a file which was opened
        with NO_INTERMEDIATE_BUFFERING clear, i.e., for which
        CcInitializeCacheMap was called by the file system.

    FileOffset - Byte offset in file for desired data.

    Length - Length of desired data in bytes.

    MdlChain - On output it returns a pointer to an Mdl chain describing
        the desired data.  Note that even if FALSE is returned, one or
        more Mdls may have been allocated, as may be ascertained by the
        IoStatus.Information field (see below).

    IoStatus - Pointer to standard I/O status block to receive the status
        for the in-transfer of the data.  (STATUS_SUCCESS guaranteed for
        cache hits, otherwise the actual I/O status is returned.)  The
        I/O Information Field indicates how many bytes have been
        successfully locked down in the Mdl Chain.

Return Value:

    None

--*/

{
    PSHARED_CACHE_MAP SharedCacheMap;
    PVOID CacheBuffer;
    LARGE_INTEGER FOffset;
    PMDL Mdl = NULL;
    PMDL MdlTemp;
    LARGE_INTEGER Temp;
    ULONG SavedState = 0;
    ULONG ZeroFlags = 0;
    ULONG Information = 0;

    KIRQL OldIrql;
    ULONG ActivePage;
    ULONG PageIsDirty;
    PVACB Vacb = NULL;

    DebugTrace(+1, me, "CcPrepareMdlWrite\n", 0 );
    DebugTrace( 0, me, "    FileObject = %08lx\n", FileObject );
    DebugTrace2(0, me, "    FileOffset = %08lx, %08lx\n", FileOffset->LowPart,
                FileOffset->HighPart );
    DebugTrace( 0, me, "    Length = %08lx\n", Length );

    //
    //  Get pointer to SharedCacheMap.
    //

    SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

    //
    //  See if we have an active Vacb, that we need to free.
    //

    GetActiveVacb( SharedCacheMap, OldIrql, Vacb, ActivePage, PageIsDirty );

    //
    //  If there is an end of a page to be zeroed, then free that page now,
    //  so it does not cause our data to get zeroed.  If there is an active
    //  page, free it so we have the correct ValidDataGoal.
    //

    if ((Vacb != NULL) || (SharedCacheMap->NeedToZero != NULL)) {

        CcFreeActiveVacb( SharedCacheMap, Vacb, ActivePage, PageIsDirty );
        Vacb = NULL;
    }

    FOffset = *FileOffset;

    //
    //  Put try-finally around the loop to deal with exceptions.
    //

    try {

        //
        //  Not all of the transfer will come back at once, so we have to
        //  loop until the entire transfer is complete.
        //

        while (Length != 0) {

            ULONG ReceivedLength;
            LARGE_INTEGER BeyondLastByte;

            //
            //  Map and see how much we could potentially access at this
            //  FileOffset, then cut it down if it is more than we need.
            //

            CacheBuffer = CcGetVirtualAddress( SharedCacheMap,
                                               FOffset,
                                               &Vacb,
                                               &ReceivedLength );

            if (ReceivedLength > Length) {
                ReceivedLength = Length;
            }

            BeyondLastByte.QuadPart = FOffset.QuadPart + (LONGLONG)ReceivedLength;

            //
            //  At this point we can calculate the ZeroFlags.
            //

            //
            //  We can always zero middle pages, if any.
            //

            ZeroFlags = ZERO_MIDDLE_PAGES;

            //
            //  See if we are completely overwriting the first or last page.
            //

            if (((FOffset.LowPart & (PAGE_SIZE - 1)) == 0) &&
                (ReceivedLength >= PAGE_SIZE)) {
                ZeroFlags |= ZERO_FIRST_PAGE;
            }

            if ((BeyondLastByte.LowPart & (PAGE_SIZE - 1)) == 0) {
                ZeroFlags |= ZERO_LAST_PAGE;
            }

            //
            //  See if the entire transfer is beyond valid data length,
            //  or at least starting from the second page.
            //

            Temp = FOffset;
            Temp.LowPart &= ~(PAGE_SIZE - 1);
            ExAcquireFastLock( &SharedCacheMap->BcbSpinLock, &OldIrql );
            Temp.QuadPart = SharedCacheMap->ValidDataGoal.QuadPart - Temp.QuadPart;
            ExReleaseFastLock( &SharedCacheMap->BcbSpinLock, OldIrql );

            if (Temp.QuadPart <= 0) {
                ZeroFlags |= ZERO_FIRST_PAGE | ZERO_MIDDLE_PAGES | ZERO_LAST_PAGE;
            } else if ((Temp.HighPart == 0) && (Temp.LowPart <= PAGE_SIZE)) {
                ZeroFlags |= ZERO_MIDDLE_PAGES | ZERO_LAST_PAGE;
            }

            (VOID)CcMapAndRead( SharedCacheMap,
                                &FOffset,
                                ReceivedLength,
                                ZeroFlags,
                                TRUE,
                                CacheBuffer );

            //
            //  Now attempt to allocate an Mdl to describe the mapped data.
            //

            DebugTrace( 0, mm, "IoAllocateMdl:\n", 0 );
            DebugTrace( 0, mm, "    BaseAddress = %08lx\n", CacheBuffer );
            DebugTrace( 0, mm, "    Length = %08lx\n", ReceivedLength );

            Mdl = IoAllocateMdl( CacheBuffer,
                                 ReceivedLength,
                                 FALSE,
                                 FALSE,
                                 NULL );

            DebugTrace( 0, mm, "    <Mdl = %08lx\n", Mdl );

            if (Mdl == NULL) {
                DebugTrace( 0, 0, "Failed to allocate Mdl\n", 0 );

                ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
            }

            DebugTrace( 0, mm, "MmProbeAndLockPages:\n", 0 );
            DebugTrace( 0, mm, "    Mdl = %08lx\n", Mdl );

            MmDisablePageFaultClustering(&SavedState);
            MmProbeAndLockPages( Mdl, KernelMode, IoWriteAccess );
            MmEnablePageFaultClustering(SavedState);
            SavedState = 0;

            //
            //  Now that some data (maybe zeros) is locked in memory and
            //  set dirty, it is safe, and necessary for us to advance
            //  valid data goal, so that we will not subsequently ask
            //  for a zero page.  Note if we are extending valid data,
            //  our caller has the file exclusive.
            //

            ExAcquireFastLock( &SharedCacheMap->BcbSpinLock, &OldIrql );
            if (BeyondLastByte.QuadPart > SharedCacheMap->ValidDataGoal.QuadPart) {
                SharedCacheMap->ValidDataGoal = BeyondLastByte;
            }
            ExReleaseFastLock( &SharedCacheMap->BcbSpinLock, OldIrql );

            //
            //  Unmap the data now, now that the pages are locked down.
            //

            CcFreeVirtualAddress( Vacb );
            Vacb = NULL;

            //
            //  Now link the Mdl into the caller's chain.
            //

            if ( *MdlChain == NULL ) {
                *MdlChain = Mdl;
            } else {
                MdlTemp = CONTAINING_RECORD( *MdlChain, MDL, Next );
                while (MdlTemp->Next != NULL) {
                    MdlTemp = MdlTemp->Next;
                }
                MdlTemp->Next = Mdl;
            }
            Mdl = NULL;

            //
            //  Assume we did not get all the data we wanted, and set FOffset
            //  to the end of the returned data.
            //

            FOffset = BeyondLastByte;

            //
            //  Update number of bytes transferred.
            //

            Information += ReceivedLength;

            //
            //  Calculate length left to transfer.
            //

            Length -= ReceivedLength;
        }
    }
    finally {

        if (AbnormalTermination()) {

            if (SavedState != 0) {
                MmEnablePageFaultClustering(SavedState);
            }

            if (Vacb != NULL) {
                CcFreeVirtualAddress( Vacb );
            }

            if (Mdl != NULL) {
                IoFreeMdl( Mdl );
            }

            //
            //  Otherwise loop to deallocate the Mdls.
            //

            FOffset = *FileOffset;
            while (*MdlChain != NULL) {
                MdlTemp = (*MdlChain)->Next;

                DebugTrace( 0, mm, "MmUnlockPages/IoFreeMdl:\n", 0 );
                DebugTrace( 0, mm, "    Mdl = %08lx\n", *MdlChain );

                MmUnlockPages( *MdlChain );

                //
                //  Extract the File Offset for this part of the transfer, and
                //  tell the lazy writer to write these pages, since we have
                //  marked them dirty.  Ignore the only exception (allocation
                //  error), and console ourselves for having tried.
                //

                CcSetDirtyInMask( SharedCacheMap, &FOffset, (*MdlChain)->ByteCount );

                FOffset.QuadPart = FOffset.QuadPart + (LONGLONG)((*MdlChain)->ByteCount);

                IoFreeMdl( *MdlChain );

                *MdlChain = MdlTemp;
            }

            DebugTrace(-1, me, "CcPrepareMdlWrite -> Unwinding\n", 0 );
        }
        else {

            IoStatus->Status = STATUS_SUCCESS;
            IoStatus->Information = Information;

            //
            //  Make sure the SharedCacheMap does not go away while
            //  the Mdl write is in progress.  We decrement below.
            //

            CcAcquireMasterLock( &OldIrql );
            CcIncrementOpenCount( SharedCacheMap, 'ldmP' );
            CcReleaseMasterLock( OldIrql );
        }
    }

    DebugTrace( 0, me, "    <MdlChain = %08lx\n", *MdlChain );
    DebugTrace(-1, me, "CcPrepareMdlWrite -> VOID\n", 0 );

    return;
}

// Doxygen: definition at line 767 of pinsup.c.

NTKERNELAPI
BOOLEAN
CcPreparePinWrite (
    IN PFILE_OBJECT FileObject,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN BOOLEAN Zero,
    IN ULONG Flags,
    OUT PVOID *Bcb,
    OUT PVOID *Buffer
    )

/*++

Routine Description:

    This routine attempts to lock the specified file data in the cache
    and return a pointer to it along with the correct I/O status.  Pages
    to be completely overwritten may be satisfied with empty pages.

    If not all of the pages can be prepared, and Wait was supplied as
    FALSE, then this routine will return FALSE, and its outputs will
    be meaningless.  The caller may request the data later with
    Wait = TRUE.  However, it is not required that the caller request
    the data later.

    If Wait is supplied as TRUE, and all of the pages can be prepared
    without blocking, this call will return TRUE immediately.  Otherwise,
    this call will block until all of the pages can be prepared, and
    then return TRUE.

    When this call returns with TRUE, the caller may immediately begin
    to transfer data into the buffers via the Buffer pointer.  The
    buffer will already be marked dirty.

    The caller MUST subsequently call CcUnpinData.  Naturally if
    CcPinRead or CcPreparePinWrite were called multiple times for the
    same data, CcUnpinData must be called the same number of times.

    The returned Buffer pointer is valid until the data is unpinned, at
    which point it is invalid to use the pointer further.

Arguments:

    FileObject - Pointer to the file object for a file which was opened
        with NO_INTERMEDIATE_BUFFERING clear, i.e., for which
        CcInitializeCacheMap was called by the file system.

    FileOffset - Byte offset in file for desired data.

    Length - Length of desired data in bytes.

    Zero - If supplied as TRUE, the buffer will be zeroed on return.

    Flags - (PIN_WAIT, PIN_EXCLUSIVE, PIN_NO_READ, etc. as defined in
        cache.h)  If the caller specifies PIN_NO_READ and PIN_EXCLUSIVE,
        then he must guarantee that no one else will be attempting to map
        the view, if he wants to guarantee that the Bcb is not mapped
        (view may be purged).  If the caller specifies PIN_NO_READ without
        PIN_EXCLUSIVE, the data may or may not be mapped in the return Bcb.

    Bcb - This returns a pointer to a Bcb parameter which must be
        supplied as input to CcPinWriteComplete.

    Buffer - Returns pointer to desired data, valid until the buffer is
        unpinned or freed.

Return Value:

    FALSE - if Wait was not set and the data was not delivered

    TRUE - if the pages are being delivered

--*/

{
    PSHARED_CACHE_MAP SharedCacheMap;
    PVOID LocalBuffer;
    LARGE_INTEGER BeyondLastByte;
    LARGE_INTEGER LocalFileOffset = *FileOffset;
    POBCB MyBcb = NULL;
    PBCB *CurrentBcbPtr = (PBCB *)&MyBcb;
    ULONG OriginalLength = Length;
    BOOLEAN Result = FALSE;

    DebugTrace(+1, me, "CcPreparePinWrite\n", 0 );

    //
    //  Get pointer to SharedCacheMap.
    //

    SharedCacheMap = *(PSHARED_CACHE_MAP *)((PCHAR)FileObject->SectionObjectPointer
                                            + sizeof(PVOID));

    try {

        //
        //  Form loop to handle occasional overlapped Bcb case.
        //

        do {

            //
            //  If we have already been through the loop, then adjust
            //  our file offset and length from the last time.
            //

            if (MyBcb != NULL) {

                //
                //  If this is the second time through the loop, then it is
                //  time to handle the overlap case and allocate an OBCB.
                //

                if (CurrentBcbPtr == (PBCB *)&MyBcb) {

                    MyBcb = CcAllocateObcb( FileOffset, Length, (PBCB)MyBcb );

                    //
                    //  Set CurrentBcbPtr to point at the first entry in
                    //  the vector (which is already filled in), before
                    //  advancing it below.
                    //

                    CurrentBcbPtr = &MyBcb->Bcbs[0];

                    //
                    //  Also on second time through, return starting Buffer.
                    //

                    *Buffer = LocalBuffer;
                }

                Length -= (ULONG)(BeyondLastByte.QuadPart - LocalFileOffset.QuadPart);
                LocalFileOffset.QuadPart = BeyondLastByte.QuadPart;
                CurrentBcbPtr += 1;
            }

            //
            //  Call local routine to Map or Access the file data.  If we
            //  cannot map the data because of a Wait condition, return FALSE.
            //

            if (!CcPinFileData( FileObject,
                                &LocalFileOffset,
                                Length,
                                FALSE,
                                TRUE,
                                Flags,
                                CurrentBcbPtr,
                                &LocalBuffer,
                                &BeyondLastByte )) {

                try_return( Result = FALSE );
            }

            //
            //  Continue looping if we did not get everything.
            //

        } while ((BeyondLastByte.QuadPart - LocalFileOffset.QuadPart) < Length);

        *Bcb = MyBcb;

        //
        //  Debug routines used to insert and remove Bcbs from the
        //  global list.
        //

#if LIST_DBG
        {
            KIRQL OldIrql;
            PBCB BcbTemp = (PBCB)*Bcb;

            ExAcquireSpinLock( &CcBcbSpinLock, &OldIrql );

            if (BcbTemp->CcBcbLinks.Flink == NULL) {

                InsertTailList( &CcBcbList, &BcbTemp->CcBcbLinks );
                CcBcbCount += 1;
                ExReleaseSpinLock( &CcBcbSpinLock, OldIrql );
                SetCallersAddress( BcbTemp );

            } else {
                ExReleaseSpinLock( &CcBcbSpinLock, OldIrql );
            }
        }
#endif

        //
        //  In the normal (nonoverlapping) case we return the
        //  correct buffer address here.
        //

        if (CurrentBcbPtr == (PBCB *)&MyBcb) {
            *Buffer = LocalBuffer;
        }

        if (Zero) {
            RtlZeroMemory( *Buffer, OriginalLength );
        }

        CcSetDirtyPinnedData( MyBcb, NULL );

        Result = TRUE;

    try_exit: NOTHING;
    }
    finally {

        CcMissCounter = &CcThrowAway;

        if (!Result) {

            //
            //  We may have gotten partway through.
            //

            if (MyBcb != NULL) {
                CcUnpinData( MyBcb );
            }
        }

        DebugTrace(-1, me, "CcPreparePinWrite -> %02lx\n", Result );
    }

    return Result;
}

NTKERNELAPI BOOLEAN CcPurgeCacheSection IN PSECTION_OBJECT_POINTERS  SectionObjectPointer,
IN PLARGE_INTEGER FileOffset  OPTIONAL,
IN ULONG  Length,
IN BOOLEAN  UninitializeCacheMaps
 

Definition at line 2331 of file fssup.c.

References ASSERT, CcAcquireMasterLock, CcCollisionDelay, CcDecrementOpenCount, CcDirtySharedCacheMapList, CcFreeActiveVacb(), CcIncrementOpenCount, CcReleaseMasterLock, CcScheduleLazyWriteScan(), CcUninitializeCacheMap(), CcUnmapVacbArray(), CcWaitOnActiveCount(), DebugTrace, DebugTrace2, _SHARED_CACHE_MAP::DirtyPages, FALSE, _PRIVATE_CACHE_MAP::FileObject, FlagOn, _SHARED_CACHE_MAP::Flags, GetActiveVacbAtDpcLevel, KeDelayExecutionThread(), KernelMode, LazyWriter, me, mm, MmCanFileBeTruncated(), MmPurgeSection(), NULL, _SHARED_CACHE_MAP::OpenCount, _LAZY_WRITER::OtherWork, PAGE_SIZE, _SHARED_CACHE_MAP::PrivateList, _LAZY_WRITER::ScanActive, _SHARED_CACHE_MAP_LIST_CURSOR::SharedCacheMapLinks, _SHARED_CACHE_MAP::SharedCacheMapLinks, TRUE, _SHARED_CACHE_MAP::Vacbs, VOID(), and WRITE_QUEUED.

Referenced by CcPurgeAndClearCacheSection(), CcSetFileSizes(), CcUninitializeCacheMap(), CcUnmapAndPurge(), CcZeroEndOfLastPage(), UdfPurgeVolume(), and UdfUpdateVcbPhase0().

02340 : 02341 02342 This routine may be called to force a purge of the cache section, 02343 even if it is cached. Note, if a user has the file mapped, then the purge 02344 will *not* take effect, and this must be considered part of normal application 02345 interaction. The purpose of purge is to throw away potentially nonzero 02346 data, so that it will be read in again and presumably zeroed. This is 02347 not really a security issue, but rather an effort to not confuse the 02348 application when it sees nonzero data. We cannot help the fact that 02349 a user-mapped view forces us to hang on to stale data. 02350 02351 This routine is intended to be called whenever previously written 02352 data is being truncated from the file, and the file is not being 02353 deleted. 02354 02355 The file must be acquired exclusive in order to call this routine. 02356 02357 Arguments: 02358 02359 SectionObjectPointer - A pointer to the Section Object Pointers 02360 structure in the nonpaged Fcb. 02361 02362 FileOffset - Offset from which file should be purged - rounded down 02363 to page boundary. If NULL, purge the entire file. 02364 02365 Length - Defines the length of the byte range to purge, starting at 02366 FileOffset. This parameter is ignored if FileOffset is 02367 specified as NULL. If FileOffset is specified and Length 02368 is 0, then purge from FileOffset to the end of the file. 02369 02370 UninitializeCacheMaps - If TRUE, we should uninitialize all the private 02371 cache maps before purging the data. 
02372 02373 ReturnValue: 02374 02375 FALSE - if the section was not successfully purged 02376 TRUE - if the section was successfully purged 02377 02378 --*/ 02379 02380 { 02381 KIRQL OldIrql; 02382 PSHARED_CACHE_MAP SharedCacheMap; 02383 PPRIVATE_CACHE_MAP PrivateCacheMap; 02384 ULONG ActivePage; 02385 ULONG PageIsDirty; 02386 BOOLEAN PurgeWorked = TRUE; 02387 PVACB Vacb = NULL; 02388 02389 DebugTrace(+1, me, "CcPurgeCacheSection:\n", 0 ); 02390 DebugTrace( 0, mm, " SectionObjectPointer = %08lx\n", SectionObjectPointer ); 02391 DebugTrace2(0, me, " FileOffset = %08lx, %08lx\n", 02392 ARGUMENT_PRESENT(FileOffset) ? FileOffset->LowPart 02393 : 0, 02394 ARGUMENT_PRESENT(FileOffset) ? FileOffset->HighPart 02395 : 0 ); 02396 DebugTrace( 0, me, " Length = %08lx\n", Length ); 02397 02398 02399 // 02400 // If you want us to uninitialize cache maps, the RtlZeroMemory paths 02401 // below depend on actually having to purge something after zeroing. 02402 // 02403 02404 ASSERT(!UninitializeCacheMaps || (Length == 0) || (Length >= PAGE_SIZE * 2)); 02405 02406 // 02407 // Serialize Creation/Deletion of all Shared CacheMaps 02408 // 02409 02410 CcAcquireMasterLock( &OldIrql ); 02411 02412 // 02413 // Get pointer to SharedCacheMap via File Object. 02414 // 02415 02416 SharedCacheMap = SectionObjectPointer->SharedCacheMap; 02417 02418 // 02419 // Increment open count to make sure the SharedCacheMap stays around, 02420 // then release the spinlock so that we can call Mm. 02421 // 02422 02423 if (SharedCacheMap != NULL) { 02424 02425 CcIncrementOpenCount( SharedCacheMap, 'scPS' ); 02426 02427 // 02428 // If there is an active Vacb, then nuke it now (before waiting!). 
02429 // 02430 02431 GetActiveVacbAtDpcLevel( SharedCacheMap, Vacb, ActivePage, PageIsDirty ); 02432 } 02433 02434 CcReleaseMasterLock( OldIrql ); 02435 02436 if (Vacb != NULL) { 02437 02438 CcFreeActiveVacb( SharedCacheMap, Vacb, ActivePage, PageIsDirty ); 02439 } 02440 02441 // 02442 // Use try-finally to insure cleanup of the Open Count and Vacb on the 02443 // way out. 02444 // 02445 02446 try { 02447 02448 // 02449 // Increment open count to make sure the SharedCacheMap stays around, 02450 // then release the spinlock so that we can call Mm. 02451 // 02452 02453 if (SharedCacheMap != NULL) { 02454 02455 // 02456 // Now loop to make sure that no one is currently caching the file. 02457 // 02458 02459 if (UninitializeCacheMaps) { 02460 02461 while (!IsListEmpty( &SharedCacheMap->PrivateList )) { 02462 02463 PrivateCacheMap = CONTAINING_RECORD( SharedCacheMap->PrivateList.Flink, 02464 PRIVATE_CACHE_MAP, 02465 PrivateLinks ); 02466 02467 CcUninitializeCacheMap( PrivateCacheMap->FileObject, NULL, NULL ); 02468 } 02469 } 02470 02471 // 02472 // Now, let's unmap and purge here. 02473 // 02474 // We still need to wait for any dangling cache read or writes. 02475 // 02476 // In fact we have to loop and wait because the lazy writer can 02477 // sneak in and do an CcGetVirtualAddressIfMapped, and we are not 02478 // synchronized. 02479 // 02480 02481 while ((SharedCacheMap->Vacbs != NULL) && 02482 !CcUnmapVacbArray( SharedCacheMap, FileOffset, Length, FALSE )) { 02483 02484 CcWaitOnActiveCount( SharedCacheMap ); 02485 } 02486 } 02487 02488 // 02489 // Purge failures are extremely rare if there are no user mapped sections. 02490 // However, it is possible that we will get one from our own mapping, if 02491 // the file is being lazy deleted from a previous open. For that case 02492 // we wait here until the purge succeeds, so that we are not left with 02493 // old user file data. 
Although Length is actually invariant in this loop, 02494 // we do need to keep checking that we are allowed to truncate in case a 02495 // user maps the file during a delay. 02496 // 02497 02498 while (!(PurgeWorked = MmPurgeSection(SectionObjectPointer, 02499 FileOffset, 02500 Length, 02501 (BOOLEAN)((SharedCacheMap !=NULL) && 02502 ARGUMENT_PRESENT(FileOffset)))) && 02503 (Length == 0) && 02504 MmCanFileBeTruncated(SectionObjectPointer, FileOffset)) { 02505 02506 (VOID)KeDelayExecutionThread( KernelMode, FALSE, &CcCollisionDelay ); 02507 } 02508 02509 } finally { 02510 02511 // 02512 // Reduce the open count on the SharedCacheMap if there was one. 02513 // 02514 02515 if (SharedCacheMap != NULL) { 02516 02517 // 02518 // Serialize again to decrement the open count. 02519 // 02520 02521 CcAcquireMasterLock( &OldIrql ); 02522 02523 CcDecrementOpenCount( SharedCacheMap, 'scPF' ); 02524 02525 if ((SharedCacheMap->OpenCount == 0) && 02526 !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) && 02527 (SharedCacheMap->DirtyPages == 0)) { 02528 02529 // 02530 // Move to the dirty list. 02531 // 02532 02533 RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks ); 02534 InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks, 02535 &SharedCacheMap->SharedCacheMapLinks ); 02536 02537 // 02538 // Make sure the Lazy Writer will wake up, because we 02539 // want him to delete this SharedCacheMap. 02540 // 02541 02542 LazyWriter.OtherWork = TRUE; 02543 if (!LazyWriter.ScanActive) { 02544 CcScheduleLazyWriteScan(); 02545 } 02546 } 02547 02548 CcReleaseMasterLock( OldIrql ); 02549 } 02550 } 02551 02552 DebugTrace(-1, me, "CcPurgeCacheSection -> %02lx\n", PurgeWorked ); 02553 02554 return PurgeWorked; 02555 }

NTKERNELAPI PVOID CcRemapBcb ( IN PVOID  Bcb  )
 

Definition at line 5024 of file cachesub.c.

References ASSERT, CACHE_NTC_BCB, CACHE_NTC_OBCB, CcAcquireVacbLock, CcBeyondVacbs, CcReleaseVacbLock, CcVacbs, and _VACB::Overlay.

05030 : 05031 05032 This routine may be called by a file system to map a Bcb an additional 05033 time in order to preserve it through several calls that perform additional 05034 maps and unpins. 05035 05036 05037 Arguments: 05038 05039 Bcb - Supplies a pointer to a previously returned Bcb. 05040 05041 Return Value: 05042 05043 Bcb with read-only indicator. 05044 05045 --*/ 05046 05047 { 05048 KIRQL OldIrql; 05049 PVACB Vacb; 05050 05051 // 05052 // Remove read-only bit 05053 // 05054 05055 Bcb = (PVOID) ((ULONG_PTR)Bcb & ~1); 05056 05057 if (((PBCB)Bcb)->NodeTypeCode == CACHE_NTC_OBCB) { 05058 05059 // 05060 // If this is an overlapped BCB, use the first Vacb in the 05061 // array 05062 // 05063 05064 Vacb = ((POBCB)Bcb)->Bcbs[0]->Vacb; 05065 05066 } else if (((PBCB)Bcb)->NodeTypeCode == CACHE_NTC_BCB) { 05067 05068 // 05069 // If this is a BCB, extract the Vcb from it 05070 // 05071 05072 Vacb = ((PBCB)Bcb)->Vacb; 05073 05074 } else { 05075 05076 // 05077 // Otherwise, there is no signature to match. Assume 05078 // it is a Vacb. 05079 // 05080 05081 Vacb = (PVACB) Bcb; 05082 } 05083 05084 ASSERT((Vacb >= CcVacbs) && (Vacb < CcBeyondVacbs)); 05085 05086 // 05087 // Safely bump the active count 05088 // 05089 05090 CcAcquireVacbLock( &OldIrql ); 05091 05092 Vacb->Overlay.ActiveCount += 1; 05093 05094 CcReleaseVacbLock( OldIrql ); 05095 05096 return (PVOID) ((ULONG_PTR)Vacb | 1); 05097 }

NTKERNELAPI VOID CcRepinBcb ( IN PVOID  Bcb  )
 

Definition at line 5101 of file cachesub.c.

05107 : 05108 05109 This routine may be called by a file system to pin a Bcb an additional 05110 time in order to reserve it for Write Through or error recovery. 05111 Typically the file system would do this the first time that it sets a 05112 pinned buffer dirty while processing a WriteThrough request, or any 05113 time that it determines that a buffer will be required for WriteThrough. 05114 05115 The call to this routine must be followed by a call to CcUnpinRepinnedBcb. 05116 CcUnpinRepinnedBcb should normally be called during request completion 05117 after all other resources have been released. CcUnpinRepinnedBcb 05118 synchronously writes the buffer (for WriteThrough requests) and performs 05119 the matching unpin for this call. 05120 05121 Arguments: 05122 05123 Bcb - Supplies a pointer to a previously pinned Bcb 05124 05125 Return Value: 05126 05127 None. 05128 05129 --*/ 05130 05131 { 05132 KIRQL OldIrql; 05133 05134 ExAcquireFastLock( &((PBCB)Bcb)->SharedCacheMap->BcbSpinLock, &OldIrql ); 05135 05136 ((PBCB)Bcb)->PinCount += 1; 05137 05138 ExReleaseFastLock( &((PBCB)Bcb)->SharedCacheMap->BcbSpinLock, OldIrql ); 05139 }

NTKERNELAPI VOID CcScheduleReadAhead ( IN PFILE_OBJECT  FileObject,
IN PLARGE_INTEGER  FileOffset,
IN ULONG  Length
 )

Definition at line 1253 of file cachesub.c.

References ASSERT, _PRIVATE_CACHE_MAP::BeyondLastByte1, _PRIVATE_CACHE_MAP::BeyondLastByte2, CcAcquireMasterLock, CcAllocateWorkQueueEntry, CcExpressWorkQueue, CcIncrementOpenCount, CcPostWorkQueue(), CcReleaseMasterLock, DebugTrace, DebugTrace2, DISABLE_READ_AHEAD, FALSE, _PRIVATE_CACHE_MAP::FileOffset1, _PRIVATE_CACHE_MAP::FileOffset2, FlagOn, _SHARED_CACHE_MAP::Flags, FO_SEQUENTIAL_ONLY, _WORK_QUEUE_ENTRY::Function, me, NOISE_BITS, NULL, ObReferenceObject, PAGE_SIZE, _WORK_QUEUE_ENTRY::Parameters, PWORK_QUEUE_ENTRY, ReadAhead, _PRIVATE_CACHE_MAP::ReadAheadActive, _PRIVATE_CACHE_MAP::ReadAheadLength, _PRIVATE_CACHE_MAP::ReadAheadMask, _PRIVATE_CACHE_MAP::ReadAheadOffset, _PRIVATE_CACHE_MAP::ReadAheadSpinLock, ROUND_TO_PAGES, and TRUE.

Referenced by CcCopyRead(), CcFastCopyRead(), and CcMdlRead().

01261 : 01262 01263 This routine is called by Copy Read and Mdl Read file system routines to 01264 perform common Read Ahead processing. The input parameters describe 01265 the current read which has just been completed, or perhaps only started 01266 in the case of Mdl Reads. Based on these parameters, an 01267 assessment is made on how much data should be read ahead, and whether 01268 that data has already been read ahead. 01269 01270 The processing is divided into two parts: 01271 01272 CALCULATE READ AHEAD REQUIREMENTS (CcScheduleReadAhead) 01273 01274 PERFORM READ AHEAD (CcPerformReadAhead) 01275 01276 File systems should always call CcReadAhead, which will conditionally 01277 call CcScheduleReadAhead (if the read is large enough). If such a call 01278 determines that there is read ahead work to do, and no read ahead is 01279 currently active, then it will set ReadAheadActive and schedule read 01280 ahead to be peformed by the Lazy Writer, who will call CcPeformReadAhead. 01281 01282 Arguments: 01283 01284 FileObject - supplies pointer to FileObject on which readahead should be 01285 considered. 01286 01287 FileOffset - supplies the FileOffset at which the last read just occurred. 01288 01289 Length - supplies the length of the last read. 
01290 01291 Return Value: 01292 01293 None 01294 --*/ 01295 01296 { 01297 LARGE_INTEGER NewOffset; 01298 LARGE_INTEGER NewBeyond; 01299 LARGE_INTEGER FileOffset1, FileOffset2; 01300 KIRQL OldIrql; 01301 PSHARED_CACHE_MAP SharedCacheMap; 01302 PPRIVATE_CACHE_MAP PrivateCacheMap; 01303 PWORK_QUEUE_ENTRY WorkQueueEntry; 01304 ULONG ReadAheadSize; 01305 BOOLEAN Changed = FALSE; 01306 01307 DebugTrace(+1, me, "CcScheduleReadAhead:\n", 0 ); 01308 DebugTrace2(0, me, " FileOffset = %08lx, %08lx\n", FileOffset->LowPart, 01309 FileOffset->HighPart ); 01310 DebugTrace( 0, me, " Length = %08lx\n", Length ); 01311 01312 SharedCacheMap = *(PSHARED_CACHE_MAP *)((PCHAR)FileObject->SectionObjectPointer 01313 + sizeof(PVOID)); 01314 PrivateCacheMap = FileObject->PrivateCacheMap; 01315 01316 if ((PrivateCacheMap == NULL) || 01317 (SharedCacheMap == NULL) || 01318 FlagOn(SharedCacheMap->Flags, DISABLE_READ_AHEAD)) { 01319 01320 DebugTrace(-1, me, "CcScheduleReadAhead -> VOID (Nooped)\n", 0 ); 01321 01322 return; 01323 } 01324 01325 // 01326 // Round boundaries of transfer up to some greater granularity, so that 01327 // sequential reads will be recognized even if a few bytes are skipped 01328 // between records. 01329 // 01330 01331 NewOffset = *FileOffset; 01332 NewBeyond.QuadPart = FileOffset->QuadPart + (LONGLONG)Length; 01333 01334 // 01335 // Find the next read ahead boundary beyond the current read. 01336 // 01337 01338 ReadAheadSize = (Length + PrivateCacheMap->ReadAheadMask) & ~PrivateCacheMap->ReadAheadMask; 01339 FileOffset2.QuadPart = NewBeyond.QuadPart + (LONGLONG)ReadAheadSize; 01340 FileOffset2.LowPart &= ~PrivateCacheMap->ReadAheadMask; 01341 01342 // 01343 // CALCULATE READ AHEAD REQUIREMENTS 01344 // 01345 01346 // 01347 // Take out the ReadAhead spinlock to synchronize our read ahead decision. 01348 // 01349 01350 ExAcquireSpinLock( &PrivateCacheMap->ReadAheadSpinLock, &OldIrql ); 01351 01352 // 01353 // Read Ahead Case 0. 
01354 // 01355 // Sequential-only hint in the file object. For this case we will 01356 // try and always keep two read ahead granularities read ahead from 01357 // and including the end of the current transfer. This case has the 01358 // lowest overhead, and the code is completely immune to how the 01359 // caller skips around. Sequential files use ReadAheadOffset[1] in 01360 // the PrivateCacheMap as their "high water mark". 01361 // 01362 01363 if (FlagOn(FileObject->Flags, FO_SEQUENTIAL_ONLY)) { 01364 01365 // 01366 // If the next boundary is greater than or equal to the high-water mark, 01367 // then read ahead. 01368 // 01369 01370 if (FileOffset2.QuadPart >= PrivateCacheMap->ReadAheadOffset[1].QuadPart) { 01371 01372 // 01373 // On the first read if we are using a large read ahead granularity, 01374 // and the read did not get it all, we will just get the rest of the 01375 // first data we want. 01376 // 01377 01378 if ((FileOffset->QuadPart == 0) 01379 01380 && 01381 01382 (PrivateCacheMap->ReadAheadMask > (PAGE_SIZE - 1)) 01383 01384 && 01385 01386 ((Length + PAGE_SIZE - 1) <= PrivateCacheMap->ReadAheadMask)) { 01387 01388 FileOffset1.QuadPart = (LONGLONG)( ROUND_TO_PAGES(Length) ); 01389 PrivateCacheMap->ReadAheadLength[0] = ReadAheadSize - FileOffset1.LowPart; 01390 FileOffset2.QuadPart = (LONGLONG)ReadAheadSize; 01391 01392 // 01393 // Calculate the next read ahead boundary. 01394 // 01395 01396 } else { 01397 01398 FileOffset1.QuadPart = PrivateCacheMap->ReadAheadOffset[1].QuadPart + 01399 (LONGLONG)ReadAheadSize; 01400 01401 // 01402 // If the end of the current read is actually beyond where we would 01403 // normally do our read ahead, then we have fallen behind, and we must 01404 // advance to that spot. 
01405 // 01406 01407 if (FileOffset2.QuadPart > FileOffset1.QuadPart) { 01408 FileOffset1 = FileOffset2; 01409 } 01410 PrivateCacheMap->ReadAheadLength[0] = ReadAheadSize; 01411 FileOffset2.QuadPart = FileOffset1.QuadPart + (LONGLONG)ReadAheadSize; 01412 } 01413 01414 // 01415 // Now issue the next two read aheads. 01416 // 01417 01418 PrivateCacheMap->ReadAheadOffset[0] = FileOffset1; 01419 01420 PrivateCacheMap->ReadAheadOffset[1] = FileOffset2; 01421 PrivateCacheMap->ReadAheadLength[1] = ReadAheadSize; 01422 01423 Changed = TRUE; 01424 } 01425 01426 // 01427 // Read Ahead Case 1. 01428 // 01429 // If this is the third of three sequential reads, then we will see if 01430 // we can read ahead. Note that if the first read to a file is to 01431 // offset 0, it passes this test. 01432 // 01433 01434 } else if ((NewOffset.HighPart == PrivateCacheMap->BeyondLastByte2.HighPart) 01435 01436 && 01437 01438 ((NewOffset.LowPart & ~NOISE_BITS) 01439 == (PrivateCacheMap->BeyondLastByte2.LowPart & ~NOISE_BITS)) 01440 01441 && 01442 01443 (PrivateCacheMap->FileOffset2.HighPart 01444 == PrivateCacheMap->BeyondLastByte1.HighPart) 01445 01446 && 01447 01448 ((PrivateCacheMap->FileOffset2.LowPart & ~NOISE_BITS) 01449 == (PrivateCacheMap->BeyondLastByte1.LowPart & ~NOISE_BITS))) { 01450 01451 // 01452 // On the first read if we are using a large read ahead granularity, 01453 // and the read did not get it all, we will just get the rest of the 01454 // first data we want. 01455 // 01456 01457 if ((FileOffset->QuadPart == 0) 01458 01459 && 01460 01461 (PrivateCacheMap->ReadAheadMask > (PAGE_SIZE - 1)) 01462 01463 && 01464 01465 ((Length + PAGE_SIZE - 1) <= PrivateCacheMap->ReadAheadMask)) { 01466 01467 FileOffset2.QuadPart = (LONGLONG)( ROUND_TO_PAGES(Length) ); 01468 } 01469 01470 // 01471 // Round read offset to next read ahead boundary. 
01472 // 01473 01474 else { 01475 FileOffset2.QuadPart = NewBeyond.QuadPart + (LONGLONG)ReadAheadSize; 01476 01477 FileOffset2.LowPart &= ~PrivateCacheMap->ReadAheadMask; 01478 } 01479 01480 // 01481 // Set read ahead length to be the same as for the most recent read, 01482 // up to our max. 01483 // 01484 01485 if (FileOffset2.QuadPart != PrivateCacheMap->ReadAheadOffset[1].QuadPart) { 01486 01487 ASSERT( FileOffset2.HighPart >= 0 ); 01488 01489 Changed = TRUE; 01490 PrivateCacheMap->ReadAheadOffset[1] = FileOffset2; 01491 PrivateCacheMap->ReadAheadLength[1] = ReadAheadSize; 01492 } 01493 } 01494 01495 // 01496 // Read Ahead Case 2. 01497 // 01498 // If this is the third read following a particular stride, then we 01499 // will see if we can read ahead. One example of an application that 01500 // might do this is a spreadsheet. Note that this code even works 01501 // for negative strides. 01502 // 01503 01504 else if ( ( NewOffset.QuadPart - 01505 PrivateCacheMap->FileOffset2.QuadPart ) == 01506 ( PrivateCacheMap->FileOffset2.QuadPart - 01507 PrivateCacheMap->FileOffset1.QuadPart )) { 01508 01509 // 01510 // According to the current stride, the next offset will be: 01511 // 01512 // NewOffset + (NewOffset - FileOffset2) 01513 // 01514 // which is the same as: 01515 // 01516 // (NewOffset * 2) - FileOffset2 01517 // 01518 01519 FileOffset2.QuadPart = ( NewOffset.QuadPart << 1 ) - PrivateCacheMap->FileOffset2.QuadPart; 01520 01521 // 01522 // If our stride is going backwards through the file, we 01523 // have to detect the case where the next step would wrap. 01524 // 01525 01526 if (FileOffset2.HighPart >= 0) { 01527 01528 // 01529 // The read ahead length must be extended by the same amount that 01530 // we will round the PrivateCacheMap->ReadAheadOffset down. 01531 // 01532 01533 Length += FileOffset2.LowPart & (PAGE_SIZE - 1); 01534 01535 // 01536 // Now round the PrivateCacheMap->ReadAheadOffset down. 
01537 // 01538 01539 FileOffset2.LowPart &= ~(PAGE_SIZE - 1); 01540 PrivateCacheMap->ReadAheadOffset[1] = FileOffset2; 01541 01542 // 01543 // Round to page boundary. 01544 // 01545 01546 PrivateCacheMap->ReadAheadLength[1] = (ULONG) ROUND_TO_PAGES(Length); 01547 Changed = TRUE; 01548 } 01549 } 01550 01551 // 01552 // Get out if the ReadAhead requirements did not change. 01553 // 01554 01555 if (!Changed || PrivateCacheMap->ReadAheadActive) { 01556 01557 DebugTrace( 0, me, "Read ahead already in progress or no change\n", 0 ); 01558 01559 ExReleaseSpinLock( &PrivateCacheMap->ReadAheadSpinLock, OldIrql ); 01560 return; 01561 } 01562 01563 // 01564 // Otherwise, we will proceed and try to schedule the read ahead 01565 // ourselves. 01566 // 01567 01568 PrivateCacheMap->ReadAheadActive = TRUE; 01569 01570 // 01571 // Release spin lock on way out 01572 // 01573 01574 ExReleaseSpinLock( &PrivateCacheMap->ReadAheadSpinLock, OldIrql ); 01575 01576 // 01577 // Queue the read ahead request to the Lazy Writer's work queue. 01578 // 01579 01580 DebugTrace( 0, me, "Queueing read ahead to worker thread\n", 0 ); 01581 01582 WorkQueueEntry = CcAllocateWorkQueueEntry(); 01583 01584 // 01585 // If we failed to allocate a work queue entry, then, we will 01586 // quietly bag it. Read ahead is only an optimization, and 01587 // no one ever requires that it occur. 01588 // 01589 01590 if (WorkQueueEntry != NULL) { 01591 01592 // 01593 // We must reference this file object so that it cannot go away 01594 // until we finish Read Ahead processing in the Worker Thread. 01595 // 01596 01597 ObReferenceObject ( FileObject ); 01598 01599 // 01600 // Increment open count to make sure the SharedCacheMap stays around. 
01601 // 01602 01603 CcAcquireMasterLock( &OldIrql ); 01604 CcIncrementOpenCount( SharedCacheMap, 'adRQ' ); 01605 CcReleaseMasterLock( OldIrql ); 01606 01607 WorkQueueEntry->Function = (UCHAR)ReadAhead; 01608 WorkQueueEntry->Parameters.Read.FileObject = FileObject; 01609 01610 CcPostWorkQueue( WorkQueueEntry, &CcExpressWorkQueue ); 01611 } 01612 01613 // 01614 // If we failed to allocate a Work Queue Entry, or all of the pages 01615 // are resident we must set the active flag false. 01616 // 01617 01618 else { 01619 01620 ExAcquireFastLock( &PrivateCacheMap->ReadAheadSpinLock, &OldIrql ); 01621 PrivateCacheMap->ReadAheadActive = FALSE; 01622 ExReleaseFastLock( &PrivateCacheMap->ReadAheadSpinLock, OldIrql ); 01623 } 01624 01625 DebugTrace(-1, me, "CcScheduleReadAhead -> VOID\n", 0 ); 01626 01627 return; 01628 }

NTKERNELAPI VOID CcSetAdditionalCacheAttributes ( IN PFILE_OBJECT  FileObject,
IN BOOLEAN  DisableReadAhead,
IN BOOLEAN  DisableWriteBehind
 )

Definition at line 32 of file logsup.c.

References CcAcquireMasterLock, CcReleaseMasterLock, ClearFlag, DISABLE_READ_AHEAD, DISABLE_WRITE_BEHIND, _SHARED_CACHE_MAP::Flags, MODIFIED_WRITE_DISABLED, and SetFlag.

Referenced by LfsInitializeLogFile(), and LfsOpenLogFile().

00040 : 00041 00042 This routine supports the setting of disable read ahead or disable write 00043 behind flags to control Cache Manager operation. This routine may be 00044 called any time after calling CcInitializeCacheMap. Initially both 00045 read ahead and write behind are enabled. Note that the state of both 00046 of these flags must be specified on each call to this routine. 00047 00048 Arguments: 00049 00050 FileObject - File object for which the respective flags are to be set. 00051 00052 DisableReadAhead - FALSE to enable read ahead, TRUE to disable it. 00053 00054 DisableWriteBehind - FALSE to enable write behind, TRUE to disable it. 00055 00056 Return Value: 00057 00058 None. 00059 00060 --*/ 00061 00062 { 00063 PSHARED_CACHE_MAP SharedCacheMap; 00064 KIRQL OldIrql; 00065 00066 // 00067 // Get pointer to SharedCacheMap. 00068 // 00069 00070 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap; 00071 00072 // 00073 // Now set the flags and return. 00074 // 00075 00076 CcAcquireMasterLock( &OldIrql ); 00077 if (DisableReadAhead) { 00078 SetFlag(SharedCacheMap->Flags, DISABLE_READ_AHEAD); 00079 } else { 00080 ClearFlag(SharedCacheMap->Flags, DISABLE_READ_AHEAD); 00081 } 00082 if (DisableWriteBehind) { 00083 SetFlag(SharedCacheMap->Flags, DISABLE_WRITE_BEHIND | MODIFIED_WRITE_DISABLED); 00084 } else { 00085 ClearFlag(SharedCacheMap->Flags, DISABLE_WRITE_BEHIND); 00086 } 00087 CcReleaseMasterLock( OldIrql ); 00088 }

NTKERNELAPI VOID CcSetBcbOwnerPointer ( IN PVOID  Bcb,
IN PVOID  OwnerPointer
 )
 
Definition at line 1078 of file pinsup.c.

References ASSERT, CACHE_NTC_OBCB, ExSetResourceOwnerPointer(), NULL, and Resource.

Referenced by LfsGetLbcb().

01085 : 01086 01087 This routine may be called to set the resource owner for the Bcb resource, 01088 for cases where another thread will do the unpin *and* the current thread 01089 may exit. 01090 01091 Arguments: 01092 01093 Bcb - Bcb parameter returned from the last call to CcPinRead. 01094 01095 OwnerPointer - A valid resource owner pointer, which means a pointer to 01096 an allocated system address, with the low-order two bits 01097 set. The address may not be deallocated until after the 01098 unpin call. 01099 01100 Return Value: 01101 01102 None. 01103 01104 --*/ 01105 01106 { 01107 ASSERT(((ULONG_PTR)Bcb & 1) == 0); 01108 01109 // 01110 // Handle the overlapped Bcb case. 01111 // 01112 01113 if (((POBCB)Bcb)->NodeTypeCode == CACHE_NTC_OBCB) { 01114 01115 PBCB *BcbPtrPtr = &((POBCB)Bcb)->Bcbs[0]; 01116 01117 // 01118 // Loop to set owner for all Bcbs. 01119 // 01120 01121 while (*BcbPtrPtr != NULL) { 01122 ExSetResourceOwnerPointer( &(*BcbPtrPtr)->Resource, OwnerPointer ); 01123 BcbPtrPtr++; 01124 } 01125 01126 // 01127 // Otherwise, it is a normal Bcb 01128 // 01129 01130 } else { 01131 01132 // 01133 // Handle normal case. 01134 // 01135 01136 ExSetResourceOwnerPointer( &((PBCB)Bcb)->Resource, OwnerPointer ); 01137 } 01138 }

NTKERNELAPI VOID CcSetDirtyPageThreshold ( IN PFILE_OBJECT  FileObject,
IN ULONG  DirtyPageThreshold
 )
 
Definition at line 2747 of file fssup.c.

References _SHARED_CACHE_MAP::DirtyPageThreshold, FlagOn, FSRTL_FLAG_LIMIT_MODIFIED_PAGES, NULL, and SetFlag.

02754 : 02755 02756 This routine may be called to set a dirty page threshold for this 02757 stream. The write throttling will kick in whenever the file system 02758 attempts to exceed the dirty page threshold for this file. 02759 02760 Arguments: 02761 02762 FileObject - Supplies file object for the stream 02763 02764 DirtyPageThreshold - Supplies the dirty page threshold for this stream, 02765 or 0 for no threshold. 02766 02767 Return Value: 02768 02769 None 02770 02771 Environment: 02772 02773 The caller must guarantee exclusive access to the FsRtl header flags, 02774 for example, by calling this routine once during create of the structure 02775 containing the header. Then it would call the routine again when actually 02776 caching the stream. 02777 02778 --*/ 02779 02780 { 02781 KIRQL OldIrql; 02782 PSHARED_CACHE_MAP SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap; 02783 02784 if (SharedCacheMap != NULL) { 02785 02786 SharedCacheMap->DirtyPageThreshold = DirtyPageThreshold; 02787 } 02788 02789 // 02790 // Test the flag before setting, in case the caller is no longer properly 02791 // synchronized. 02792 // 02793 02794 if (!FlagOn(((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->Flags, 02795 FSRTL_FLAG_LIMIT_MODIFIED_PAGES)) { 02796 02797 SetFlag(((PFSRTL_COMMON_FCB_HEADER)(FileObject->FsContext))->Flags, 02798 FSRTL_FLAG_LIMIT_MODIFIED_PAGES); 02799 } 02800 }

NTKERNELAPI VOID CcSetDirtyPinnedData ( IN PVOID  BcbVoid,
IN PLARGE_INTEGER Lsn  OPTIONAL
 )
 
Definition at line 2560 of file cachesub.c.

References ASSERT, _SHARED_CACHE_MAP::BcbSpinLock, CACHE_NTC_OBCB, CcAcquireMasterLockAtDpcLevel, CcDirtySharedCacheMapList, CcReleaseMasterLockFromDpcLevel, CcScheduleLazyWriteScan(), CcTotalDirtyPages, DebugTrace, _SHARED_CACHE_MAP::DirtyPages, DISABLE_WRITE_BEHIND, FlagOn, _SHARED_CACHE_MAP::Flags, LazyWriter, me, NULL, PAGE_SHIFT, POBCB, _LAZY_WRITER::ScanActive, _SHARED_CACHE_MAP_LIST_CURSOR::SharedCacheMapLinks, _SHARED_CACHE_MAP::SharedCacheMapLinks, TRUE, and _SHARED_CACHE_MAP::ValidDataGoal.

Referenced by CcCopyWrite(), CcPreparePinWrite(), CcReleaseByteRangeFromWrite(), CcUnpinRepinnedBcb(), CcZeroData(), LfsFlushLfcb(), and LfsFlushLogPage().

02567 : 02568 02569 This routine may be called to set a Bcb (returned by CcPinFileData) 02570 dirty, and a candidate for the Lazy Writer. All Bcbs should be set 02571 dirty by calling this routine, even if they are to be flushed 02572 another way. 02573 02574 Arguments: 02575 02576 Bcb - Supplies a pointer to a pinned (by CcPinFileData) Bcb, to 02577 be set dirty. 02578 02579 Lsn - Lsn to be remembered with page. 02580 02581 Return Value: 02582 02583 None 02584 02585 --*/ 02586 02587 { 02588 PBCB Bcbs[2]; 02589 PBCB *BcbPtrPtr; 02590 KIRQL OldIrql; 02591 PSHARED_CACHE_MAP SharedCacheMap; 02592 02593 DebugTrace(+1, me, "CcSetDirtyPinnedData: Bcb = %08lx\n", BcbVoid ); 02594 02595 // 02596 // Assume this is a normal Bcb, and set up for loop below. 02597 // 02598 02599 Bcbs[0] = (PBCB)BcbVoid; 02600 Bcbs[1] = NULL; 02601 BcbPtrPtr = &Bcbs[0]; 02602 02603 // 02604 // If it is an overlap Bcb, then point into the Bcb vector 02605 // for the loop. 02606 // 02607 02608 if (Bcbs[0]->NodeTypeCode == CACHE_NTC_OBCB) { 02609 BcbPtrPtr = &((POBCB)Bcbs[0])->Bcbs[0]; 02610 } 02611 02612 // 02613 // Loop to set all Bcbs dirty 02614 // 02615 02616 while (*BcbPtrPtr != NULL) { 02617 02618 Bcbs[0] = *(BcbPtrPtr++); 02619 02620 // 02621 // Should be no ReadOnly Bcbs 02622 // 02623 02624 ASSERT(((ULONG_PTR)Bcbs[0] & 1) != 1); 02625 02626 SharedCacheMap = Bcbs[0]->SharedCacheMap; 02627 02628 // 02629 // We have to acquire the shared cache map list, because we 02630 // may be changing lists. 02631 // 02632 02633 ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql ); 02634 02635 if (!Bcbs[0]->Dirty) { 02636 02637 ULONG Pages = Bcbs[0]->ByteLength >> PAGE_SHIFT; 02638 02639 // 02640 // Set dirty to keep the Bcb from going away until 02641 // it is set Undirty, and assign the next modification time stamp. 02642 // 02643 02644 Bcbs[0]->Dirty = TRUE; 02645 02646 // 02647 // Initialize the OldestLsn field. 
02648 // 02649 02650 if (ARGUMENT_PRESENT(Lsn)) { 02651 Bcbs[0]->OldestLsn = *Lsn; 02652 Bcbs[0]->NewestLsn = *Lsn; 02653 } 02654 02655 // 02656 // Move it to the dirty list if these are the first dirty pages, 02657 // and this is not disabled for write behind. 02658 // 02659 // Increase the count of dirty bytes in the shared cache map. 02660 // 02661 02662 CcAcquireMasterLockAtDpcLevel(); 02663 if ((SharedCacheMap->DirtyPages == 0) && 02664 !FlagOn(SharedCacheMap->Flags, DISABLE_WRITE_BEHIND)) { 02665 02666 // 02667 // If the lazy write scan is not active, then start it. 02668 // 02669 02670 if (!LazyWriter.ScanActive) { 02671 CcScheduleLazyWriteScan(); 02672 } 02673 02674 RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks ); 02675 InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks, 02676 &SharedCacheMap->SharedCacheMapLinks ); 02677 } 02678 02679 SharedCacheMap->DirtyPages += Pages; 02680 CcTotalDirtyPages += Pages; 02681 CcReleaseMasterLockFromDpcLevel(); 02682 } 02683 02684 // 02685 // If this Lsn happens to be older/newer than the ones we have stored, then 02686 // change it. 02687 // 02688 02689 if (ARGUMENT_PRESENT(Lsn)) { 02690 02691 if ((Bcbs[0]->OldestLsn.QuadPart == 0) || (Lsn->QuadPart < Bcbs[0]->OldestLsn.QuadPart)) { 02692 Bcbs[0]->OldestLsn = *Lsn; 02693 } 02694 02695 if (Lsn->QuadPart > Bcbs[0]->NewestLsn.QuadPart) { 02696 Bcbs[0]->NewestLsn = *Lsn; 02697 } 02698 } 02699 02700 // 02701 // See if we need to advance our goal for ValidDataLength. 02702 // 02703 02704 if ( Bcbs[0]->BeyondLastByte.QuadPart > SharedCacheMap->ValidDataGoal.QuadPart ) { 02705 02706 SharedCacheMap->ValidDataGoal = Bcbs[0]->BeyondLastByte; 02707 } 02708 02709 ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 02710 } 02711 02712 DebugTrace(-1, me, "CcSetDirtyPinnedData -> VOID\n", 0 ); 02713 }

NTKERNELAPI VOID CcSetFileSizes ( IN PFILE_OBJECT  FileObject,
IN PCC_FILE_SIZES  FileSizes
 )
 
Definition at line 1821 of file fssup.c.

References CcAcquireMasterLock, CcDecrementOpenCount, CcDeleteMbcb(), CcDirtySharedCacheMapList, CcExtendVacbArray(), CcFreeActiveVacb(), CcIncrementOpenCount, CcPurgeAndClearCacheSection(), CcPurgeCacheSection(), CcReleaseMasterLock, CcScheduleLazyWriteScan(), DebugTrace, DebugTrace2, DEFAULT_EXTEND_MODULO, _SHARED_CACHE_MAP::DirtyPages, ExRaiseStatus(), FALSE, _SHARED_CACHE_MAP::FileSize, FlagOn, _SHARED_CACHE_MAP::Flags, FsRtlNormalizeNtstatus(), GetActiveVacbAtDpcLevel, LazyWriter, _SHARED_CACHE_MAP::Mbcb, me, mm, MmExtendSection(), MmFlushSection(), _SHARED_CACHE_MAP::NeedToZero, NT_SUCCESS, NTSTATUS(), NULL, _SHARED_CACHE_MAP::OpenCount, _LAZY_WRITER::OtherWork, PAGE_SIZE, PIN_ACCESS, _LAZY_WRITER::ScanActive, _SHARED_CACHE_MAP::Section, _SHARED_CACHE_MAP::SectionSize, _SHARED_CACHE_MAP_LIST_CURSOR::SharedCacheMapLinks, _SHARED_CACHE_MAP::SharedCacheMapLinks, Status, TRUE, _SHARED_CACHE_MAP::VacbActiveCount, _SHARED_CACHE_MAP::ValidDataGoal, _SHARED_CACHE_MAP::ValidDataLength, and WRITE_QUEUED.

Referenced by UdfLookupMetaVsnOfExtent(), and UdfUpdateVcbPhase0().

01828 : 01829 01830 This routine must be called whenever a file has been extended to reflect 01831 this extension in the cache maps and underlying section. Calling this 01832 routine has a benign effect if the current size of the section is 01833 already greater than or equal to the new AllocationSize. 01834 01835 This routine must also be called whenever the FileSize for a file changes 01836 to reflect these changes in the Cache Manager. 01837 01838 This routine seems rather large, but in the normal case it only acquires 01839 a spinlock, updates some fields, and exits. Less often it will either 01840 extend the section, or truncate/purge the file, but it would be unexpected 01841 to do both. On the other hand, the idea of this routine is that it does 01842 "everything" required when AllocationSize or FileSize change. 01843 01844 Arguments: 01845 01846 FileObject - A file object for which CcInitializeCacheMap has been 01847 previously called. 01848 01849 FileSizes - A pointer to AllocationSize, FileSize and ValidDataLength 01850 for the file. AllocationSize is ignored if it is not larger 01851 than the current section size (i.e., it is ignored unless it 01852 has grown). ValidDataLength is not used. 01853 01854 01855 Return Value: 01856 01857 None 01858 01859 --*/ 01860 01861 { 01862 LARGE_INTEGER NewSectionSize; 01863 LARGE_INTEGER NewFileSize; 01864 LARGE_INTEGER NewValidDataLength; 01865 IO_STATUS_BLOCK IoStatus; 01866 PSHARED_CACHE_MAP SharedCacheMap; 01867 NTSTATUS Status; 01868 KIRQL OldIrql; 01869 PVACB ActiveVacb; 01870 ULONG ActivePage; 01871 ULONG PageIsDirty; 01872 01873 DebugTrace(+1, me, "CcSetFileSizes:\n", 0 ); 01874 DebugTrace( 0, me, " FileObject = %08lx\n", FileObject ); 01875 DebugTrace( 0, me, " FileSizes = %08lx\n", FileSizes ); 01876 01877 // 01878 // Make a local copy of the new file size and section size. 
01879 // 01880 01881 NewSectionSize = FileSizes->AllocationSize; 01882 NewFileSize = FileSizes->FileSize; 01883 NewValidDataLength = FileSizes->ValidDataLength; 01884 01885 // 01886 // Serialize Creation/Deletion of all Shared CacheMaps 01887 // 01888 01889 CcAcquireMasterLock( &OldIrql ); 01890 01891 // 01892 // Get pointer to SharedCacheMap via File Object. 01893 // 01894 01895 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap; 01896 01897 // 01898 // If the file is not cached, just get out. 01899 // 01900 01901 if ((SharedCacheMap == NULL) || (SharedCacheMap->Section == NULL)) { 01902 01903 CcReleaseMasterLock( OldIrql ); 01904 01905 // 01906 // Let's try to purge the file incase this is a truncate. In the 01907 // vast majority of cases when there is no shared cache map, there 01908 // is no data section either, so this call will eventually be 01909 // no-oped in Mm. 01910 // 01911 01912 // 01913 // First flush the first page we are keeping, if it has data, before 01914 // we throw it away. 01915 // 01916 01917 if (NewFileSize.LowPart & (PAGE_SIZE - 1)) { 01918 MmFlushSection( FileObject->SectionObjectPointer, &NewFileSize, 1, &IoStatus, FALSE ); 01919 } 01920 01921 CcPurgeCacheSection( FileObject->SectionObjectPointer, 01922 &NewFileSize, 01923 0, 01924 FALSE ); 01925 01926 DebugTrace(-1, me, "CcSetFileSizes -> VOID\n", 0 ); 01927 01928 return; 01929 } 01930 01931 // 01932 // Make call a Noop if file is not mapped, or section already big enough. 01933 // 01934 01935 if ( NewSectionSize.QuadPart > SharedCacheMap->SectionSize.QuadPart ) { 01936 01937 // 01938 // Increment open count to make sure the SharedCacheMap stays around, 01939 // then release the spinlock so that we can call Mm. 01940 // 01941 01942 CcIncrementOpenCount( SharedCacheMap, '1fSS' ); 01943 CcReleaseMasterLock( OldIrql ); 01944 01945 // 01946 // Round new section size to pages. 
01947 // 01948 01949 NewSectionSize.QuadPart = NewSectionSize.QuadPart + (LONGLONG)(DEFAULT_EXTEND_MODULO - 1); 01950 NewSectionSize.LowPart &= ~(DEFAULT_EXTEND_MODULO - 1); 01951 01952 // 01953 // Use try-finally to make sure we get the open count decremented. 01954 // 01955 01956 try { 01957 01958 // 01959 // Call MM to extend the section. 01960 // 01961 01962 DebugTrace( 0, mm, "MmExtendSection:\n", 0 ); 01963 DebugTrace( 0, mm, " Section = %08lx\n", SharedCacheMap->Section ); 01964 DebugTrace2(0, mm, " Size = %08lx, %08lx\n", 01965 NewSectionSize.LowPart, NewSectionSize.HighPart ); 01966 01967 Status = MmExtendSection( SharedCacheMap->Section, &NewSectionSize, TRUE ); 01968 01969 if (!NT_SUCCESS(Status)) { 01970 01971 DebugTrace( 0, 0, "Error from MmExtendSection, Status = %08lx\n", 01972 Status ); 01973 01974 ExRaiseStatus( FsRtlNormalizeNtstatus( Status, 01975 STATUS_UNEXPECTED_MM_EXTEND_ERR )); 01976 } 01977 01978 // 01979 // Extend the Vacb array. 01980 // 01981 01982 CcExtendVacbArray( SharedCacheMap, NewSectionSize ); 01983 01984 } finally { 01985 01986 // 01987 // Serialize again to decrement the open count. 01988 // 01989 01990 CcAcquireMasterLock( &OldIrql ); 01991 01992 CcDecrementOpenCount( SharedCacheMap, '1fSF' ); 01993 01994 if ((SharedCacheMap->OpenCount == 0) && 01995 !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) && 01996 (SharedCacheMap->DirtyPages == 0)) { 01997 01998 // 01999 // Move to the dirty list. 02000 // 02001 02002 RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks ); 02003 InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks, 02004 &SharedCacheMap->SharedCacheMapLinks ); 02005 02006 // 02007 // Make sure the Lazy Writer will wake up, because we 02008 // want him to delete this SharedCacheMap. 
02009 // 02010 02011 LazyWriter.OtherWork = TRUE; 02012 if (!LazyWriter.ScanActive) { 02013 CcScheduleLazyWriteScan(); 02014 } 02015 } 02016 02017 CcReleaseMasterLock( OldIrql ); 02018 } 02019 02020 // 02021 // It is now very unlikely that we have any more work to do, but just 02022 // in case we reacquire the spinlock and check again if we are cached. 02023 // 02024 02025 CcAcquireMasterLock( &OldIrql ); 02026 02027 // 02028 // Get pointer to SharedCacheMap via File Object. 02029 // 02030 02031 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap; 02032 02033 // 02034 // If the file is not cached, just get out. 02035 // 02036 02037 if (SharedCacheMap == NULL) { 02038 02039 CcReleaseMasterLock( OldIrql ); 02040 02041 DebugTrace(-1, me, "CcSetFileSizes -> VOID\n", 0 ); 02042 02043 return; 02044 } 02045 } 02046 02047 // 02048 // If we are shrinking either of these two sizes, then we must free the 02049 // active page, since it may be locked. 02050 // 02051 02052 CcIncrementOpenCount( SharedCacheMap, '2fSS' ); 02053 02054 try { 02055 02056 if ( ( NewFileSize.QuadPart < SharedCacheMap->ValidDataGoal.QuadPart ) || 02057 ( NewFileSize.QuadPart < SharedCacheMap->FileSize.QuadPart )) { 02058 02059 GetActiveVacbAtDpcLevel( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty ); 02060 02061 if ((ActiveVacb != NULL) || (SharedCacheMap->NeedToZero != NULL)) { 02062 02063 CcReleaseMasterLock( OldIrql ); 02064 02065 CcFreeActiveVacb( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty ); 02066 02067 // 02068 // Serialize again to reduce ValidDataLength. It cannot change 02069 // because the caller must have the file exclusive. 02070 // 02071 02072 CcAcquireMasterLock( &OldIrql ); 02073 } 02074 } 02075 02076 // 02077 // If the section did not grow, see if the file system supports ValidDataLength, 02078 // then update the valid data length in the file system. 
02079 // 02080 02081 if ( SharedCacheMap->ValidDataLength.QuadPart != MAXLONGLONG ) { 02082 02083 if ( NewFileSize.QuadPart < SharedCacheMap->ValidDataLength.QuadPart ) { 02084 SharedCacheMap->ValidDataLength = NewFileSize; 02085 } 02086 02087 // 02088 // Update our notion of ValidDataGoal (how far the file has been written 02089 // in the cache) with caller's ValidDataLength. (Our ValidDataLength controls 02090 // when we issue ValidDataLength callbacks.) *** For now play it safe by 02091 // only growing here, which is the historical problem at hand, as with 02092 // compressed and uncompressed stream caches. 02093 // 02094 02095 if (NewValidDataLength.QuadPart > SharedCacheMap->ValidDataGoal.QuadPart) { 02096 SharedCacheMap->ValidDataGoal = NewValidDataLength; 02097 } 02098 } 02099 02100 // 02101 // On truncate, be nice guys and actually purge away user data from 02102 // the cache. However, the PinAccess check is important to avoid deadlocks 02103 // in Ntfs. 02104 // 02105 // It is also important to check the Vacb Active count. The caller 02106 // must have the file exclusive, therefore, no one else can be actively 02107 // doing anything in the file. Normally the Active count will be zero 02108 // (like in a normal call from Set File Info), and we can go ahead and truncate. 02109 // However, if the active count is nonzero, chances are this very thread has 02110 // something pinned or mapped, and we will deadlock if we try to purge and 02111 // wait for the count to go zero. A rare case of this which deadlocked DaveC 02112 // on Christmas Day of 1992, is where Ntfs was trying to convert an attribute 02113 // from resident to nonresident - which is a good example of a case where the 02114 // purge was not needed. 
02115 // 02116 02117 if ( (NewFileSize.QuadPart < SharedCacheMap->FileSize.QuadPart ) && 02118 !FlagOn(SharedCacheMap->Flags, PIN_ACCESS) && 02119 (SharedCacheMap->VacbActiveCount == 0)) { 02120 02121 // 02122 // Release the spinlock so that we can call Mm. 02123 // 02124 02125 CcReleaseMasterLock( OldIrql ); 02126 02127 // 02128 // If we are actually truncating to zero (a size which has particular 02129 // meaning to the Lazy Writer scan!), then we must reset the Mbcb if 02130 // there is one, so that we do not keep dirty pages around forever. 02131 // 02132 02133 if ((NewFileSize.QuadPart == 0) && (SharedCacheMap->Mbcb != NULL)) { 02134 CcDeleteMbcb( SharedCacheMap ); 02135 } 02136 02137 CcPurgeAndClearCacheSection( SharedCacheMap, &NewFileSize ); 02138 02139 // 02140 // Serialize again to decrement the open count. 02141 // 02142 02143 CcAcquireMasterLock( &OldIrql ); 02144 } 02145 02146 } finally { 02147 02148 // 02149 // We should only be raising without owning the spinlock. 02150 // 02151 02152 if (AbnormalTermination()) { 02153 02154 CcAcquireMasterLock( &OldIrql ); 02155 } 02156 02157 CcDecrementOpenCount( SharedCacheMap, '2fSF' ); 02158 02159 SharedCacheMap->FileSize = NewFileSize; 02160 02161 if ((SharedCacheMap->OpenCount == 0) && 02162 !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) && 02163 (SharedCacheMap->DirtyPages == 0)) { 02164 02165 // 02166 // Move to the dirty list. 02167 // 02168 02169 RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks ); 02170 InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks, 02171 &SharedCacheMap->SharedCacheMapLinks ); 02172 02173 // 02174 // Make sure the Lazy Writer will wake up, because we 02175 // want him to delete this SharedCacheMap. 
02176 // 02177 02178 LazyWriter.OtherWork = TRUE; 02179 if (!LazyWriter.ScanActive) { 02180 CcScheduleLazyWriteScan(); 02181 } 02182 } 02183 02184 CcReleaseMasterLock( OldIrql ); 02185 } 02186 02187 DebugTrace(-1, me, "CcSetFileSizes -> VOID\n", 0 ); 02188 02189 return; 02190 }

NTKERNELAPI VOID CcSetLogHandleForFile ( IN PFILE_OBJECT  FileObject,
IN PVOID  LogHandle,
IN PFLUSH_TO_LSN  FlushToLsnRoutine
 ) 

Definition at line 92 of file logsup.c.

References _SHARED_CACHE_MAP::FlushToLsnRoutine, and _SHARED_CACHE_MAP::LogHandle.

00100 : 00101 00102 This routine may be called to instruct the Cache Manager to store the 00103 specified log handle with the shared cache map for a file, to support 00104 subsequent calls to the other routines in this module which effectively 00105 perform an associative search for files by log handle. 00106 00107 Arguments: 00108 00109 FileObject - File for which the log handle should be stored. 00110 00111 LogHandle - Log Handle to store. 00112 00113 FlushToLsnRoutine - A routine to call before flushing buffers for this 00114 file, to insure a log file is flushed to the most 00115 recent Lsn for any Bcb being flushed. 00116 00117 Return Value: 00118 00119 None. 00120 00121 --*/ 00122 00123 { 00124 PSHARED_CACHE_MAP SharedCacheMap; 00125 00126 // 00127 // Get pointer to SharedCacheMap. 00128 // 00129 00130 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap; 00131 00132 // 00133 // Now set the log file handle and flush routine 00134 // 00135 00136 SharedCacheMap->LogHandle = LogHandle; 00137 SharedCacheMap->FlushToLsnRoutine = FlushToLsnRoutine; 00138 }

NTKERNELAPI VOID CcSetReadAheadGranularity ( IN PFILE_OBJECT  FileObject,
IN ULONG  Granularity
 ) 

Definition at line 1222 of file cachesub.c.

References PPRIVATE_CACHE_MAP.

Referenced by UdfCommonRead().

01229 : 01230 01231 This routine may be called to set the read ahead granularity used by 01232 the Cache Manager. The default is PAGE_SIZE. The number is decremented 01233 and stored as a mask. 01234 01235 Arguments: 01236 01237 FileObject - File Object for which granularity shall be set 01238 01239 Granularity - new granularity, which must be an even power of 2 and 01240 >= PAGE_SIZE 01241 01242 Return Value: 01243 01244 None 01245 --*/ 01246 01247 { 01248 ((PPRIVATE_CACHE_MAP)FileObject->PrivateCacheMap)->ReadAheadMask = Granularity - 1; 01249 }

NTKERNELAPI BOOLEAN CcUninitializeCacheMap ( IN PFILE_OBJECT  FileObject,
IN PLARGE_INTEGER TruncateSize  OPTIONAL,
IN PCACHE_UNINITIALIZE_EVENT UninitializeCompleteEvent  OPTIONAL
 ) 

Referenced by CcPurgeCacheSection(), UdfCommonCleanup(), and UdfDeleteInternalStream().

NTKERNELAPI VOID CcUnpinData ( IN PVOID  Bcb  ) 
 

Definition at line 1000 of file pinsup.c.

References CACHE_NTC_OBCB, CcUnpinFileData(), DebugTrace, ExFreePool(), FALSE, me, NULL, TRUE, and UNPIN.

Referenced by CcPinMappedData(), CcPinRead(), CcPreparePinWrite(), LfsCopyReadLogRecord(), LfsDeallocateLcb(), LfsFindLastLsn(), LfsFindNextLsn(), LfsFlushLfcb(), LfsFlushLogPage(), LfsGetLbcb(), LfsInitializeLogFilePriv(), LfsPinOrMapData(), LfsReadNextLogRecord(), LfsReadRestart(), LfsReadRestartArea(), LfsRestartLogFile(), and LfsSearchForwardByClient().

01006 : 01007 01008 This routine must be called at IPL0, some time after calling CcPinRead 01009 or CcPreparePinWrite. It performs any cleanup that is necessary. 01010 01011 Arguments: 01012 01013 Bcb - Bcb parameter returned from the last call to CcPinRead. 01014 01015 Return Value: 01016 01017 None. 01018 01019 --*/ 01020 01021 { 01022 DebugTrace(+1, me, "CcUnpinData:\n", 0 ); 01023 DebugTrace( 0, me, " >Bcb = %08lx\n", Bcb ); 01024 01025 // 01026 // Test for ReadOnly and unpin accordingly. 01027 // 01028 01029 if (((ULONG_PTR)Bcb & 1) != 0) { 01030 01031 // 01032 // Remove the Read Only flag 01033 // 01034 01035 (PCHAR)Bcb -= 1; 01036 01037 CcUnpinFileData( (PBCB)Bcb, TRUE, UNPIN ); 01038 01039 } else { 01040 01041 // 01042 // Handle the overlapped Bcb case. 01043 // 01044 01045 if (((POBCB)Bcb)->NodeTypeCode == CACHE_NTC_OBCB) { 01046 01047 PBCB *BcbPtrPtr = &((POBCB)Bcb)->Bcbs[0]; 01048 01049 // 01050 // Loop to free all Bcbs with recursive calls 01051 // (rather than dealing with RO for this uncommon case). 01052 // 01053 01054 while (*BcbPtrPtr != NULL) { 01055 CcUnpinData(*(BcbPtrPtr++)); 01056 } 01057 01058 // 01059 // Then free the pool for the Obcb 01060 // 01061 01062 ExFreePool( Bcb ); 01063 01064 // 01065 // Otherwise, it is a normal Bcb 01066 // 01067 01068 } else { 01069 CcUnpinFileData( (PBCB)Bcb, FALSE, UNPIN ); 01070 } 01071 } 01072 01073 DebugTrace(-1, me, "CcUnPinData -> VOID\n", 0 ); 01074 }

NTKERNELAPI VOID CcUnpinDataForThread ( IN PVOID  Bcb,
IN ERESOURCE_THREAD  ResourceThreadId
 ) 

Definition at line 1142 of file pinsup.c.

References CACHE_NTC_OBCB, CcUnpinFileData(), DebugTrace, ExFreePool(), ExReleaseResourceForThread, me, NULL, Resource, TRUE, and UNPIN.

Referenced by LfsCloseLogFile(), and LfsFlushLfcb().

01149 : 01150 01151 This routine must be called at IPL0, some time after calling CcPinRead 01152 or CcPreparePinWrite. It performs any cleanup that is necessary, 01153 releasing the Bcb resource for the given thread. 01154 01155 Arguments: 01156 01157 Bcb - Bcb parameter returned from the last call to CcPinRead. 01158 01159 Return Value: 01160 01161 None. 01162 01163 --*/ 01164 01165 { 01166 DebugTrace(+1, me, "CcUnpinDataForThread:\n", 0 ); 01167 DebugTrace( 0, me, " >Bcb = %08lx\n", Bcb ); 01168 DebugTrace( 0, me, " >ResoureceThreadId = %08lx\n", ResoureceThreadId ); 01169 01170 // 01171 // Test for ReadOnly and unpin accordingly. 01172 // 01173 01174 if (((ULONG_PTR)Bcb & 1) != 0) { 01175 01176 // 01177 // Remove the Read Only flag 01178 // 01179 01180 (PCHAR)Bcb -= 1; 01181 01182 CcUnpinFileData( (PBCB)Bcb, TRUE, UNPIN ); 01183 01184 } else { 01185 01186 // 01187 // Handle the overlapped Bcb case. 01188 // 01189 01190 if (((POBCB)Bcb)->NodeTypeCode == CACHE_NTC_OBCB) { 01191 01192 PBCB *BcbPtrPtr = &((POBCB)Bcb)->Bcbs[0]; 01193 01194 // 01195 // Loop to free all Bcbs with recursive calls 01196 // (rather than dealing with RO for this uncommon case). 01197 // 01198 01199 while (*BcbPtrPtr != NULL) { 01200 CcUnpinDataForThread( *(BcbPtrPtr++), ResourceThreadId ); 01201 } 01202 01203 // 01204 // Then free the pool for the Obcb 01205 // 01206 01207 ExFreePool( Bcb ); 01208 01209 // 01210 // Otherwise, it is a normal Bcb 01211 // 01212 01213 } else { 01214 01215 // 01216 // If not readonly, we can release the resource for the thread first, 01217 // and then call CcUnpinFileData. Release resource first in case 01218 // Bcb gets deallocated. 01219 // 01220 01221 ExReleaseResourceForThread( &((PBCB)Bcb)->Resource, ResourceThreadId ); 01222 CcUnpinFileData( (PBCB)Bcb, TRUE, UNPIN ); 01223 } 01224 } 01225 DebugTrace(-1, me, "CcUnpinDataForThread -> VOID\n", 0 ); 01226 }

NTKERNELAPI VOID CcUnpinRepinnedBcb ( IN PVOID  Bcb,
IN BOOLEAN  WriteThrough,
OUT PIO_STATUS_BLOCK  IoStatus
 ) 

Definition at line 5143 of file cachesub.c.

References ASSERT, CcDeferredWrites, CcPostDeferredWrites(), CcSetDirtyPinnedData(), CcUnpinFileData(), DebugTrace, DebugTrace2, ExAcquireResourceExclusive, FALSE, _SHARED_CACHE_MAP::FileObject, FlagOn, _SHARED_CACHE_MAP::Flags, me, MmFlushSection(), MmSetAddressRangeModified(), MODIFIED_WRITE_DISABLED, NULL, Resource, RetryError, _FILE_OBJECT::SectionObjectPointer, SET_CLEAN, TRUE, and UNPIN.

05151 : 05152 05153 This routine may be called to Write a previously pinned buffer 05154 through to the file. It must have been preceded by a call to 05155 CcRepinBcb. As this routine must acquire the Bcb 05156 resource exclusive, the caller must be extremely careful to avoid 05157 deadlocks. Ideally the caller owns no resources at all when it 05158 calls this routine, or else the caller should guarantee that it 05159 has nothing else pinned in this same file. (The latter rule is 05160 the one used to avoid deadlocks in calls from CcCopyWrite and 05161 CcMdlWrite.) 05162 05163 Arguments: 05164 05165 Bcb - Pointer to a Bcb which was previously specified in a call 05166 to CcRepinBcb. 05167 05168 WriteThrough - TRUE if the Bcb should be written through. 05169 05170 IoStatus - Returns the I/O status for the operation. 05171 05172 Return Value: 05173 05174 None. 05175 05176 --*/ 05177 05178 { 05179 PSHARED_CACHE_MAP SharedCacheMap = ((PBCB)Bcb)->SharedCacheMap; 05180 05181 DebugTrace(+1, me, "CcUnpinRepinnedBcb\n", 0 ); 05182 DebugTrace( 0, me, " Bcb = %08lx\n", Bcb ); 05183 DebugTrace( 0, me, " WriteThrough = %02lx\n", WriteThrough ); 05184 05185 // 05186 // Set status to success for non write through case. 05187 // 05188 05189 IoStatus->Status = STATUS_SUCCESS; 05190 05191 if (WriteThrough) { 05192 05193 // 05194 // Acquire Bcb exclusive to eliminate possible modifiers of the buffer, 05195 // since we are about to write its buffer. 05196 // 05197 05198 if (FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED)) { 05199 ExAcquireResourceExclusive( &((PBCB)Bcb)->Resource, TRUE ); 05200 } 05201 05202 // 05203 // Now, there is a chance that the LazyWriter has already written 05204 // it, since the resource was free. We will only write it if it 05205 // is still dirty. 05206 // 05207 05208 if (((PBCB)Bcb)->Dirty) { 05209 05210 // 05211 // First we make sure that the dirty bit in the PFN database is set. 
05212 // 05213 05214 ASSERT( ((PBCB)Bcb)->BaseAddress != NULL ); 05215 MmSetAddressRangeModified( ((PBCB)Bcb)->BaseAddress, 05216 ((PBCB)Bcb)->ByteLength ); 05217 05218 // 05219 // Now release the Bcb resource and set it clean. Note we do not check 05220 // here for errors, and just return the I/O status. Errors on writes 05221 // are rare to begin with. Nonetheless, our strategy is to rely on 05222 // one or more of the following (depending on the file system) to prevent 05223 // errors from getting to us. 05224 // 05225 // - Retries and/or other forms of error recovery in the disk driver 05226 // - Mirroring driver 05227 // - Hot fixing in the noncached path of the file system 05228 // 05229 // In the unexpected case that a write error does get through, we 05230 // report it to our caller, but go ahead and set the Bcb clean. There 05231 // seems to be no point in letting Bcbs (and pages in physical memory) 05232 // accumulate which can never go away because we get an unrecoverable I/O 05233 // error. 05234 // 05235 05236 // 05237 // We specify TRUE here for ReadOnly so that we will keep the 05238 // resource during the flush. 05239 // 05240 05241 CcUnpinFileData( (PBCB)Bcb, TRUE, SET_CLEAN ); 05242 05243 // 05244 // Write it out. 05245 // 05246 05247 MmFlushSection( ((PBCB)Bcb)->SharedCacheMap->FileObject->SectionObjectPointer, 05248 &((PBCB)Bcb)->FileOffset, 05249 ((PBCB)Bcb)->ByteLength, 05250 IoStatus, 05251 TRUE ); 05252 05253 // 05254 // If we got verify required, we have to mark the buffer dirty again 05255 // so we will try again later. 05256 // 05257 05258 if (RetryError(IoStatus->Status)) { 05259 CcSetDirtyPinnedData( (PBCB)Bcb, NULL ); 05260 } 05261 05262 // 05263 // Now remove the final pin count now that we have set it clean. 05264 // 05265 05266 CcUnpinFileData( (PBCB)Bcb, FALSE, UNPIN ); 05267 05268 // 05269 // See if there is any deferred writes we can post. 
05270 // 05271 05272 if (!IsListEmpty(&CcDeferredWrites)) { 05273 CcPostDeferredWrites(); 05274 } 05275 } 05276 else { 05277 05278 // 05279 // Lazy Writer got there first, just free the resource and unpin. 05280 // 05281 05282 CcUnpinFileData( (PBCB)Bcb, FALSE, UNPIN ); 05283 05284 } 05285 05286 DebugTrace2(0, me, " <IoStatus = %08lx, %08lx\n", IoStatus->Status, 05287 IoStatus->Information ); 05288 } 05289 05290 // 05291 // Non-WriteThrough case 05292 // 05293 05294 else { 05295 05296 CcUnpinFileData( (PBCB)Bcb, TRUE, UNPIN ); 05297 05298 // 05299 // Set status to success for non write through case. 05300 // 05301 05302 IoStatus->Status = STATUS_SUCCESS; 05303 } 05304 05305 DebugTrace(-1, me, "CcUnpinRepinnedBcb -> VOID\n", 0 ); 05306 }

NTSTATUS CcWaitForCurrentLazyWriterActivity (  ) 
 

Definition at line 158 of file lazyrite.c.

References CcAcquireMasterLock, CcAllocateWorkQueueEntry, CcPostTickWorkQueue, CcReleaseMasterLock, CcScheduleLazyWriteScan(), Event(), EventSet, Executive, FALSE, _WORK_QUEUE_ENTRY::Function, KeInitializeEvent, KernelMode, KeWaitForSingleObject(), LazyWriter, NULL, _LAZY_WRITER::OtherWork, _WORK_QUEUE_ENTRY::Parameters, _LAZY_WRITER::ScanActive, TRUE, and _WORK_QUEUE_ENTRY::WorkQueueLinks.

Referenced by UdfLockVolumeInternal().

00163 : 00164 00165 This routine allows a thread to receive notification when the current tick 00166 of lazy writer work has completed. It must not be called within a lazy 00167 writer workitem! The caller must not be holding synchronization that could 00168 block a Cc workitem! 00169 00170 In particular, this lets a caller insure that all avaliable lazy closes at 00171 the time of the call have completed. 00172 00173 Arguments: 00174 00175 None. 00176 00177 Return Value: 00178 00179 Final result of the wait. 00180 00181 --*/ 00182 00183 { 00184 KIRQL OldIrql; 00185 KEVENT Event; 00186 PWORK_QUEUE_ENTRY WorkQueueEntry; 00187 00188 WorkQueueEntry = CcAllocateWorkQueueEntry(); 00189 00190 if (WorkQueueEntry == NULL) { 00191 return STATUS_INSUFFICIENT_RESOURCES; 00192 } 00193 00194 WorkQueueEntry->Function = (UCHAR)EventSet; 00195 KeInitializeEvent( &Event, NotificationEvent, FALSE ); 00196 WorkQueueEntry->Parameters.Event.Event = &Event; 00197 00198 // 00199 // Add this to the post-tick work queue and wake the lazy writer for it. 00200 // The lazy writer will add this to the end of the next batch of work 00201 // he issues. 00202 // 00203 00204 CcAcquireMasterLock( &OldIrql ); 00205 00206 InsertTailList( &CcPostTickWorkQueue, &WorkQueueEntry->WorkQueueLinks ); 00207 00208 LazyWriter.OtherWork = TRUE; 00209 if (!LazyWriter.ScanActive) { 00210 CcScheduleLazyWriteScan(); 00211 } 00212 00213 CcReleaseMasterLock( OldIrql ); 00214 00215 return KeWaitForSingleObject( &Event, Executive, KernelMode, FALSE, NULL ); 00216 }

NTKERNELAPI BOOLEAN CcZeroData ( IN PFILE_OBJECT  FileObject,
IN PLARGE_INTEGER  StartOffset,
IN PLARGE_INTEGER  EndOffset,
IN BOOLEAN  Wait
 ) 

Definition at line 2952 of file fssup.c.

References ASSERT, _MDL::ByteCount, CcAggressiveZeroCount, CcAggressiveZeroThreshold, CcPinFileData(), CcSetDirtyPinnedData(), CcUnpinFileData(), COMPUTE_PAGES_SPANNED, DebugTrace, Event(), ExAllocatePoolWithTag, Executive, ExFreePool(), ExRaiseStatus(), FALSE, FlagOn, FO_WRITE_THROUGH, IoAllocateMdl(), IoFreeMdl(), IoGetRelatedDeviceObject(), IoReadAccess, IoSynchronousPageWrite(), KeInitializeEvent, KernelMode, KeSweepDcache(), KeWaitForSingleObject(), LowPagePriority, _MDL::MappedSystemVa, MAX_ZERO_TRANSFER, MAX_ZEROS_IN_CACHE, MDL_PAGES_LOCKED, MDL_SOURCE_IS_NONPAGED_POOL, _MDL::MdlFlags, me, MIN_ZERO_TRANSFER, MmAvailablePages, MmBuildMdlForNonPagedPool(), MmDisablePageFaultClustering, MmEnablePageFaultClustering, MmGetMdlPfnArray, MmGetSystemAddressForMdl, MmGetSystemAddressForMdlSafe, MmProbeAndLockPages(), MmSetAddressRangeModified(), MmUnlockPages(), MmUnmapLockedPages(), NonPagedPoolCacheAligned, NT_SUCCESS, NTSTATUS(), NULL, PAGE_SIZE, _DEVICE_OBJECT::SectorSize, Status, TRUE, try_return, and UNPIN.

Referenced by FsRtlCopyWrite(), and FsRtlPrepareMdlWriteDev().

02961 : 02962 02963 This routine attempts to zero the specified file data and deliver the 02964 correct I/O status. 02965 02966 If the caller does not want to block (such as for disk I/O), then 02967 Wait should be supplied as FALSE. If Wait was supplied as FALSE and 02968 it is currently impossible to zero all of the requested data without 02969 blocking, then this routine will return FALSE. However, if the 02970 required space is immediately accessible in the cache and no blocking is 02971 required, this routine zeros the data and returns TRUE. 02972 02973 If the caller supplies Wait as TRUE, then this routine is guaranteed 02974 to zero the data and return TRUE. If the correct space is immediately 02975 accessible in the cache, then no blocking will occur. Otherwise, 02976 the necessary work will be initiated to read and/or free cache data, 02977 and the caller will be blocked until the data can be received. 02978 02979 File system Fsd's should typically supply Wait = TRUE if they are 02980 processing a synchronous I/O requests, or Wait = FALSE if they are 02981 processing an asynchronous request. 02982 02983 File system threads should supply Wait = TRUE. 02984 02985 IMPORTANT NOTE: File systems which call this routine must be prepared 02986 to handle a special form of a write call where the Mdl is already 02987 supplied. Namely, if Irp->MdlAddress is supplied, the file system 02988 must check the low order bit of Irp->MdlAddress->ByteOffset. If it 02989 is set, that means that the Irp was generated in this routine and 02990 the file system must do two things: 02991 02992 Decrement Irp->MdlAddress->ByteOffset and Irp->UserBuffer 02993 02994 Clear Irp->MdlAddress immediately prior to completing the 02995 request, as this routine expects to reuse the Mdl and 02996 ultimately deallocate the Mdl itself. 02997 02998 Arguments: 02999 03000 FileObject - pointer to the FileObject for which a range of bytes 03001 is to be zeroed. 
This FileObject may either be for 03002 a cached file or a noncached file. If the file is 03003 not cached, then WriteThrough must be TRUE and 03004 StartOffset and EndOffset must be on sector boundaries. 03005 03006 StartOffset - Start offset in file to be zeroed. 03007 03008 EndOffset - End offset in file to be zeroed. 03009 03010 Wait - FALSE if caller may not block, TRUE otherwise (see description 03011 above) 03012 03013 Return Value: 03014 03015 FALSE - if Wait was supplied as FALSE and the data was not zeroed. 03016 03017 TRUE - if the data has been zeroed. 03018 03019 Raises: 03020 03021 STATUS_INSUFFICIENT_RESOURCES - If a pool allocation failure occurs. 03022 This can only occur if Wait was specified as TRUE. (If Wait is 03023 specified as FALSE, and an allocation failure occurs, this 03024 routine simply returns FALSE.) 03025 03026 --*/ 03027 03028 { 03029 PSHARED_CACHE_MAP SharedCacheMap; 03030 PVOID CacheBuffer; 03031 LARGE_INTEGER FOffset; 03032 LARGE_INTEGER ToGo; 03033 ULONG ZeroBytes, ZeroTransfer; 03034 ULONG SectorMask; 03035 ULONG i; 03036 BOOLEAN WriteThrough; 03037 BOOLEAN AggressiveZero = FALSE; 03038 ULONG SavedState = 0; 03039 ULONG MaxZerosInCache = MAX_ZEROS_IN_CACHE; 03040 ULONG NumberOfColors = 1; 03041 03042 PBCB Bcb = NULL; 03043 PCHAR Zeros = NULL; 03044 PMDL ZeroMdl = NULL; 03045 ULONG MaxBytesMappedInMdl = 0; 03046 BOOLEAN Result = TRUE; 03047 03048 PPFN_NUMBER Page; 03049 ULONG SavedByteCount; 03050 LARGE_INTEGER SizeLeft; 03051 03052 DebugTrace(+1, me, "CcZeroData\n", 0 ); 03053 03054 WriteThrough = (BOOLEAN)(((FileObject->Flags & FO_WRITE_THROUGH) != 0) || 03055 (FileObject->PrivateCacheMap == NULL)); 03056 03057 // 03058 // If the caller specified Wait, but the FileObject is WriteThrough, 03059 // then we need to just get out. 
03060 // 03061 03062 if (WriteThrough && !Wait) { 03063 03064 DebugTrace(-1, me, "CcZeroData->FALSE (WriteThrough && !Wait)\n", 0 ); 03065 03066 return FALSE; 03067 } 03068 03069 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap; 03070 03071 SectorMask = IoGetRelatedDeviceObject(FileObject)->SectorSize - 1; 03072 03073 FOffset = *StartOffset; 03074 03075 // 03076 // Calculate how much to zero this time. 03077 // 03078 03079 ToGo.QuadPart = EndOffset->QuadPart - FOffset.QuadPart; 03080 03081 // 03082 // This magic number is what the fastpaths throttle on, and they will present 03083 // non-sector aligned zeroing requests. As long as we will always handle them 03084 // on the cached path, we are OK. 03085 // 03086 // If we will not make the cached path, the request must be aligned. 03087 // 03088 03089 ASSERT( ToGo.QuadPart <= 0x2000 || 03090 ((ToGo.LowPart & SectorMask) == 0 && 03091 (FOffset.LowPart & SectorMask) == 0)); 03092 03093 // 03094 // We will only do zeroing in the cache if the caller is using a 03095 // cached file object, and did not specify WriteThrough. We are 03096 // willing to zero some data in the cache if our total is not too 03097 // much, or there is sufficient available pages. 03098 // 03099 03100 if (((ToGo.QuadPart <= 0x2000) || 03101 (MmAvailablePages >= ((MAX_ZEROS_IN_CACHE / PAGE_SIZE) * 4))) && !WriteThrough) { 03102 03103 try { 03104 03105 while (MaxZerosInCache != 0) { 03106 03107 ULONG ReceivedLength; 03108 LARGE_INTEGER BeyondLastByte; 03109 03110 if ( ToGo.QuadPart > (LONGLONG)MaxZerosInCache ) { 03111 03112 // 03113 // If Wait == FALSE, then there is no point in getting started, 03114 // because we would have to start all over again zeroing with 03115 // Wait == TRUE, since we would fall out of this loop and 03116 // start synchronously writing pages to disk. 
03117 // 03118 03119 if (!Wait) { 03120 03121 DebugTrace(-1, me, "CcZeroData -> FALSE\n", 0 ); 03122 03123 try_return( Result = FALSE ); 03124 } 03125 } 03126 else { 03127 MaxZerosInCache = ToGo.LowPart; 03128 } 03129 03130 // 03131 // Call local routine to Map or Access the file data, then zero the data, 03132 // then call another local routine to free the data. If we cannot map 03133 // the data because of a Wait condition, return FALSE. 03134 // 03135 // Note that this call may result in an exception, however, if it 03136 // does no Bcb is returned and this routine has absolutely no 03137 // cleanup to perform. Therefore, we do not have a try-finally 03138 // and we allow the possibility that we will simply be unwound 03139 // without notice. 03140 // 03141 03142 if (!CcPinFileData( FileObject, 03143 &FOffset, 03144 MaxZerosInCache, 03145 FALSE, 03146 TRUE, 03147 Wait, 03148 &Bcb, 03149 &CacheBuffer, 03150 &BeyondLastByte )) { 03151 03152 DebugTrace(-1, me, "CcZeroData -> FALSE\n", 0 ); 03153 03154 try_return( Result = FALSE ); 03155 } 03156 03157 // 03158 // Calculate how much data is described by Bcb starting at our desired 03159 // file offset. If it is more than we need, we will zero the whole thing 03160 // anyway. 03161 // 03162 03163 ReceivedLength = (ULONG)(BeyondLastByte.QuadPart - FOffset.QuadPart ); 03164 03165 // 03166 // Now attempt to allocate an Mdl to describe the mapped data. 03167 // 03168 03169 ZeroMdl = IoAllocateMdl( CacheBuffer, 03170 ReceivedLength, 03171 FALSE, 03172 FALSE, 03173 NULL ); 03174 03175 if (ZeroMdl == NULL) { 03176 03177 ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES ); 03178 } 03179 03180 // 03181 // It is necessary to probe and lock the pages, or else 03182 // the pages may not still be in memory when we do the 03183 // MmSetAddressRangeModified for the dirty Bcb. 
03184 // 03185 03186 MmDisablePageFaultClustering(&SavedState); 03187 MmProbeAndLockPages( ZeroMdl, KernelMode, IoReadAccess ); 03188 MmEnablePageFaultClustering(SavedState); 03189 SavedState = 0; 03190 03191 // 03192 // Assume we did not get all the data we wanted, and set FOffset 03193 // to the end of the returned data, and advance buffer pointer. 03194 // 03195 03196 FOffset = BeyondLastByte; 03197 03198 // 03199 // Figure out how many bytes we are allowed to zero in the cache. 03200 // Note it is possible we have zeroed a little more than our maximum, 03201 // because we hit an existing Bcb that extended beyond the range. 03202 // 03203 03204 if (MaxZerosInCache <= ReceivedLength) { 03205 MaxZerosInCache = 0; 03206 } 03207 else { 03208 MaxZerosInCache -= ReceivedLength; 03209 } 03210 03211 // 03212 // Now set the Bcb dirty. We have to explicitly set the address 03213 // range modified here, because that work otherwise gets deferred 03214 // to the Lazy Writer. 03215 // 03216 03217 MmSetAddressRangeModified( CacheBuffer, ReceivedLength ); 03218 CcSetDirtyPinnedData( Bcb, NULL ); 03219 03220 // 03221 // Unmap the data now 03222 // 03223 03224 CcUnpinFileData( Bcb, FALSE, UNPIN ); 03225 Bcb = NULL; 03226 03227 // 03228 // Unlock and free the Mdl (we only loop back if we crossed 03229 // a 256KB boundary. 03230 // 03231 03232 MmUnlockPages( ZeroMdl ); 03233 IoFreeMdl( ZeroMdl ); 03234 ZeroMdl = NULL; 03235 } 03236 03237 try_exit: NOTHING; 03238 } finally { 03239 03240 if (SavedState != 0) { 03241 MmEnablePageFaultClustering(SavedState); 03242 } 03243 03244 // 03245 // Clean up only necessary in abnormal termination. 03246 // 03247 03248 if (Bcb != NULL) { 03249 03250 CcUnpinFileData( Bcb, FALSE, UNPIN ); 03251 } 03252 03253 // 03254 // Since the last thing in the above loop which can 03255 // fail is the MmProbeAndLockPages, we only need to 03256 // free the Mdl here. 
03257 // 03258 03259 if (ZeroMdl != NULL) { 03260 03261 IoFreeMdl( ZeroMdl ); 03262 } 03263 } 03264 03265 // 03266 // If hit a wait condition above, return it now. 03267 // 03268 03269 if (!Result) { 03270 return FALSE; 03271 } 03272 03273 // 03274 // If we finished, get out now. 03275 // 03276 03277 if ( FOffset.QuadPart >= EndOffset->QuadPart ) { 03278 return TRUE; 03279 } 03280 } 03281 03282 // 03283 // We either get here because we decided above not to zero anything in 03284 // the cache directly, or else we zeroed up to our maximum and still 03285 // have some left to zero direct to the file on disk. In either case, 03286 // we will now zero from FOffset to *EndOffset, and then flush this 03287 // range in case the file is cached/mapped, and there are modified 03288 // changes in memory. 03289 // 03290 03291 // 03292 // Round FOffset and EndOffset up to sector boundaries, since 03293 // we will be doing disk I/O, and calculate size left. 03294 // 03295 03296 ASSERT( (FOffset.LowPart & SectorMask) == 0 ); 03297 03298 FOffset.QuadPart += (LONGLONG)SectorMask; 03299 FOffset.LowPart &= ~SectorMask; 03300 SizeLeft.QuadPart = EndOffset->QuadPart + (LONGLONG)SectorMask; 03301 SizeLeft.LowPart &= ~SectorMask; 03302 SizeLeft.QuadPart -= FOffset.QuadPart; 03303 03304 ASSERT( (FOffset.LowPart & SectorMask) == 0 ); 03305 ASSERT( (SizeLeft.LowPart & SectorMask) == 0 ); 03306 03307 if (SizeLeft.QuadPart == 0) { 03308 return TRUE; 03309 } 03310 03311 // 03312 // try-finally to guarantee cleanup. 03313 // 03314 03315 try { 03316 03317 // 03318 // Allocate a page to hold the zeros we will write, and 03319 // zero it. 
03320 // 03321 03322 ZeroBytes = NumberOfColors * PAGE_SIZE; 03323 03324 if (SizeLeft.HighPart == 0 && SizeLeft.LowPart < ZeroBytes) { 03325 ZeroBytes = SizeLeft.LowPart; 03326 } 03327 03328 Zeros = (PCHAR)ExAllocatePoolWithTag( NonPagedPoolCacheAligned, ZeroBytes, 'eZcC' ); 03329 03330 if (Zeros != NULL) { 03331 03332 // 03333 // Allocate and initialize an Mdl to describe the zeros 03334 // we need to transfer. Allocate to cover the maximum 03335 // size required, and we will use and reuse it in the 03336 // loop below, initialized correctly. 03337 // 03338 03339 if (SizeLeft.HighPart == 0 && SizeLeft.LowPart < MAX_ZERO_TRANSFER) { 03340 03341 ZeroTransfer = SizeLeft.LowPart; 03342 03343 } else { 03344 03345 // 03346 // See how aggressive we can afford to be. 03347 // 03348 03349 if (InterlockedIncrement( &CcAggressiveZeroCount ) <= CcAggressiveZeroThreshold) { 03350 AggressiveZero = TRUE; 03351 ZeroTransfer = MAX_ZERO_TRANSFER; 03352 } else { 03353 InterlockedDecrement( &CcAggressiveZeroCount ); 03354 ZeroTransfer = MIN_ZERO_TRANSFER; 03355 } 03356 } 03357 03358 // 03359 // Since the maximum zero may start at a very aggressive level, fall back 03360 // until we really have to give up. Since filter drivers, filesystems and 03361 // even storage drivers may need to map this Mdl, we have to pre-map it 03362 // into system space so that we know enough PTEs are available. We also 03363 // need to throttle our consumption of virtual addresses based on the size 03364 // of the system and the number of parallel instances of this work outstanding. 03365 // This may be a bit of overkill, but since running out of PTEs is a fatal 03366 // event for the rest of the system, try to help out while still being fast. 03367 // 03368 03369 while (TRUE) { 03370 03371 // 03372 // Spin down trying to get an MDL which can describe our operation. 
03373 // 03374 03375 while (TRUE) { 03376 03377 ZeroMdl = IoAllocateMdl( Zeros, ZeroTransfer, FALSE, FALSE, NULL ); 03378 03379 // 03380 // Throttle ourselves to what we've physically allocated. Note that 03381 // we could have started with an odd multiple of this number. If we 03382 // tried for exactly that size and failed, we're toast. 03383 03384 03385 if (ZeroMdl || ZeroTransfer == ZeroBytes) { 03386 03387 break; 03388 } 03389 03390 Fall_Back: 03391 03392 // 03393 // Fallback by half and round down to a sector multiple. 03394 // 03395 03396 ZeroTransfer /= 2; 03397 ZeroTransfer &= ~SectorMask; 03398 if (ZeroTransfer < ZeroBytes) { 03399 ZeroTransfer = ZeroBytes; 03400 } 03401 03402 ASSERT( (ZeroTransfer & SectorMask) == 0 && ZeroTransfer != 0); 03403 } 03404 03405 if (ZeroMdl == NULL) { 03406 03407 ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES ); 03408 } 03409 03410 // 03411 // If we have throttled all the way down, stop and just build a 03412 // simple MDL describing our previous allocation. 03413 // 03414 03415 if (ZeroTransfer == ZeroBytes) { 03416 03417 MmBuildMdlForNonPagedPool( ZeroMdl ); 03418 break; 03419 } 03420 03421 // 03422 // Now we will temporarily lock the allocated pages 03423 // only, and then replicate the page frame numbers through 03424 // the entire Mdl to keep writing the same pages of zeros. 03425 // 03426 // It would be nice if Mm exported a way for us to not have 03427 // to pull the Mdl apart and rebuild it ourselves, but this 03428 // is so bizarre a purpose as to be tolerable. 
03429 // 03430 03431 SavedByteCount = ZeroMdl->ByteCount; 03432 ZeroMdl->ByteCount = ZeroBytes; 03433 MmBuildMdlForNonPagedPool( ZeroMdl ); 03434 03435 ZeroMdl->MdlFlags &= ~MDL_SOURCE_IS_NONPAGED_POOL; 03436 ZeroMdl->MdlFlags |= MDL_PAGES_LOCKED; 03437 ZeroMdl->MappedSystemVa = NULL; 03438 ZeroMdl->ByteCount = SavedByteCount; 03439 Page = MmGetMdlPfnArray( ZeroMdl ); 03440 for (i = NumberOfColors; 03441 i < (COMPUTE_PAGES_SPANNED( 0, SavedByteCount )); 03442 i++) { 03443 03444 *(Page + i) = *(Page + i - NumberOfColors); 03445 } 03446 03447 if (MmGetSystemAddressForMdlSafe( ZeroMdl, LowPagePriority ) == NULL) { 03448 03449 // 03450 // Blow away this Mdl and trim for the retry. Since it didn't 03451 // get mapped, there is nothing fancy to do. 03452 // 03453 03454 IoFreeMdl( ZeroMdl ); 03455 goto Fall_Back; 03456 } 03457 03458 break; 03459 } 03460 03461 // 03462 // We failed to allocate the space we wanted, so we will go to 03463 // half of a page and limp along. 03464 // 03465 03466 } else { 03467 03468 // 03469 // Of course, if we have a device which has large sectors, that defines 03470 // the lower limit of our attempt. 03471 // 03472 03473 if (IoGetRelatedDeviceObject(FileObject)->SectorSize < PAGE_SIZE / 2) { 03474 03475 ZeroBytes = PAGE_SIZE / 2; 03476 Zeros = (PCHAR)ExAllocatePoolWithTag( NonPagedPoolCacheAligned, ZeroBytes, 'eZcC' ); 03477 } 03478 03479 // 03480 // If we cannot get even that much, then let's write a sector at a time. 03481 // 03482 03483 if (Zeros == NULL) { 03484 03485 ZeroBytes = IoGetRelatedDeviceObject(FileObject)->SectorSize; 03486 Zeros = (PCHAR)ExAllocatePoolWithTag( NonPagedPoolCacheAligned, ZeroBytes, 'eZcC' ); 03487 03488 // 03489 // If we cannot get even the minimum, we have to give up. 03490 // 03491 03492 if (Zeros == NULL) { 03493 ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES ); 03494 } 03495 } 03496 03497 // 03498 // Allocate and initialize an Mdl to describe the zeros 03499 // we need to transfer. 
Allocate to cover the maximum 03500 // size required, and we will use and reuse it in the 03501 // loop below, initialized correctly. 03502 // 03503 03504 ZeroTransfer = ZeroBytes; 03505 ZeroMdl = IoAllocateMdl( Zeros, ZeroBytes, FALSE, FALSE, NULL ); 03506 03507 ASSERT( (ZeroTransfer & SectorMask) == 0 ); 03508 03509 if (ZeroMdl == NULL) { 03510 ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES ); 03511 } 03512 03513 // 03514 // Now we will lock and map the allocated pages. 03515 // 03516 03517 MmBuildMdlForNonPagedPool( ZeroMdl ); 03518 03519 ASSERT( ZeroMdl->MappedSystemVa == Zeros ); 03520 } 03521 03522 #ifdef MIPS 03523 #ifdef MIPS_PREFILL 03524 RtlFillMemory( Zeros, ZeroBytes, 0xDD ); 03525 KeSweepDcache( TRUE ); 03526 #endif 03527 #endif 03528 03529 // 03530 // Zero the buffer now. 03531 // 03532 03533 RtlZeroMemory( Zeros, ZeroBytes ); 03534 03535 // 03536 // We have a mapped and zeroed range backed by an MDL to use. Note the 03537 // size we have for cleanup, since we will possibly wind this down 03538 // over the operation. 03539 03540 03541 ASSERT( MmGetSystemAddressForMdl(ZeroMdl) ); 03542 MaxBytesMappedInMdl = ZeroMdl->ByteCount; 03543 03544 // 03545 // Now loop to write buffers full of zeros through to the file 03546 // until we reach the starting Vbn for the transfer. 03547 // 03548 03549 ASSERT( ZeroTransfer != 0 && 03550 (ZeroTransfer & SectorMask) == 0 && 03551 (SizeLeft.LowPart & SectorMask) == 0 ); 03552 03553 while ( SizeLeft.QuadPart != 0 ) { 03554 03555 IO_STATUS_BLOCK IoStatus; 03556 NTSTATUS Status; 03557 KEVENT Event; 03558 03559 // 03560 // See if we really need to write that many zeros, and 03561 // trim the size back if not. 03562 // 03563 03564 if ( (LONGLONG)ZeroTransfer > SizeLeft.QuadPart ) { 03565 03566 ZeroTransfer = SizeLeft.LowPart; 03567 } 03568 03569 // 03570 // (Re)initialize the kernel event to FALSE. 
03571 // 03572 03573 KeInitializeEvent( &Event, NotificationEvent, FALSE ); 03574 03575 // 03576 // Initiate and wait for the synchronous transfer. 03577 // 03578 03579 ZeroMdl->ByteCount = ZeroTransfer; 03580 03581 Status = IoSynchronousPageWrite( FileObject, 03582 ZeroMdl, 03583 &FOffset, 03584 &Event, 03585 &IoStatus ); 03586 03587 // 03588 // If pending is returned (which is a successful status), 03589 // we must wait for the request to complete. 03590 // 03591 03592 if (Status == STATUS_PENDING) { 03593 KeWaitForSingleObject( &Event, 03594 Executive, 03595 KernelMode, 03596 FALSE, 03597 (PLARGE_INTEGER)NULL); 03598 } 03599 03600 03601 // 03602 // If we got an error back in Status, then the Iosb 03603 // was not written, so we will just copy the status 03604 // there, then test the final status after that. 03605 // 03606 03607 if (!NT_SUCCESS(Status)) { 03608 ExRaiseStatus( Status ); 03609 } 03610 03611 if (!NT_SUCCESS(IoStatus.Status)) { 03612 ExRaiseStatus( IoStatus.Status ); 03613 } 03614 03615 // 03616 // If we succeeded, then update where we are at by how much 03617 // we wrote, and loop back to see if there is more. 03618 // 03619 03620 FOffset.QuadPart = FOffset.QuadPart + (LONGLONG)ZeroTransfer; 03621 SizeLeft.QuadPart = SizeLeft.QuadPart - (LONGLONG)ZeroTransfer; 03622 } 03623 } 03624 finally{ 03625 03626 // 03627 // Clean up anything from zeroing pages on a noncached 03628 // write. 03629 // 03630 03631 if (ZeroMdl != NULL) { 03632 03633 if ((MaxBytesMappedInMdl != 0) && 03634 !FlagOn(ZeroMdl->MdlFlags, MDL_SOURCE_IS_NONPAGED_POOL)) { 03635 ZeroMdl->ByteCount = MaxBytesMappedInMdl; 03636 MmUnmapLockedPages (ZeroMdl->MappedSystemVa, ZeroMdl); 03637 } 03638 03639 IoFreeMdl( ZeroMdl ); 03640 } 03641 03642 if (AggressiveZero) { 03643 InterlockedDecrement( &CcAggressiveZeroCount ); 03644 } 03645 03646 if (Zeros != NULL) { 03647 ExFreePool( Zeros ); 03648 } 03649 03650 DebugTrace(-1, me, "CcZeroData -> TRUE\n", 0 ); 03651 } 03652 03653 return TRUE; 03654 }

NTKERNELAPI VOID CcZeroEndOfLastPage IN PFILE_OBJECT  FileObject  ) 
 

Definition at line 2804 of file fssup.c.

References _SHARED_CACHE_MAP::ActiveVacb, ASSERT, CcAcquireMasterLock, CcDecrementOpenCount, CcDirtySharedCacheMapList, CcFlushCache(), CcFreeActiveVacb(), CcIncrementOpenCount, CcPurgeCacheSection(), CcReleaseMasterLock, CcScheduleLazyWriteScan(), _SHARED_CACHE_MAP::DirtyPages, FALSE, FlagOn, _SHARED_CACHE_MAP::Flags, FSRTL_FLAG2_PURGE_WHEN_MAPPED, FSRTL_FLAG_ADVANCED_HEADER, FSRTL_FLAG_USER_MAPPED_FILE, FsRtlAcquireFileExclusive(), FsRtlReleaseFile(), GetActiveVacbAtDpcLevel, LazyWriter, _SHARED_CACHE_MAP::NeedToZero, NULL, _SHARED_CACHE_MAP::OpenCount, _LAZY_WRITER::OtherWork, _LAZY_WRITER::ScanActive, SetFlag, _SHARED_CACHE_MAP_LIST_CURSOR::SharedCacheMapLinks, _SHARED_CACHE_MAP::SharedCacheMapLinks, TRUE, and WRITE_QUEUED.

Referenced by MiCreateImageFileMap(), and NtCreateSection().

02810 : 02811 02812 This routine is only called by Mm before mapping a user view to 02813 a section. If there is an uninitialized page at the end of the 02814 file, we zero it by freeing that page. 02815 02816 Parameters: 02817 02818 FileObject - File object for section to be mapped 02819 02820 Return Value: 02821 02822 None 02823 --*/ 02824 02825 { 02826 PSHARED_CACHE_MAP SharedCacheMap; 02827 ULONG ActivePage; 02828 ULONG PageIsDirty; 02829 KIRQL OldIrql; 02830 PVOID NeedToZero = NULL; 02831 PVACB ActiveVacb = NULL; 02832 IO_STATUS_BLOCK Iosb; 02833 BOOLEAN PurgeResult; 02834 02835 // 02836 // See if we have an active Vacb, that we need to free. 02837 // 02838 02839 FsRtlAcquireFileExclusive( FileObject ); 02840 CcAcquireMasterLock( &OldIrql ); 02841 SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap; 02842 02843 if (SharedCacheMap != NULL) { 02844 02845 // 02846 // See if there is an active vacb. 02847 // 02848 02849 if ((SharedCacheMap->ActiveVacb != NULL) || ((NeedToZero = SharedCacheMap->NeedToZero) != NULL)) { 02850 02851 CcIncrementOpenCount( SharedCacheMap, 'peZS' ); 02852 GetActiveVacbAtDpcLevel( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty ); 02853 } 02854 } 02855 02856 CcReleaseMasterLock( OldIrql ); 02857 02858 // 02859 // Remember in FsRtl header is there is a user section. 02860 // If this is an advanced header then also acquire the mutex to access 02861 // this field. 
02862 // 02863 02864 if (FlagOn( ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->Flags, 02865 FSRTL_FLAG_ADVANCED_HEADER )) { 02866 02867 ExAcquireFastMutex( ((PFSRTL_ADVANCED_FCB_HEADER)FileObject->FsContext)->FastMutex ); 02868 02869 SetFlag( ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->Flags, 02870 FSRTL_FLAG_USER_MAPPED_FILE ); 02871 02872 ExReleaseFastMutex( ((PFSRTL_ADVANCED_FCB_HEADER)FileObject->FsContext)->FastMutex ); 02873 02874 } else { 02875 02876 SetFlag( ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->Flags, 02877 FSRTL_FLAG_USER_MAPPED_FILE ); 02878 } 02879 02880 // 02881 // Free the active vacb now so we don't deadlock if we have to purge 02882 // 02883 02884 02885 if ((ActiveVacb != NULL) || (NeedToZero != NULL)) { 02886 CcFreeActiveVacb( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty ); 02887 } 02888 02889 02890 if (FlagOn( ((PFSRTL_COMMON_FCB_HEADER)FileObject->FsContext)->Flags2, FSRTL_FLAG2_PURGE_WHEN_MAPPED )) { 02891 02892 if (FileObject->SectionObjectPointer->SharedCacheMap) { 02893 ASSERT( ((PSHARED_CACHE_MAP)(FileObject->SectionObjectPointer->SharedCacheMap))->VacbActiveCount == 0 ); 02894 } 02895 02896 CcFlushCache( FileObject->SectionObjectPointer, NULL, 0, &Iosb ); 02897 PurgeResult = CcPurgeCacheSection( FileObject->SectionObjectPointer, NULL, 0, FALSE ); 02898 02899 if (FileObject->SectionObjectPointer->SharedCacheMap) { 02900 ASSERT( ((PSHARED_CACHE_MAP)(FileObject->SectionObjectPointer->SharedCacheMap))->VacbActiveCount == 0 ); 02901 } 02902 } 02903 02904 02905 FsRtlReleaseFile( FileObject ); 02906 02907 // 02908 // If the file is cached and we have a Vacb to free, we need to 02909 // use the lazy writer callback to synchronize so no one will be 02910 // extending valid data. 02911 // 02912 02913 if ((ActiveVacb != NULL) || (NeedToZero != NULL)) { 02914 02915 // 02916 // Serialize again to decrement the open count. 
02917 // 02918 02919 CcAcquireMasterLock( &OldIrql ); 02920 02921 CcDecrementOpenCount( SharedCacheMap, 'peZF' ); 02922 02923 if ((SharedCacheMap->OpenCount == 0) && 02924 !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) && 02925 (SharedCacheMap->DirtyPages == 0)) { 02926 02927 // 02928 // Move to the dirty list. 02929 // 02930 02931 RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks ); 02932 InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks, 02933 &SharedCacheMap->SharedCacheMapLinks ); 02934 02935 // 02936 // Make sure the Lazy Writer will wake up, because we 02937 // want him to delete this SharedCacheMap. 02938 // 02939 02940 LazyWriter.OtherWork = TRUE; 02941 if (!LazyWriter.ScanActive) { 02942 CcScheduleLazyWriteScan(); 02943 } 02944 } 02945 02946 CcReleaseMasterLock( OldIrql ); 02947 } 02948 }


Variable Documentation

ULONG CcCopyReadNoWait
 

Definition at line 211 of file cache.h.

Referenced by CcCopyRead(), and NtQuerySystemInformation().

ULONG CcCopyReadNoWaitMiss
 

Definition at line 213 of file cache.h.

Referenced by CcCopyRead(), and NtQuerySystemInformation().

ULONG CcCopyReadWait
 

Definition at line 212 of file cache.h.

Referenced by CcCopyRead(), CcFastCopyRead(), and NtQuerySystemInformation().

ULONG CcCopyReadWaitMiss
 

Definition at line 214 of file cache.h.

Referenced by CcCopyRead(), CcFastCopyRead(), and NtQuerySystemInformation().

ULONG CcDataFlushes
 

Definition at line 225 of file cache.h.

Referenced by IoSynchronousPageWrite(), and NtQuerySystemInformation().

ULONG CcDataPages
 

Definition at line 226 of file cache.h.

Referenced by IoSynchronousPageWrite(), and NtQuerySystemInformation().

ULONG CcFastMdlReadNotPossible
 

Definition at line 197 of file cache.h.

Referenced by FsRtlMdlReadDev(), and NtQuerySystemInformation().

ULONG CcFastMdlReadNoWait
 

Definition at line 194 of file cache.h.

Referenced by NtQuerySystemInformation().

ULONG CcFastMdlReadResourceMiss
 

Definition at line 196 of file cache.h.

Referenced by NtQuerySystemInformation().

ULONG CcFastMdlReadWait
 

Definition at line 195 of file cache.h.

Referenced by FsRtlMdlReadDev(), and NtQuerySystemInformation().

ULONG CcFastReadNotPossible
 

Definition at line 192 of file cache.h.

Referenced by FsRtlCopyRead(), and NtQuerySystemInformation().

ULONG CcFastReadNoWait
 

Definition at line 189 of file cache.h.

Referenced by FsRtlCopyRead(), and NtQuerySystemInformation().

ULONG CcFastReadResourceMiss
 

Definition at line 191 of file cache.h.

Referenced by FsRtlCopyRead(), and NtQuerySystemInformation().

ULONG CcFastReadWait
 

Definition at line 190 of file cache.h.

Referenced by FsRtlCopyRead(), and NtQuerySystemInformation().

ULONG CcLazyWriteIos
 

Definition at line 223 of file cache.h.

Referenced by CcFlushCache(), and NtQuerySystemInformation().

ULONG CcLazyWritePages
 

Definition at line 224 of file cache.h.

Referenced by CcFlushCache(), and NtQuerySystemInformation().

ULONG CcMapDataNoWait
 

Definition at line 199 of file cache.h.

Referenced by CcMapData(), and NtQuerySystemInformation().

ULONG CcMapDataNoWaitMiss
 

Definition at line 201 of file cache.h.

Referenced by CcMapData(), and NtQuerySystemInformation().

ULONG CcMapDataWait
 

Definition at line 200 of file cache.h.

Referenced by CcMapData(), and NtQuerySystemInformation().

ULONG CcMapDataWaitMiss
 

Definition at line 202 of file cache.h.

Referenced by CcMapData(), and NtQuerySystemInformation().

ULONG CcMdlReadNoWait
 

Definition at line 216 of file cache.h.

Referenced by NtQuerySystemInformation().

ULONG CcMdlReadNoWaitMiss
 

Definition at line 218 of file cache.h.

Referenced by NtQuerySystemInformation().

ULONG CcMdlReadWait
 

Definition at line 217 of file cache.h.

Referenced by CcMdlRead(), and NtQuerySystemInformation().

ULONG CcMdlReadWaitMiss
 

Definition at line 219 of file cache.h.

Referenced by CcMdlRead(), and NtQuerySystemInformation().

PULONG CcMissCounter
 

Definition at line 228 of file cache.h.

Referenced by CcCopyRead(), CcFastCopyRead(), CcMapData(), CcMdlRead(), CcPerformReadAhead(), CcPinRead(), CcPreparePinWrite(), and IoPageRead().

ULONG CcPinMappedDataCount
 

Definition at line 204 of file cache.h.

Referenced by CcPinMappedData(), and NtQuerySystemInformation().

ULONG CcPinReadNoWait
 

Definition at line 206 of file cache.h.

Referenced by CcPinRead(), and NtQuerySystemInformation().

ULONG CcPinReadNoWaitMiss
 

Definition at line 208 of file cache.h.

Referenced by CcPinRead(), and NtQuerySystemInformation().

ULONG CcPinReadWait
 

Definition at line 207 of file cache.h.

Referenced by CcPinRead(), and NtQuerySystemInformation().

ULONG CcPinReadWaitMiss
 

Definition at line 209 of file cache.h.

Referenced by CcPinRead(), and NtQuerySystemInformation().

ULONG CcReadAheadIos
 

Definition at line 221 of file cache.h.

Referenced by CcPerformReadAhead(), and NtQuerySystemInformation().

ULONG CcThrowAway
 

Definition at line 183 of file cache.h.

Referenced by CcCopyRead(), CcFastCopyRead(), CcMapData(), CcMdlRead(), CcPerformReadAhead(), CcPinRead(), and CcPreparePinWrite().


Generated on Sat May 15 19:42:59 2004 for test by doxygen 1.3.7