
cachesub.c File Reference

#include "cc.h"


Defines

#define BugCheckFileId   (CACHE_BUG_CHECK_CACHESUB)
#define me   0x00000002
#define RetryError(STS)   (((STS) == STATUS_VERIFY_REQUIRED) || ((STS) == STATUS_FILE_LOCK_CONFLICT))

Functions

BOOLEAN CcFindBcb (IN PSHARED_CACHE_MAP SharedCacheMap, IN PLARGE_INTEGER FileOffset, IN OUT PLARGE_INTEGER BeyondLastByte, OUT PBCB *Bcb)
PBCB CcAllocateInitializeBcb (IN OUT PSHARED_CACHE_MAP SharedCacheMap OPTIONAL, IN OUT PBCB AfterBcb, IN PLARGE_INTEGER FileOffset, IN PLARGE_INTEGER TrialLength)
NTSTATUS CcSetValidData (IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER ValidDataLength)
BOOLEAN CcAcquireByteRangeForWrite (IN PSHARED_CACHE_MAP SharedCacheMap, IN PLARGE_INTEGER TargetOffset OPTIONAL, IN ULONG TargetLength, OUT PLARGE_INTEGER FileOffset, OUT PULONG Length, OUT PBCB *FirstBcb)
VOID CcReleaseByteRangeFromWrite (IN PSHARED_CACHE_MAP SharedCacheMap, IN PLARGE_INTEGER FileOffset, IN ULONG Length, IN PBCB FirstBcb, IN BOOLEAN VerifyRequired)
PBITMAP_RANGE CcFindBitmapRangeToDirty (IN PMBCB Mbcb, IN LONGLONG Page, IN PULONG *FreePageForSetting)
PBITMAP_RANGE CcFindBitmapRangeToClean (IN PMBCB Mbcb, IN LONGLONG Page)
BOOLEAN CcLogError (IN PDEVICE_OBJECT Device, IN NTSTATUS Error, IN NTSTATUS DeviceError, IN PUNICODE_STRING FileName)
BOOLEAN CcPinFileData (IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER FileOffset, IN ULONG Length, IN BOOLEAN ReadOnly, IN BOOLEAN WriteOnly, IN ULONG Flags, OUT PBCB *Bcb, OUT PVOID *BaseAddress, OUT PLARGE_INTEGER BeyondLastByte)
VOID FASTCALL CcUnpinFileData (IN OUT PBCB Bcb, IN BOOLEAN ReadOnly, IN UNMAP_ACTIONS UnmapAction)
VOID CcSetReadAheadGranularity (IN PFILE_OBJECT FileObject, IN ULONG Granularity)
VOID CcScheduleReadAhead (IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER FileOffset, IN ULONG Length)
VOID FASTCALL CcPerformReadAhead (IN PFILE_OBJECT FileObject)
VOID CcSetDirtyInMask (IN PSHARED_CACHE_MAP SharedCacheMap, IN PLARGE_INTEGER FileOffset, IN ULONG Length)
VOID CcSetDirtyPinnedData (IN PVOID BcbVoid, IN PLARGE_INTEGER Lsn OPTIONAL)
VOID FASTCALL CcWriteBehind (IN PSHARED_CACHE_MAP SharedCacheMap, IN PIO_STATUS_BLOCK IoStatus)
LARGE_INTEGER CcGetFlushedValidData (IN PSECTION_OBJECT_POINTERS SectionObjectPointer, IN BOOLEAN CcInternalCaller)
VOID CcFlushCache (IN PSECTION_OBJECT_POINTERS SectionObjectPointer, IN PLARGE_INTEGER FileOffset OPTIONAL, IN ULONG Length, OUT PIO_STATUS_BLOCK IoStatus OPTIONAL)
PVOID CcRemapBcb (IN PVOID Bcb)
VOID CcRepinBcb (IN PVOID Bcb)
VOID CcUnpinRepinnedBcb (IN PVOID Bcb, IN BOOLEAN WriteThrough, OUT PIO_STATUS_BLOCK IoStatus)
VOID FASTCALL CcDeallocateBcb (IN PBCB Bcb)
BOOLEAN CcMapAndRead (IN PSHARED_CACHE_MAP SharedCacheMap, IN PLARGE_INTEGER FileOffset, IN ULONG Length, IN ULONG ZeroFlags, IN BOOLEAN Wait, IN PVOID BaseAddress)
VOID CcFreeActiveVacb (IN PSHARED_CACHE_MAP SharedCacheMap, IN PVACB ActiveVacb OPTIONAL, IN ULONG ActivePage, IN ULONG PageIsDirty)
VOID CcMapAndCopy (IN PSHARED_CACHE_MAP SharedCacheMap, IN PVOID UserBuffer, IN PLARGE_INTEGER FileOffset, IN ULONG Length, IN ULONG ZeroFlags, IN BOOLEAN WriteThrough)

Variables

ULONG CcMaxDirtyWrite = 0x10000


Define Documentation

#define BugCheckFileId   (CACHE_BUG_CHECK_CACHESUB)
 

Definition at line 27 of file cachesub.c.

#define me   0x00000002
 

Definition at line 33 of file cachesub.c.

Referenced by CcAcquireByteRangeForWrite(), CcCopyRead(), CcCopyWrite(), CcDeleteSharedCacheMap(), CcFastCopyRead(), CcFastCopyWrite(), CcFindBcb(), CcFlushCache(), CcInitializeCacheMap(), CcLazyWriteScan(), CcMapAndCopy(), CcMapData(), CcMdlRead(), CcMdlReadComplete2(), CcMdlWriteComplete2(), CcPerformReadAhead(), CcPinFileData(), CcPinMappedData(), CcPinRead(), CcPostWorkQueue(), CcPrepareMdlWrite(), CcPreparePinWrite(), CcPurgeCacheSection(), CcReleaseByteRangeFromWrite(), CcScheduleReadAhead(), CcSetDirtyPinnedData(), CcSetFileSizes(), CcSetValidData(), CcUninitializeCacheMap(), CcUnpinData(), CcUnpinDataForThread(), CcUnpinFileData(), CcUnpinRepinnedBcb(), CcWorkerThread(), CcWriteBehind(), and CcZeroData().

#define RetryError(STS)   (((STS) == STATUS_VERIFY_REQUIRED) || ((STS) == STATUS_FILE_LOCK_CONFLICT))
 

Definition at line 39 of file cachesub.c.

Referenced by CcFlushCache(), CcUnpinRepinnedBcb(), and CcWriteBehind().
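
RetryError distinguishes transient conditions (a removable volume needing verification, a file-lock conflict) from hard failures, so write-behind can requeue the work instead of reporting an error. Below is a minimal, self-contained illustration of the pattern its callers follow; everything apart from the macro itself is hypothetical scaffolding, not code from cachesub.c:

    #include <stdio.h>

    typedef int NTSTATUS;
    #define STATUS_SUCCESS            ((NTSTATUS)0x00000000)
    #define STATUS_VERIFY_REQUIRED    ((NTSTATUS)0xC0000016)   /* illustrative values */
    #define STATUS_FILE_LOCK_CONFLICT ((NTSTATUS)0xC0000054)
    #define NT_SUCCESS(S)             ((S) >= 0)

    #define RetryError(STS) (((STS) == STATUS_VERIFY_REQUIRED) || ((STS) == STATUS_FILE_LOCK_CONFLICT))

    /* Hypothetical disposition routine: requeue transient failures, report hard ones. */
    static void DispositionWrite(NTSTATUS Status)
    {
        if (NT_SUCCESS(Status)) {
            printf("flush completed\n");
        } else if (RetryError(Status)) {
            printf("transient failure: requeue and retry later\n");
        } else {
            printf("hard failure: report error %08x\n", (unsigned)Status);
        }
    }

    int main(void)
    {
        DispositionWrite(STATUS_SUCCESS);
        DispositionWrite(STATUS_VERIFY_REQUIRED);          /* gets retried */
        DispositionWrite((NTSTATUS)0xC0000001);            /* gets reported */
        return 0;
    }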


Function Documentation

BOOLEAN CcAcquireByteRangeForWrite ( IN PSHARED_CACHE_MAP SharedCacheMap,
                                     IN PLARGE_INTEGER TargetOffset OPTIONAL,
                                     IN ULONG TargetLength,
                                     OUT PLARGE_INTEGER FileOffset,
                                     OUT PULONG Length,
                                     OUT PBCB *FirstBcb )

Definition at line 2861 of file cachesub.c.

References ASSERT, _BITMAP_RANGE::BasePage, _BCB::BcbLinks, _BCB::BeyondLastByte, _BITMAP_RANGE::Bitmap, _MBCB::BitmapRanges, BooleanFlagOn, _BCB::ByteLength, CACHE_NTC_BCB, CcAcquireMasterLockAtDpcLevel, CcCleanSharedCacheMapList, CcExceptionFilter(), CcFindBcb(), CcFindBitmapRangeToClean(), CcPagesYetToWrite, CcReleaseMasterLockFromDpcLevel, CcTotalDirtyPages, CcUnpinFileData(), DebugTrace, DebugTrace2, _BCB::Dirty, _BITMAP_RANGE::DirtyPages, _MBCB::DirtyPages, DISABLE_WRITE_BEHIND, ExAcquireResourceExclusive, FALSE, _BCB::FileOffset, _BITMAP_RANGE::FirstDirtyPage, FlagOn, _BITMAP_RANGE::LastDirtyPage, _BITMAP_RANGE::Links, MAX_WRITE_BEHIND, MBCB_BITMAP_BLOCK_SIZE, me, MODIFIED_WRITE_DISABLED, _BCB::NodeTypeCode, NULL, PAGE_SHIFT, PAGE_SIZE, _MBCB::PagesToWrite, _BCB::PinCount, _BCB::Resource, _MBCB::ResumeWritePage, SET_CLEAN, TRUE, UNPIN, and VACB_SIZE_OF_FIRST_LEVEL.

Referenced by CcFlushCache().

/*++

Routine Description:

    This routine is called by the Lazy Writer to try to find a contiguous
    range of bytes from the specified SharedCacheMap that are dirty and
    should be flushed.  After flushing, these bytes should be released
    by calling CcReleaseByteRangeFromWrite.

    Dirty ranges are returned in strictly increasing order.

Arguments:

    SharedCacheMap - for the file for which the dirty byte range is sought

    TargetOffset - If specified, then only the specified range is to be flushed.

    TargetLength - If TargetOffset is specified, this completes the range.
                   In any case, this field is zero for the Lazy Writer and
                   nonzero for explicit flush calls.

    FileOffset - Returns the offset for the beginning of the dirty byte range
                 to flush

    Length - Returns the length of bytes in the range.

    FirstBcb - Returns the first Bcb in the list for the range, to be used
               when calling CcReleaseByteRangeFromWrite, or NULL if dirty
               pages were found in the mask Bcb.

Return Value:

    FALSE - if no dirty byte range could be found to match the necessary criteria.

    TRUE - if a dirty byte range is being returned.

--*/

{
    KIRQL OldIrql;
    PMBCB Mbcb;
    PBCB Bcb;
    LARGE_INTEGER LsnToFlushTo = {0, 0};
    BOOLEAN BcbLookasideCheck = FALSE;
    PBITMAP_RANGE BitmapRange;
    PULONG EndPtr;
    PULONG MaskPtr;
    ULONG Mask;
    LONGLONG FirstDirtyPage;
    ULONG OriginalFirstDirtyPage;
    LONGLONG LastDirtyPage = MAXLONGLONG;

    DebugTrace(+1, me, "CcAcquireByteRangeForWrite:\n", 0);
    DebugTrace( 0, me, "    SharedCacheMap = %08lx\n", SharedCacheMap);

    //  Initially clear outputs.

    FileOffset->QuadPart = 0;
    *Length = 0;

    //  We must acquire the SharedCacheMap->BcbSpinLock.

    ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql );

    //  See if there is a simple Mask Bcb, and if there is anything dirty in
    //  it.  If so we will simply handle that case here by processing the bitmap.

    Mbcb = SharedCacheMap->Mbcb;

    if ((Mbcb != NULL) &&
        (Mbcb->DirtyPages != 0) &&
        ((Mbcb->PagesToWrite != 0) || (TargetLength != 0))) {

        //  If a target range was specified (outside call to CcFlush for a range),
        //  then calculate FirstPage and EndPtr based on these inputs.

        if (ARGUMENT_PRESENT(TargetOffset)) {

            FirstDirtyPage = TargetOffset->QuadPart >> PAGE_SHIFT;
            LastDirtyPage = (TargetOffset->QuadPart + TargetLength - 1) >> PAGE_SHIFT;

            //  Find the bitmap range containing the first dirty page.

            BitmapRange = CcFindBitmapRangeToClean( Mbcb, FirstDirtyPage );

            //  If the target range is not dirty, get out.  We may have even
            //  gotten back a nonoverlapping bitmap range.

            if ((LastDirtyPage < (BitmapRange->BasePage + BitmapRange->FirstDirtyPage)) ||
                (FirstDirtyPage > (BitmapRange->BasePage + BitmapRange->LastDirtyPage))) {

                goto Scan_Bcbs;
            }

            if (LastDirtyPage < (BitmapRange->BasePage + BitmapRange->LastDirtyPage)) {
                EndPtr = &BitmapRange->Bitmap[(ULONG)(LastDirtyPage - BitmapRange->BasePage) / 32];
            } else {
                EndPtr = &BitmapRange->Bitmap[BitmapRange->LastDirtyPage / 32];
            }

        //  Otherwise, for the Lazy Writer pick up where we left off.

        } else {

            //  If a length was specified, then it is an explicit flush, and
            //  we want to start with the first dirty page, else the Lazy Writer
            //  starts from the ResumeWritePage.

            FirstDirtyPage = 0;
            if (TargetLength == 0) {
                FirstDirtyPage = Mbcb->ResumeWritePage;
            }

            //  Now find the next (cyclic) dirty page from this point.

            BitmapRange = CcFindBitmapRangeToClean( Mbcb, FirstDirtyPage );

            //  If the page we thought we were looking for is beyond the last dirty page
            //  of this range, then CcFindBitmapRangeToClean must have wrapped back to
            //  the start of the file, and we should resume on the first dirty page of
            //  this range.

            if (FirstDirtyPage > (BitmapRange->BasePage + BitmapRange->LastDirtyPage)) {
                FirstDirtyPage = BitmapRange->BasePage + BitmapRange->FirstDirtyPage;
            }

            EndPtr = &BitmapRange->Bitmap[BitmapRange->LastDirtyPage / 32];
        }

        //  Now we can skip over any clean pages.

        if (FirstDirtyPage < (BitmapRange->BasePage + BitmapRange->FirstDirtyPage)) {
            FirstDirtyPage = BitmapRange->BasePage + BitmapRange->FirstDirtyPage;
        }

        //  Form a few other inputs for our dirty page scan.

        MaskPtr = &BitmapRange->Bitmap[(ULONG)(FirstDirtyPage - BitmapRange->BasePage) / 32];
        Mask = (ULONG)(-1 << (FirstDirtyPage % 32));
        OriginalFirstDirtyPage = (ULONG)(FirstDirtyPage - BitmapRange->BasePage);

        //  Because of the possibility of getting stuck on a "hot spot" which gets
        //  modified over and over, we want to be very careful to resume exactly
        //  at the recorded resume point.  If there is nothing there, then we
        //  fall into the loop below to scan for nonzero long words in the bitmap,
        //  starting at the next longword.

        if ((*MaskPtr & Mask) == 0) {

            //  Before entering the loop, set all mask bits and ensure we increment
            //  from an even Ulong boundary.

            Mask = MAXULONG;
            FirstDirtyPage &= ~31;

            //  To scan the bitmap faster, we scan for entire long words which are
            //  nonzero.

            do {

                MaskPtr += 1;
                FirstDirtyPage += 32;

                //  If we go beyond the end, then we must wrap back to the first
                //  dirty page.  We will just go back to the start of the first
                //  longword.

                if (MaskPtr > EndPtr) {

                    //  We can back up the last dirty page hint to where we
                    //  started scanning, if we are the lazy writer.

                    if (TargetLength == 0) {
                        ASSERT(OriginalFirstDirtyPage >= BitmapRange->FirstDirtyPage);
                        BitmapRange->LastDirtyPage = OriginalFirstDirtyPage - 1;
                    }

                    //  We hit the end of our scan.  Let's assume we are supposed
                    //  to move on to the next range with dirty pages.

                    do {

                        //  Go to the next range.

                        BitmapRange = (PBITMAP_RANGE)BitmapRange->Links.Flink;

                        //  Did we hit the listhead?

                        if (BitmapRange == (PBITMAP_RANGE)&Mbcb->BitmapRanges) {

                            //  If this is an explicit flush, then it is time to get out.

                            if (TargetLength != 0) {
                                goto Scan_Bcbs;
                            }

                            //  Otherwise, we must wrap back to the first range in the
                            //  Lazy Writer Scan.

                            BitmapRange = (PBITMAP_RANGE)BitmapRange->Links.Flink;
                        }

                    } while (BitmapRange->DirtyPages == 0);

                    //  Now we have a new range with dirty pages, but if this is
                    //  an explicit flush of a specified range, we may be done.

                    if ((LastDirtyPage < (BitmapRange->BasePage + BitmapRange->FirstDirtyPage)) ||
                        (FirstDirtyPage > (BitmapRange->BasePage + BitmapRange->LastDirtyPage))) {

                        goto Scan_Bcbs;
                    }

                    //  Otherwise, we need to set up our context to resume scanning
                    //  in this range.

                    MaskPtr = &BitmapRange->Bitmap[BitmapRange->FirstDirtyPage / 32];
                    EndPtr = &BitmapRange->Bitmap[BitmapRange->LastDirtyPage / 32];
                    FirstDirtyPage = BitmapRange->BasePage + (BitmapRange->FirstDirtyPage & ~31);
                    OriginalFirstDirtyPage = BitmapRange->FirstDirtyPage;
                }
            } while (*MaskPtr == 0);
        }

        //  Calculate the first set bit in the mask that we hit on.

        Mask = ~Mask + 1;

        //  Now loop to find the first set bit.

        while ((*MaskPtr & Mask) == 0) {

            Mask <<= 1;
            FirstDirtyPage += 1;
        }

        //  If a TargetOffset was specified, then make sure we do not start
        //  beyond the specified range or a dirty Bcb in the range.

        if (ARGUMENT_PRESENT(TargetOffset)) {

            if (FirstDirtyPage >= ((TargetOffset->QuadPart + TargetLength + PAGE_SIZE - 1) >> PAGE_SHIFT)) {

                goto Scan_Bcbs;
            }

            //  If Bcbs are present on this file, we must go scan to see if they
            //  describe a range that must be written first.  If this is not the
            //  case, we'll hop back and continue building the range from the mask Bcb.
            //
            //  Note that this case will be very rare.  Bcbs are introduced into user
            //  files in limited situations (CcZero) and the reverse is never allowed
            //  to happen.

            if (!IsListEmpty(&SharedCacheMap->BcbList)) {

                BcbLookasideCheck = TRUE;
                goto Scan_Bcbs;
            }
        }

    Accept_Page:

        //  Now loop to count the set bits at that point, clearing them as we
        //  go because we plan to write the corresponding pages.  Stop as soon
        //  as we find a clean page, or we reach our maximum write size.  Of
        //  course we want to ignore long word boundaries and keep trying to
        //  extend the write.  We do not check for wrapping around the end of
        //  the bitmap here, because we guarantee some zero bits at the end
        //  in CcSetDirtyInMask.

        while (((*MaskPtr & Mask) != 0) && (*Length < (MAX_WRITE_BEHIND / PAGE_SIZE)) &&
               (!ARGUMENT_PRESENT(TargetOffset) || ((FirstDirtyPage + *Length) <
                (ULONG)((TargetOffset->QuadPart + TargetLength + PAGE_SIZE - 1) >> PAGE_SHIFT)))) {

            ASSERT(MaskPtr <= (&BitmapRange->Bitmap[BitmapRange->LastDirtyPage / 32]));

            *MaskPtr -= Mask;
            *Length += 1;
            Mask <<= 1;

            if (Mask == 0) {

                MaskPtr += 1;
                Mask = 1;

                if (MaskPtr > EndPtr) {
                    break;
                }
            }
        }

        //  Now reduce the count of pages we were supposed to write this time,
        //  possibly clearing this count.

        if (*Length < Mbcb->PagesToWrite) {

            Mbcb->PagesToWrite -= *Length;

        } else {

            Mbcb->PagesToWrite = 0;
        }

        //  Reduce the dirty page counts by the number of pages we just cleared.

        ASSERT(Mbcb->DirtyPages >= *Length);
        Mbcb->DirtyPages -= *Length;
        BitmapRange->DirtyPages -= *Length;

        CcAcquireMasterLockAtDpcLevel();
        CcTotalDirtyPages -= *Length;
        SharedCacheMap->DirtyPages -= *Length;

        //  Normally we need to reduce CcPagesYetToWrite appropriately.

        if (CcPagesYetToWrite > *Length) {
            CcPagesYetToWrite -= *Length;
        } else {
            CcPagesYetToWrite = 0;
        }

        //  If we took out the last dirty page, then move the SharedCacheMap
        //  back to the clean list.

        if (SharedCacheMap->DirtyPages == 0) {

            RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );
            InsertTailList( &CcCleanSharedCacheMapList,
                            &SharedCacheMap->SharedCacheMapLinks );
        }
        CcReleaseMasterLockFromDpcLevel();

        //  If the number of dirty pages for the Mbcb went to zero, we can reset
        //  our hint fields now.

        if (BitmapRange->DirtyPages == 0) {

            BitmapRange->FirstDirtyPage = MAXULONG;
            BitmapRange->LastDirtyPage = 0;

            //  Assume this is a large file and that the resume point should
            //  be at the beginning of the next range.  In all cases if the resume
            //  point is set too high, the next resume will just wrap back to 0 anyway.

            Mbcb->ResumeWritePage = BitmapRange->BasePage + (MBCB_BITMAP_BLOCK_SIZE * 8);

        //  Otherwise we have to update the hint fields.

        } else {

            //  Advance the first dirty page hint if we can.

            if (BitmapRange->FirstDirtyPage == OriginalFirstDirtyPage) {

                BitmapRange->FirstDirtyPage = (ULONG)(FirstDirtyPage - BitmapRange->BasePage) + *Length;
            }

            //  Set to resume the next scan at the next bit for the Lazy Writer.

            if (TargetLength == 0) {

                Mbcb->ResumeWritePage = FirstDirtyPage + *Length;
            }
        }

        //  We can save a callback by letting our caller know when
        //  we have no more pages to write.

        if (IsListEmpty(&SharedCacheMap->BcbList)) {
            SharedCacheMap->PagesToWrite = Mbcb->PagesToWrite;
        }

        ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );

        //  Now form all of our outputs.  We calculated *Length as a page count,
        //  but our caller wants it in bytes.

        *Length <<= PAGE_SHIFT;
        FileOffset->QuadPart = (LONGLONG)FirstDirtyPage << PAGE_SHIFT;
        *FirstBcb = NULL;

        DebugTrace2(0, me, "    <FileOffset = %08lx, %08lx\n", FileOffset->LowPart,
                    FileOffset->HighPart );
        DebugTrace( 0, me, "    <Length = %08lx\n", *Length );
        DebugTrace(-1, me, "CcAcquireByteRangeForWrite -> TRUE\n", 0 );

        return TRUE;
    }

    //  We get here if there is no Mbcb or no dirty pages in it.  Note that we
    //  wouldn't even be here if there were no dirty pages in this SharedCacheMap.
    //
    //  Now point to the last Bcb in the list, and loop until we hit one of the
    //  breaks below or the beginning of the list.

Scan_Bcbs:

    //  Use while TRUE to handle the case where the current target range wraps
    //  (escape is at the bottom).

    while (TRUE) {

        Bcb = CONTAINING_RECORD( SharedCacheMap->BcbList.Blink, BCB, BcbLinks );

        //  If we are to resume from a nonzero FileOffset, call CcFindBcb
        //  to get a quicker start.  This is only useful on files that make
        //  use of significant pinned access, of course.

        if (FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED)) {

            PLARGE_INTEGER StartingOffset;

            if (ARGUMENT_PRESENT(TargetOffset)) {
                StartingOffset = TargetOffset;
            } else {
                StartingOffset = (PLARGE_INTEGER)&SharedCacheMap->BeyondLastFlush;
            }

            if (StartingOffset->QuadPart != 0) {

                LARGE_INTEGER StartingOffsetBias;

                StartingOffsetBias.QuadPart = StartingOffset->QuadPart + PAGE_SIZE;

                //  Position ourselves.  If we did not find a Bcb for the page, then
                //  a lower FileOffset was returned, so we want to move forward one.

                if (!CcFindBcb( SharedCacheMap,
                                StartingOffset,
                                &StartingOffsetBias,
                                &Bcb )) {
                    Bcb = CONTAINING_RECORD( Bcb->BcbLinks.Blink, BCB, BcbLinks );
                }
            }
        }

        while (&Bcb->BcbLinks != &SharedCacheMap->BcbList) {

            //  Skip over this item if it is a listhead.

            if (Bcb->NodeTypeCode != CACHE_NTC_BCB) {

                Bcb = CONTAINING_RECORD( Bcb->BcbLinks.Blink, BCB, BcbLinks );
                continue;
            }

            //  If we are doing a specified range, then get out if we hit a
            //  higher Bcb.

            if (ARGUMENT_PRESENT(TargetOffset) &&
                ((TargetOffset->QuadPart + TargetLength) <= Bcb->FileOffset.QuadPart)) {

                break;
            }

            //  If we have not started a run, then see if this Bcb is a candidate
            //  to start one.

            if (*Length == 0) {

                //  See if the Bcb is dirty, and is in our specified range, if
                //  there is one.

                if (!Bcb->Dirty ||
                    (ARGUMENT_PRESENT(TargetOffset) && (TargetOffset->QuadPart >= Bcb->BeyondLastByte.QuadPart)) ||
                    (!ARGUMENT_PRESENT(TargetOffset) && (Bcb->FileOffset.QuadPart < SharedCacheMap->BeyondLastFlush))) {

                    Bcb = CONTAINING_RECORD( Bcb->BcbLinks.Blink, BCB, BcbLinks );
                    continue;
                }

                //  If we have a candidate dirty page from the mask Bcb, see
                //  if it describes a prior range.  We must decide to return
                //  the first dirty range.

                if (BcbLookasideCheck && FirstDirtyPage <= (ULONG)(Bcb->FileOffset.QuadPart >> PAGE_SHIFT)) {
                    goto Accept_Page;
                }
            }

            //  Else, if we have started a run, then if this guy cannot be
            //  appended to the run, then break.  Note that we ignore the
            //  Bcb's modification time stamp here to simplify the test.
            //
            //  If the Bcb is currently pinned, then there is no sense in causing
            //  contention, so we will skip over this guy as well.
            //
            //  Finally, if the new Bcb is in the next Vacb level, we will skip it
            //  to avoid problems with Bcb listheads going away in the middle of
            //  CcReleaseByteRangeFromWrite.

            else {
                if (!Bcb->Dirty || ( Bcb->FileOffset.QuadPart != ( FileOffset->QuadPart + (LONGLONG)*Length)) ||
                    (*Length + Bcb->ByteLength > MAX_WRITE_BEHIND) ||
                    (Bcb->PinCount != 0) ||
                    ((Bcb->FileOffset.QuadPart & (VACB_SIZE_OF_FIRST_LEVEL - 1)) == 0)) {

                    break;
                }
            }

            //  Increment PinCount to prevent the Bcb from going away once the
            //  SpinLock is released, or we set it clean for the case where
            //  modified write is allowed.

            Bcb->PinCount += 1;

            //  Release the SpinLock before waiting on the resource.

            ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );

            if (FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED) &&
                !FlagOn(SharedCacheMap->Flags, DISABLE_WRITE_BEHIND)) {

                //  Now acquire the Bcb exclusive, so that we know that nobody
                //  has it pinned and thus no one can be modifying the described
                //  buffer.  To acquire the first Bcb in a run, we can afford
                //  to wait, because we are not holding any resources.  However,
                //  if we already have a Bcb, then we better not wait, because
                //  someone could have this Bcb pinned, and then wait for the
                //  Bcb we already have exclusive.
                //
                //  For streams for which we have not disabled modified page
                //  writing, we do not need to acquire this resource, and the
                //  foreground processing will not be acquiring the Bcb either.

                if (!ExAcquireResourceExclusive( &Bcb->Resource,
                                                 (BOOLEAN)(*Length == 0) )) {

                    DebugTrace( 0, me, "Could not acquire 2nd Bcb\n", 0 );

                    //  Release the Bcb count we took out above.  We say
                    //  ReadOnly = TRUE since we do not own the resource,
                    //  and SetClean = FALSE because we just want to decrement
                    //  the count.

                    CcUnpinFileData( Bcb, TRUE, UNPIN );

                    //  When we leave the loop, we have to have the spin lock.

                    ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql );
                    break;
                }

                ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql );

                //  If someone has the file open WriteThrough, then the Bcb may no
                //  longer be dirty.  If so, call CcUnpinFileData to decrement the
                //  PinCount we incremented and free the resource.

                if (!Bcb->Dirty) {

                    //  Release the spinlock so that we can call CcUnpinFileData.

                    ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );

                    CcUnpinFileData( Bcb, FALSE, UNPIN );

                    ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql );

                    //  Now if we already have some data we can just break to return
                    //  it, otherwise we have to restart the scan, since our Bcb
                    //  may have gone away.

                    if (*Length != 0) {
                        break;
                    }
                    else {

                        Bcb = CONTAINING_RECORD( SharedCacheMap->BcbList.Blink, BCB, BcbLinks );
                        continue;
                    }
                }

            //  If we are not in the disable modified write mode (normal user data)
            //  then we must set the buffer clean before doing the write, since we
            //  are unsynchronized with anyone producing dirty data.  That way if we,
            //  for example, are writing data out while it is actively being changed,
            //  at least the changer will mark the buffer dirty afterwards and cause
            //  us to write it again later.

            } else {

                CcUnpinFileData( Bcb, TRUE, SET_CLEAN );

                ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql );
            }

            DebugTrace( 0, me, "Adding Bcb = %08lx to run\n", Bcb );

            //  No matter what, once we've reached this point we are returning
            //  a range from the Bcbs.

            BcbLookasideCheck = FALSE;

            //  Update all of our return values.  Note that FirstBcb refers to the
            //  FirstBcb in terms of how the Bcb list is ordered.  Since the Bcb list
            //  is ordered by descending file offsets, FirstBcb will actually return
            //  the Bcb with the highest FileOffset.

            if (*Length == 0) {
                *FileOffset = Bcb->FileOffset;
            }
            *FirstBcb = Bcb;
            *Length += Bcb->ByteLength;

            //  If there is a log file flush callback for this stream, then we must
            //  remember the largest Lsn we are about to flush.

            if ((SharedCacheMap->FlushToLsnRoutine != NULL) &&
                (Bcb->NewestLsn.QuadPart > LsnToFlushTo.QuadPart)) {

                LsnToFlushTo = Bcb->NewestLsn;
            }

            Bcb = CONTAINING_RECORD( Bcb->BcbLinks.Blink, BCB, BcbLinks );
        }

        //  If we have a candidate dirty page from the mask Bcb, accept it
        //  since no Bcb has been found.

        if (BcbLookasideCheck) {

            ASSERT( *Length == 0 );
            goto Accept_Page;
        }

        //  If we found something, update our last flush range and reduce
        //  PagesToWrite.

        if (*Length != 0) {

            //  If this is the Lazy Writer, then update BeyondLastFlush and
            //  the PagesToWrite target.

            if (!ARGUMENT_PRESENT(TargetOffset)) {

                SharedCacheMap->BeyondLastFlush = FileOffset->QuadPart + *Length;

                if (SharedCacheMap->PagesToWrite > (*Length >> PAGE_SHIFT)) {
                    SharedCacheMap->PagesToWrite -= (*Length >> PAGE_SHIFT);
                } else {
                    SharedCacheMap->PagesToWrite = 0;
                }
            }

            break;

        //  Else, if we scanned the entire file, get out - nothing to write now.

        } else if ((SharedCacheMap->BeyondLastFlush == 0) || ARGUMENT_PRESENT(TargetOffset)) {
            break;
        }

        //  Otherwise, we may have not found anything because there is nothing
        //  beyond the last flush.  In that case it is time to wrap back to 0
        //  and keep scanning.

        SharedCacheMap->BeyondLastFlush = 0;
    }

    //  Now release the spinlock while we go off and do the I/O.

    ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );

    //  If we need to flush to some Lsn, this is the time to do it now
    //  that we have found the largest Lsn and freed the spin lock.

    if (LsnToFlushTo.QuadPart != 0) {

        try {

            (*SharedCacheMap->FlushToLsnRoutine) ( SharedCacheMap->LogHandle,
                                                   LsnToFlushTo );
        } except( CcExceptionFilter( GetExceptionCode() )) {

            //  If there was an error, it will be raised.  We cannot
            //  write anything until we successfully flush the log
            //  file, so we will release everything here and just
            //  return with 0 bytes.

            LARGE_INTEGER LastOffset;
            PBCB NextBcb;

            //  Now loop to free up all of the Bcbs.  Set the time
            //  stamps to 0, so that we are guaranteed to try to
            //  flush them again on the next sweep.

            do {
                NextBcb = CONTAINING_RECORD( (*FirstBcb)->BcbLinks.Flink, BCB, BcbLinks );

                //  Skip over any listheads.

                if ((*FirstBcb)->NodeTypeCode == CACHE_NTC_BCB) {

                    LastOffset = (*FirstBcb)->FileOffset;

                    CcUnpinFileData( *FirstBcb,
                                     BooleanFlagOn(SharedCacheMap->Flags, DISABLE_WRITE_BEHIND),
                                     UNPIN );
                }

                *FirstBcb = NextBcb;
            } while (FileOffset->QuadPart != LastOffset.QuadPart);

            //  Show we did not acquire anything.

            *Length = 0;
        }
    }

    //  If we got anything, return TRUE.

    DebugTrace2(0, me, "    <FileOffset = %08lx, %08lx\n", FileOffset->LowPart,
                FileOffset->HighPart );
    DebugTrace( 0, me, "    <Length = %08lx\n", *Length );
    DebugTrace(-1, me, "CcAcquireByteRangeForWrite -> %02lx\n", *Length != 0 );

    return ((BOOLEAN)(*Length != 0));
}
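
The bitmap half of this routine is, at bottom, a classic two-phase bit scan: skip whole zero ULONGs to find a word containing a set bit, locate the first set bit, then clear and count consecutive set bits to build the run of dirty pages. The following self-contained C sketch reproduces just that core over a single flat bitmap; it is an illustration only (no locking, no bitmap ranges, no resume hints), not the kernel code:

    #include <stdint.h>
    #include <stdio.h>

    /* Find the first run of set bits at or after StartPage, clear the run,
       and return its starting page; *RunLength receives the page count
       (0 if nothing was found). */
    static uint32_t FindAndClearDirtyRun(uint32_t *Bitmap, uint32_t Words,
                                         uint32_t StartPage, uint32_t *RunLength)
    {
        uint32_t Word = StartPage / 32;
        uint32_t Mask = UINT32_C(0xFFFFFFFF) << (StartPage % 32);
        uint32_t Page;

        *RunLength = 0;

        /* Phase 1: skip whole words with no set bit at/after the start. */
        while (Word < Words && (Bitmap[Word] & Mask) == 0) {
            Word += 1;
            Mask = UINT32_C(0xFFFFFFFF);
        }
        if (Word >= Words) {
            return 0;                       /* nothing dirty */
        }

        /* Phase 2: locate the first set bit within this word. */
        Page = (Word == StartPage / 32) ? StartPage : Word * 32;
        Mask = UINT32_C(1) << (Page % 32);
        while ((Bitmap[Word] & Mask) == 0) {
            Mask <<= 1;
            Page += 1;
        }

        /* Phase 3: clear and count consecutive set bits, crossing words. */
        while (Word < Words && (Bitmap[Word] & Mask) != 0) {
            Bitmap[Word] &= ~Mask;
            *RunLength += 1;
            Mask <<= 1;
            if (Mask == 0) {                /* crossed a 32-bit boundary */
                Word += 1;
                Mask = 1;
            }
        }
        return Page;
    }

    int main(void)
    {
        uint32_t Bitmap[2] = { 0xF0000000u, 0x00000003u };  /* pages 28..33 dirty */
        uint32_t Length;
        uint32_t First = FindAndClearDirtyRun(Bitmap, 2, 0, &Length);
        printf("run starts at page %u, %u pages\n", First, Length);  /* 28, 6 */
        return 0;
    }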

PBCB CcAllocateInitializeBcb ( IN OUT PSHARED_CACHE_MAP SharedCacheMap OPTIONAL,
                               IN OUT PBCB AfterBcb,
                               IN PLARGE_INTEGER FileOffset,
                               IN PLARGE_INTEGER TrialLength )

Definition at line 5541 of file cachesub.c.

References ASSERT, _BCB::BcbLinks, _LAZY_WRITER::BcbZone, _BCB::BeyondLastByte, _BCB::ByteLength, CACHE_NTC_BCB, CcAcquireVacbLockAtDpcLevel, CcBcbSpinLock, CcBugCheck, CcFindBcb(), CcLockVacbLevel, CcReleaseVacbLockFromDpcLevel, DISABLE_WRITE_BEHIND, ExAllocateFromZone, ExAllocatePoolWithTag, ExDisableResourceBoost, ExExtendZone(), ExInitializeResource, _BCB::FileOffset, _ERESOURCE::Flag, FlagOn, LazyWriter, MmLargeSystem, MmQuerySystemSize(), _BCB::NodeIsInZone, _BCB::NodeTypeCode, NonPagedPool, NT_SUCCESS, NULL, PAGE_SIZE, _BCB::PinCount, _BCB::Resource, ResourceNeverExclusive, SetFlag, _BCB::SharedCacheMap, TRUE, and VACB_SIZE_OF_FIRST_LEVEL.

Referenced by CcPinFileData(), and CcSetDirtyInMask().

/*++

Routine Description:

    This routine allocates and initializes a Bcb to describe the specified
    byte range, and inserts it into the Bcb List of the specified Shared
    Cache Map.  The Bcb List spin lock must currently be acquired.

    BcbSpinLock must be acquired on entry.

Arguments:

    SharedCacheMap - Supplies the SharedCacheMap for the new Bcb.

    AfterBcb - Supplies where in the descending-order BcbList the new Bcb
               should be inserted: either the ListHead (masquerading as
               a Bcb) or a Bcb.

    FileOffset - Supplies the File Offset for the desired data.

    TrialLength - Supplies the length of the desired data.

Return Value:

    Address of the allocated and initialized Bcb

--*/

{
    PBCB Bcb;
    CSHORT NodeIsInZone;
    ULONG RoundedBcbSize = (sizeof(BCB) + 7) & ~7;

    //  Loop until we have a new Bcb.

    while (TRUE) {

        PVOID Segment;
        ULONG SegmentSize;

        ExAcquireSpinLockAtDpcLevel( &CcBcbSpinLock );
        Bcb = ExAllocateFromZone( &LazyWriter.BcbZone );
        ExReleaseSpinLockFromDpcLevel( &CcBcbSpinLock );

        if (Bcb != NULL) {
            NodeIsInZone = 1;
            break;
        }

        //  Allocation failure - on large systems, extend the zone by a page.

        if ( MmQuerySystemSize() == MmLargeSystem ) {

            SegmentSize = PAGE_SIZE;

            if ((Segment = ExAllocatePoolWithTag( NonPagedPool, SegmentSize, 'zBcC')) == NULL) {
                return NULL;
            }

            ExAcquireSpinLockAtDpcLevel( &CcBcbSpinLock );
            if (!NT_SUCCESS(ExExtendZone( &LazyWriter.BcbZone, Segment, SegmentSize ))) {
                CcBugCheck( 0, 0, 0 );
            }
            ExReleaseSpinLockFromDpcLevel( &CcBcbSpinLock );
        } else {
            if ((Bcb = ExAllocatePoolWithTag( NonPagedPool, sizeof(BCB), 'cBcC')) == NULL) {
                return NULL;
            }
            NodeIsInZone = 0;
            break;
        }
    }

    //  Initialize the newly allocated Bcb.  First zero it, then fill in
    //  nonzero fields.

    RtlZeroMemory( Bcb, RoundedBcbSize );

    Bcb->NodeIsInZone = NodeIsInZone;

    //  For Mbcb's, SharedCacheMap is NULL, and the rest of this initialization
    //  is not desired.

    if (SharedCacheMap != NULL) {

        Bcb->NodeTypeCode = CACHE_NTC_BCB;
        Bcb->FileOffset = *FileOffset;
        Bcb->ByteLength = TrialLength->LowPart;
        Bcb->BeyondLastByte.QuadPart = FileOffset->QuadPart + TrialLength->QuadPart;
        Bcb->PinCount += 1;
        ExInitializeResource( &Bcb->Resource );
        Bcb->SharedCacheMap = SharedCacheMap;

        //  Since CcCalculateVacbLockCount has to be able to walk
        //  the BcbList with only the VacbSpinLock, we take that one
        //  out to change the list and set the count.

        CcAcquireVacbLockAtDpcLevel();
        InsertTailList( &AfterBcb->BcbLinks, &Bcb->BcbLinks );

        ASSERT( (SharedCacheMap->SectionSize.QuadPart < VACB_SIZE_OF_FIRST_LEVEL) ||
                (CcFindBcb(SharedCacheMap, FileOffset, &Bcb->BeyondLastByte, &AfterBcb) &&
                 (Bcb == AfterBcb)) );

        //  Now for large metadata streams we lock the Vacb level.

        CcLockVacbLevel( SharedCacheMap, FileOffset->QuadPart );
        CcReleaseVacbLockFromDpcLevel();

        //  If this resource belongs to a stream with write behind disabled,
        //  let Ex know that the resource will never be acquired exclusive.
        //  Also disable boost (I know this is useless, but KenR said I had
        //  to do it).

        if (SharedCacheMap &&
            FlagOn(SharedCacheMap->Flags, DISABLE_WRITE_BEHIND)) {
#if DBG
            SetFlag(Bcb->Resource.Flag, ResourceNeverExclusive);
#endif
            ExDisableResourceBoost( &Bcb->Resource );
        }
    }

    return Bcb;
}
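
The allocation loop at the top shows a pattern used throughout this file: try the fixed-size BcbZone first, and on failure either extend the zone (large systems) or fall back to general pool, recording in NodeIsInZone which allocator owns the block so that CcDeallocateBcb can return it to the right place. Below is a reduced, self-contained sketch of the same try-zone-then-pool shape; all names are illustrative, not the kernel's:

    #include <stdlib.h>

    typedef struct _NODE {
        struct _NODE *Next;            /* freelist link while in the zone */
        unsigned short NodeIsInZone;   /* 1: came from zone, 0: from malloc */
    } NODE;

    static NODE ZoneStorage[64];       /* the preallocated "zone" */
    static NODE *ZoneFreeList;
    static int ZoneInitialized;

    NODE *AllocateNode(void)
    {
        NODE *Node;

        if (!ZoneInitialized) {        /* lazily thread the zone freelist */
            for (int i = 0; i < 64; i++) {
                ZoneStorage[i].Next = ZoneFreeList;
                ZoneFreeList = &ZoneStorage[i];
            }
            ZoneInitialized = 1;
        }

        if ((Node = ZoneFreeList) != NULL) {
            ZoneFreeList = Node->Next;
            Node->NodeIsInZone = 1;    /* remember the owning allocator */
        } else if ((Node = malloc(sizeof(NODE))) != NULL) {
            Node->NodeIsInZone = 0;    /* fell back to the general heap */
        }
        return Node;                   /* NULL only if both sources fail */
    }

    void FreeNode(NODE *Node)
    {
        if (Node->NodeIsInZone) {      /* mirrors CcDeallocateBcb's test */
            Node->Next = ZoneFreeList;
            ZoneFreeList = Node;
        } else {
            free(Node);
        }
    }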

VOID FASTCALL CcDeallocateBcb ( IN PBCB Bcb )
 

Definition at line 5694 of file cachesub.c.

References _LAZY_WRITER::BcbZone, CACHE_NTC_BCB, CcBcbSpinLock, ExDeleteResource, ExFreePool(), ExFreeToZone, and LazyWriter.

Referenced by CcDeleteMbcb(), CcDeleteSharedCacheMap(), and CcUnpinFileData().

/*++

Routine Description:

    This routine deallocates a Bcb to the BcbZone.  It must
    already be removed from the BcbList.

Arguments:

    Bcb - the Bcb to deallocate

Return Value:

    None

--*/

{
    KIRQL OldIrql;

    //  Deallocate Resource structures.

    if (Bcb->NodeTypeCode == CACHE_NTC_BCB) {

        ExDeleteResource( &Bcb->Resource );
    }

    if ( Bcb->NodeIsInZone ) {

        //  Synchronize access to the BcbZone.

        ExAcquireSpinLock( &CcBcbSpinLock, &OldIrql );
        ExFreeToZone( &LazyWriter.BcbZone, Bcb );
        ExReleaseSpinLock( &CcBcbSpinLock, OldIrql );

    } else {
        ExFreePool(Bcb);
    }
    return;
}

BOOLEAN CcFindBcb ( IN PSHARED_CACHE_MAP SharedCacheMap,
                    IN PLARGE_INTEGER FileOffset,
                    IN OUT PLARGE_INTEGER BeyondLastByte,
                    OUT PBCB *Bcb )

Definition at line 5314 of file cachesub.c.

References ASSERT, BCB, _BCB::BcbLinks, _BCB::BeyondLastByte, CACHE_NTC_BCB, DebugTrace, DebugTrace2, FALSE, _BCB::FileOffset, GetBcbListHead, me, _BCB::NodeTypeCode, SIZE_PER_BCB_LIST, and TRUE.

Referenced by CcAcquireByteRangeForWrite(), CcAllocateInitializeBcb(), and CcPinFileData().

/*++

Routine Description:

    This routine is called to find a Bcb describing the specified byte range
    of a file.  It returns TRUE if it could at least find a Bcb which describes
    the beginning of the specified byte range, or else FALSE if the first
    part of the byte range is not present.  In the latter case, the requested
    byte range (BeyondLastByte) is truncated if there is currently a Bcb which
    describes bytes beyond the beginning of the byte range.

    The caller may see if the entire byte range is being returned by examining
    the Bcb, and the caller (or caller's caller) may then make subsequent
    calls if the data is not all returned.

    The BcbSpinLock must be currently acquired.

Arguments:

    SharedCacheMap - Supplies a pointer to the SharedCacheMap for the file
                     in which the byte range is desired.

    FileOffset - Supplies the file offset for the beginning of the desired
                 byte range.

    BeyondLastByte - Supplies the file offset of the ending of the desired
                     byte range + 1.  Note that this offset will be truncated
                     on return if the Bcb was not found, but bytes beyond the
                     beginning of the Bcb are contained in another Bcb.

    Bcb - returns a Bcb describing the beginning of the byte range if also
          returning TRUE, or else the point in the Bcb list to insert after.

Return Value:

    FALSE - if no Bcb describes the beginning of the desired byte range

    TRUE - if a Bcb is being returned describing at least an initial
           part of the byte range.

--*/

{
    PLIST_ENTRY BcbList;
    PBCB Bcbt;
    BOOLEAN Found = FALSE;

    DebugTrace(+1, me, "CcFindBcb:\n", 0 );
    DebugTrace( 0, me, "    SharedCacheMap = %08lx\n", SharedCacheMap );
    DebugTrace2(0, me, "    FileOffset = %08lx, %08lx\n", FileOffset->LowPart,
                FileOffset->HighPart );
    DebugTrace2(0, me, "    BeyondLastByte = %08lx, %08lx\n", BeyondLastByte->LowPart,
                BeyondLastByte->HighPart );

    //  We want to terminate scans by testing the NodeTypeCode field from the
    //  BcbLinks, so we want to see the SharedCacheMap signature from the same
    //  offset.

    ASSERT(FIELD_OFFSET(SHARED_CACHE_MAP, BcbList) == FIELD_OFFSET(BCB, BcbLinks));

    //  Similarly, when we hit one of the BcbListHeads in the array, small negative
    //  offsets are all structure pointers, so we are counting on the Bcb signature
    //  to have some non-Ulong address bits set.

    ASSERT((CACHE_NTC_BCB & 3) != 0);

    //  Get the address of the Bcb listhead that is *after* the Bcb we are looking
    //  for, for a backwards scan.  It is important that we fail in the forward
    //  direction so that we are looking in the right segment of the Bcb list.

    BcbList = GetBcbListHead( SharedCacheMap, FileOffset->QuadPart + SIZE_PER_BCB_LIST, TRUE );

    //  Search for an entry that overlaps the specified range, or until we hit
    //  a listhead.

    Bcbt = CONTAINING_RECORD(BcbList->Flink, BCB, BcbLinks);

    //  First see if we really have to do Large arithmetic or not, and
    //  then use either a 32-bit loop or a 64-bit loop to search for
    //  the Bcb.

    if (FileOffset->HighPart == 0 &&
        Bcbt->NodeTypeCode == CACHE_NTC_BCB &&
        Bcbt->BeyondLastByte.HighPart == 0) {

        //  32-bit - loop until we get back to a listhead.

        while (Bcbt->NodeTypeCode == CACHE_NTC_BCB) {

            //  Since the Bcb list is in descending order, we first check
            //  if we are completely beyond the current entry, and if so
            //  get out.

            if (FileOffset->LowPart >= Bcbt->BeyondLastByte.LowPart) {
                break;
            }

            //  Next check if the first byte we are looking for is
            //  contained in the current Bcb.  If so, we either have
            //  a partial hit and must truncate to the exact amount
            //  we have found, or we may have a complete hit.  In
            //  either case we break with Found == TRUE.

            if (FileOffset->LowPart >= Bcbt->FileOffset.LowPart) {
                Found = TRUE;
                break;
            }

            //  Now we know we must loop back and keep looking, but we
            //  still must check for the case where the tail end of the
            //  bytes we are looking for are described by the current
            //  Bcb.  If so we must truncate what we are looking for,
            //  because this routine is only supposed to return bytes
            //  from the start of the desired range.

            if (BeyondLastByte->LowPart >= Bcbt->FileOffset.LowPart) {
                BeyondLastByte->LowPart = Bcbt->FileOffset.LowPart;
            }

            //  Advance to the next entry in the list (which is possibly back
            //  to the listhead) and loop back.

            Bcbt = CONTAINING_RECORD( Bcbt->BcbLinks.Flink,
                                      BCB,
                                      BcbLinks );
        }

    } else {

        //  64-bit - loop until we get back to a listhead.

        while (Bcbt->NodeTypeCode == CACHE_NTC_BCB) {

            //  Since the Bcb list is in descending order, we first check
            //  if we are completely beyond the current entry, and if so
            //  get out.

            if (FileOffset->QuadPart >= Bcbt->BeyondLastByte.QuadPart) {
                break;
            }

            //  Next check if the first byte we are looking for is
            //  contained in the current Bcb.  If so, we either have
            //  a partial hit and must truncate to the exact amount
            //  we have found, or we may have a complete hit.  In
            //  either case we break with Found == TRUE.

            if (FileOffset->QuadPart >= Bcbt->FileOffset.QuadPart) {
                Found = TRUE;
                break;
            }

            //  Now we know we must loop back and keep looking, but we
            //  still must check for the case where the tail end of the
            //  bytes we are looking for are described by the current
            //  Bcb.  If so we must truncate what we are looking for,
            //  because this routine is only supposed to return bytes
            //  from the start of the desired range.

            if (BeyondLastByte->QuadPart >= Bcbt->FileOffset.QuadPart) {
                BeyondLastByte->QuadPart = Bcbt->FileOffset.QuadPart;
            }

            //  Advance to the next entry in the list (which is possibly back
            //  to the listhead) and loop back.

            Bcbt = CONTAINING_RECORD( Bcbt->BcbLinks.Flink,
                                      BCB,
                                      BcbLinks );
        }
    }

    *Bcb = Bcbt;

    DebugTrace2(0, me, "    <BeyondLastByte = %08lx, %08lx\n", BeyondLastByte->LowPart,
                BeyondLastByte->HighPart );
    DebugTrace( 0, me, "    <Bcb = %08lx\n", *Bcb );
    DebugTrace(-1, me, "CcFindBcb -> %02lx\n", Found );

    return Found;
}
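
Since the Bcb list is ordered by descending FileOffset, the search reduces to three comparisons per entry: stop on an entry that ends at or below the sought offset (a miss), report a hit if the entry covers the first byte, and otherwise truncate the requested end down to the entry's start. A simplified, self-contained sketch of that contract over a plain array of non-overlapping descending ranges; illustrative only:

    #include <stdio.h>

    typedef struct {
        long long FileOffset;        /* start of the described range */
        long long BeyondLastByte;    /* end of the range + 1 */
    } RANGE;

    /* Ranges[] is sorted by descending FileOffset, like the Bcb list.
       Returns 1 if some range contains Offset; otherwise returns 0 and
       truncates *BeyondLastByte to the start of the first overlapping
       range beyond it, mirroring CcFindBcb's contract. */
    int FindRange(const RANGE *Ranges, int Count,
                  long long Offset, long long *BeyondLastByte)
    {
        for (int i = 0; i < Count; i++) {

            if (Offset >= Ranges[i].BeyondLastByte) {
                break;               /* everything further is lower: miss */
            }
            if (Offset >= Ranges[i].FileOffset) {
                return 1;            /* this range covers the first byte */
            }
            if (*BeyondLastByte >= Ranges[i].FileOffset) {
                *BeyondLastByte = Ranges[i].FileOffset;   /* truncate tail */
            }
        }
        return 0;
    }

    int main(void)
    {
        RANGE Ranges[] = { { 8192, 12288 }, { 0, 4096 } };  /* descending */
        long long End = 16384;
        int Found = FindRange(Ranges, 2, 4096, &End);
        printf("found=%d, end truncated to %lld\n", Found, End); /* 0, 8192 */
        return 0;
    }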

PBITMAP_RANGE CcFindBitmapRangeToClean ( IN PMBCB Mbcb,
                                         IN LONGLONG Page )

Definition at line 2157 of file cachesub.c.

References ASSERT, _BITMAP_RANGE::BasePage, _BITMAP_RANGE::DirtyPages, _BITMAP_RANGE::LastDirtyPage, _BITMAP_RANGE::Links, and TRUE.

Referenced by CcAcquireByteRangeForWrite(), and CcGetFlushedValidData().

/*++

Routine Description:

    This routine starts from the specified page, and looks for a range with dirty
    pages.  The caller must guarantee that some range exists with dirty pages.  If
    the end of the ranges is hit before finding any dirty ranges, then this routine
    loops back to the start of the range list.

Arguments:

    Mbcb - Supplies the Mbcb in which to find the range.

    Page - Supplies the page number for the first page to scan from.

Return Value:

    The desired bitmap range with dirty pages.

Environment:

    The BcbSpinLock must be held on entry.

--*/

{
    PBITMAP_RANGE BitmapRange;

    //  Point to the first bitmap range.

    BitmapRange = (PBITMAP_RANGE)Mbcb->BitmapRanges.Flink;

    //  Loop through the list until we find the range to return.

    do {

        //  If we hit the listhead, then wrap to find the first dirty range.

        if (BitmapRange == (PBITMAP_RANGE)&Mbcb->BitmapRanges) {

            //  If Page is already 0, we are in an infinite loop.

            ASSERT(Page != 0);

            //  Clear Page and fall through to advance to the first range.

            Page = 0;

        //  Otherwise, if we are in range, return the first range
        //  with dirty pages.

        } else if ((Page <= (BitmapRange->BasePage + BitmapRange->LastDirtyPage)) &&
                   (BitmapRange->DirtyPages != 0)) {
            return BitmapRange;
        }

        //  Advance to the next range (or possibly back to the listhead).

        BitmapRange = (PBITMAP_RANGE)BitmapRange->Links.Flink;

    } while (TRUE);
}
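
The wrap-to-zero step above is the standard cyclic-scan idiom: on hitting the listhead, reset the search key to 0 and rescan, with the ASSERT guarding against an infinite loop when no dirty range exists (a case the caller must rule out). A minimal, self-contained sketch of the same idiom over an array standing in for the range list; illustrative only:

    #include <assert.h>
    #include <stdio.h>

    typedef struct {
        long long BasePage;
        long long LastDirtyPage;   /* relative to BasePage */
        unsigned  DirtyPages;
    } RANGE;

    /* Find the first range at/after Page with dirty pages, wrapping once
       to the start if needed; the caller guarantees some range is dirty. */
    const RANGE *FindRangeToClean(const RANGE *Ranges, int Count, long long Page)
    {
        for (int Pass = 0; Pass < 2; Pass++) {      /* at most one wrap */
            for (int i = 0; i < Count; i++) {
                if ((Ranges[i].DirtyPages != 0) &&
                    (Page <= Ranges[i].BasePage + Ranges[i].LastDirtyPage)) {
                    return &Ranges[i];
                }
            }
            assert(Page != 0);    /* mirrors the infinite-loop ASSERT */
            Page = 0;             /* wrap and rescan from the start */
        }
        return 0;                 /* unreachable if the guarantee holds */
    }

    int main(void)
    {
        RANGE Ranges[] = { { 0, 10, 0 }, { 32768, 5, 3 } };
        const RANGE *R = FindRangeToClean(Ranges, 2, 40000);
        printf("resume in range with base %lld\n", R ? R->BasePage : -1);
        return 0;
    }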

PBITMAP_RANGE CcFindBitmapRangeToDirty ( IN PMBCB Mbcb,
                                         IN LONGLONG Page,
                                         IN PULONG *FreePageForSetting )

Definition at line 1996 of file cachesub.c.

References ASSERT, _BITMAP_RANGE::BasePage, _BITMAP_RANGE::Bitmap, BITMAP_RANGE, _BITMAP_RANGE::DirtyPages, ExAllocatePoolWithTag, _BITMAP_RANGE::FirstDirtyPage, _BITMAP_RANGE::LastDirtyPage, _BITMAP_RANGE::Links, MBCB_BITMAP_BLOCK_SIZE, NonPagedPool, and NULL.

Referenced by CcSetDirtyInMask().

/*++

Routine Description:

    This routine looks for the bitmap range containing the specified page.
    If it is found it is returned so the caller can set some dirty bits.
    If it is not found, then an attempt is made to come up with a free range
    and set it up to describe the desired range.  To come up with a free range,
    first we attempt to recycle the lowest range that does not currently contain
    any dirty pages.  If there is no such range, then we allocate one.

Arguments:

    Mbcb - Supplies the Mbcb in which to find the range.

    Page - Supplies the page number for the first page to be set dirty.

    FreePageForSetting - Supplies a free bitmap page of zeros from the zone; the
                         caller's pointer is cleared on return if this page is used.

Return Value:

    The desired bitmap range, or NULL if one could not be allocated.

Environment:

    The BcbSpinLock must be held on entry.

--*/

{
    PBITMAP_RANGE BitmapRange, FreeRange;
    PLIST_ENTRY InsertPoint;
    LONGLONG BasePage;

    //  Initialize FreeRange and InsertPoint for the case we have
    //  to initialize a range.

    FreeRange = NULL;
    InsertPoint = &Mbcb->BitmapRanges;

    //  Point to the first bitmap range.

    BitmapRange = (PBITMAP_RANGE)InsertPoint->Flink;

    //  Calculate the desired BasePage from the caller's page.

    BasePage = (Page & ~(LONGLONG)((MBCB_BITMAP_BLOCK_SIZE * 8) - 1));

    //  Loop through the list until we find the range or we have a free range
    //  and correct insertion point.

    do {

        //  If we get an exact match, then we must have hit a fully-initialized
        //  range which we can return.

        if (BasePage == BitmapRange->BasePage) {
            return BitmapRange;

        //  Otherwise, see if the range is free and we have not captured a
        //  free range yet.

        } else if ((BitmapRange->DirtyPages == 0) && (FreeRange == NULL)) {
            FreeRange = BitmapRange;

        //  If we did not capture a free range, see if we need to update our
        //  insertion point.

        } else if (BasePage > BitmapRange->BasePage) {
            InsertPoint = &BitmapRange->Links;
        }

        //  Advance to the next range (or possibly back to the listhead).

        BitmapRange = (PBITMAP_RANGE)BitmapRange->Links.Flink;

        //  Loop until we hit the end, or we know we are done updating both
        //  InsertPoint and FreeRange.

    } while ((BitmapRange != (PBITMAP_RANGE)&Mbcb->BitmapRanges) &&
             ((BasePage >= BitmapRange->BasePage) ||
              (FreeRange == NULL)));

    //  If we found a FreeRange we can use, then remove it from the list.

    if (FreeRange != NULL) {
        RemoveEntryList( &FreeRange->Links );

    //  Otherwise we have to allocate the small bitmap range structure.  We usually
    //  try to avoid calling the pool package while owning a spin lock, but note the
    //  following things which must be true if we hit this point:
    //
    //      The file is larger than 3 bitmap ranges (normally 384MB on Intel).
    //      Three ranges plus all previously allocated ranges are simultaneously dirty.
    //
    //  The second point is fairly unlikely, especially for a sequential writer.  It
    //  can occur for a random writer in a large file, but eventually we will allocate
    //  enough ranges to always describe how many ranges he can keep dirty at once!

    } else {
        FreeRange = ExAllocatePoolWithTag( NonPagedPool, sizeof(BITMAP_RANGE), 'rBcC' );
        if (FreeRange == NULL) {
            return NULL;
        }
        RtlZeroMemory( FreeRange, sizeof(BITMAP_RANGE) );
    }

    //  Insert and initialize.

    InsertHeadList( InsertPoint, &FreeRange->Links );
    FreeRange->BasePage = BasePage;
    FreeRange->FirstDirtyPage = MAXULONG;
    FreeRange->LastDirtyPage = 0;

    //  If the range does not have a bitmap yet, then consume the one we were
    //  passed in.

    if (FreeRange->Bitmap == NULL) {
        ASSERT(*FreePageForSetting != NULL);
        FreeRange->Bitmap = *FreePageForSetting;
        *FreePageForSetting = NULL;
    }

    return FreeRange;
}
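
Each bitmap range covers MBCB_BITMAP_BLOCK_SIZE * 8 pages (one bit per page), which is consistent with the "normally 384MB on Intel" figure above for three ranges: with an assumed 4KB bitmap block and 4KB pages, one range spans 4096 * 8 * 4096 bytes = 128MB. A small sketch of the same base-page rounding under those assumed sizes (the constants here are illustrative, not taken from cc.h):

    #include <stdio.h>

    #define PAGE_SIZE               4096          /* assumed, as on x86 */
    #define MBCB_BITMAP_BLOCK_SIZE  4096          /* assumed block size */
    #define PAGES_PER_RANGE   (MBCB_BITMAP_BLOCK_SIZE * 8)

    int main(void)
    {
        long long Page = 40000;   /* some page being dirtied */

        /* Same rounding as CcFindBitmapRangeToDirty: mask off the low bits
           to get the first page covered by the containing bitmap range. */
        long long BasePage = Page & ~(long long)(PAGES_PER_RANGE - 1);

        printf("page %lld -> range base %lld (covers %d pages, %lld MB)\n",
               Page, BasePage, PAGES_PER_RANGE,
               ((long long)PAGES_PER_RANGE * PAGE_SIZE) >> 20);
        return 0;
    }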

VOID CcFlushCache ( IN PSECTION_OBJECT_POINTERS SectionObjectPointer,
                    IN PLARGE_INTEGER FileOffset OPTIONAL,
                    IN ULONG Length,
                    OUT PIO_STATUS_BLOCK IoStatus OPTIONAL )

Definition at line 4411 of file cachesub.c.

References _SHARED_CACHE_MAP::ActivePage, _SHARED_CACHE_MAP::ActiveVacb, CC_REQUEUE, CcAcquireByteRangeForWrite(), CcAcquireMasterLock, CcDecrementOpenCount, CcDeferredWrites, CcDirtySharedCacheMapList, CcExceptionFilter(), CcFreeActiveVacb(), CcFreeVirtualAddress(), CcGetVirtualAddressIfMapped(), CcIdleDelayTick, CcIncrementOpenCount, CcLazyWriteHotSpots, CcLazyWriteIos, CcLazyWritePages, CcNoDelay, CcPostDeferredWrites(), CcReleaseByteRangeFromWrite(), CcReleaseMasterLock, CcScheduleLazyWriteScan(), DebugTrace, DebugTrace2, _SHARED_CACHE_MAP::DirtyPages, FALSE, _SHARED_CACHE_MAP::FileObject, _SHARED_CACHE_MAP::FileSize, FlagOn, _SHARED_CACHE_MAP::Flags, _FILE_OBJECT::FsContext, FSRTL_FLAG_USER_MAPPED_FILE, GetActiveVacbAtDpcLevel, KeQueryTickCount(), LAZY_WRITE_OCCURRED, _SHARED_CACHE_MAP::LazyWritePassCount, LazyWriter, me, mm, MmFlushSection(), MmSetAddressRangeModified(), MODIFIED_WRITE_DISABLED, _SHARED_CACHE_MAP::NeedToZero, _SHARED_CACHE_MAP::NeedToZeroPage, NT_SUCCESS, NTSTATUS(), NULL, Offset, _SHARED_CACHE_MAP::OpenCount, _LAZY_WRITER::OtherWork, PAGE_SHIFT, PAGE_SIZE, _SHARED_CACHE_MAP::PagesToWrite, PFSRTL_COMMON_FCB_HEADER, PIN_ACCESS, RetryError, _LAZY_WRITER::ScanActive, _FILE_OBJECT::SectionObjectPointer, SetFlag, _SHARED_CACHE_MAP::SharedCacheMapLinks, _SHARED_CACHE_MAP_LIST_CURSOR::SharedCacheMapLinks, Status, TRUE, _SHARED_CACHE_MAP::ValidDataGoal, _SHARED_CACHE_MAP::ValidDataLength, and WRITE_QUEUED.

Referenced by CcWriteBehind(), CcZeroEndOfLastPage(), LfsFlushLfcb(), LfsFlushLogPage(), and MiFlushDataSection().

/*++

Routine Description:

    This routine may be called to flush dirty data from the cache to the
    cached file on disk. Any byte range within the file may be flushed,
    or the entire file may be flushed by omitting the FileOffset parameter.

    This routine does not take a Wait parameter; the caller should assume
    that it will always block.

Arguments:

    SectionObjectPointer - A pointer to the Section Object Pointers
        structure in the nonpaged Fcb.

    FileOffset - If this parameter is supplied (not NULL), then only the
        byte range specified by FileOffset and Length is flushed. If
        &CcNoDelay is specified, then this signifies the call from the
        Lazy Writer, and the lazy write scan should resume as normal from
        the last spot where it left off in the file.

    Length - Defines the length of the byte range to flush, starting at
        FileOffset. This parameter is ignored if FileOffset is specified
        as NULL.

    IoStatus - The I/O status resulting from the flush operation.

Return Value:

    None.

--*/

{
    LARGE_INTEGER NextFileOffset, TargetOffset;
    ULONG NextLength;
    PBCB FirstBcb;
    KIRQL OldIrql;
    PSHARED_CACHE_MAP SharedCacheMap;
    IO_STATUS_BLOCK TrashStatus;
    PVOID TempVa;
    ULONG RemainingLength, TempLength;
    NTSTATUS PopupStatus;
    BOOLEAN HotSpot;
    ULONG BytesWritten = 0;
    BOOLEAN PopupRequired = FALSE;
    BOOLEAN VerifyRequired = FALSE;
    BOOLEAN IsLazyWriter = FALSE;
    BOOLEAN FreeActiveVacb = FALSE;
    PVACB ActiveVacb = NULL;
    NTSTATUS Status = STATUS_SUCCESS;
    LARGE_INTEGER EndTick, CurrentTick;

    DebugTrace(+1, me, "CcFlushCache:\n", 0 );
    DebugTrace( 0, mm, "    SectionObjectPointer = %08lx\n", SectionObjectPointer );
    DebugTrace2(0, me, "    FileOffset = %08lx, %08lx\n",
                ARGUMENT_PRESENT(FileOffset) ? FileOffset->LowPart : 0,
                ARGUMENT_PRESENT(FileOffset) ? FileOffset->HighPart : 0 );
    DebugTrace( 0, me, "    Length = %08lx\n", Length );

    // If IoStatus passed a NULL pointer, set up to throw the status away.

    if (!ARGUMENT_PRESENT(IoStatus)) {
        IoStatus = &TrashStatus;
    }
    IoStatus->Status = STATUS_SUCCESS;
    IoStatus->Information = 0;

    // See if this is the Lazy Writer. Since he wants to use this common
    // routine, which is also a public routine callable by file systems, the
    // Lazy Writer shows his call by specifying CcNoDelay as the file offset!
    //
    // Also, in case we do not write anything because we see only HotSpot(s),
    // initialize the Status to indicate a retryable error, so CcWorkerThread
    // knows we did not make any progress. Of course any actual flush will
    // overwrite this code.

    if (FileOffset == &CcNoDelay) {
        IoStatus->Status = STATUS_VERIFY_REQUIRED;
        IsLazyWriter = TRUE;
        FileOffset = NULL;
    }

    // If there is nothing to do, return here.

    if (ARGUMENT_PRESENT(FileOffset) && (Length == 0)) {
        DebugTrace(-1, me, "CcFlushCache -> VOID\n", 0 );
        return;
    }

    // See if the file is cached.

    CcAcquireMasterLock( &OldIrql );

    SharedCacheMap = SectionObjectPointer->SharedCacheMap;

    if (SharedCacheMap != NULL) {

        // Increment the open count to keep it from going away.

        CcIncrementOpenCount( SharedCacheMap, 'fcCS' );

        if ((SharedCacheMap->NeedToZero != NULL) || (SharedCacheMap->ActiveVacb != NULL)) {

            ULONG FirstPage = 0;
            ULONG LastPage = MAXULONG;

            if (ARGUMENT_PRESENT(FileOffset)) {
                FirstPage = (ULONG)(FileOffset->QuadPart >> PAGE_SHIFT);
                LastPage = (ULONG)((FileOffset->QuadPart + Length - 1) >> PAGE_SHIFT);
            }

            // Make sure we do not flush the active page without zeroing any
            // uninitialized data. Also, it is very important to free the
            // active page if it is the one to be flushed, so that we get the
            // dirty bit out to the Pfn.

            if (((((LONGLONG)LastPage + 1) << PAGE_SHIFT) > SharedCacheMap->ValidDataGoal.QuadPart) ||

                ((SharedCacheMap->NeedToZero != NULL) &&
                 (FirstPage <= SharedCacheMap->NeedToZeroPage) &&
                 (LastPage >= SharedCacheMap->NeedToZeroPage)) ||

                ((SharedCacheMap->ActiveVacb != NULL) &&
                 (FirstPage <= SharedCacheMap->ActivePage) &&
                 (LastPage >= SharedCacheMap->ActivePage))) {

                GetActiveVacbAtDpcLevel( SharedCacheMap, ActiveVacb, RemainingLength, TempLength );
                FreeActiveVacb = TRUE;
            }
        }
    }

    CcReleaseMasterLock( OldIrql );

    if (FreeActiveVacb) {
        CcFreeActiveVacb( SharedCacheMap, ActiveVacb, RemainingLength, TempLength );
    }

    // If there is a user-mapped file, then we perform the "service" of
    // flushing even data not written via the file system. Note that this is
    // pretty important for folks provoking the flush/purge of a coherency
    // operation.
    //
    // It is critical this happen before we examine our own hints. In the
    // course of this flush it is possible that valid data length will be
    // advanced by the underlying file system, with CcZero'ing behind - which
    // will cause us to make some dirty zeroes in the cache. Syscache bug!
    // Note how coherency flushing works ...

    if ((SharedCacheMap == NULL)

            ||

        FlagOn(((PFSRTL_COMMON_FCB_HEADER)(SharedCacheMap->FileObject->FsContext))->Flags,
               FSRTL_FLAG_USER_MAPPED_FILE) && !IsLazyWriter) {

        // Call MM to flush the section through our view.

        DebugTrace( 0, mm, "MmFlushSection:\n", 0 );
        DebugTrace( 0, mm, "    SectionObjectPointer = %08lx\n", SectionObjectPointer );
        DebugTrace2(0, me, "    FileOffset = %08lx, %08lx\n",
                    ARGUMENT_PRESENT(FileOffset) ? FileOffset->LowPart : 0,
                    ARGUMENT_PRESENT(FileOffset) ? FileOffset->HighPart : 0 );
        DebugTrace( 0, mm, "    RegionSize = %08lx\n", Length );

        try {

            Status = MmFlushSection( SectionObjectPointer,
                                     FileOffset,
                                     Length,
                                     IoStatus,
                                     TRUE );

        } except( CcExceptionFilter( IoStatus->Status = GetExceptionCode() )) {

            KdPrint(("CACHE MANAGER: MmFlushSection raised %08lx\n", IoStatus->Status));
        }

        if ((!NT_SUCCESS(IoStatus->Status)) && !RetryError(IoStatus->Status)) {
            PopupRequired = TRUE;
            PopupStatus = IoStatus->Status;
        }

        DebugTrace2(0, mm, "    <IoStatus = %08lx, %08lx\n",
                    IoStatus->Status, IoStatus->Information );
    }

    // Scan for dirty pages if there is a shared cache map.

    if (SharedCacheMap != NULL) {

        // If FileOffset was not specified then set to flush the entire region
        // and set valid data length to the goal so that we will not get any
        // more callbacks.

        if (!IsLazyWriter && !ARGUMENT_PRESENT(FileOffset)) {
            SharedCacheMap->ValidDataLength = SharedCacheMap->ValidDataGoal;
        }

        // If this is an explicit flush, initialize our offset to scan for.

        if (ARGUMENT_PRESENT(FileOffset)) {
            TargetOffset = *FileOffset;
        }

        // Assume we want to pass the explicit flush flag in Length, but
        // overwrite it if a length really was specified. On subsequent loops,
        // NextLength will have some nonzero value.

        NextLength = 1;
        if (Length != 0) {
            NextLength = Length;
        }

        // Now calculate the tick that will signal the expiration of a lazy
        // writer tick interval.

        if (IsLazyWriter) {
            KeQueryTickCount( &EndTick );
            EndTick.QuadPart += CcIdleDelayTick;
        }

        // Loop as long as we find buffers to flush for this SharedCacheMap,
        // and we are not trying to delete the guy.

        while (((SharedCacheMap->PagesToWrite != 0) || !IsLazyWriter)

                 &&

               ((SharedCacheMap->FileSize.QuadPart != 0) ||
                FlagOn(SharedCacheMap->Flags, PIN_ACCESS))

                 &&

               !VerifyRequired

                 &&

               CcAcquireByteRangeForWrite( SharedCacheMap,
                                           IsLazyWriter ? NULL : (ARGUMENT_PRESENT(FileOffset) ?
                                                                  &TargetOffset : NULL),
                                           IsLazyWriter ? 0 : NextLength,
                                           &NextFileOffset,
                                           &NextLength,
                                           &FirstBcb )) {

            // Assume this range is not a hot spot.

            HotSpot = FALSE;

            // We defer calling Mm to set address range modified until here,
            // to take overhead out of the main line path, and to reduce the
            // number of TBIS on a multiprocessor.

            RemainingLength = NextLength;

            do {

                // See if the next file offset is mapped. (If not, the dirty
                // bit was propagated on the unmap.)

                if ((TempVa = CcGetVirtualAddressIfMapped( SharedCacheMap,
                                                           NextFileOffset.QuadPart + NextLength - RemainingLength,
                                                           &ActiveVacb,
                                                           &TempLength)) != NULL) {

                    // Reduce TempLength to RemainingLength if necessary, and
                    // call MM.

                    if (TempLength > RemainingLength) {
                        TempLength = RemainingLength;
                    }

                    // Clear the Dirty bit (if set) in the PTE and set the Pfn
                    // modified. Assume if the Pte was dirty, that this may be
                    // a hot spot. Do not do hot spots for metadata, and only
                    // when they are within ValidDataLength as reported to the
                    // file system via CcSetValidData.

                    HotSpot = (BOOLEAN)((MmSetAddressRangeModified(TempVa, TempLength) || HotSpot) &&
                                        ((NextFileOffset.QuadPart + NextLength) <
                                         (SharedCacheMap->ValidDataLength.QuadPart)) &&
                                        ((SharedCacheMap->LazyWritePassCount & 0xF) != 0) && IsLazyWriter) &&
                              !FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED);

                    CcFreeVirtualAddress( ActiveVacb );

                } else {

                    // Reduce TempLength to RemainingLength if necessary.

                    if (TempLength > RemainingLength) {
                        TempLength = RemainingLength;
                    }
                }

                // Reduce RemainingLength by what we processed, and loop until
                // done.

                RemainingLength -= TempLength;

            } while (RemainingLength != 0);

            CcLazyWriteHotSpots += HotSpot;

            // Now flush if we do not think it is a hot spot.

            if (!HotSpot) {

                MmFlushSection( SharedCacheMap->FileObject->SectionObjectPointer,
                                &NextFileOffset,
                                NextLength,
                                IoStatus,
                                !IsLazyWriter );

                if (NT_SUCCESS(IoStatus->Status)) {

                    if (!FlagOn(SharedCacheMap->Flags, LAZY_WRITE_OCCURRED)) {
                        CcAcquireMasterLock( &OldIrql );
                        SetFlag(SharedCacheMap->Flags, LAZY_WRITE_OCCURRED);
                        CcReleaseMasterLock( OldIrql );
                    }

                    // Increment performance counters.

                    if (IsLazyWriter) {
                        CcLazyWriteIos += 1;
                        CcLazyWritePages += (NextLength + PAGE_SIZE - 1) >> PAGE_SHIFT;
                    }

                } else {

                    LARGE_INTEGER Offset = NextFileOffset;
                    ULONG RetryLength = NextLength;

                    DebugTrace2( 0, 0, "I/O Error on Cache Flush: %08lx, %08lx\n",
                                 IoStatus->Status, IoStatus->Information );

                    if (RetryError(IoStatus->Status)) {

                        VerifyRequired = TRUE;

                    } else {

                        // Loop to write each page individually, starting with
                        // one more try on the page that got the error, in case
                        // that page or any page beyond it can be successfully
                        // written individually. Note that Offset and
                        // RetryLength are guaranteed to be in integral pages,
                        // but the Information field from the failed request is
                        // not.
                        //
                        // We ignore errors now, and give it one last shot,
                        // before setting the pages clean (see below).

                        do {

                            DebugTrace2( 0, 0, "Trying page at offset %08lx, %08lx\n",
                                         Offset.LowPart, Offset.HighPart );

                            MmFlushSection( SharedCacheMap->FileObject->SectionObjectPointer,
                                            &Offset,
                                            PAGE_SIZE,
                                            IoStatus,
                                            !IsLazyWriter );

                            DebugTrace2( 0, 0, "I/O status = %08lx, %08lx\n",
                                         IoStatus->Status, IoStatus->Information );

                            if (NT_SUCCESS(IoStatus->Status)) {
                                CcAcquireMasterLock( &OldIrql );
                                SetFlag(SharedCacheMap->Flags, LAZY_WRITE_OCCURRED);
                                CcReleaseMasterLock( OldIrql );
                            }

                            if ((!NT_SUCCESS(IoStatus->Status)) && !RetryError(IoStatus->Status)) {
                                PopupRequired = TRUE;
                                PopupStatus = IoStatus->Status;
                            }

                            VerifyRequired = VerifyRequired || RetryError(IoStatus->Status);

                            Offset.QuadPart = Offset.QuadPart + (LONGLONG)PAGE_SIZE;
                            RetryLength -= PAGE_SIZE;

                        } while (RetryLength > 0);
                    }
                }
            }

            // Now release the Bcb resources and set them clean. Note we do not
            // check for errors here; they are just returned in the I/O status.
            // Errors on writes are rare to begin with. Nonetheless, our
            // strategy is to rely on one or more of the following (depending
            // on the file system) to prevent errors from getting to us.
            //
            //  - Retries and/or other forms of error recovery in the disk driver
            //  - Mirroring driver
            //  - Hot fixing in the noncached path of the file system
            //
            // In the unexpected case that a write error does get through, we
            // *currently* just set the Bcbs clean anyway, rather than let Bcbs
            // and pages accumulate which cannot be written. Note we did a
            // popup above to at least notify the guy.
            //
            // Set the pages dirty again if we either saw a HotSpot or got
            // verify required.

            CcReleaseByteRangeFromWrite( SharedCacheMap,
                                         &NextFileOffset,
                                         NextLength,
                                         FirstBcb,
                                         (BOOLEAN)(HotSpot || VerifyRequired) );

            // See if there are any deferred writes we should post.

            BytesWritten += NextLength;
            if ((BytesWritten >= 0x40000) && !IsListEmpty(&CcDeferredWrites)) {
                CcPostDeferredWrites();
                BytesWritten = 0;
            }

            // If we are the lazy writer and have spent more than the active
            // tick length in this loop, break out for a requeue so we share
            // the file resources.

            if (IsLazyWriter) {

                KeQueryTickCount( &CurrentTick );

                // Note that CcIdleDelay is a relative (negative) timestamp.

                if (CurrentTick.QuadPart > EndTick.QuadPart) {
                    IoStatus->Information = CC_REQUEUE;
                    break;
                }
            }

            // Now for explicit flushes, we should advance our range.

            if (ARGUMENT_PRESENT(FileOffset)) {

                NextFileOffset.QuadPart += NextLength;

                // Done yet?

                if ((FileOffset->QuadPart + Length) <= NextFileOffset.QuadPart) {
                    break;
                }

                // Calculate the new target range.

                NextLength = (ULONG)((FileOffset->QuadPart + Length) - NextFileOffset.QuadPart);
                TargetOffset = NextFileOffset;
            }
        }
    }

    // See if there are any deferred writes we should post if we escaped the
    // loop without checking after a series of flushes.

    if (BytesWritten != 0 && !IsListEmpty(&CcDeferredWrites)) {
        CcPostDeferredWrites();
    }

    // Now we can get rid of the open count, and clean up as required.

    if (SharedCacheMap != NULL) {

        // Serialize again to decrement the open count.

        CcAcquireMasterLock( &OldIrql );

        CcDecrementOpenCount( SharedCacheMap, 'fcCF' );

        if ((SharedCacheMap->OpenCount == 0) &&
            !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) &&
            (SharedCacheMap->DirtyPages == 0)) {

            // Move to the dirty list.

            RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );
            InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks,
                            &SharedCacheMap->SharedCacheMapLinks );

            // Make sure the Lazy Writer will wake up, because we want him to
            // delete this SharedCacheMap.

            LazyWriter.OtherWork = TRUE;
            if (!LazyWriter.ScanActive) {
                CcScheduleLazyWriteScan();
            }
        }

        CcReleaseMasterLock( OldIrql );
    }

    // Make sure to return the first error to our caller. In the case of the
    // Lazy Writer, a popup will be issued.

    if (PopupRequired) {
        IoStatus->Status = PopupStatus;
    }

    DebugTrace(-1, me, "CcFlushCache -> VOID\n", 0 );

    return;
}
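For reference, a minimal sketch (not from cachesub.c) of how a file system typically drives this routine, for example to flush a byte range during a write-through or cleanup path. The Fcb layout here is hypothetical; CcFlushCache itself and its IoStatus contract are as documented above.

#include <ntifs.h>

typedef struct _MY_FCB {                    /* hypothetical FCB, for illustration only */
    SECTION_OBJECT_POINTERS SectionObjectPointers;
} MY_FCB, *PMY_FCB;

NTSTATUS MyFsFlushRange( IN PMY_FCB Fcb, IN LONGLONG Offset, IN ULONG Length )
{
    LARGE_INTEGER FileOffset;
    IO_STATUS_BLOCK IoStatus;

    FileOffset.QuadPart = Offset;

    /* Flush just this byte range; passing NULL for FileOffset would flush
       the whole stream. The routine may block, per the description above. */
    CcFlushCache( &Fcb->SectionObjectPointers, &FileOffset, Length, &IoStatus );

    return IoStatus.Status;
}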

VOID CcFreeActiveVacb (IN PSHARED_CACHE_MAP SharedCacheMap, IN PVACB ActiveVacb OPTIONAL, IN ULONG ActivePage, IN ULONG PageIsDirty)

Definition at line 5885 of file cachesub.c.

References ACTIVE_PAGE_IS_DIRTY, CcAcquireMasterLock, CcFreeVirtualAddress(), CcReleaseMasterLock, CcSetDirtyInMask(), CcTotalDirtyPages, ClearFlag, FlagOn, MmUnlockCachedPage(), NULL, PAGE_SHIFT, PAGE_SIZE, and VACB_MAPPING_GRANULARITY.

Referenced by CcCopyRead(), CcCopyWrite(), CcDeleteSharedCacheMap(), CcFastCopyRead(), CcFastCopyWrite(), CcFlushCache(), CcGetVacbMiss(), CcMapAndCopy(), CcMdlRead(), CcPinFileData(), CcPrepareMdlWrite(), CcPurgeCacheSection(), CcSetFileSizes(), CcUninitializeCacheMap(), CcWriteBehind(), and CcZeroEndOfLastPage().

/*++

Routine Description:

    This routine may be called to zero the end of a locked page or free the
    ActiveVacb for a Shared Cache Map, if there is one. Note that some
    callers are not synchronized with foreground activity, and may therefore
    not have an ActiveVacb. Examples of unsynchronized callers are
    CcZeroEndOfLastPage (which is called by MM) and any flushing done by
    CcWriteBehind.

Arguments:

    SharedCacheMap - SharedCacheMap to examine for page to be zeroed.

    ActiveVacb - Vacb to free

    ActivePage - Page that was used

    PageIsDirty - ACTIVE_PAGE_IS_DIRTY if the active page is dirty

Return Value:

    None

--*/

{
    LARGE_INTEGER ActiveOffset;
    PVOID ActiveAddress;
    ULONG BytesLeftInPage;
    KIRQL OldIrql;

    // If the page was locked, then unlock it.

    if (SharedCacheMap->NeedToZero != NULL) {

        PVACB NeedToZeroVacb;

        // Zero the rest of the page under spinlock control, and then clear
        // the address field. This field makes zero->nonzero transitions only
        // when the file is exclusive, but it can make nonzero->zero
        // transitions any time the spinlock is not held.

        ExAcquireFastLock( &SharedCacheMap->ActiveVacbSpinLock, &OldIrql );

        // The address could already be gone.

        ActiveAddress = SharedCacheMap->NeedToZero;
        if (ActiveAddress != NULL) {

            BytesLeftInPage = PAGE_SIZE - ((((ULONG)((ULONG_PTR)ActiveAddress) - 1) & (PAGE_SIZE - 1)) + 1);

            RtlZeroBytes( ActiveAddress, BytesLeftInPage );
            SharedCacheMap->NeedToZero = NULL;
            NeedToZeroVacb = SharedCacheMap->NeedToZeroVacb;
        }
        ExReleaseFastLock( &SharedCacheMap->ActiveVacbSpinLock, OldIrql );

        // Now call MM to unlock the address. Note we will never store the
        // address at the start of the page, but we can sometimes store the
        // start of the next page when we have exactly filled the page.

        if (ActiveAddress != NULL) {
            MmUnlockCachedPage( (PVOID)((PCHAR)ActiveAddress - 1) );
            CcFreeVirtualAddress( NeedToZeroVacb );
        }
    }

    // See if the caller actually has an ActiveVacb.

    if (ActiveVacb != NULL) {

        // See if the page is dirty.

        if (PageIsDirty) {

            ActiveOffset.QuadPart = (LONGLONG)ActivePage << PAGE_SHIFT;
            ActiveAddress = (PVOID)((PCHAR)ActiveVacb->BaseAddress +
                                    (ActiveOffset.LowPart & (VACB_MAPPING_GRANULARITY - 1)));

            // Tell the Lazy Writer to write the page.

            CcSetDirtyInMask( SharedCacheMap, &ActiveOffset, PAGE_SIZE );

            // Now we need to clear the flag and decrement some counts if
            // there is no other active Vacb which snuck in.

            CcAcquireMasterLock( &OldIrql );
            ExAcquireSpinLockAtDpcLevel( &SharedCacheMap->ActiveVacbSpinLock );
            if ((SharedCacheMap->ActiveVacb == NULL) &&
                FlagOn(SharedCacheMap->Flags, ACTIVE_PAGE_IS_DIRTY)) {

                ClearFlag(SharedCacheMap->Flags, ACTIVE_PAGE_IS_DIRTY);
                SharedCacheMap->DirtyPages -= 1;
                CcTotalDirtyPages -= 1;
            }
            ExReleaseSpinLockFromDpcLevel( &SharedCacheMap->ActiveVacbSpinLock );
            CcReleaseMasterLock( OldIrql );
        }

        // Now free the Vacb.

        CcFreeVirtualAddress( ActiveVacb );
    }
}
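The BytesLeftInPage computation above is easy to misread: the minus-one/plus-one dance makes an address that sits exactly on a page boundary count as the end of the previous page (zero bytes left to zero) rather than the start of the next one, which matches the comment that the stored address may be the start of the next page when a page was exactly filled. A standalone sketch of the same arithmetic, assuming 4KB pages for illustration:

#include <stdio.h>

#define PAGE_SIZE 0x1000u   /* assumption for illustration; NT derives this per platform */

/* Bytes from Address to the end of its page, where an address exactly on a
   page boundary is treated as having zero bytes left (it "belongs" to the
   page it just filled). This mirrors the NeedToZero computation above. */
static unsigned BytesLeftInPage(unsigned long Address)
{
    return PAGE_SIZE - (((Address - 1) & (PAGE_SIZE - 1)) + 1);
}

int main(void)
{
    printf("%u\n", BytesLeftInPage(0x2001)); /* 0xFFF - one byte into a page    */
    printf("%u\n", BytesLeftInPage(0x2FFF)); /* 1     - last byte of the page   */
    printf("%u\n", BytesLeftInPage(0x3000)); /* 0     - exactly on a boundary   */
    return 0;
}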

LARGE_INTEGER CcGetFlushedValidData (IN PSECTION_OBJECT_POINTERS SectionObjectPointer, IN BOOLEAN CcInternalCaller)

Definition at line 4232 of file cachesub.c.

References ASSERT, _BITMAP_RANGE::BasePage, _BCB::BcbLinks, _SHARED_CACHE_MAP::BcbList, _SHARED_CACHE_MAP::BcbSpinLock, CACHE_NTC_BCB, CcAcquireMasterLock, CcAcquireMasterLockAtDpcLevel, CcDecrementOpenCount, CcDirtySharedCacheMapList, CcFindBitmapRangeToClean(), CcIncrementOpenCount, CcReleaseMasterLock, CcScheduleLazyWriteScan(), _BCB::Dirty, _SHARED_CACHE_MAP::DirtyPages, _MBCB::DirtyPages, _BCB::FileOffset, _BITMAP_RANGE::FirstDirtyPage, FlagOn, _SHARED_CACHE_MAP::Flags, LazyWriter, _SHARED_CACHE_MAP::Mbcb, _BCB::NodeTypeCode, NULL, _SHARED_CACHE_MAP::OpenCount, _LAZY_WRITER::OtherWork, PAGE_SHIFT, _LAZY_WRITER::ScanActive, _SHARED_CACHE_MAP::SharedCacheMapLinks, _SHARED_CACHE_MAP_LIST_CURSOR::SharedCacheMapLinks, TRUE, _SHARED_CACHE_MAP::ValidDataGoal, and WRITE_QUEUED.

Referenced by CcWriteBehind().

/*++

Routine Description:

    This routine may be called by a file system to find out how far the
    Cache Manager has flushed in the stream. More accurately, this routine
    returns the FileOffset of the lowest dirty page currently in the file,
    or ValidDataGoal if the file is entirely clean.

    NOTE that even though the routine takes SectionObjectPointer, the caller
    must ensure that the stream is cached and stays cached for the duration
    of this routine, much like for the copy routines, etc.

Arguments:

    SectionObjectPointer - A pointer to the Section Object Pointers
        structure in the nonpaged Fcb.

    CcInternalCaller - must be TRUE if the caller is coming from Cc, FALSE
        otherwise. TRUE implies that the caller provides its own
        synchronization.

Return Value:

    The derived number for flushed ValidData, or MAXLONGLONG in the quad
    part if the Section is not cached. (Naturally the caller can guarantee
    that this case does not occur, and internal callers do.)

--*/

{
    PSHARED_CACHE_MAP SharedCacheMap;
    KIRQL OldIrql;
    LARGE_INTEGER NewValidDataLength;

    // External callers may be unsynchronized with this shared cache map
    // perhaps going away underneath this call. NTFS and his pair of streams
    // for compression-on-the-wire is a good example of someone who may be
    // synchronized in one stream but needs to peek at the other.

    if (!CcInternalCaller) {

        CcAcquireMasterLock( &OldIrql );

        SharedCacheMap = SectionObjectPointer->SharedCacheMap;

        if (SharedCacheMap == NULL) {
            CcReleaseMasterLock( OldIrql );
            NewValidDataLength.QuadPart = MAXLONGLONG;
            return NewValidDataLength;
        }

        CcIncrementOpenCount( SharedCacheMap, 'dfGS' );
        CcReleaseMasterLock( OldIrql );
        ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql );

    } else {

        SharedCacheMap = SectionObjectPointer->SharedCacheMap;
    }

    ASSERT( SharedCacheMap != NULL );

    // If the file is entirely clean, then we wish to return the new
    // ValidDataLength as equal to ValidDataGoal.

    NewValidDataLength = SharedCacheMap->ValidDataGoal;

    // If there may be dirty pages we will look at the last Bcb in the
    // descending-order Bcb list, and see if it describes data beyond
    // ValidDataGoal.
    //
    // It is important to note that since we use DirtyPages as a faux
    // reference count over some short windows (+1, -1), the simple fact
    // that it is nonzero does *not* mean the file is dirty.
    //
    // (This test is logically too conservative. For example, the last Bcb
    // may not even be dirty (in which case we should look at its
    // predecessor), or we may have earlier written valid data to this byte
    // range (which also means if we knew this we could look at the
    // predecessor). This simply means that the Lazy Writer may not
    // successfully get ValidDataLength updated in a file being randomly
    // accessed until the level of file access dies down, or at the latest
    // until the file is closed. However, security will never be
    // compromised.)

    if (SharedCacheMap->DirtyPages) {

        PBITMAP_RANGE BitmapRange;
        PBCB LastBcb;
        PMBCB Mbcb = SharedCacheMap->Mbcb;

        if ((Mbcb != NULL) && (Mbcb->DirtyPages != 0)) {

            BitmapRange = CcFindBitmapRangeToClean( Mbcb, 0 );

            ASSERT(BitmapRange->FirstDirtyPage != MAXULONG);

            NewValidDataLength.QuadPart = (BitmapRange->BasePage + BitmapRange->FirstDirtyPage)
                                          << PAGE_SHIFT;
        }

        LastBcb = CONTAINING_RECORD( SharedCacheMap->BcbList.Flink, BCB, BcbLinks );

        while (&LastBcb->BcbLinks != &SharedCacheMap->BcbList) {

            if ((LastBcb->NodeTypeCode == CACHE_NTC_BCB) && LastBcb->Dirty) {
                break;
            }

            LastBcb = CONTAINING_RECORD( LastBcb->BcbLinks.Flink, BCB, BcbLinks );
        }

        // Check the Base of the last entry.

        if ((&LastBcb->BcbLinks != &SharedCacheMap->BcbList) &&
            (LastBcb->FileOffset.QuadPart < NewValidDataLength.QuadPart)) {

            NewValidDataLength = LastBcb->FileOffset;
        }
    }

    if (!CcInternalCaller) {

        // Remove our reference.

        CcAcquireMasterLockAtDpcLevel();
        CcDecrementOpenCount( SharedCacheMap, 'dfGF' );

        if ((SharedCacheMap->OpenCount == 0) &&
            !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) &&
            (SharedCacheMap->DirtyPages == 0)) {

            // Move to the dirty list.

            RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );
            InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks,
                            &SharedCacheMap->SharedCacheMapLinks );

            // Make sure the Lazy Writer will wake up, because we want him to
            // delete this SharedCacheMap.

            LazyWriter.OtherWork = TRUE;
            if (!LazyWriter.ScanActive) {
                CcScheduleLazyWriteScan();
            }
        }

        ExReleaseSpinLockFromDpcLevel( &SharedCacheMap->BcbSpinLock );
        CcReleaseMasterLock( OldIrql );
    }

    return NewValidDataLength;
}
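A minimal sketch (not from cachesub.c) of how an external file system might use this routine, for example to advance the on-disk valid data length only as far as the Cache Manager has actually flushed. The MY_FCB structure and its fields are hypothetical; the CcGetFlushedValidData call and its MAXLONGLONG contract are as documented above.

#include <ntifs.h>

typedef struct _MY_FCB {                  /* hypothetical Fcb, for illustration only */
    SECTION_OBJECT_POINTERS SectionObjectPointers;
    LARGE_INTEGER OnDiskValidDataLength;
} MY_FCB, *PMY_FCB;

VOID MyFsAdvanceValidDataLength( IN PMY_FCB Fcb )
{
    LARGE_INTEGER FlushedVdl;

    /* We are an external caller, so pass FALSE and let Cc self-synchronize.
       Per the NOTE above, we must guarantee the stream stays cached for the
       duration of the call. */
    FlushedVdl = CcGetFlushedValidData( &Fcb->SectionObjectPointers, FALSE );

    /* MAXLONGLONG means the section was not cached after all; nothing to do. */
    if ((FlushedVdl.QuadPart != MAXLONGLONG) &&
        (FlushedVdl.QuadPart > Fcb->OnDiskValidDataLength.QuadPart)) {

        Fcb->OnDiskValidDataLength = FlushedVdl;  /* then persist it to disk */
    }
}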

BOOLEAN CcLogError (IN PDEVICE_OBJECT Device, IN NTSTATUS Error, IN NTSTATUS DeviceError, IN PUNICODE_STRING FileName)

Definition at line 6458 of file cachesub.c.

References ASSERT, DISPATCH_LEVEL, Error, FALSE, FileName, IoAllocateErrorLogEntry(), IoWriteErrorLogEntry(), L, min, NTSTATUS(), NULL, String, and TRUE.

Referenced by CcWriteBehind().

/*++

Routine Description:

    This routine writes an eventlog entry to the eventlog.

Arguments:

    Device - The device the error occurred on, e.g., for a failed file
        write, the device backing the file.

    Error - The error to log in the eventlog record.

    DeviceError - The actual error that occurred in the device - will be
        logged as user data.

    FileName - A pointer to the filename, if one is involved - will be
        truncated if too long to fit.

Return Value:

    TRUE if successful, FALSE if internal memory allocation failed.

--*/

{
    UCHAR ErrorPacketLength;
    PIO_ERROR_LOG_PACKET ErrorLogEntry = NULL;
    BOOLEAN RetVal = FALSE;
    PWCHAR String;
    ULONG StringSpace;

    ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL );

    ErrorPacketLength = sizeof(IO_ERROR_LOG_PACKET) + sizeof(NTSTATUS);
    if (FileName) {
        if ((FileName->Length + sizeof(WCHAR) + ErrorPacketLength) >= ERROR_LOG_MAXIMUM_SIZE) {
            ErrorPacketLength = ERROR_LOG_MAXIMUM_SIZE - 1;
        } else {
            ErrorPacketLength += (UCHAR)(FileName->Length + sizeof(WCHAR));
        }
    }

    ASSERT( ErrorPacketLength < UCHAR_MAX );

    ErrorLogEntry = (PIO_ERROR_LOG_PACKET) IoAllocateErrorLogEntry( Device, ErrorPacketLength );

    if (ErrorLogEntry) {
        ErrorLogEntry->ErrorCode = Error;
        ErrorLogEntry->DumpDataSize = sizeof(NTSTATUS);
        ErrorLogEntry->DumpData[0] = DeviceError;

        if (FileName && FileName->Length) {

            // The filename string goes at the end of the error log entry. We
            // may have to truncate it to fit in the limited space if the name
            // is too long.

            StringSpace = ErrorPacketLength - sizeof( IO_ERROR_LOG_PACKET ) - sizeof( WCHAR );
            String = (PWCHAR) (ErrorLogEntry + 1);
            ErrorLogEntry->NumberOfStrings = 1;
            ErrorLogEntry->StringOffset = sizeof( IO_ERROR_LOG_PACKET );
            RtlCopyMemory( String,
                           FileName->Buffer,
                           min( FileName->Length, StringSpace ) );

            // Make sure the string is null terminated.

            String += min( (FileName->Length / sizeof( WCHAR )), (StringSpace / sizeof( WCHAR )) );
            *String = L'\0';
        }

        IoWriteErrorLogEntry( ErrorLogEntry );
        RetVal = TRUE;
    }

    return RetVal;
}
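For comparison, a minimal sketch of the same IoAllocateErrorLogEntry/IoWriteErrorLogEntry pattern as an ordinary driver would use it, without the filename-truncation logic. MyDriverLogError is hypothetical; in a real driver the ErrorCode value would come from the driver's own message (.mc) file.

#include <ntddk.h>

/* Log a single NTSTATUS as dump data against DeviceObject - a stripped-down
   version of the pattern CcLogError implements above. */
VOID MyDriverLogError( IN PDEVICE_OBJECT DeviceObject, IN NTSTATUS ErrorCode, IN NTSTATUS DeviceError )
{
    PIO_ERROR_LOG_PACKET Entry;
    UCHAR Size = sizeof(IO_ERROR_LOG_PACKET) + sizeof(NTSTATUS);

    Entry = (PIO_ERROR_LOG_PACKET) IoAllocateErrorLogEntry( DeviceObject, Size );
    if (Entry == NULL) {
        return;                              /* logging is best-effort; just drop it */
    }

    Entry->ErrorCode = ErrorCode;            /* e.g., an IO_ERR_* code */
    Entry->DumpDataSize = sizeof(NTSTATUS);
    Entry->DumpData[0] = (ULONG)DeviceError; /* the underlying device status */

    IoWriteErrorLogEntry( Entry );           /* the I/O manager frees the entry */
}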

VOID CcMapAndCopy (IN PSHARED_CACHE_MAP SharedCacheMap, IN PVOID UserBuffer, IN PLARGE_INTEGER FileOffset, IN ULONG Length, IN ULONG ZeroFlags, IN BOOLEAN WriteThrough)

Definition at line 6024 of file cachesub.c.

References ACTIVE_PAGE_IS_DIRTY, ASSERT, _VACB::BaseAddress, CcAcquireVacbLock, CcCopyReadExceptionFilter(), CcFreeActiveVacb(), CcFreeVirtualAddress(), CcGetVirtualAddress(), CcMaxDirtyWrite, CcReleaseVacbLock, CcSetDirtyInMask(), DebugTrace, DebugTrace2, ExRaiseStatus(), FlagOn, FsRtlNormalizeNtstatus(), me, MmCopyToCachedPage(), MmFlushSection(), MmResetPageFaultReadAhead, MmSavePageFaultReadAhead, MmSetAddressRangeModified(), MmSetPageFaultReadAhead, NT_SUCCESS, NTSTATUS(), NULL, _VACB::Overlay, PAGE_SHIFT, PAGE_SIZE, PsGetCurrentThread, SetActiveVacb, Status, TRUE, try_return, ZERO_FIRST_PAGE, ZERO_LAST_PAGE, and ZERO_MIDDLE_PAGES.

Referenced by CcCopyWrite(), and CcFastCopyWrite().

/*++

Routine Description:

    This routine may be called to copy the specified user data to the cache
    via a special Mm routine which copies the data to uninitialized pages
    and returns.

Arguments:

    SharedCacheMap - Supplies the address of the SharedCacheMap for the data.

    UserBuffer - unsafe buffer supplying the user's data to be written

    FileOffset - Supplies the file offset to be modified

    Length - Supplies the total amount of data

    ZeroFlags - Defines which pages may be zeroed if not resident.

    WriteThrough - Supplies whether the data is to be written through or not

Return Value:

    None

--*/

{
    ULONG ReceivedLength;
    ULONG ZeroCase;
    PVOID CacheBuffer;
    PVOID SavedMappedBuffer;
    ULONG SavedMappedLength;
    ULONG ActivePage;
    KIRQL OldIrql;
    LARGE_INTEGER PFileOffset;
    IO_STATUS_BLOCK IoStatus;
    NTSTATUS Status;
    ULONG SavedState;
    BOOLEAN MorePages;
    ULONG SavedTotalLength = Length;
    LARGE_INTEGER LocalOffset = *FileOffset;
    ULONG PageOffset = FileOffset->LowPart & (PAGE_SIZE - 1);
    PVACB Vacb = NULL;
    PETHREAD Thread = PsGetCurrentThread();

    // Initialize SavePage to TRUE to skip the finally clause on zero-length
    // writes.

    BOOLEAN SavePage = TRUE;

    DebugTrace(+1, me, "CcMapAndCopy:\n", 0 );
    DebugTrace( 0, me, "    SharedCacheMap = %08lx\n", SharedCacheMap );
    DebugTrace2(0, me, "    FileOffset = %08lx, %08lx\n", FileOffset->LowPart,
                FileOffset->HighPart );
    DebugTrace( 0, me, "    Length = %08lx\n", Length );

    MmSavePageFaultReadAhead( Thread, &SavedState );

    // try around everything for cleanup.

    try {

        while (Length != 0) {

            CacheBuffer = CcGetVirtualAddress( SharedCacheMap,
                                               LocalOffset,
                                               &Vacb,
                                               &ReceivedLength );

            // If we got more than we need, make sure to only use the right
            // amount.

            if (ReceivedLength > Length) {
                ReceivedLength = Length;
            }
            SavedMappedBuffer = CacheBuffer;
            SavedMappedLength = ReceivedLength;
            Length -= ReceivedLength;

            // Now loop to touch all of the pages, calling MM to ensure that
            // if we fault, we take in exactly the number of pages we need.

            CacheBuffer = (PVOID)((PCHAR)CacheBuffer - PageOffset);
            ReceivedLength += PageOffset;

            // Loop to touch or zero the pages.

            ZeroCase = ZERO_FIRST_PAGE;

            // Set up offset to page for use below.

            PFileOffset = LocalOffset;
            PFileOffset.LowPart -= PageOffset;

            while (TRUE) {

                // Calculate whether we wish to save an active page or not.

                SavePage = ((Length == 0) &&
                            (ReceivedLength < PAGE_SIZE) &&
                            (SavedTotalLength <= (PAGE_SIZE / 2)) &&
                            !WriteThrough);

                MorePages = (ReceivedLength > PAGE_SIZE);

                // Copy the data from the user buffer.

                try {

                    // It is possible that there is a locked page hanging
                    // around, and so we need to nuke it here.

                    if (SharedCacheMap->NeedToZero != NULL) {
                        CcFreeActiveVacb( SharedCacheMap, NULL, 0, 0 );
                    }

                    Status = STATUS_SUCCESS;
                    if (FlagOn(ZeroFlags, ZeroCase)) {

                        Status = MmCopyToCachedPage( CacheBuffer,
                                                     UserBuffer,
                                                     PageOffset,
                                                     MorePages ?
                                                       (PAGE_SIZE - PageOffset) :
                                                       (ReceivedLength - PageOffset),
                                                     SavePage );

                        if (!NT_SUCCESS(Status)) {

                            ExRaiseStatus( FsRtlNormalizeNtstatus( Status,
                                                                   STATUS_INVALID_USER_BUFFER ));
                        }

                    // Otherwise, we have to actually copy the data ourselves.

                    } else {

                        MmSetPageFaultReadAhead( Thread,
                                                 (MorePages && FlagOn(ZeroFlags, ZERO_LAST_PAGE)) ? 1 : 0);

                        RtlCopyBytes( (PVOID)((PCHAR)CacheBuffer + PageOffset),
                                      UserBuffer,
                                      MorePages ?
                                        (PAGE_SIZE - PageOffset) :
                                        (ReceivedLength - PageOffset) );

                        MmResetPageFaultReadAhead( Thread, SavedState );
                    }

                } except( CcCopyReadExceptionFilter( GetExceptionInformation(),
                                                     &Status ) ) {

                    // If we got an access violation, then the user buffer
                    // went away. Otherwise we must have gotten an I/O error
                    // trying to bring the data in.

                    if (Status == STATUS_ACCESS_VIOLATION) {
                        ExRaiseStatus( STATUS_INVALID_USER_BUFFER );
                    } else {
                        ExRaiseStatus( FsRtlNormalizeNtstatus( Status,
                                                               STATUS_UNEXPECTED_IO_ERROR ));
                    }
                }

                // Now get out quickly if it is a small write and we want to
                // save the page.

                if (SavePage) {

                    ActivePage = (ULONG)( Vacb->Overlay.FileOffset.QuadPart >> PAGE_SHIFT ) +
                                 (ULONG)(((PCHAR)CacheBuffer - (PCHAR)Vacb->BaseAddress) >>
                                         PAGE_SHIFT);

                    PFileOffset.LowPart += ReceivedLength;

                    // If the cache page was left locked by MmCopyToCachedPage,
                    // then remember the address to zero from.

                    if (Status == STATUS_CACHE_PAGE_LOCKED) {

                        // We need to guarantee this Vacb for zeroing and
                        // calling MmUnlockCachedPage, so we increment the
                        // active count here and remember it for
                        // CcFreeActiveVacb.

                        CcAcquireVacbLock( &OldIrql );
                        Vacb->Overlay.ActiveCount += 1;

                        ExAcquireSpinLockAtDpcLevel( &SharedCacheMap->ActiveVacbSpinLock );

                        ASSERT(SharedCacheMap->NeedToZero == NULL);

                        SharedCacheMap->NeedToZero = (PVOID)((PCHAR)CacheBuffer +
                                                             (PFileOffset.LowPart & (PAGE_SIZE - 1)));
                        SharedCacheMap->NeedToZeroPage = ActivePage;
                        SharedCacheMap->NeedToZeroVacb = Vacb;

                        ExReleaseSpinLockFromDpcLevel( &SharedCacheMap->ActiveVacbSpinLock );
                        CcReleaseVacbLock( OldIrql );
                    }

                    SetActiveVacb( SharedCacheMap,
                                   OldIrql,
                                   Vacb,
                                   ActivePage,
                                   ACTIVE_PAGE_IS_DIRTY );

                    try_return( NOTHING );
                }

                // If it looks like we may save a page and exit on the next
                // loop, then we must make sure to mark the current page
                // dirty. Note that Cc[Fast]CopyWrite will finish the last
                // part of any page before allowing us to free the Active
                // Vacb above, therefore this case only occurs for a small
                // random write.

                if ((SavedTotalLength <= (PAGE_SIZE / 2)) && !WriteThrough) {

                    CcSetDirtyInMask( SharedCacheMap, &PFileOffset, ReceivedLength );
                }

                UserBuffer = (PVOID)((PCHAR)UserBuffer + (PAGE_SIZE - PageOffset));
                PageOffset = 0;

                // If there is more than a page to go (including what we just
                // copied), then adjust our buffer pointer and counts, and
                // determine if we are to the last page yet.

                if (MorePages) {

                    CacheBuffer = (PCHAR)CacheBuffer + PAGE_SIZE;
                    ReceivedLength -= PAGE_SIZE;

                    // Update our offset to the page. Note that a 32-bit add
                    // is ok since we cannot cross a Vacb boundary and we
                    // reinitialize this offset before entering this loop
                    // again.

                    PFileOffset.LowPart += PAGE_SIZE;

                    if (ReceivedLength > PAGE_SIZE) {
                        ZeroCase = ZERO_MIDDLE_PAGES;
                    } else {
                        ZeroCase = ZERO_LAST_PAGE;
                    }

                } else {

                    break;
                }
            }

            // If there is still more to write (i.e., we are going to step
            // onto the next Vacb) AND we just dirtied more than 64K, then do
            // a vicarious MmFlushSection here. This prevents us from creating
            // unlimited dirty pages while holding the file resource
            // exclusive. We also do not need to set the pages dirty in the
            // mask in this case.

            if (Length > CcMaxDirtyWrite) {

                MmSetAddressRangeModified( SavedMappedBuffer, SavedMappedLength );
                MmFlushSection( SharedCacheMap->FileObject->SectionObjectPointer,
                                &LocalOffset,
                                SavedMappedLength,
                                &IoStatus,
                                TRUE );

                if (!NT_SUCCESS(IoStatus.Status)) {
                    ExRaiseStatus( FsRtlNormalizeNtstatus( IoStatus.Status,
                                                           STATUS_UNEXPECTED_IO_ERROR ));
                }

            // For write through files, call Mm to propagate the dirty bits
            // here while we have the view mapped, so we know the flush will
            // work below. Again - do not set dirty in the mask.

            } else if (WriteThrough) {

                MmSetAddressRangeModified( SavedMappedBuffer, SavedMappedLength );

            // For the normal case, just set the pages dirty for the Lazy
            // Writer now.

            } else {

                CcSetDirtyInMask( SharedCacheMap, &LocalOffset, SavedMappedLength );
            }

            CcFreeVirtualAddress( Vacb );
            Vacb = NULL;

            // If we have to loop back to get at least a page, it will be ok
            // to zero the first page. If we are not getting at least a page,
            // we must make sure we clear the ZeroFlags if we cannot zero the
            // last page.

            if (Length >= PAGE_SIZE) {
                ZeroFlags |= ZERO_FIRST_PAGE;
            } else if ((ZeroFlags & ZERO_LAST_PAGE) == 0) {
                ZeroFlags = 0;
            }

            // Note that if ReceivedLength (and therefore SavedMappedLength)
            // was truncated to the transfer size then the new LocalOffset
            // computed below is not correct. This is not an issue since in
            // that case (Length == 0) we would never get here.

            LocalOffset.QuadPart = LocalOffset.QuadPart + (LONGLONG)SavedMappedLength;
        }
    try_exit: NOTHING;
    }

    // Cleanup on the way out.

    finally {

        MmResetPageFaultReadAhead( Thread, SavedState );

        // We have no work to do if we have squirreled away the Vacb.

        if (!SavePage || AbnormalTermination()) {

            // Make sure we do not leave anything mapped or dirty in the PTE
            // on the way out.

            if (Vacb != NULL) {

                CcFreeVirtualAddress( Vacb );
            }

            // Either flush the whole range because of write through, or mark
            // it dirty for the lazy writer.

            if (WriteThrough) {

                MmFlushSection ( SharedCacheMap->FileObject->SectionObjectPointer,
                                 FileOffset,
                                 SavedTotalLength,
                                 &IoStatus,
                                 TRUE );

                if (!NT_SUCCESS(IoStatus.Status)) {
                    ExRaiseStatus( FsRtlNormalizeNtstatus( IoStatus.Status,
                                                           STATUS_UNEXPECTED_IO_ERROR ));
                }

                // Advance ValidDataGoal.

                LocalOffset.QuadPart = FileOffset->QuadPart + (LONGLONG)SavedTotalLength;
                if (LocalOffset.QuadPart > SharedCacheMap->ValidDataGoal.QuadPart) {
                    SharedCacheMap->ValidDataGoal = LocalOffset;
                }
            }
        }
    }

    DebugTrace(-1, me, "CcMapAndCopy -> VOID\n", 0 );

    return;
}
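The ZERO_FIRST_PAGE to ZERO_MIDDLE_PAGES to ZERO_LAST_PAGE progression above decides, per page, whether the page may be handed over uninitialized (because the copy will overwrite all of it) or must be faulted in first. A minimal user-mode sketch of just that state machine, assuming 4KB pages and illustrative flag values (the real definitions live in Cc's headers), with the Mm interaction replaced by a printout:

#include <stdio.h>

#define PAGE_SIZE         0x1000u
#define ZERO_FIRST_PAGE   0x1     /* flag values are illustrative only */
#define ZERO_MIDDLE_PAGES 0x2
#define ZERO_LAST_PAGE    0x4

/* Walk a transfer of Length bytes starting PageOffset into the first page,
   printing which ZeroCase applies to each page - the same progression
   CcMapAndCopy drives while copying. */
static void WalkZeroCases(unsigned PageOffset, unsigned Length, unsigned ZeroFlags)
{
    unsigned Received = Length + PageOffset;  /* bytes counted from the page base */
    unsigned ZeroCase = ZERO_FIRST_PAGE;
    unsigned Page = 0;

    for (;;) {
        int MorePages = (Received > PAGE_SIZE);

        printf("page %u: %s\n", Page,
               (ZeroFlags & ZeroCase) ? "may be zeroed (fully overwritten)"
                                      : "must be read/faulted in");

        if (!MorePages) break;

        Received -= PAGE_SIZE;
        Page += 1;
        ZeroCase = (Received > PAGE_SIZE) ? ZERO_MIDDLE_PAGES : ZERO_LAST_PAGE;
    }
}

int main(void)
{
    /* A page-aligned, whole-page write may zero everything, but ... */
    WalkZeroCases(0, 3 * PAGE_SIZE, ZERO_FIRST_PAGE | ZERO_MIDDLE_PAGES | ZERO_LAST_PAGE);

    /* ... a write that starts mid-page must fault the first page in. */
    WalkZeroCases(0x200, 2 * PAGE_SIZE, ZERO_MIDDLE_PAGES | ZERO_LAST_PAGE);
    return 0;
}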

BOOLEAN CcMapAndRead (IN PSHARED_CACHE_MAP SharedCacheMap, IN PLARGE_INTEGER FileOffset, IN ULONG Length, IN ULONG ZeroFlags, IN BOOLEAN Wait, IN PVOID BaseAddress)

Definition at line 5749 of file cachesub.c.

References COMPUTE_PAGES_SPANNED, FALSE, FlagOn, MmCheckCachedPageState(), MmResetPageFaultReadAhead, MmSavePageFaultReadAhead, MmSetPageFaultReadAhead, PAGE_SIZE, PsGetCurrentThread, TRUE, try_return, ZERO_FIRST_PAGE, ZERO_LAST_PAGE, and ZERO_MIDDLE_PAGES.

Referenced by CcPinFileData(), and CcPrepareMdlWrite().

/*++

Routine Description:

    This routine may be called to ensure that the specified data is mapped,
    read into memory and locked. If TRUE is returned, then the correct I/O
    status for the transfer is also returned, along with a system-space
    address for the data.

Arguments:

    SharedCacheMap - Supplies the address of the SharedCacheMap for the data.

    FileOffset - Supplies the file offset of the desired data.

    Length - Supplies the total amount of data desired.

    ZeroFlags - Defines which pages may be zeroed if not resident.

    Wait - Supplies FALSE if the caller is not willing to block for the
        data, or TRUE if the caller is willing to block.

    BaseAddress - Supplies the system base address at which the data may be
        accessed.

Return Value:

    FALSE - if the caller supplied Wait = FALSE and the data could not be
        returned without blocking.

    TRUE - if the data is being returned.

    Note: this routine may raise an exception due to a map or read failure;
    however, this can only happen if Wait was specified as TRUE, since
    mapping and reading will not be performed if the caller cannot wait.

--*/

{
    ULONG ZeroCase;
    ULONG SavedState;
    BOOLEAN Result = FALSE;
    PETHREAD Thread = PsGetCurrentThread();

    MmSavePageFaultReadAhead( Thread, &SavedState );

    // try around everything for cleanup.

    try {

        ULONG PagesToGo;

        // Now loop to touch all of the pages, calling MM to ensure that if
        // we fault, we take in exactly the number of pages we need.

        PagesToGo = COMPUTE_PAGES_SPANNED( BaseAddress, Length );

        // Loop to touch or zero the pages.

        ZeroCase = ZERO_FIRST_PAGE;

        while (PagesToGo) {

            // If we cannot zero this page, or Mm failed to return a zeroed
            // page, then just fault it in.

            MmSetPageFaultReadAhead( Thread, (PagesToGo - 1) );

            if (!FlagOn(ZeroFlags, ZeroCase) ||
                !MmCheckCachedPageState(BaseAddress, TRUE)) {

                // If we get here, it is almost certainly due to the fact
                // that we cannot take a zero page. MmCheckCachedPageState
                // will so rarely return FALSE, that we will not worry about
                // it. We will only check if the page is there if Wait is
                // FALSE, so that we can do the right thing.

                if (!MmCheckCachedPageState(BaseAddress, FALSE) && !Wait) {
                    try_return( Result = FALSE );
                }
            }

            BaseAddress = (PCHAR)BaseAddress + PAGE_SIZE;
            PagesToGo -= 1;

            if (PagesToGo == 1) {
                ZeroCase = ZERO_LAST_PAGE;
            } else {
                ZeroCase = ZERO_MIDDLE_PAGES;
            }
        }

        try_return( Result = TRUE );

    try_exit: NOTHING;
    }

    // Cleanup on the way out.

    finally {

        MmResetPageFaultReadAhead(Thread, SavedState);
    }

    return Result;
}
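COMPUTE_PAGES_SPANNED has to account for the base address not being page aligned, so a buffer of a single page's length can still touch two pages. A sketch of the usual computation, assuming 4KB pages (the real macro is defined in Cc's headers):

#include <stdio.h>

#define PAGE_SIZE 0x1000u

/* Number of pages the buffer [Base, Base+Length) touches, counting the
   partial pages at both ends - equivalent in spirit to
   COMPUTE_PAGES_SPANNED. */
static unsigned PagesSpanned(unsigned long Base, unsigned long Length)
{
    return (unsigned)(((Base & (PAGE_SIZE - 1)) + Length + (PAGE_SIZE - 1)) / PAGE_SIZE);
}

int main(void)
{
    printf("%u\n", PagesSpanned(0x1000, 0x1000)); /* 1: exactly one aligned page     */
    printf("%u\n", PagesSpanned(0x1800, 0x1000)); /* 2: one page's worth, split      */
    printf("%u\n", PagesSpanned(0x1FFF, 2));      /* 2: two bytes across a boundary  */
    return 0;
}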

VOID FASTCALL CcPerformReadAhead (IN PFILE_OBJECT FileObject)

Definition at line 1633 of file cachesub.c.

References _CACHE_MANAGER_CALLBACKS::AcquireForReadAhead, _SHARED_CACHE_MAP::Callbacks, CcAcquireMasterLock, CcDecrementOpenCount, CcDirtySharedCacheMapList, CcFreeVirtualAddress(), CcGetVirtualAddress(), CcMissCounter, CcReadAheadIos, CcReleaseMasterLock, CcScheduleLazyWriteScan(), CcThrowAway, COMPUTE_PAGES_SPANNED, DebugTrace, _SHARED_CACHE_MAP::DirtyPages, FALSE, _SHARED_CACHE_MAP::FileSize, FlagOn, _SHARED_CACHE_MAP::Flags, FO_SEQUENTIAL_ONLY, _SHARED_CACHE_MAP::LazyWriteContext, LazyWriter, MAX_READ_AHEAD, me, MmCheckCachedPageState(), MmResetPageFaultReadAhead, MmSavePageFaultReadAhead, MmSetPageFaultReadAhead, NULL, ObDereferenceObject, Offset, _SHARED_CACHE_MAP::OpenCount, _LAZY_WRITER::OtherWork, PAGE_SIZE, PsGetCurrentThread, _PRIVATE_CACHE_MAP::ReadAheadActive, _PRIVATE_CACHE_MAP::ReadAheadEnabled, _PRIVATE_CACHE_MAP::ReadAheadLength, _PRIVATE_CACHE_MAP::ReadAheadOffset, _PRIVATE_CACHE_MAP::ReadAheadSpinLock, _CACHE_MANAGER_CALLBACKS::ReleaseFromReadAhead, _LAZY_WRITER::ScanActive, _SHARED_CACHE_MAP::SharedCacheMapLinks, _SHARED_CACHE_MAP_LIST_CURSOR::SharedCacheMapLinks, TRUE, try_return, and WRITE_QUEUED.

Referenced by CcWorkerThread().

/*++

Routine Description:

    This routine is called by the Lazy Writer to perform read ahead which
    has been scheduled for this file by CcScheduleReadAhead.

Arguments:

    FileObject - supplies a pointer to the FileObject on which read ahead
        should be considered.

Return Value:

    None

--*/

{
    KIRQL OldIrql;
    PSHARED_CACHE_MAP SharedCacheMap;
    PPRIVATE_CACHE_MAP PrivateCacheMap;
    ULONG i;
    LARGE_INTEGER ReadAheadOffset[2];
    ULONG ReadAheadLength[2];
    PCACHE_MANAGER_CALLBACKS Callbacks;
    PVOID Context;
    ULONG SavedState;
    BOOLEAN Done;
    BOOLEAN HitEof = FALSE;
    BOOLEAN ReadAheadPerformed = FALSE;
    ULONG FaultOccurred = 0;
    PETHREAD Thread = PsGetCurrentThread();
    PVACB Vacb = NULL;

    BOOLEAN ResourceHeld = FALSE;

    DebugTrace(+1, me, "CcPerformReadAhead:\n", 0 );
    DebugTrace( 0, me, "    FileObject = %08lx\n", FileObject );

    MmSavePageFaultReadAhead( Thread, &SavedState );

    try {

        // Since we have the open count biased, we can safely access the
        // SharedCacheMap.

        SharedCacheMap = FileObject->SectionObjectPointer->SharedCacheMap;

        Callbacks = SharedCacheMap->Callbacks;
        Context = SharedCacheMap->LazyWriteContext;

        // After the first time, keep looping as long as there are new read
        // ahead requirements. (We will skip out below.)

        while (TRUE) {

            // Get SharedCacheMap and PrivateCacheMap. If either is now NULL,
            // get out.

            CcAcquireMasterLock( &OldIrql );

            PrivateCacheMap = FileObject->PrivateCacheMap;

            // Now capture the information that we need, so that we can drop
            // the SharedList Resource. This information is advisory only
            // anyway, and the caller must guarantee that the FileObject is
            // referenced.

            if (PrivateCacheMap != NULL) {

                ExAcquireSpinLockAtDpcLevel( &PrivateCacheMap->ReadAheadSpinLock );

                // We are done when the lengths are 0.

                Done = ((PrivateCacheMap->ReadAheadLength[0] |
                         PrivateCacheMap->ReadAheadLength[1]) == 0);

                ReadAheadOffset[0] = PrivateCacheMap->ReadAheadOffset[0];
                ReadAheadOffset[1] = PrivateCacheMap->ReadAheadOffset[1];
                ReadAheadLength[0] = PrivateCacheMap->ReadAheadLength[0];
                ReadAheadLength[1] = PrivateCacheMap->ReadAheadLength[1];
                PrivateCacheMap->ReadAheadLength[0] = 0;
                PrivateCacheMap->ReadAheadLength[1] = 0;

                ExReleaseSpinLockFromDpcLevel( &PrivateCacheMap->ReadAheadSpinLock );
            }

            CcReleaseMasterLock( OldIrql );

            // Acquire the file shared.

            (*Callbacks->AcquireForReadAhead)( Context, TRUE );
            ResourceHeld = TRUE;

            if ((PrivateCacheMap == NULL) || Done) {

                try_return( NOTHING );
            }

            // PERFORM READ AHEAD
            //
            // Now loop until everything is read in. The read ahead is
            // accomplished by touching the pages with an appropriate
            // ReadAhead parameter in MM.

            i = 0;

            do {

                LARGE_INTEGER Offset, SavedOffset;
                ULONG Length, SavedLength;

                Offset = ReadAheadOffset[i];
                Length = ReadAheadLength[i];
                SavedOffset = Offset;
                SavedLength = Length;

                if ((Length != 0)

                        &&

                    ( Offset.QuadPart <= SharedCacheMap->FileSize.QuadPart )) {

                    ReadAheadPerformed = TRUE;

                    // Keep length within the file and MAX_READ_AHEAD.

                    if (( Offset.QuadPart + (LONGLONG)Length ) >= SharedCacheMap->FileSize.QuadPart) {

                        Length = (ULONG)( SharedCacheMap->FileSize.QuadPart - Offset.QuadPart );
                        HitEof = TRUE;
                    }
                    if (Length > MAX_READ_AHEAD) {
                        Length = MAX_READ_AHEAD;
                    }

                    // Now loop to read all of the desired data in. This loop
                    // is more or less like the same loop to read data in
                    // CcCopyRead, except that we do not copy anything, just
                    // unmap as soon as it is in.

                    while (Length != 0) {

                        ULONG ReceivedLength;
                        PVOID CacheBuffer;
                        ULONG PagesToGo;

                        // Call the local routine to map or access the file
                        // data. If we cannot map the data because of a Wait
                        // condition, return FALSE.
                        //
                        // Since this routine is intended to be called from
                        // the finally handler from file system read modules,
                        // it is imperative that it not raise any exceptions.
                        // Therefore, if any expected exception is raised, we
                        // will simply get out.

                        CacheBuffer = CcGetVirtualAddress( SharedCacheMap,
                                                           Offset,
                                                           &Vacb,
                                                           &ReceivedLength );

                        // If we got more than we need, make sure to only
                        // transfer the right amount.

                        if (ReceivedLength > Length) {
                            ReceivedLength = Length;
                        }

                        // Now loop to touch all of the pages, calling MM to
                        // ensure that if we fault, we take in exactly the
                        // number of pages we need.

                        PagesToGo = COMPUTE_PAGES_SPANNED( CacheBuffer,
                                                           ReceivedLength );

                        CcMissCounter = &CcReadAheadIos;

                        while (PagesToGo) {

                            MmSetPageFaultReadAhead( Thread, (PagesToGo - 1) );
                            FaultOccurred |= !MmCheckCachedPageState(CacheBuffer, FALSE);

                            CacheBuffer = (PCHAR)CacheBuffer + PAGE_SIZE;
                            PagesToGo -= 1;
                        }
                        CcMissCounter = &CcThrowAway;

                        // Calculate how much data we have left to go.

                        Length -= ReceivedLength;

                        // Assume we did not get all the data we wanted, and
                        // set Offset to the end of the returned data.

                        Offset.QuadPart = Offset.QuadPart + (LONGLONG)ReceivedLength;

                        // It was only a page, so we can just leave this loop
                        // after freeing the address.

                        CcFreeVirtualAddress( Vacb );
                        Vacb = NULL;
                    }
                }
                i += 1;
            } while (i <= 1);

            // Release the file.

            (*Callbacks->ReleaseFromReadAhead)( Context );
            ResourceHeld = FALSE;
        }

    try_exit: NOTHING;
    }
    finally {

        MmResetPageFaultReadAhead(Thread, SavedState);
        CcMissCounter = &CcThrowAway;

        // If we got an error faulting a single page in, release the Vacb
        // here. It is important to free any mapping before dropping the
        // resource to prevent purge problems.

        if (Vacb != NULL) {
            CcFreeVirtualAddress( Vacb );
        }

        // Release the file.

        if (ResourceHeld) {
            (*Callbacks->ReleaseFromReadAhead)( Context );
        }

        // To show we are done, we must make sure the PrivateCacheMap is
        // still there.

        CcAcquireMasterLock( &OldIrql );

        PrivateCacheMap = FileObject->PrivateCacheMap;

        // Show read ahead is going inactive.

        if (PrivateCacheMap != NULL) {

            ExAcquireSpinLockAtDpcLevel( &PrivateCacheMap->ReadAheadSpinLock );
            PrivateCacheMap->ReadAheadActive = FALSE;

            // If he said sequential only and we smashed into Eof, then let's
            // reset the high-water mark in case he wants to read the file
            // sequentially again.

            if (HitEof && FlagOn(FileObject->Flags, FO_SEQUENTIAL_ONLY)) {
                PrivateCacheMap->ReadAheadOffset[1].LowPart =
                    PrivateCacheMap->ReadAheadOffset[1].HighPart = 0;
            }

            // If no faults occurred, turn read ahead off.

            if (ReadAheadPerformed && !FaultOccurred) {
                PrivateCacheMap->ReadAheadEnabled = FALSE;
            }

            ExReleaseSpinLockFromDpcLevel( &PrivateCacheMap->ReadAheadSpinLock );
        }

        // Free SharedCacheMap list.

        CcReleaseMasterLock( OldIrql );

        ObDereferenceObject( FileObject );

        // Serialize again to decrement the open count.

        CcAcquireMasterLock( &OldIrql );

        CcDecrementOpenCount( SharedCacheMap, 'adRP' );

        if ((SharedCacheMap->OpenCount == 0) &&
            !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED) &&
            (SharedCacheMap->DirtyPages == 0)) {

            // Move to the dirty list.

            RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );
            InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks,
                            &SharedCacheMap->SharedCacheMapLinks );

            // Make sure the Lazy Writer will wake up, because we want him to
            // delete this SharedCacheMap.

            LazyWriter.OtherWork = TRUE;
            if (!LazyWriter.ScanActive) {
                CcScheduleLazyWriteScan();
            }
        }

        CcReleaseMasterLock( OldIrql );
    }

    DebugTrace(-1, me, "CcPerformReadAhead -> VOID\n", 0 );

    return;
}
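Read ahead runs in a Cc worker thread, so the file system supplies the synchronization through the callback table it registered with CcInitializeCacheMap (the Callbacks/LazyWriteContext pair captured at the top of this routine). A minimal sketch of the read-ahead half of such a table, assuming a hypothetical FCB that embeds an ERESOURCE; the callback prototypes follow the documented CACHE_MANAGER_CALLBACKS shapes:

#include <ntifs.h>

typedef struct _MY_FCB {            /* hypothetical file-system FCB */
    ERESOURCE Resource;
} MY_FCB, *PMY_FCB;

static BOOLEAN MyAcquireForReadAhead( IN PVOID Context, IN BOOLEAN Wait )
{
    /* Cc passes back the LazyWriteContext we registered; acquire shared so
       read ahead can proceed alongside other readers. */
    return ExAcquireResourceSharedLite( &((PMY_FCB)Context)->Resource, Wait );
}

static VOID MyReleaseFromReadAhead( IN PVOID Context )
{
    ExReleaseResourceLite( &((PMY_FCB)Context)->Resource );
}

/* AcquireForLazyWrite/ReleaseFromLazyWrite are omitted here; they follow
   the same shape and complete the CACHE_MANAGER_CALLBACKS table passed to
   CcInitializeCacheMap. */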

BOOLEAN CcPinFileData (IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER FileOffset, IN ULONG Length, IN BOOLEAN ReadOnly, IN BOOLEAN WriteOnly, IN ULONG Flags, OUT PBCB *Bcb, OUT PVOID *BaseAddress, OUT PLARGE_INTEGER BeyondLastByte)

Definition at line 116 of file cachesub.c.

References ASSERT, _BCB::BaseAddress, _SHARED_CACHE_MAP::BcbSpinLock, _BCB::BeyondLastByte, _BCB::ByteLength, CcAllocateInitializeBcb(), CcDereferenceFileOffset(), CcFindBcb(), CcFreeActiveVacb(), CcFreeVirtualAddress(), CcGetVirtualAddress(), CcMapAndRead(), CcReferenceFileOffset(), CcUnpinFileData(), DebugTrace, DebugTrace2, ExAcquireResourceExclusive, ExAcquireSharedStarveExclusive(), ExRaiseStatus(), FALSE, _BCB::FileOffset, FlagOn, _SHARED_CACHE_MAP::Flags, GetActiveVacb, me, MODIFIED_WRITE_DISABLED, _SHARED_CACHE_MAP::NeedToZero, NULL, PAGE_SIZE, PIN_ACCESS, PIN_EXCLUSIVE, PIN_IF_BCB, PIN_NO_READ, PIN_WAIT, _BCB::PinCount, _BCB::Resource, ROUND_TO_PAGES, _SHARED_CACHE_MAP::SectionSize, TRUE, try_return, UNPIN, _BCB::Vacb, VACB_MAPPING_GRANULARITY, _SHARED_CACHE_MAP::ValidDataGoal, VOID(), ZERO_FIRST_PAGE, ZERO_LAST_PAGE, and ZERO_MIDDLE_PAGES.

Referenced by CcCopyRead(), CcCopyWrite(), CcMapData(), CcPinMappedData(), CcPinRead(), CcPreparePinWrite(), and CcZeroData().

Routine Description:

    This routine locks the specified range of file data into memory.

    Note that the data desired by the caller (or the first part of it)
    may be in one of three states:

        No Bcb exists which describes the data

        A Bcb exists describing the data, but it is not mapped
        (BcbOut->BaseAddress == NULL)

        A Bcb exists describing the data, and it is mapped

    Given the above three states, and given that the caller may call
    with either Wait == FALSE or Wait == TRUE, this routine has basically
    six cases.  What has to be done, and the order in which things must be
    done, varies quite a bit with each of these six cases.  The most
    straightforward implementation of this routine, with the least amount
    of branching, is achieved by determining which of the six cases applies,
    and dispatching fairly directly to that case.  The handling of the
    cases is summarized in the following table:

                  Wait == TRUE                  Wait == FALSE
                  ------------                  -------------

    no Bcb        Case 1:                       Case 2:

                  CcAllocateInitializeBcb       CcMapAndRead (exit if FALSE)
                  Acquire Bcb Exclusive         CcAllocateInitializeBcb
                  Release BcbList SpinLock      Acquire Bcb Shared if not ReadOnly
                  CcMapAndRead w/ Wait          Release BcbList SpinLock
                  Convert/Release Bcb Resource

    Bcb not       Case 3:                       Case 4:
    mapped
                  Increment PinCount            Acquire Bcb Exclusive (exit if FALSE)
                  Release BcbList SpinLock      CcMapAndRead (exit if FALSE)
                  Acquire Bcb Excl. w/ Wait     Increment PinCount
                  if still not mapped           Convert/Release Bcb Resource
                      CcMapAndRead w/ Wait      Release BcbList SpinLock
                  Convert/Release Bcb Resource

    Bcb mapped    Case 5:                       Case 6:

                  Increment PinCount            if not ReadOnly
                  Release BcbList SpinLock          Acquire Bcb shared (exit if FALSE)
                  if not ReadOnly               Increment PinCount
                      Acquire Bcb Shared        Release BcbList SpinLock

    It is important to note that most changes to this routine will affect
    multiple cases from above.

Arguments:

    FileObject - Pointer to File Object for file

    FileOffset - Offset in file at which map should begin

    Length - Length of desired map in bytes

    ReadOnly - Supplies TRUE if caller will only read the mapped data (i.e.,
               TRUE for CcCopyRead, CcMapData and CcMdlRead and FALSE for
               everyone else)

    WriteOnly - The specified range of bytes will only be written.

    Flags - (PIN_WAIT, PIN_EXCLUSIVE, PIN_NO_READ, etc. as defined in cache.h)

    Bcb - Returns a pointer to the Bcb representing the pinned data.

    BaseAddress - Returns base address of desired data

    BeyondLastByte - Returns the File Offset of the first byte beyond the
                     last accessible byte.

Return Value:

    FALSE - if PIN_WAIT was not set, and it was impossible to lock all
            of the data without blocking

    TRUE - if the desired data is being returned

Raises:

    STATUS_INSUFFICIENT_RESOURCES - If a pool allocation failure occurs.
        This can only occur if Wait was specified as TRUE.  (If Wait is
        specified as FALSE, and an allocation failure occurs, this
        routine simply returns FALSE.)

--*/

{
    PSHARED_CACHE_MAP SharedCacheMap;
    LARGE_INTEGER TrialBound;
    KIRQL OldIrql;
    PBCB BcbOut = NULL;
    ULONG ZeroFlags = 0;
    BOOLEAN SpinLockAcquired = FALSE;
    BOOLEAN Result = FALSE;

    ULONG ReceivedLength;
    ULONG ActivePage;
    ULONG PageIsDirty;
    PVACB Vacb = NULL;

    DebugTrace(+1, me, "CcPinFileData:\n", 0 );
    DebugTrace( 0, me, "    FileObject = %08lx\n", FileObject );
    DebugTrace2(0, me, "    FileOffset = %08lx, %08lx\n", FileOffset->LowPart,
                FileOffset->HighPart );
    DebugTrace( 0, me, "    Length = %08lx\n", Length );
    DebugTrace( 0, me, "    Flags = %02lx\n", Flags );

    //  Get pointer to SharedCacheMap via File Object.

    SharedCacheMap = *(PSHARED_CACHE_MAP *)((PCHAR)FileObject->SectionObjectPointer
                                            + sizeof(PVOID));

    //  See if we have an active Vacb that we need to free.

    GetActiveVacb( SharedCacheMap, OldIrql, Vacb, ActivePage, PageIsDirty );

    //  If there is an end of a page to be zeroed, then free that page now,
    //  so it does not cause our data to get zeroed.  If there is an active
    //  page, free it so we have the correct ValidDataGoal.

    if ((Vacb != NULL) || (SharedCacheMap->NeedToZero != NULL)) {

        CcFreeActiveVacb( SharedCacheMap, Vacb, ActivePage, PageIsDirty );
        Vacb = NULL;
    }

    //  Make sure the calling file system is not asking to map beyond the
    //  end of the section, for example, that it did not forget to do
    //  CcExtendCacheSection.

    ASSERT( ( FileOffset->QuadPart + (LONGLONG)Length ) <=
            SharedCacheMap->SectionSize.QuadPart );

    //  Initially clear output.

    *Bcb = NULL;
    *BaseAddress = NULL;

    if (!FlagOn(Flags, PIN_NO_READ)) {

        *BaseAddress = CcGetVirtualAddress( SharedCacheMap,
                                            *FileOffset,
                                            &Vacb,
                                            &ReceivedLength );

    } else {

        //  In the PIN_NO_READ case, we simply need to make sure that the
        //  sparse structure containing the Bcb listheads is expanded in the
        //  region of the file we are interested in.  Fake a ReceivedLength
        //  that matches the remaining bytes in the view.

        ReceivedLength = VACB_MAPPING_GRANULARITY -
                         (ULONG)(FileOffset->QuadPart & (VACB_MAPPING_GRANULARITY - 1));

        //  Now simply cause a reference that will expand a multilevel Vacb.

        CcReferenceFileOffset( SharedCacheMap, *FileOffset );
    }

    //  Acquire Bcb List Exclusive to look for Bcb.

    ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql );
    SpinLockAcquired = TRUE;

    //  Use try to guarantee cleanup on the way out.

    try {

        BOOLEAN Found;
        LARGE_INTEGER FOffset;
        LARGE_INTEGER TLength;

        //  Search for Bcb describing the largest matching "prefix" byte range,
        //  or where to insert it.

        TrialBound.QuadPart = FileOffset->QuadPart + (LONGLONG)Length;
        Found = CcFindBcb( SharedCacheMap, FileOffset, &TrialBound, &BcbOut );

        //  Cases 1 and 2 - Bcb was not found.  First calculate data to pin down.

        if (!Found) {

            //  Get out if the caller specified PIN_IF_BCB.  We need to zap
            //  BcbOut since this is a hint to the cleanup code to remove the
            //  Bcb if we are returning FALSE.

            if (FlagOn(Flags, PIN_IF_BCB)) {

                BcbOut = NULL;
                try_return( Result = FALSE );
            }

            //  Not found; calculate data to pin down.  Round the local copy of
            //  FileOffset down to a page boundary, and round copies of size and
            //  minimum size up.  Also make sure that we keep the length from
            //  crossing the end of the SharedCacheMap.

            FOffset = *FileOffset;
            TLength.QuadPart = TrialBound.QuadPart - FOffset.QuadPart;

            TLength.LowPart += FOffset.LowPart & (PAGE_SIZE - 1);
            ReceivedLength += FOffset.LowPart & (PAGE_SIZE - 1);

            //  At this point we can calculate the ReadOnly flag for the
            //  purposes of whether to use the Bcb resource, and we can
            //  calculate the ZeroFlags.

            if ((!ReadOnly && !FlagOn(SharedCacheMap->Flags, PIN_ACCESS)) || WriteOnly) {

                //  We can always zero middle pages, if any.

                ZeroFlags = ZERO_MIDDLE_PAGES;

                if (((FOffset.LowPart & (PAGE_SIZE - 1)) == 0) &&
                    (Length >= PAGE_SIZE)) {
                    ZeroFlags |= ZERO_FIRST_PAGE;
                }

                if ((TLength.LowPart & (PAGE_SIZE - 1)) == 0) {
                    ZeroFlags |= ZERO_LAST_PAGE;
                }
            }

            //  We treat Bcbs as ReadOnly (do not acquire resource) if they
            //  are in sections for which we have not disabled modified writing.

            if (!FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED)) {
                ReadOnly = TRUE;
            }

            TLength.LowPart = (ULONG) ROUND_TO_PAGES( TLength.LowPart );

            //  Round BaseAddress and FOffset down to the bottom of a page.

            *BaseAddress = ((PCHAR)*BaseAddress - (FileOffset->LowPart & (PAGE_SIZE - 1)));
            FOffset.LowPart &= ~(PAGE_SIZE - 1);

            //  Even if we are readonly, we can still zero pages entirely
            //  beyond valid data length.

            if (FOffset.QuadPart >= SharedCacheMap->ValidDataGoal.QuadPart) {

                ZeroFlags |= ZERO_FIRST_PAGE | ZERO_MIDDLE_PAGES | ZERO_LAST_PAGE;

            } else if ((FOffset.QuadPart + (LONGLONG)PAGE_SIZE) >=
                       SharedCacheMap->ValidDataGoal.QuadPart) {

                ZeroFlags |= ZERO_MIDDLE_PAGES | ZERO_LAST_PAGE;
            }

            //  We will get into trouble if we try to read more than we can
            //  map by one Vacb.  So make sure that our lengths stay within a Vacb.

            if (TLength.LowPart > ReceivedLength) {
                TLength.LowPart = ReceivedLength;
            }

            //  Case 1 - Bcb was not found and Wait is TRUE.
            //
            //  Note that it is important to minimize the time that the Bcb
            //  List spin lock is held, as well as guarantee we do not take
            //  any faults while holding this lock.
            //
            //  If we can (and perhaps will) wait, then it is important to
            //  allocate the Bcb, acquire it exclusive and free the Bcb List.
            //  We then proceed to read in the data, and anyone else finding
            //  our Bcb will have to wait shared to ensure that the data is in.

            if (FlagOn(Flags, PIN_WAIT)) {

                BcbOut = CcAllocateInitializeBcb( SharedCacheMap,
                                                  BcbOut,
                                                  &FOffset,
                                                  &TLength );

                if (BcbOut == NULL) {
                    DebugTrace( 0, 0, "Bcb allocation failure\n", 0 );
                    ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );
                    SpinLockAcquired = FALSE;
                    ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
                }

                //  Now just acquire the newly-allocated Bcb (exclusive or
                //  shared as requested), and release the spin lock.

                if (!ReadOnly) {
                    if (FlagOn(Flags, PIN_EXCLUSIVE)) {
                        (VOID)ExAcquireResourceExclusive( &BcbOut->Resource, TRUE );
                    } else {
                        (VOID)ExAcquireSharedStarveExclusive( &BcbOut->Resource, TRUE );
                    }
                }
                ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );
                SpinLockAcquired = FALSE;

                //  Now read in the data.

                if (!FlagOn(Flags, PIN_NO_READ)) {

                    (VOID)CcMapAndRead( SharedCacheMap,
                                        &FOffset,
                                        TLength.LowPart,
                                        ZeroFlags,
                                        TRUE,
                                        *BaseAddress );

                    //  Now we have to reacquire the Bcb List spinlock to load
                    //  up the mapping if we are the first one, else we collided
                    //  with someone else who loaded the mapping first, and we
                    //  will just free our mapping.  It is guaranteed that the
                    //  data will be mapped to the same place.

                    ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql );

                    if (BcbOut->BaseAddress == NULL) {

                        BcbOut->BaseAddress = *BaseAddress;
                        BcbOut->Vacb = Vacb;
                        Vacb = NULL;
                    }

                    ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );

                    //  Calculate Base Address of the data we want.

                    *BaseAddress = (PCHAR)BcbOut->BaseAddress +
                                   (ULONG)( FileOffset->QuadPart - BcbOut->FileOffset.QuadPart );
                }

                //  Success!

                try_return( Result = TRUE );
            }

            //  Case 2 - Bcb was not found and Wait is FALSE.
            //
            //  If we cannot wait, then we immediately go see if the data is
            //  there (CcMapAndRead), and only keep the Bcb set up if the data
            //  is there.  Note that CcMapAndRead is called with Wait = FALSE,
            //  so it will not fault and not block before returning.

            else {

                //  Now try to allocate and initialize the Bcb.  If we fail to
                //  allocate one, then return FALSE, since we know that
                //  Wait = FALSE.  The caller may get lucky if he calls us
                //  back with Wait = TRUE.

                BcbOut = CcAllocateInitializeBcb( SharedCacheMap,
                                                  BcbOut,
                                                  &FOffset,
                                                  &TLength );

                if (BcbOut == NULL) {

                    try_return( Result = FALSE );
                }

                //  If we are not ReadOnly, we must acquire the newly-allocated
                //  resource shared, and then we can free the spin lock.

                if (!ReadOnly) {
                    ExAcquireSharedStarveExclusive( &BcbOut->Resource, TRUE );
                }
                ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );
                SpinLockAcquired = FALSE;

                //  Note that since this call has Wait = FALSE, it cannot
                //  get an exception (see procedure header).

                ASSERT( !FlagOn(Flags, PIN_NO_READ) );
                if (!CcMapAndRead( SharedCacheMap,
                                   &FOffset,
                                   TLength.LowPart,
                                   ZeroFlags,
                                   FALSE,
                                   *BaseAddress )) {

                    try_return( Result = FALSE );
                }

                //  Now we have to reacquire the Bcb List spinlock to load up
                //  the mapping if we are the first one, else we collided with
                //  someone else who loaded the mapping first, and we will just
                //  free our mapping.  It is guaranteed that the data will be
                //  mapped to the same place.

                ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql );

                if (BcbOut->BaseAddress == NULL) {

                    BcbOut->BaseAddress = *BaseAddress;
                    BcbOut->Vacb = Vacb;
                    Vacb = NULL;
                }

                ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );

                //  Calculate Base Address of the data we want.

                *BaseAddress = (PCHAR)BcbOut->BaseAddress +
                               (ULONG)( FileOffset->QuadPart - BcbOut->FileOffset.QuadPart );

                //  Success!

                try_return( Result = TRUE );
            }

        } else {

            //  We treat Bcbs as ReadOnly (do not acquire resource) if they
            //  are in sections for which we have not disabled modified writing.

            if (!FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED)) {
                ReadOnly = TRUE;
            }
        }

        //  Cases 3 and 4 - Bcb is there but not mapped.

        if (BcbOut->BaseAddress == NULL) {

            //  It is too complicated to attempt to calculate any ZeroFlags in
            //  this case, because we have to not only do the tests above, but
            //  also compare to the byte range in the Bcb since we will be
            //  passing those parameters to CcMapAndRead.  Also, the probability
            //  of hitting some window where zeroing is of any advantage is
            //  quite small.
            //
            //  Set up to just reread the Bcb exactly as the data in it is
            //  described.

            *BaseAddress = ((PCHAR)*BaseAddress - (FileOffset->LowPart - BcbOut->FileOffset.LowPart));
            FOffset = BcbOut->FileOffset;
            TLength.QuadPart = (LONGLONG)BcbOut->ByteLength;

            //  Case 3 - Bcb is there but not mapped and Wait is TRUE.
            //
            //  Increment the PinCount, and then release the BcbList SpinLock
            //  so that we can wait to acquire the Bcb exclusive.  Once we have
            //  the Bcb exclusive, map and read it in if no one beats us to it.
            //  Someone may have beat us to it since we had to release the
            //  SpinLock above.

            if (FlagOn(Flags, PIN_WAIT)) {

                BcbOut->PinCount += 1;

                //  Now we have to release the BcbList SpinLock in order to
                //  acquire the Bcb shared.

                ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );
                SpinLockAcquired = FALSE;
                if (!ReadOnly) {
                    if (FlagOn(Flags, PIN_EXCLUSIVE)) {
                        (VOID)ExAcquireResourceExclusive( &BcbOut->Resource, TRUE );
                    } else {
                        (VOID)ExAcquireSharedStarveExclusive( &BcbOut->Resource, TRUE );
                    }
                }

                //  Now proceed to map and read the data in.

                if (!FlagOn(Flags, PIN_NO_READ)) {

                    (VOID)CcMapAndRead( SharedCacheMap,
                                        &FOffset,
                                        TLength.LowPart,
                                        ZeroFlags,
                                        TRUE,
                                        *BaseAddress );

                    //  Now we have to reacquire the Bcb List spinlock to load
                    //  up the mapping if we are the first one, else we collided
                    //  with someone else who loaded the mapping first, and we
                    //  will just free our mapping.  It is guaranteed that the
                    //  data will be mapped to the same place.

                    ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql );

                    if (BcbOut->BaseAddress == NULL) {

                        BcbOut->BaseAddress = *BaseAddress;
                        BcbOut->Vacb = Vacb;
                        Vacb = NULL;
                    }

                    ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );

                    //  Calculate Base Address of the data we want.

                    *BaseAddress = (PCHAR)BcbOut->BaseAddress +
                                   (ULONG)( FileOffset->QuadPart - BcbOut->FileOffset.QuadPart );
                }

                //  Success!

                try_return( Result = TRUE );
            }

            //  Case 4 - Bcb is there but not mapped, and Wait is FALSE.
            //
            //  Since we cannot wait, we immediately go see if the data is
            //  there (CcMapAndRead), and only set up the mapping if the data
            //  is there.  Note that CcMapAndRead is called with Wait = FALSE,
            //  so it will not fault and not block before returning.

            else {

                if (!ReadOnly && !ExAcquireSharedStarveExclusive( &BcbOut->Resource, FALSE )) {

                    //  If we cannot get the resource and have not incremented
                    //  PinCount, then suppress the unpin on cleanup.

                    BcbOut = NULL;
                    try_return( Result = FALSE );
                }

                BcbOut->PinCount += 1;

                ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );
                SpinLockAcquired = FALSE;

                //  Note that since this call has Wait = FALSE, it cannot
                //  get an exception (see procedure header).

                ASSERT( !FlagOn(Flags, PIN_NO_READ) );
                if (!CcMapAndRead( SharedCacheMap,
                                   &BcbOut->FileOffset,
                                   BcbOut->ByteLength,
                                   ZeroFlags,
                                   FALSE,
                                   *BaseAddress )) {

                    try_return( Result = FALSE );
                }

                //  Now we have to reacquire the Bcb List spinlock to load up
                //  the mapping if we are the first one, else we collided with
                //  someone else who loaded the mapping first, and we will just
                //  free our mapping.  It is guaranteed that the data will be
                //  mapped to the same place.

                ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql );

                if (BcbOut->BaseAddress == NULL) {

                    BcbOut->BaseAddress = *BaseAddress;
                    BcbOut->Vacb = Vacb;
                    Vacb = NULL;
                }

                ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );

                //  Calculate Base Address of the data we want.

                *BaseAddress = (PCHAR)BcbOut->BaseAddress +
                               (ULONG)( FileOffset->QuadPart - BcbOut->FileOffset.QuadPart );

                //  Success!

                try_return( Result = TRUE );
            }
        }

        //  Cases 5 and 6 - Bcb is there and it is mapped.

        else {

            //  Case 5 - Bcb is there and mapped, and Wait is TRUE.
            //
            //  We can just increment the PinCount, release the SpinLock
            //  and then acquire the Bcb Shared if we are not ReadOnly.

            if (FlagOn(Flags, PIN_WAIT)) {

                BcbOut->PinCount += 1;
                ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );
                SpinLockAcquired = FALSE;

                //  Acquire Bcb Resource shared to ensure that it is in memory.

                if (!ReadOnly) {
                    if (FlagOn(Flags, PIN_EXCLUSIVE)) {
                        (VOID)ExAcquireResourceExclusive( &BcbOut->Resource, TRUE );
                    } else {
                        (VOID)ExAcquireSharedStarveExclusive( &BcbOut->Resource, TRUE );
                    }
                }
            }

            //  Case 6 - Bcb is there and mapped, and Wait is FALSE.
            //
            //  If we are not ReadOnly, we have to first see if we can
            //  acquire the Bcb shared before incrementing the PinCount,
            //  since we will have to return FALSE if we cannot acquire the
            //  resource.

            else {

                //  Acquire Bcb Resource shared to ensure that it is in memory.

                if (!ReadOnly && !ExAcquireSharedStarveExclusive( &BcbOut->Resource, FALSE )) {

                    //  If we cannot get the resource and have not incremented
                    //  PinCount, then suppress the unpin on cleanup.

                    BcbOut = NULL;
                    try_return( Result = FALSE );
                }

                BcbOut->PinCount += 1;
                ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );
                SpinLockAcquired = FALSE;
            }

            //  Calculate Base Address of the data we want.

            *BaseAddress = (PCHAR)BcbOut->BaseAddress +
                           (ULONG)( FileOffset->QuadPart - BcbOut->FileOffset.QuadPart );

            //  Success!

            try_return( Result = TRUE );
        }

    try_exit: NOTHING;

        if (FlagOn(Flags, PIN_NO_READ) &&
            FlagOn(Flags, PIN_EXCLUSIVE) &&
            (BcbOut != NULL) &&
            (BcbOut->BaseAddress != NULL)) {

            //  Unmap the Vacb and free the resource if the Bcb is still
            //  dirty.  We have to free the resource before dropping the
            //  spinlock, and we want to hold the resource until the
            //  virtual address is freed.

            CcFreeVirtualAddress( BcbOut->Vacb );

            BcbOut->BaseAddress = NULL;
            BcbOut->Vacb = NULL;
        }

    } finally {

        //  Release the spinlock if it is acquired.

        if (SpinLockAcquired) {
            ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );
        }

        //  If the Vacb was not used for any reason (error or not needed),
        //  then free it here.

        if (Vacb != NULL) {
            CcFreeVirtualAddress( Vacb );
        }

        //  If we referenced a piece of a multilevel structure, release it here.

        if (FlagOn(Flags, PIN_NO_READ)) {

            CcDereferenceFileOffset( SharedCacheMap, *FileOffset );
        }

        if (Result) {

            *Bcb = BcbOut;
            *BeyondLastByte = BcbOut->BeyondLastByte;

        //  An abnormal termination can occur on an allocation failure,
        //  or on a failure to map and read the buffer.

        } else {

            *BaseAddress = NULL;
            if (BcbOut != NULL) {
                CcUnpinFileData( BcbOut, ReadOnly, UNPIN );
            }
        }

        DebugTrace( 0, me, "    <Bcb = %08lx\n", *Bcb );
        DebugTrace( 0, me, "    <BaseAddress = %08lx\n", *BaseAddress );
        DebugTrace(-1, me, "CcPinFileData -> %02lx\n", Result );
    }

    return Result;
}
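The six-case flow above reduces, for a typical internal caller, to pin / modify / set dirty / unpin. The following is a minimal sketch of that sequence; CcxModifyRange and its error handling are illustrative assumptions, not code from cachesub.c:

    //  Hypothetical sketch: modifying a byte range through CcPinFileData.

    BOOLEAN
    CcxModifyRange (
        IN PFILE_OBJECT FileObject,
        IN PLARGE_INTEGER FileOffset,
        IN ULONG Length,
        IN PVOID SourceBuffer
        )
    {
        PBCB Bcb;
        PVOID BaseAddress;
        LARGE_INTEGER BeyondLastByte;

        //  Pin for write (ReadOnly == FALSE).  With PIN_WAIT the routine
        //  blocks as needed and raises on allocation failure, so a FALSE
        //  return is not expected here; the check is purely defensive.

        if (!CcPinFileData( FileObject, FileOffset, Length,
                            FALSE, FALSE, PIN_WAIT,
                            &Bcb, &BaseAddress, &BeyondLastByte )) {
            return FALSE;
        }

        RtlCopyMemory( BaseAddress, SourceBuffer, Length );

        //  Mark the pinned range dirty for the Lazy Writer, then drop the
        //  pin reference taken above (same ReadOnly value as when mapped).

        CcSetDirtyPinnedData( Bcb, NULL );
        CcUnpinFileData( Bcb, FALSE, UNPIN );

        return TRUE;
    }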

VOID CcReleaseByteRangeFromWrite ( IN PSHARED_CACHE_MAP SharedCacheMap,
                                   IN PLARGE_INTEGER FileOffset,
                                   IN ULONG Length,
                                   IN PBCB FirstBcb,
                                   IN BOOLEAN VerifyRequired )

Definition at line 3758 of file cachesub.c.

References ASSERT, BooleanFlagOn, CACHE_NTC_BCB, CcSetDirtyInMask(), CcSetDirtyPinnedData(), CcUnpinFileData(), DebugTrace, DebugTrace2, DISABLE_WRITE_BEHIND, FlagOn, me, MODIFIED_WRITE_DISABLED, NULL, SET_CLEAN, TRUE, and UNPIN.

Referenced by CcFlushCache().

Routine Description:

    This routine is called by the Lazy Writer to free a range of bytes and
    clear all dirty bits, for a byte range returned by CcAcquireByteRangeForWrite.

Arguments:

    SharedCacheMap - As supplied to CcAcquireByteRangeForWrite

    FileOffset - As returned from CcAcquireByteRangeForWrite

    Length - As returned from CcAcquireByteRangeForWrite

    FirstBcb - As returned from CcAcquireByteRangeForWrite

    VerifyRequired - Supplied as TRUE if a verify required error was received.
                     In this case we must mark/leave the data dirty so that
                     we will try to write it again.

Return Value:

    None

--*/

{
    LARGE_INTEGER LastOffset;
    PBCB NextBcb;

    DebugTrace(+1, me, "CcReleaseByteRangeFromWrite:\n", 0);
    DebugTrace2(0, me, "    FileOffset = %08lx, %08lx\n", FileOffset->LowPart,
                FileOffset->HighPart );

    //  If it is a mask Mbcb we are getting, then we only have to check
    //  for VerifyRequired.

    if (FirstBcb == NULL) {

        ASSERT(Length != 0);

        if (VerifyRequired) {
            CcSetDirtyInMask( SharedCacheMap, FileOffset, Length );
        }

        DebugTrace(-1, me, "CcReleaseByteRangeFromWrite -> VOID\n", 0);

        return;
    }

    //  Now loop to free up all of the Bcbs.  If modified writing is disabled
    //  for each Bcb, then we are to set it clean here, since we are synchronized
    //  with callers who set the data dirty.  Otherwise we only have the Bcb pinned
    //  so it will not go away, and we only unpin it here.

    do {
        NextBcb = CONTAINING_RECORD( FirstBcb->BcbLinks.Flink, BCB, BcbLinks );

        //  Skip over any listheads.

        if (FirstBcb->NodeTypeCode == CACHE_NTC_BCB) {

            LastOffset = FirstBcb->FileOffset;

            //  If this is file system metadata (we disabled modified writing),
            //  then this is the time to mark the buffer clean, so long as we
            //  did not get verify required.

            if (FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED)) {

                CcUnpinFileData( FirstBcb,
                                 BooleanFlagOn(SharedCacheMap->Flags, DISABLE_WRITE_BEHIND),
                                 SET_CLEAN );
            }

            //  If we got verify required, we have to mark the buffer dirty again
            //  so we will try again later.  Note we have to make this call again
            //  to make sure the right thing happens with time stamps.

            if (VerifyRequired) {
                CcSetDirtyPinnedData( FirstBcb, NULL );
            }

            //  Finally remove a pin count left over from CcAcquireByteRangeForWrite.

            CcUnpinFileData( FirstBcb, TRUE, UNPIN );
        }

        FirstBcb = NextBcb;
    } while (FileOffset->QuadPart != LastOffset.QuadPart);

    DebugTrace(-1, me, "CcReleaseByteRangeFromWrite -> VOID\n", 0);
}
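The expected pairing is one CcAcquireByteRangeForWrite per CcReleaseByteRangeFromWrite, with VerifyRequired reflecting the outcome of the intervening flush. A minimal sketch of that pairing follows; CcxFlushNextRun and CcxWriteRange are hypothetical names, and the real Lazy Writer path (CcWriteBehind) is considerably more involved:

    VOID
    CcxFlushNextRun (
        IN PSHARED_CACHE_MAP SharedCacheMap
        )
    {
        LARGE_INTEGER FileOffset;
        ULONG Length;
        PBCB FirstBcb;
        NTSTATUS Status;

        //  Ask for the next dirty run; FALSE means nothing to write now.

        if (!CcAcquireByteRangeForWrite( SharedCacheMap, NULL, 0,
                                         &FileOffset, &Length, &FirstBcb )) {
            return;
        }

        Status = CcxWriteRange( SharedCacheMap, &FileOffset, Length );  // hypothetical flush

        //  On STATUS_VERIFY_REQUIRED the range must be marked/left dirty so
        //  it is retried; otherwise the dirty state is cleared and the pins
        //  taken by the acquire are dropped.

        CcReleaseByteRangeFromWrite( SharedCacheMap,
                                     &FileOffset,
                                     Length,
                                     FirstBcb,
                                     (BOOLEAN)(Status == STATUS_VERIFY_REQUIRED) );
    }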

PVOID CcRemapBcb ( IN PVOID Bcb )

Definition at line 5024 of file cachesub.c.

References ASSERT, CACHE_NTC_BCB, CACHE_NTC_OBCB, CcAcquireVacbLock, CcBeyondVacbs, CcReleaseVacbLock, CcVacbs, and _VACB::Overlay.

Routine Description:

    This routine may be called by a file system to map a Bcb an additional
    time in order to preserve it through several calls that perform additional
    maps and unpins.

Arguments:

    Bcb - Supplies a pointer to a previously returned Bcb.

Return Value:

    Bcb with read-only indicator.

--*/

{
    KIRQL OldIrql;
    PVACB Vacb;

    //  Remove the read-only bit.

    Bcb = (PVOID) ((ULONG_PTR)Bcb & ~1);

    if (((PBCB)Bcb)->NodeTypeCode == CACHE_NTC_OBCB) {

        //  If this is an overlapped BCB, use the first Vacb in the array.

        Vacb = ((POBCB)Bcb)->Bcbs[0]->Vacb;

    } else if (((PBCB)Bcb)->NodeTypeCode == CACHE_NTC_BCB) {

        //  If this is a BCB, extract the Vacb from it.

        Vacb = ((PBCB)Bcb)->Vacb;

    } else {

        //  Otherwise, there is no signature to match.  Assume it is a Vacb.

        Vacb = (PVACB) Bcb;
    }

    ASSERT((Vacb >= CcVacbs) && (Vacb < CcBeyondVacbs));

    //  Safely bump the active count.

    CcAcquireVacbLock( &OldIrql );

    Vacb->Overlay.ActiveCount += 1;

    CcReleaseVacbLock( OldIrql );

    return (PVOID) ((ULONG_PTR)Vacb | 1);
}
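The read-only indicator here is carried in bit 0 of the pointer itself, which is safe because Bcbs and Vacbs are more than byte-aligned, so a valid pointer never has that bit set. A minimal sketch of the tagging convention (macro names are illustrative, not from cache.h):

    #define CCX_MAKE_READ_ONLY(Ptr)   ((PVOID)((ULONG_PTR)(Ptr) | 1))   // set tag bit
    #define CCX_IS_READ_ONLY(Ptr)     (((ULONG_PTR)(Ptr) & 1) != 0)     // test tag bit
    #define CCX_STRIP_READ_ONLY(Ptr)  ((PVOID)((ULONG_PTR)(Ptr) & ~1))  // recover pointer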

VOID CcRepinBcb ( IN PVOID Bcb )

Definition at line 5101 of file cachesub.c.

Routine Description:

    This routine may be called by a file system to pin a Bcb an additional
    time in order to reserve it for Write Through or error recovery.
    Typically the file system would do this the first time that it sets a
    pinned buffer dirty while processing a WriteThrough request, or any
    time that it determines that a buffer will be required for WriteThrough.

    The call to this routine must be followed by a call to CcUnpinRepinnedBcb.
    CcUnpinRepinnedBcb should normally be called during request completion
    after all other resources have been released.  CcUnpinRepinnedBcb
    synchronously writes the buffer (for WriteThrough requests) and performs
    the matching unpin for this call.

Arguments:

    Bcb - Supplies a pointer to a previously pinned Bcb

Return Value:

    None.

--*/

{
    KIRQL OldIrql;

    ExAcquireFastLock( &((PBCB)Bcb)->SharedCacheMap->BcbSpinLock, &OldIrql );

    ((PBCB)Bcb)->PinCount += 1;

    ExReleaseFastLock( &((PBCB)Bcb)->SharedCacheMap->BcbSpinLock, OldIrql );
}
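A sketch of the repin/unpin pairing described above, split across a request path and its completion as a file system might arrange it; the FsdxXxx routines and the exact ordering around CcSetDirtyPinnedData are assumptions for illustration:

    VOID FsdxDirtyMetadataWriteThrough( IN PVOID Bcb )
    {
        //  Reserve the buffer through completion, mark it dirty, and drop
        //  the original pin.
        CcRepinBcb( Bcb );
        CcSetDirtyPinnedData( Bcb, NULL );
        CcUnpinData( Bcb );
    }

    VOID FsdxCompleteWriteThrough( IN PVOID Bcb )
    {
        IO_STATUS_BLOCK IoStatus;

        //  At request completion, after all other resources are released:
        //  synchronously write the buffer and perform the matching unpin.
        CcUnpinRepinnedBcb( Bcb, TRUE, &IoStatus );
    }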

VOID CcScheduleReadAhead ( IN PFILE_OBJECT FileObject,
                           IN PLARGE_INTEGER FileOffset,
                           IN ULONG Length )

Definition at line 1253 of file cachesub.c.

References ASSERT, _PRIVATE_CACHE_MAP::BeyondLastByte1, _PRIVATE_CACHE_MAP::BeyondLastByte2, CcAcquireMasterLock, CcAllocateWorkQueueEntry, CcExpressWorkQueue, CcIncrementOpenCount, CcPostWorkQueue(), CcReleaseMasterLock, DebugTrace, DebugTrace2, DISABLE_READ_AHEAD, FALSE, _PRIVATE_CACHE_MAP::FileOffset1, _PRIVATE_CACHE_MAP::FileOffset2, FlagOn, _SHARED_CACHE_MAP::Flags, FO_SEQUENTIAL_ONLY, _WORK_QUEUE_ENTRY::Function, me, NOISE_BITS, NULL, ObReferenceObject, PAGE_SIZE, _WORK_QUEUE_ENTRY::Parameters, PWORK_QUEUE_ENTRY, ReadAhead, _PRIVATE_CACHE_MAP::ReadAheadActive, _PRIVATE_CACHE_MAP::ReadAheadLength, _PRIVATE_CACHE_MAP::ReadAheadMask, _PRIVATE_CACHE_MAP::ReadAheadOffset, _PRIVATE_CACHE_MAP::ReadAheadSpinLock, ROUND_TO_PAGES, and TRUE.

Referenced by CcCopyRead(), CcFastCopyRead(), and CcMdlRead().

Routine Description:

    This routine is called by Copy Read and Mdl Read file system routines to
    perform common Read Ahead processing.  The input parameters describe
    the current read which has just been completed, or perhaps only started
    in the case of Mdl Reads.  Based on these parameters, an assessment is
    made on how much data should be read ahead, and whether that data has
    already been read ahead.

    The processing is divided into two parts:

        CALCULATE READ AHEAD REQUIREMENTS  (CcScheduleReadAhead)

        PERFORM READ AHEAD                 (CcPerformReadAhead)

    File systems should always call CcReadAhead, which will conditionally
    call CcScheduleReadAhead (if the read is large enough).  If such a call
    determines that there is read ahead work to do, and no read ahead is
    currently active, then it will set ReadAheadActive and schedule read
    ahead to be performed by the Lazy Writer, who will call CcPerformReadAhead.

Arguments:

    FileObject - supplies pointer to FileObject on which readahead should be
                 considered.

    FileOffset - supplies the FileOffset at which the last read just occurred.

    Length - supplies the length of the last read.

Return Value:

    None
--*/

{
    LARGE_INTEGER NewOffset;
    LARGE_INTEGER NewBeyond;
    LARGE_INTEGER FileOffset1, FileOffset2;
    KIRQL OldIrql;
    PSHARED_CACHE_MAP SharedCacheMap;
    PPRIVATE_CACHE_MAP PrivateCacheMap;
    PWORK_QUEUE_ENTRY WorkQueueEntry;
    ULONG ReadAheadSize;
    BOOLEAN Changed = FALSE;

    DebugTrace(+1, me, "CcScheduleReadAhead:\n", 0 );
    DebugTrace2(0, me, "    FileOffset = %08lx, %08lx\n", FileOffset->LowPart,
                FileOffset->HighPart );
    DebugTrace( 0, me, "    Length = %08lx\n", Length );

    SharedCacheMap = *(PSHARED_CACHE_MAP *)((PCHAR)FileObject->SectionObjectPointer
                                            + sizeof(PVOID));
    PrivateCacheMap = FileObject->PrivateCacheMap;

    if ((PrivateCacheMap == NULL) ||
        (SharedCacheMap == NULL) ||
        FlagOn(SharedCacheMap->Flags, DISABLE_READ_AHEAD)) {

        DebugTrace(-1, me, "CcScheduleReadAhead -> VOID (Nooped)\n", 0 );

        return;
    }

    //  Round boundaries of transfer up to some greater granularity, so that
    //  sequential reads will be recognized even if a few bytes are skipped
    //  between records.

    NewOffset = *FileOffset;
    NewBeyond.QuadPart = FileOffset->QuadPart + (LONGLONG)Length;

    //  Find the next read ahead boundary beyond the current read.

    ReadAheadSize = (Length + PrivateCacheMap->ReadAheadMask) & ~PrivateCacheMap->ReadAheadMask;
    FileOffset2.QuadPart = NewBeyond.QuadPart + (LONGLONG)ReadAheadSize;
    FileOffset2.LowPart &= ~PrivateCacheMap->ReadAheadMask;

    //
    //  CALCULATE READ AHEAD REQUIREMENTS
    //

    //  Take out the ReadAhead spinlock to synchronize our read ahead decision.

    ExAcquireSpinLock( &PrivateCacheMap->ReadAheadSpinLock, &OldIrql );

    //  Read Ahead Case 0.
    //
    //  Sequential-only hint in the file object.  For this case we will try
    //  to always keep two read ahead granularities read ahead from and
    //  including the end of the current transfer.  This case has the lowest
    //  overhead, and the code is completely immune to how the caller skips
    //  around.  Sequential files use ReadAheadOffset[1] in the
    //  PrivateCacheMap as their "high water mark".

    if (FlagOn(FileObject->Flags, FO_SEQUENTIAL_ONLY)) {

        //  If the next boundary is greater than or equal to the high-water
        //  mark, then read ahead.

        if (FileOffset2.QuadPart >= PrivateCacheMap->ReadAheadOffset[1].QuadPart) {

            //  On the first read, if we are using a large read ahead
            //  granularity and the read did not get it all, we will just get
            //  the rest of the first data we want.

            if ((FileOffset->QuadPart == 0) &&
                (PrivateCacheMap->ReadAheadMask > (PAGE_SIZE - 1)) &&
                ((Length + PAGE_SIZE - 1) <= PrivateCacheMap->ReadAheadMask)) {

                FileOffset1.QuadPart = (LONGLONG)( ROUND_TO_PAGES(Length) );
                PrivateCacheMap->ReadAheadLength[0] = ReadAheadSize - FileOffset1.LowPart;
                FileOffset2.QuadPart = (LONGLONG)ReadAheadSize;

            //  Calculate the next read ahead boundary.

            } else {

                FileOffset1.QuadPart = PrivateCacheMap->ReadAheadOffset[1].QuadPart +
                                       (LONGLONG)ReadAheadSize;

                //  If the end of the current read is actually beyond where we
                //  would normally do our read ahead, then we have fallen
                //  behind, and we must advance to that spot.

                if (FileOffset2.QuadPart > FileOffset1.QuadPart) {
                    FileOffset1 = FileOffset2;
                }
                PrivateCacheMap->ReadAheadLength[0] = ReadAheadSize;
                FileOffset2.QuadPart = FileOffset1.QuadPart + (LONGLONG)ReadAheadSize;
            }

            //  Now issue the next two read aheads.

            PrivateCacheMap->ReadAheadOffset[0] = FileOffset1;

            PrivateCacheMap->ReadAheadOffset[1] = FileOffset2;
            PrivateCacheMap->ReadAheadLength[1] = ReadAheadSize;

            Changed = TRUE;
        }

    //  Read Ahead Case 1.
    //
    //  If this is the third of three sequential reads, then we will see if
    //  we can read ahead.  Note that if the first read to a file is to
    //  offset 0, it passes this test.

    } else if ((NewOffset.HighPart == PrivateCacheMap->BeyondLastByte2.HighPart) &&
               ((NewOffset.LowPart & ~NOISE_BITS)
                    == (PrivateCacheMap->BeyondLastByte2.LowPart & ~NOISE_BITS)) &&
               (PrivateCacheMap->FileOffset2.HighPart
                    == PrivateCacheMap->BeyondLastByte1.HighPart) &&
               ((PrivateCacheMap->FileOffset2.LowPart & ~NOISE_BITS)
                    == (PrivateCacheMap->BeyondLastByte1.LowPart & ~NOISE_BITS))) {

        //  On the first read, if we are using a large read ahead granularity
        //  and the read did not get it all, we will just get the rest of the
        //  first data we want.

        if ((FileOffset->QuadPart == 0) &&
            (PrivateCacheMap->ReadAheadMask > (PAGE_SIZE - 1)) &&
            ((Length + PAGE_SIZE - 1) <= PrivateCacheMap->ReadAheadMask)) {

            FileOffset2.QuadPart = (LONGLONG)( ROUND_TO_PAGES(Length) );
        }

        //  Round read offset to next read ahead boundary.

        else {
            FileOffset2.QuadPart = NewBeyond.QuadPart + (LONGLONG)ReadAheadSize;

            FileOffset2.LowPart &= ~PrivateCacheMap->ReadAheadMask;
        }

        //  Set read ahead length to be the same as for the most recent read,
        //  up to our max.

        if (FileOffset2.QuadPart != PrivateCacheMap->ReadAheadOffset[1].QuadPart) {

            ASSERT( FileOffset2.HighPart >= 0 );

            Changed = TRUE;
            PrivateCacheMap->ReadAheadOffset[1] = FileOffset2;
            PrivateCacheMap->ReadAheadLength[1] = ReadAheadSize;
        }

    //  Read Ahead Case 2.
    //
    //  If this is the third read following a particular stride, then we
    //  will see if we can read ahead.  One example of an application that
    //  might do this is a spreadsheet.  Note that this code even works
    //  for negative strides.

    } else if ( ( NewOffset.QuadPart -
                  PrivateCacheMap->FileOffset2.QuadPart ) ==
                ( PrivateCacheMap->FileOffset2.QuadPart -
                  PrivateCacheMap->FileOffset1.QuadPart )) {

        //  According to the current stride, the next offset will be:
        //
        //      NewOffset + (NewOffset - FileOffset2)
        //
        //  which is the same as:
        //
        //      (NewOffset * 2) - FileOffset2

        FileOffset2.QuadPart = ( NewOffset.QuadPart << 1 ) - PrivateCacheMap->FileOffset2.QuadPart;

        //  If our stride is going backwards through the file, we have to
        //  detect the case where the next step would wrap.

        if (FileOffset2.HighPart >= 0) {

            //  The read ahead length must be extended by the same amount that
            //  we will round the PrivateCacheMap->ReadAheadOffset down.

            Length += FileOffset2.LowPart & (PAGE_SIZE - 1);

            //  Now round the PrivateCacheMap->ReadAheadOffset down.

            FileOffset2.LowPart &= ~(PAGE_SIZE - 1);
            PrivateCacheMap->ReadAheadOffset[1] = FileOffset2;

            //  Round to page boundary.

            PrivateCacheMap->ReadAheadLength[1] = (ULONG) ROUND_TO_PAGES(Length);
            Changed = TRUE;
        }
    }

    //  Get out if the ReadAhead requirements did not change.

    if (!Changed || PrivateCacheMap->ReadAheadActive) {

        DebugTrace( 0, me, "Read ahead already in progress or no change\n", 0 );

        ExReleaseSpinLock( &PrivateCacheMap->ReadAheadSpinLock, OldIrql );
        return;
    }

    //  Otherwise, we will proceed and try to schedule the read ahead ourselves.

    PrivateCacheMap->ReadAheadActive = TRUE;

    //  Release spin lock on way out.

    ExReleaseSpinLock( &PrivateCacheMap->ReadAheadSpinLock, OldIrql );

    //  Queue the read ahead request to the Lazy Writer's work queue.

    DebugTrace( 0, me, "Queueing read ahead to worker thread\n", 0 );

    WorkQueueEntry = CcAllocateWorkQueueEntry();

    //  If we failed to allocate a work queue entry, then we will quietly
    //  bag it.  Read ahead is only an optimization, and no one ever
    //  requires that it occur.

    if (WorkQueueEntry != NULL) {

        //  We must reference this file object so that it cannot go away
        //  until we finish Read Ahead processing in the Worker Thread.

        ObReferenceObject ( FileObject );

        //  Increment open count to make sure the SharedCacheMap stays around.

        CcAcquireMasterLock( &OldIrql );
        CcIncrementOpenCount( SharedCacheMap, 'adRQ' );
        CcReleaseMasterLock( OldIrql );

        WorkQueueEntry->Function = (UCHAR)ReadAhead;
        WorkQueueEntry->Parameters.Read.FileObject = FileObject;

        CcPostWorkQueue( WorkQueueEntry, &CcExpressWorkQueue );
    }

    //  If we failed to allocate a Work Queue Entry, or all of the pages
    //  are resident, we must set the active flag false.

    else {

        ExAcquireFastLock( &PrivateCacheMap->ReadAheadSpinLock, &OldIrql );
        PrivateCacheMap->ReadAheadActive = FALSE;
        ExReleaseFastLock( &PrivateCacheMap->ReadAheadSpinLock, OldIrql );
    }

    DebugTrace(-1, me, "CcScheduleReadAhead -> VOID\n", 0 );

    return;
}
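The boundary arithmetic above relies on ReadAheadMask being (granularity - 1) for a power-of-two granularity. A standalone sketch of the same computation that produces FileOffset2 (illustrative only; the kernel code masks just the low 32 bits, which is equivalent for the small granularities actually used):

    #include <stdint.h>

    uint64_t NextReadAheadBoundary( uint64_t EndOfTransfer,      /* NewBeyond */
                                    uint32_t Length,
                                    uint32_t ReadAheadMask )     /* granularity - 1 */
    {
        //  Round the transfer length up to a whole number of granules ...
        uint32_t ReadAheadSize = (Length + ReadAheadMask) & ~ReadAheadMask;

        //  ... step that far past the end of the transfer, then round the
        //  result down to a granule boundary, exactly as FileOffset2 is formed.
        return (EndOfTransfer + ReadAheadSize) & ~(uint64_t)ReadAheadMask;
    }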

VOID CcSetDirtyInMask ( IN PSHARED_CACHE_MAP SharedCacheMap,
                        IN PLARGE_INTEGER FileOffset,
                        IN ULONG Length )

Definition at line 2242 of file cachesub.c.

References ASSERT, _BITMAP_RANGE::BasePage, _BITMAP_RANGE::Bitmap, _MBCB::BitmapRange1, _MBCB::BitmapRange2, _MBCB::BitmapRange3, _MBCB::BitmapRanges, CACHE_NTC_MBCB, CACHE_NTC_MBCB_GRANDE, CcAcquireMasterLockAtDpcLevel, CcAcquireVacbLockAtDpcLevel, CcAllocateInitializeBcb(), CcAllocateVacbLevel(), CcDeallocateVacbLevel(), CcDirtySharedCacheMapList, CcFindBitmapRangeToDirty(), CcPrefillVacbLevelZone(), CcReleaseMasterLockFromDpcLevel, CcReleaseVacbLock, CcReleaseVacbLockFromDpcLevel, CcScheduleLazyWriteScan(), CcTotalDirtyPages, _BITMAP_RANGE::DirtyPages, _MBCB::DirtyPages, FALSE, _BITMAP_RANGE::FirstDirtyPage, _BITMAP_RANGE::LastDirtyPage, LazyWriter, _BITMAP_RANGE::Links, MBCB_BITMAP_INITIAL_SIZE, MBCB_BITMAP_RANGE, _MBCB::NodeTypeCode, NULL, PAGE_SHIFT, PAGE_SIZE, QuadAlign, _MBCB::ResumeWritePage, _LAZY_WRITER::ScanActive, and _SHARED_CACHE_MAP_LIST_CURSOR::SharedCacheMapLinks.

Referenced by CcFreeActiveVacb(), CcMapAndCopy(), CcMdlWriteComplete2(), CcPrepareMdlWrite(), CcPurgeAndClearCacheSection(), and CcReleaseByteRangeFromWrite().

Routine Description:

    This routine may be called to set a range of pages dirty in a user data
    file, by just setting the corresponding bits in the mask bcb.

    IMPORTANT NOTE:

        If this routine fails to set any bits due to an allocation failure,
        it just returns quietly without informing the caller.  (Note that
        this routine is never called for no-modified-write sections.)  The
        reason for this behavior is that this routine is sometimes called as
        part of error recovery (CcFreeActiveVacb, CcMdlWriteComplete, etc.)
        when it is essential to just keep on moving.  Note that if an
        allocation failure does occur, this only means that MM will have to
        flush the modified page in time, since the Lazy Writer will not do it.

Arguments:

    SharedCacheMap - SharedCacheMap where the pages are to be set dirty.

    FileOffset - FileOffset of first page to set dirty

    Length - Used in conjunction with FileOffset to determine how many pages
             to set dirty.

Return Value:

    None

--*/

{
    KIRQL OldIrql;
    PMBCB Mbcb;
    PBITMAP_RANGE BitmapRange;
    LONGLONG FirstPage;
    LONGLONG LastPage;
    PULONG MaskPtr;
    ULONG Mask = 0;
    PULONG Bitmap = NULL;
    ULONG AllocationError = FALSE;

    //  We assume no caller can cross a bitmap range boundary (currently not
    //  even a view boundary!), so we do not want to loop through bitmap ranges.

    ASSERT((FileOffset->QuadPart / MBCB_BITMAP_RANGE) ==
           ((FileOffset->QuadPart + Length - 1) / MBCB_BITMAP_RANGE));

    //  Initialize our locals.

    FirstPage = FileOffset->QuadPart >> PAGE_SHIFT;
    LastPage = ((FileOffset->QuadPart + Length - 1) >> PAGE_SHIFT);

    //  If we have to convert to an Mbcb grande, we will loop back here to
    //  preallocate another buffer.

    do {

        //  For large streams, we need to preallocate a block we use for
        //  bitmaps.  We allocate one, then loop back in the rare case where
        //  we will need another.  We free it at the bottom if we don't need one.

        if (SharedCacheMap->SectionSize.QuadPart > (MBCB_BITMAP_INITIAL_SIZE * 8 * PAGE_SIZE)) {

            //  If we could not preallocate, break out into common cleanup
            //  code and return quietly.

            if (!CcPrefillVacbLevelZone( 1, &OldIrql, FALSE )) {
                return;
            }

            Bitmap = (PULONG)CcAllocateVacbLevel( FALSE );
            CcReleaseVacbLock( OldIrql );
        }

        //  Acquire the Mbcb spinlock.

        ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql );

        //  If there is no Mbcb, we will have to allocate one.

        Mbcb = SharedCacheMap->Mbcb;
        if (Mbcb == NULL) {

            //  Since we use the Bcb zone, we must assume that Bcbs are big enough.

            ASSERT(QuadAlign(sizeof(MBCB)) <= QuadAlign(sizeof(BCB)));

            //  Allocate the Mbcb from the Bcb zone.

            Mbcb = (PMBCB)CcAllocateInitializeBcb( NULL, NULL, NULL, NULL );

            //  If we could not allocate an Mbcb, break out to clean up and return.

            if (Mbcb == NULL) {
                break;
            }

            //  Set in the node type, and initialize the listhead of ranges.

            Mbcb->NodeTypeCode = CACHE_NTC_MBCB;
            InitializeListHead( &Mbcb->BitmapRanges );

            //  Insert and initialize the first range.

            InsertTailList( &Mbcb->BitmapRanges, &Mbcb->BitmapRange1.Links );
            Mbcb->BitmapRange1.FirstDirtyPage = MAXULONG;

            //  Use the rest of the Mbcb as the initial bitmap.

            Mbcb->BitmapRange1.Bitmap = (PULONG)&Mbcb->BitmapRange2;

            //  Now set to use our new Mbcb.

            SharedCacheMap->Mbcb = Mbcb;
        }

        //  Now see if we need to switch to the Mbcb grande format.

        if ((LastPage >= (MBCB_BITMAP_INITIAL_SIZE * 8)) &&
            (Mbcb->NodeTypeCode != CACHE_NTC_MBCB_GRANDE)) {

            //  If there are any dirty pages, copy the initial bitmap over,
            //  and zero out the original end of the Mbcb for reuse.

            if (Mbcb->BitmapRange1.DirtyPages != 0) {
                RtlCopyMemory( Bitmap, Mbcb->BitmapRange1.Bitmap, MBCB_BITMAP_INITIAL_SIZE );
                RtlZeroMemory( Mbcb->BitmapRange1.Bitmap, MBCB_BITMAP_INITIAL_SIZE );
            }

            //  Store the new bitmap pointer and show we have consumed this one.

            Mbcb->BitmapRange1.Bitmap = Bitmap;
            Bitmap = NULL;

            //  Insert and initialize the second and third ranges.

            InsertTailList( &Mbcb->BitmapRanges, &Mbcb->BitmapRange2.Links );
            Mbcb->BitmapRange2.BasePage = MAXLONGLONG;
            Mbcb->BitmapRange2.FirstDirtyPage = MAXULONG;
            InsertTailList( &Mbcb->BitmapRanges, &Mbcb->BitmapRange3.Links );
            Mbcb->BitmapRange3.BasePage = MAXLONGLONG;
            Mbcb->BitmapRange3.FirstDirtyPage = MAXULONG;
            Mbcb->NodeTypeCode = CACHE_NTC_MBCB_GRANDE;

            //  This is a one-time event - converting to the large Mbcb.
            //  Continue back to preallocate another buffer for
            //  CcFindBitmapRangeToDirty.

            ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );
            continue;
        }

        //  Now find the Bitmap range we are setting bits in.

        BitmapRange = CcFindBitmapRangeToDirty( Mbcb, FirstPage, &Bitmap );

        //  If we could not allocate this dinky structure, break out quietly.

        if (BitmapRange == NULL) {
            break;
        }

        //  Now update the first and last dirty page indices and the bitmap.

        if (FirstPage < (BitmapRange->BasePage + BitmapRange->FirstDirtyPage)) {
            BitmapRange->FirstDirtyPage = (ULONG)(FirstPage - BitmapRange->BasePage);
        }

        if (LastPage > (BitmapRange->BasePage + BitmapRange->LastDirtyPage)) {
            BitmapRange->LastDirtyPage = (ULONG)(LastPage - BitmapRange->BasePage);
        }

        //  We have to acquire the shared cache map list, because we may be
        //  changing lists.

        CcAcquireMasterLockAtDpcLevel();

        //  If this is the first dirty page for this cache map, there is
        //  some work to do.

        if (SharedCacheMap->DirtyPages == 0) {

            //  If the lazy write scan is not active, then start it.

            if (!LazyWriter.ScanActive) {
                CcScheduleLazyWriteScan();
            }

            //  Move to the dirty list.

            RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );
            InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks,
                            &SharedCacheMap->SharedCacheMapLinks );

            Mbcb->ResumeWritePage = FirstPage;
        }

        MaskPtr = &BitmapRange->Bitmap[(ULONG)(FirstPage - BitmapRange->BasePage) / 32];
        Mask = 1 << ((ULONG)FirstPage % 32);

        //  Loop to set all of the bits and adjust the DirtyPage totals.

        for ( ; FirstPage <= LastPage; FirstPage++) {

            if ((*MaskPtr & Mask) == 0) {

                CcTotalDirtyPages += 1;
                SharedCacheMap->DirtyPages += 1;
                Mbcb->DirtyPages += 1;
                BitmapRange->DirtyPages += 1;
                *MaskPtr |= Mask;
            }

            Mask <<= 1;

            if (Mask == 0) {

                MaskPtr += 1;
                Mask = 1;
            }
        }

        //  See if we need to advance our goal for ValidDataLength.

        LastPage = FileOffset->QuadPart + Length;

        if (LastPage > SharedCacheMap->ValidDataGoal.QuadPart) {
            SharedCacheMap->ValidDataGoal.QuadPart = (LONGLONG)LastPage;
        }

        CcReleaseMasterLockFromDpcLevel();

        //  Continue until we have actually set the bits (there is a continue
        //  above which just wants to loop back and allocate another buffer).

    } while (Mask == 0);

    //  Now if we preallocated a bitmap buffer, free it on the way out.

    if (Bitmap != NULL) {
        CcAcquireVacbLockAtDpcLevel();
        CcDeallocateVacbLevel( (PVACB *)Bitmap, FALSE );
        CcReleaseVacbLockFromDpcLevel();
    }
    ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );
}
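The bit-setting loop above walks a ULONG bitmap 32 bits at a time, advancing MaskPtr whenever the mask bit rolls off the word. A standalone sketch of the same pattern (illustrative; the real loop also maintains the four DirtyPages counters under the spinlock):

    #include <stdint.h>

    uint32_t SetPageRangeDirty( uint32_t *Bitmap,
                                uint64_t FirstPage,
                                uint64_t LastPage )     /* inclusive */
    {
        uint32_t *MaskPtr = &Bitmap[FirstPage / 32];
        uint32_t Mask = 1u << (FirstPage % 32);
        uint32_t NewlyDirty = 0;

        for ( ; FirstPage <= LastPage; FirstPage += 1) {

            if ((*MaskPtr & Mask) == 0) {   //  count only 0 -> 1 transitions,
                *MaskPtr |= Mask;           //  as the dirty totals do above
                NewlyDirty += 1;
            }

            Mask <<= 1;                     //  advance to the next bit,
            if (Mask == 0) {                //  rolling into the next ULONG
                MaskPtr += 1;
                Mask = 1;
            }
        }

        return NewlyDirty;
    }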

VOID CcSetDirtyPinnedData ( IN PVOID BcbVoid,
                            IN PLARGE_INTEGER Lsn OPTIONAL )

Definition at line 2560 of file cachesub.c.

References ASSERT, _SHARED_CACHE_MAP::BcbSpinLock, CACHE_NTC_OBCB, CcAcquireMasterLockAtDpcLevel, CcDirtySharedCacheMapList, CcReleaseMasterLockFromDpcLevel, CcScheduleLazyWriteScan(), CcTotalDirtyPages, DebugTrace, _SHARED_CACHE_MAP::DirtyPages, DISABLE_WRITE_BEHIND, FlagOn, _SHARED_CACHE_MAP::Flags, LazyWriter, me, NULL, PAGE_SHIFT, POBCB, _LAZY_WRITER::ScanActive, _SHARED_CACHE_MAP::SharedCacheMapLinks, _SHARED_CACHE_MAP_LIST_CURSOR::SharedCacheMapLinks, TRUE, and _SHARED_CACHE_MAP::ValidDataGoal.

Referenced by CcCopyWrite(), CcPreparePinWrite(), CcReleaseByteRangeFromWrite(), CcUnpinRepinnedBcb(), CcZeroData(), LfsFlushLfcb(), and LfsFlushLogPage().

Routine Description:

    This routine may be called to set a Bcb (returned by CcPinFileData)
    dirty, and a candidate for the Lazy Writer.  All Bcbs should be set
    dirty by calling this routine, even if they are to be flushed
    another way.

Arguments:

    Bcb - Supplies a pointer to a pinned (by CcPinFileData) Bcb, to
          be set dirty.

    Lsn - Lsn to be remembered with page.

Return Value:

    None

--*/

{
    PBCB Bcbs[2];
    PBCB *BcbPtrPtr;
    KIRQL OldIrql;
    PSHARED_CACHE_MAP SharedCacheMap;

    DebugTrace(+1, me, "CcSetDirtyPinnedData: Bcb = %08lx\n", BcbVoid );

    //  Assume this is a normal Bcb, and set up for the loop below.

    Bcbs[0] = (PBCB)BcbVoid;
    Bcbs[1] = NULL;
    BcbPtrPtr = &Bcbs[0];

    //  If it is an overlap Bcb, then point into the Bcb vector for the loop.

    if (Bcbs[0]->NodeTypeCode == CACHE_NTC_OBCB) {
        BcbPtrPtr = &((POBCB)Bcbs[0])->Bcbs[0];
    }

    //  Loop to set all Bcbs dirty.

    while (*BcbPtrPtr != NULL) {

        Bcbs[0] = *(BcbPtrPtr++);

        //  Should be no ReadOnly Bcbs.

        ASSERT(((ULONG_PTR)Bcbs[0] & 1) != 1);

        SharedCacheMap = Bcbs[0]->SharedCacheMap;

        //  We have to acquire the shared cache map list, because we may be
        //  changing lists.

        ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql );

        if (!Bcbs[0]->Dirty) {

            ULONG Pages = Bcbs[0]->ByteLength >> PAGE_SHIFT;

            //  Set dirty to keep the Bcb from going away until it is set
            //  Undirty, and assign the next modification time stamp.

            Bcbs[0]->Dirty = TRUE;

            //  Initialize the OldestLsn field.

            if (ARGUMENT_PRESENT(Lsn)) {
                Bcbs[0]->OldestLsn = *Lsn;
                Bcbs[0]->NewestLsn = *Lsn;
            }

            //  Move it to the dirty list if these are the first dirty pages,
            //  and this is not disabled for write behind.
            //
            //  Increase the count of dirty bytes in the shared cache map.

            CcAcquireMasterLockAtDpcLevel();
            if ((SharedCacheMap->DirtyPages == 0) &&
                !FlagOn(SharedCacheMap->Flags, DISABLE_WRITE_BEHIND)) {

                //  If the lazy write scan is not active, then start it.

                if (!LazyWriter.ScanActive) {
                    CcScheduleLazyWriteScan();
                }

                RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );
                InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks,
                                &SharedCacheMap->SharedCacheMapLinks );
            }

            SharedCacheMap->DirtyPages += Pages;
            CcTotalDirtyPages += Pages;
            CcReleaseMasterLockFromDpcLevel();
        }

        //  If this Lsn happens to be older/newer than the ones we have
        //  stored, then change it.

        if (ARGUMENT_PRESENT(Lsn)) {

            if ((Bcbs[0]->OldestLsn.QuadPart == 0) || (Lsn->QuadPart < Bcbs[0]->OldestLsn.QuadPart)) {
                Bcbs[0]->OldestLsn = *Lsn;
            }

            if (Lsn->QuadPart > Bcbs[0]->NewestLsn.QuadPart) {
                Bcbs[0]->NewestLsn = *Lsn;
            }
        }

        //  See if we need to advance our goal for ValidDataLength.

        if ( Bcbs[0]->BeyondLastByte.QuadPart > SharedCacheMap->ValidDataGoal.QuadPart ) {

            SharedCacheMap->ValidDataGoal = Bcbs[0]->BeyondLastByte;
        }

        ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );
    }

    DebugTrace(-1, me, "CcSetDirtyPinnedData -> VOID\n", 0 );
}
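The Lsn handling above maintains a widening [OldestLsn, NewestLsn] interval per Bcb. Isolated as a helper for clarity (hypothetical; the real work happens inline under BcbSpinLock, and zero is treated as "no Lsn recorded yet"):

    VOID TrackLsnRange( IN OUT PBCB Bcb, IN PLARGE_INTEGER Lsn )
    {
        //  The first Lsn recorded becomes both bounds; later Lsns only widen.

        if ((Bcb->OldestLsn.QuadPart == 0) || (Lsn->QuadPart < Bcb->OldestLsn.QuadPart)) {
            Bcb->OldestLsn = *Lsn;
        }
        if (Lsn->QuadPart > Bcb->NewestLsn.QuadPart) {
            Bcb->NewestLsn = *Lsn;
        }
    }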

VOID CcSetReadAheadGranularity ( IN PFILE_OBJECT FileObject,
                                 IN ULONG Granularity )

Definition at line 1222 of file cachesub.c.

References PPRIVATE_CACHE_MAP.

Referenced by UdfCommonRead().

Routine Description:

    This routine may be called to set the read ahead granularity used by
    the Cache Manager.  The default is PAGE_SIZE.  The number is decremented
    and stored as a mask.

Arguments:

    FileObject - File Object for which granularity shall be set

    Granularity - new granularity, which must be a power of 2 and
                  >= PAGE_SIZE

Return Value:

    None
--*/

{
    ((PPRIVATE_CACHE_MAP)FileObject->PrivateCacheMap)->ReadAheadMask = Granularity - 1;
}
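An illustrative call site, such as a file system read path raising the granularity for a cached handle (the constant and the asserts are assumptions here; only the power-of-two and >= PAGE_SIZE requirements come from the routine above):

    #define FSX_READ_AHEAD_GRANULARITY (64 * 1024)

    //  Granularity - 1 only works as a mask for exact powers of two.

    ASSERT( (FSX_READ_AHEAD_GRANULARITY & (FSX_READ_AHEAD_GRANULARITY - 1)) == 0 );
    ASSERT( FSX_READ_AHEAD_GRANULARITY >= PAGE_SIZE );

    CcSetReadAheadGranularity( FileObject, FSX_READ_AHEAD_GRANULARITY );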

NTSTATUS CcSetValidData ( IN PFILE_OBJECT FileObject,
                          IN PLARGE_INTEGER ValidDataLength )

Definition at line 2717 of file cachesub.c.

References _IRP::AssociatedIrp, Buffer, DebugTrace, DebugTrace2, _IO_STACK_LOCATION::DeviceObject, Event(), Executive, FALSE, _IO_STACK_LOCATION::FileObject, _IRP::Flags, IoAllocateIrp(), IoCallDriver, IoGetNextIrpStackLocation, IoGetRelatedDeviceObject(), Irp, IRP_MJ_SET_INFORMATION, IRP_PAGING_IO, IRP_SYNCHRONOUS_PAGING_IO, KeInitializeEvent, KernelMode, KeWaitForSingleObject(), _IO_STACK_LOCATION::MajorFunction, me, NT_SUCCESS, NTSTATUS(), NULL, _IO_STACK_LOCATION::Parameters, PIO_STACK_LOCATION, PsGetCurrentThread, _IRP::RequestorMode, _DEVICE_OBJECT::StackSize, Status, _IRP::Tail, TRUE, _IRP::UserEvent, and _IRP::UserIosb.

Referenced by CcWriteBehind().

Routine Description:

    This routine is used to call the File System to update ValidDataLength
    for a file.

Arguments:

    FileObject - A pointer to a referenced file object for the file whose
        ValidDataLength is to be updated.

    ValidDataLength - Pointer to new ValidDataLength.

Return Value:

    Status of operation.

--*/

{
    PIO_STACK_LOCATION IrpSp;
    PDEVICE_OBJECT DeviceObject;
    NTSTATUS Status;
    FILE_END_OF_FILE_INFORMATION Buffer;
    IO_STATUS_BLOCK IoStatus;
    KEVENT Event;
    PIRP Irp;

    DebugTrace(+1, me, "CcSetValidData:\n", 0 );
    DebugTrace( 0, me, "    FileObject = %08lx\n", FileObject );
    DebugTrace2(0, me, "    ValidDataLength = %08lx, %08lx\n",
                ValidDataLength->LowPart, ValidDataLength->HighPart );

    //
    //  Copy ValidDataLength to our buffer.
    //

    Buffer.EndOfFile = *ValidDataLength;

    //
    //  Initialize the event.
    //

    KeInitializeEvent( &Event, NotificationEvent, FALSE );

    //
    //  Begin by getting a pointer to the device object that the file resides
    //  on.
    //

    DeviceObject = IoGetRelatedDeviceObject( FileObject );

    //
    //  Allocate an I/O Request Packet (IRP) for this operation.
    //

    Irp = IoAllocateIrp( DeviceObject->StackSize, FALSE );
    if (Irp == NULL) {

        DebugTrace(-1, me, "CcSetValidData-> STATUS_INSUFFICIENT_RESOURCES\n", 0 );

        return STATUS_INSUFFICIENT_RESOURCES;
    }

    //
    //  Get a pointer to the first stack location in the packet.  This location
    //  will be used to pass the function codes and parameters to the first
    //  driver.
    //

    IrpSp = IoGetNextIrpStackLocation( Irp );

    //
    //  Fill in the IRP according to this request, setting the flags to
    //  just cause IO to set the event and deallocate the Irp.
    //

    Irp->Flags = IRP_PAGING_IO | IRP_SYNCHRONOUS_PAGING_IO;
    Irp->RequestorMode = KernelMode;
    Irp->UserIosb = &IoStatus;
    Irp->UserEvent = &Event;
    Irp->Tail.Overlay.OriginalFileObject = FileObject;
    Irp->Tail.Overlay.Thread = PsGetCurrentThread();
    Irp->AssociatedIrp.SystemBuffer = &Buffer;

    //
    //  Fill in the set-information parameters.
    //

    IrpSp->MajorFunction = IRP_MJ_SET_INFORMATION;
    IrpSp->FileObject = FileObject;
    IrpSp->DeviceObject = DeviceObject;
    IrpSp->Parameters.SetFile.Length = sizeof(FILE_END_OF_FILE_INFORMATION);
    IrpSp->Parameters.SetFile.FileInformationClass = FileEndOfFileInformation;
    IrpSp->Parameters.SetFile.FileObject = NULL;
    IrpSp->Parameters.SetFile.AdvanceOnly = TRUE;

    //
    //  Queue the packet to the appropriate driver based on whether or not there
    //  is a VPB associated with the device.  This routine should not raise.
    //

    Status = IoCallDriver( DeviceObject, Irp );

    //
    //  If pending is returned (which is a successful status),
    //  we must wait for the request to complete.
    //

    if (Status == STATUS_PENDING) {
        KeWaitForSingleObject( &Event,
                               Executive,
                               KernelMode,
                               FALSE,
                               (PLARGE_INTEGER)NULL);
    }

    //
    //  If we got an error back in Status, then the Iosb
    //  was not written, so we will just copy the status
    //  there, then test the final status after that.
    //

    if (!NT_SUCCESS(Status)) {
        IoStatus.Status = Status;
    }

    DebugTrace(-1, me, "CcSetValidData-> %08lx\n", IoStatus.Status );

    return IoStatus.Status;
}
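The AdvanceOnly flag is what distinguishes this internal callback from an ordinary user-initiated end-of-file change. The following minimal sketch shows how a file system's set-information dispatch might recognize it; MyFsdSetInformation, MyAdvanceValidDataLength, and MySetEndOfFile are hypothetical names for illustration only, and the error handling and other information classes a real dispatch routine needs are omitted.

VOID MyAdvanceValidDataLength( IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER NewVdl );  // hypothetical
VOID MySetEndOfFile( IN PFILE_OBJECT FileObject, IN PLARGE_INTEGER NewEof );            // hypothetical

NTSTATUS
MyFsdSetInformation (
    IN PDEVICE_OBJECT DeviceObject,
    IN PIRP Irp
    )
{
    PIO_STACK_LOCATION IrpSp = IoGetCurrentIrpStackLocation( Irp );

    UNREFERENCED_PARAMETER( DeviceObject );

    if (IrpSp->Parameters.SetFile.FileInformationClass == FileEndOfFileInformation) {

        PFILE_END_OF_FILE_INFORMATION Info = Irp->AssociatedIrp.SystemBuffer;

        if (IrpSp->Parameters.SetFile.AdvanceOnly) {

            //
            //  Lazy-writer callback (CcSetValidData above): only move
            //  ValidDataLength forward, and never beyond what the file
            //  system itself considers valid on disk.
            //

            MyAdvanceValidDataLength( IrpSp->FileObject, &Info->EndOfFile );

        } else {

            //
            //  Ordinary user-initiated end-of-file change.
            //

            MySetEndOfFile( IrpSp->FileObject, &Info->EndOfFile );
        }
    }

    Irp->IoStatus.Status = STATUS_SUCCESS;
    IoCompleteRequest( Irp, IO_NO_INCREMENT );
    return STATUS_SUCCESS;
}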

VOID FASTCALL CcUnpinFileData (IN OUT PBCB Bcb, IN BOOLEAN ReadOnly, IN UNMAP_ACTIONS UnmapAction)

Definition at line 974 of file cachesub.c.

References ASSERT, _SHARED_CACHE_MAP::BcbSpinLock, CACHE_NTC_BCB, CACHE_NTC_SHARED_CACHE_MAP, CcAcquireMasterLockAtDpcLevel, CcAcquireVacbLockAtDpcLevel, CcBcbSpinLock, CcBeyondVacbs, CcBugCheck, CcCleanSharedCacheMapList, CcDeallocateBcb(), CcFreeVirtualAddress(), CcPagesYetToWrite, CcReleaseMasterLockFromDpcLevel, CcReleaseVacbLockFromDpcLevel, CcTotalDirtyPages, CcUnlockVacbLevel, CcVacbs, DebugTrace, _SHARED_CACHE_MAP::DirtyPages, ExReleaseResource, FALSE, FlagOn, _SHARED_CACHE_MAP::Flags, me, MODIFIED_WRITE_DISABLED, _SHARED_CACHE_MAP::NodeTypeCode, NULL, _SHARED_CACHE_MAP::OpenCount, PAGE_SHIFT, PVACB, SET_CLEAN, _SHARED_CACHE_MAP::SharedCacheMapLinks, TRUE, UNPIN, and UNREF.

Referenced by CcAcquireByteRangeForWrite(), CcCopyRead(), CcCopyWrite(), CcGetDirtyPages(), CcMapData(), CcPinFileData(), CcReleaseByteRangeFromWrite(), CcUnpinData(), CcUnpinDataForThread(), CcUnpinRepinnedBcb(), and CcZeroData().

Routine Description:

    This routine unmaps and unlocks the specified buffer, which was previously
    locked and mapped by calling CcPinFileData.

Arguments:

    Bcb - Pointer previously returned from CcPinFileData.  As may be
        seen above, this pointer may be either a Bcb or a Vacb.

    ReadOnly - must specify same value as when data was mapped

    UnmapAction - UNPIN or SET_CLEAN

Return Value:

    None

--*/

{
    KIRQL OldIrql;
    PSHARED_CACHE_MAP SharedCacheMap;

    DebugTrace(+1, me, "CcUnpinFileData >Bcb = %08lx\n", Bcb );

    //
    //  Note, since we have to allocate so many Vacbs, we do not use
    //  a node type code.  However, the Vacb starts with a BaseAddress,
    //  so we assume that the low byte of the Bcb node type code has
    //  some bits set, which a page-aligned Base Address cannot.
    //

    ASSERT( (CACHE_NTC_BCB & 0xFF) != 0 );

    if (Bcb->NodeTypeCode != CACHE_NTC_BCB) {

        ASSERT(((PVACB)Bcb >= CcVacbs) && ((PVACB)Bcb < CcBeyondVacbs));
        ASSERT(((PVACB)Bcb)->SharedCacheMap->NodeTypeCode == CACHE_NTC_SHARED_CACHE_MAP);

        CcFreeVirtualAddress( (PVACB)Bcb );

        DebugTrace(-1, me, "CcUnpinFileData -> VOID (simple release)\n", 0 );

        return;
    }

    SharedCacheMap = Bcb->SharedCacheMap;

    //
    //  We treat Bcbs as ReadOnly (do not acquire resource) if they
    //  are in sections for which we have not disabled modified writing.
    //

    if (!FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED) ||
        UnmapAction == UNREF) {
        ReadOnly = TRUE;
    }

    //
    //  Synchronize
    //

    ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql );

    switch (UnmapAction) {

    case UNPIN:
    case UNREF:

        ASSERT( Bcb->PinCount > 0 );

        Bcb->PinCount -= 1;
        break;

    case SET_CLEAN:

        if (Bcb->Dirty) {

            ULONG Pages = Bcb->ByteLength >> PAGE_SHIFT;

            //
            //  Reverse the rest of the actions taken when the Bcb was set dirty.
            //

            Bcb->Dirty = FALSE;

            CcAcquireMasterLockAtDpcLevel();

            SharedCacheMap->DirtyPages -= Pages;
            CcTotalDirtyPages -= Pages;

            //
            //  Normally we need to reduce CcPagesYetToWrite appropriately.
            //

            if (CcPagesYetToWrite > Pages) {
                CcPagesYetToWrite -= Pages;
            } else {
                CcPagesYetToWrite = 0;
            }

            //
            //  Remove SharedCacheMap from dirty list if nothing more dirty,
            //  and someone still has the cache map opened.
            //

            if ((SharedCacheMap->DirtyPages == 0) &&
                (SharedCacheMap->OpenCount != 0)) {

                RemoveEntryList( &SharedCacheMap->SharedCacheMapLinks );
                InsertTailList( &CcCleanSharedCacheMapList,
                                &SharedCacheMap->SharedCacheMapLinks );
            }

            CcReleaseMasterLockFromDpcLevel();
        }

        break;

    default:
        CcBugCheck( UnmapAction, 0, 0 );
    }

    //
    //  If we brought it to 0, then we have to kill it.
    //

    if (Bcb->PinCount == 0) {

        //
        //  If the Bcb is Dirty, we only release the resource and unmap now.
        //

        if (Bcb->Dirty) {

            if (Bcb->BaseAddress != NULL) {

                //
                //  Unmap the Vacb and free the resource if the Bcb is still
                //  dirty.  We have to free the resource before dropping the
                //  spinlock, and we want to hold the resource until the
                //  virtual address is freed.
                //

                CcFreeVirtualAddress( Bcb->Vacb );

                Bcb->BaseAddress = NULL;
                Bcb->Vacb = NULL;
            }

            if (!ReadOnly) {
                ExReleaseResource( &Bcb->Resource );
            }

            ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );
        }

        //
        //  Otherwise, we also delete the Bcb.
        //

        else {

            //
            //  Since CcCalculateVacbLockCount has to be able to walk
            //  the BcbList with only the VacbSpinLock, we take that one
            //  out to change the list and decrement the level.
            //

            CcAcquireVacbLockAtDpcLevel();
            RemoveEntryList( &Bcb->BcbLinks );

            //
            //  For large metadata streams we unlock the Vacb level.
            //

            CcUnlockVacbLevel( SharedCacheMap, Bcb->FileOffset.QuadPart );
            CcReleaseVacbLockFromDpcLevel();

            //
            //  Debug routines used to remove Bcbs from the global list
            //

#if LIST_DBG

            ExAcquireSpinLockAtDpcLevel( &CcBcbSpinLock );

            if (Bcb->CcBcbLinks.Flink != NULL) {

                RemoveEntryList( &Bcb->CcBcbLinks );
                CcBcbCount -= 1;
            }

            ExReleaseSpinLockFromDpcLevel( &CcBcbSpinLock );

#endif

            if (Bcb->BaseAddress != NULL) {

                CcFreeVirtualAddress( Bcb->Vacb );
            }
#if DBG
            if (!ReadOnly) {
                ExReleaseResource( &Bcb->Resource );
            }

            //
            //  ASSERT that the resource is unowned.
            //

            ASSERT( Bcb->Resource.ActiveCount == 0 );
#endif
            ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );
            CcDeallocateBcb( Bcb );
        }
    }

    //
    //  Else we just have to release our Shared access, if we are not
    //  readonly.  We don't need to do this above, since we deallocate
    //  the entire Bcb there.
    //

    else {

        if (!ReadOnly) {
            ExReleaseResource( &Bcb->Resource );
        }

        ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );
    }

    DebugTrace(-1, me, "CcUnpinFileData -> VOID\n", 0 );

    return;
}
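The opening of the routine relies on a layout trick: a Bcb and a Vacb can be told apart from the pointer alone because their first fields overlap. The sketch below isolates that discrimination under stated assumptions; the MY_ structure layouts and the MY_CACHE_NTC_BCB value are abbreviated stand-ins, not the real definitions, and a little-endian machine is assumed (as on the platforms this code targeted).

//
//  Illustrative sketch only: Bcb-versus-Vacb discrimination as described
//  in the comment above.  A Vacb begins with a page-aligned BaseAddress,
//  so the low byte of its first word is always zero; a Bcb begins with a
//  node type code whose low byte has bits set.
//

#define MY_CACHE_NTC_BCB ((USHORT)0x02FD)    // hypothetical value; low byte nonzero

typedef struct _MY_VACB {
    PVOID BaseAddress;                       // page aligned (or NULL)
    // ... remaining fields omitted
} MY_VACB, *PMY_VACB;

typedef struct _MY_BCB {
    USHORT NodeTypeCode;                     // overlays the low word of BaseAddress
    // ... remaining fields omitted
} MY_BCB, *PMY_BCB;

BOOLEAN
MyPointerIsBcb (
    IN PVOID BcbOrVacb
    )
{
    //
    //  On a little-endian machine the first USHORT of a page-aligned
    //  pointer has a zero low byte, so it can never equal a node type
    //  code whose low byte is nonzero.  This mirrors the
    //  Bcb->NodeTypeCode != CACHE_NTC_BCB test in the routine above.
    //

    return (BOOLEAN)(((PMY_BCB)BcbOrVacb)->NodeTypeCode == MY_CACHE_NTC_BCB);
}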

VOID CcUnpinRepinnedBcb (IN PVOID Bcb, IN BOOLEAN WriteThrough, OUT PIO_STATUS_BLOCK IoStatus)

Definition at line 5143 of file cachesub.c.

References ASSERT, CcDeferredWrites, CcPostDeferredWrites(), CcSetDirtyPinnedData(), CcUnpinFileData(), DebugTrace, DebugTrace2, ExAcquireResourceExclusive, FALSE, _SHARED_CACHE_MAP::FileObject, FlagOn, _SHARED_CACHE_MAP::Flags, me, MmFlushSection(), MmSetAddressRangeModified(), MODIFIED_WRITE_DISABLED, NULL, Resource, RetryError, _FILE_OBJECT::SectionObjectPointer, SET_CLEAN, TRUE, and UNPIN.

Routine Description:

    This routine may be called to write a previously pinned buffer
    through to the file.  It must have been preceded by a call to
    CcRepinBcb.  As this routine must acquire the Bcb
    resource exclusive, the caller must be extremely careful to avoid
    deadlocks.  Ideally the caller owns no resources at all when it
    calls this routine, or else the caller should guarantee that it
    has nothing else pinned in this same file.  (The latter rule is
    the one used to avoid deadlocks in calls from CcCopyWrite and
    CcMdlWrite.)

Arguments:

    Bcb - Pointer to a Bcb which was previously specified in a call
        to CcRepinBcb.

    WriteThrough - TRUE if the Bcb should be written through.

    IoStatus - Returns the I/O status for the operation.

Return Value:

    None.

--*/

{
    PSHARED_CACHE_MAP SharedCacheMap = ((PBCB)Bcb)->SharedCacheMap;

    DebugTrace(+1, me, "CcUnpinRepinnedBcb\n", 0 );
    DebugTrace( 0, me, "    Bcb = %08lx\n", Bcb );
    DebugTrace( 0, me, "    WriteThrough = %02lx\n", WriteThrough );

    //
    //  Set status to success for non write through case.
    //

    IoStatus->Status = STATUS_SUCCESS;

    if (WriteThrough) {

        //
        //  Acquire the Bcb exclusive to eliminate possible modifiers of the
        //  buffer, since we are about to write its buffer.
        //

        if (FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED)) {
            ExAcquireResourceExclusive( &((PBCB)Bcb)->Resource, TRUE );
        }

        //
        //  Now, there is a chance that the Lazy Writer has already written
        //  it, since the resource was free.  We will only write it if it
        //  is still dirty.
        //

        if (((PBCB)Bcb)->Dirty) {

            //
            //  First we make sure that the dirty bit in the PFN database is set.
            //

            ASSERT( ((PBCB)Bcb)->BaseAddress != NULL );
            MmSetAddressRangeModified( ((PBCB)Bcb)->BaseAddress,
                                       ((PBCB)Bcb)->ByteLength );

            //
            //  Now release the Bcb resource and set it clean.  Note we do not check
            //  here for errors, and just return the I/O status.  Errors on writes
            //  are rare to begin with.  Nonetheless, our strategy is to rely on
            //  one or more of the following (depending on the file system) to prevent
            //  errors from getting to us.
            //
            //      - Retries and/or other forms of error recovery in the disk driver
            //      - Mirroring driver
            //      - Hot fixing in the noncached path of the file system
            //
            //  In the unexpected case that a write error does get through, we
            //  report it to our caller, but go ahead and set the Bcb clean.  There
            //  seems to be no point in letting Bcbs (and pages in physical memory)
            //  accumulate which can never go away because we get an unrecoverable I/O
            //  error.
            //

            //
            //  We specify TRUE here for ReadOnly so that we will keep the
            //  resource during the flush.
            //

            CcUnpinFileData( (PBCB)Bcb, TRUE, SET_CLEAN );

            //
            //  Write it out.
            //

            MmFlushSection( ((PBCB)Bcb)->SharedCacheMap->FileObject->SectionObjectPointer,
                            &((PBCB)Bcb)->FileOffset,
                            ((PBCB)Bcb)->ByteLength,
                            IoStatus,
                            TRUE );

            //
            //  If we got verify required, we have to mark the buffer dirty again
            //  so we will try again later.
            //

            if (RetryError(IoStatus->Status)) {
                CcSetDirtyPinnedData( (PBCB)Bcb, NULL );
            }

            //
            //  Now remove the final pin count now that we have set it clean.
            //

            CcUnpinFileData( (PBCB)Bcb, FALSE, UNPIN );

            //
            //  See if there are any deferred writes we can post.
            //

            if (!IsListEmpty(&CcDeferredWrites)) {
                CcPostDeferredWrites();
            }
        }
        else {

            //
            //  The Lazy Writer got there first; just free the resource and unpin.
            //

            CcUnpinFileData( (PBCB)Bcb, FALSE, UNPIN );

        }

        DebugTrace2(0, me, "    <IoStatus = %08lx, %08lx\n", IoStatus->Status,
                    IoStatus->Information );
    }

    //
    //  Non-WriteThrough case
    //

    else {

        CcUnpinFileData( (PBCB)Bcb, TRUE, UNPIN );

        //
        //  Set status to success for non write through case.
        //

        IoStatus->Status = STATUS_SUCCESS;
    }

    DebugTrace(-1, me, "CcUnpinRepinnedBcb -> VOID\n", 0 );
}
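For context, this routine completes the repin protocol: a client that must keep a pinned buffer alive past its normal CcUnpinData (for example, until a log record commits) calls CcRepinBcb while the Bcb is still pinned, unpins normally, and later finishes with CcUnpinRepinnedBcb. The sketch below shows that caller-side shape; MyCommitRecord is a hypothetical routine, and the deadlock rule from the description above (nothing else pinned in the same file) is assumed to hold.

//
//  Illustrative sketch only: finishing a previously repinned Bcb.
//

VOID
MyCommitRecord (
    IN PVOID Bcb
    )
{
    IO_STATUS_BLOCK IoStatus;

    //
    //  Earlier, while the Bcb was still pinned by CcPinRead or
    //  CcPreparePinWrite, the caller did:
    //
    //      CcRepinBcb( Bcb );      // take an extra reference
    //      CcUnpinData( Bcb );     // drop the ordinary pin
    //
    //  Now write the buffer through and release the extra reference.
    //

    CcUnpinRepinnedBcb( Bcb, TRUE, &IoStatus );

    if (!NT_SUCCESS(IoStatus.Status)) {

        //
        //  Per the comments above, the Bcb has still been set clean
        //  (or re-dirtied on a verify error); the caller just learns
        //  that the write-through failed.
        //

        KdPrint(("MyFs: write-through of repinned Bcb failed %08lx\n",
                 IoStatus.Status));
    }
}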

VOID FASTCALL CcWriteBehind (IN PSHARED_CACHE_MAP SharedCacheMap, IN PIO_STATUS_BLOCK IoStatus)

Definition at line 3880 of file cachesub.c.

References CC_REQUEUE, CcAcquireMasterLock, CcAcquireMasterLockAtDpcLevel, CcDecrementOpenCount, CcDeferredWrites, CcDeleteSharedCacheMap(), CcFlushCache(), CcFreeActiveVacb(), CcGetFlushedValidData(), CcIncrementOpenCount, CcLogError(), CcNoDelay, CcPagesYetToWrite, CcPostDeferredWrites(), CcReleaseMasterLock, CcReleaseMasterLockFromDpcLevel, CcSetValidData(), ClearFlag, DbgPrint, DebugTrace, _MBCB::DirtyPages, ExAllocatePoolWithTag, ExFreePool(), FALSE, FlagOn, FsRtlAcquireFileExclusive(), FsRtlReleaseFile(), GetActiveVacbAtDpcLevel, IoRaiseInformationalHardError(), LAZY_WRITE_OCCURRED, me, NT_SUCCESS, NTSTATUS(), NULL, ObQueryNameString(), PagedPool, _MBCB::PagesToWrite, PIN_ACCESS, RetryError, Status, TRUE, and WRITE_QUEUED.

Referenced by CcWorkerThread().

Routine Description:

    This routine performs write behind for the given SharedCacheMap.
    (Note the original comment here described a Wait parameter and
    BOOLEAN return that do not match this VOID routine's signature.)

    The code is very similar to the code that the Lazy Writer performs
    for each SharedCacheMap.  The main difference is in the call to
    CcAcquireByteRangeForWrite.  Write Behind does not care about time
    stamps (passing ULONG to accept all time stamps), but it will never
    dump the first (highest byte offset) buffer in the list if the last
    byte of that buffer is not yet written.  The Lazy Writer does exactly
    the opposite, in the sense that it is totally time-driven, and will
    even dump a partially modified buffer if it sits around long enough.

Arguments:

    SharedCacheMap - Pointer to SharedCacheMap to be written

    IoStatus - Returns the I/O status of the operation

Return Value:

    None

--*/

{
    KIRQL OldIrql;
    ULONG ActivePage;
    ULONG PageIsDirty;
    PMBCB Mbcb;
    NTSTATUS Status;
    ULONG FileExclusive = FALSE;
    PVACB ActiveVacb = NULL;

    DebugTrace(+1, me, "CcWriteBehind\n", 0 );
    DebugTrace( 0, me, "    SharedCacheMap = %08lx\n", SharedCacheMap );

    //
    //  First we have to acquire the file for LazyWrite, to avoid
    //  deadlocking with writers to the file.  We do this via the
    //  CallBack procedure specified to CcInitializeCacheMap.
    //

    if (!(*SharedCacheMap->Callbacks->AcquireForLazyWrite)
                            ( SharedCacheMap->LazyWriteContext, TRUE )) {

        //
        //  The filesystem is hinting that it doesn't think that it can
        //  service the write without significant delay, so we will defer
        //  and come back later.  Simply drop the queued flag ... note that
        //  we do not modify CcPagesYetToWrite, in the hope that we can make
        //  up the difference in some other cache map on this pass.
        //

        CcAcquireMasterLock( &OldIrql );
        ClearFlag(SharedCacheMap->Flags, WRITE_QUEUED);
        CcReleaseMasterLock( OldIrql );

        IoStatus->Status = STATUS_FILE_LOCK_CONFLICT;
        return;
    }

    //
    //  See if there is a previous active page to clean up, but only
    //  do so now if it is the last dirty page or no users have the
    //  file open.  We will free it below after dropping the spinlock.
    //

    ExAcquireFastLock( &SharedCacheMap->BcbSpinLock, &OldIrql );
    CcAcquireMasterLockAtDpcLevel();

    if ((SharedCacheMap->DirtyPages <= 1) || (SharedCacheMap->OpenCount == 0)) {
        GetActiveVacbAtDpcLevel( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );
    }

    //
    //  Increment open count so that our caller's views stay available
    //  for CcGetVacbMiss.  We could be tying up all of the views, and
    //  still need to write file sizes.
    //

    CcIncrementOpenCount( SharedCacheMap, 'brWS' );

    //
    //  If there is a mask bcb, then we need to establish a target for
    //  it to flush.
    //

    if ((Mbcb = SharedCacheMap->Mbcb) != 0) {

        //
        //  Set a target of pages to write, assuming that any Active
        //  Vacb will increase the number.
        //

        Mbcb->PagesToWrite = Mbcb->DirtyPages + ((ActiveVacb != NULL) ? 1 : 0);

        if (Mbcb->PagesToWrite > CcPagesYetToWrite) {

            Mbcb->PagesToWrite = CcPagesYetToWrite;
        }
    }

    CcReleaseMasterLockFromDpcLevel();
    ExReleaseFastLock( &SharedCacheMap->BcbSpinLock, OldIrql );

    //
    //  Now free the active Vacb, if we found one.
    //

    if (ActiveVacb != NULL) {

        CcFreeActiveVacb( SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );
    }

    //
    //  Now perform the lazy writing for this file via a special call
    //  to CcFlushCache.  It recognizes us by the &CcNoDelay input to
    //  FileOffset, which signifies a Lazy Write, but is subsequently
    //  ignored.
    //

    CcFlushCache( SharedCacheMap->FileObject->SectionObjectPointer,
                  &CcNoDelay,
                  1,
                  IoStatus );

    //
    //  No need for the Lazy Write resource now.
    //

    (*SharedCacheMap->Callbacks->ReleaseFromLazyWrite)
                            ( SharedCacheMap->LazyWriteContext );

    //
    //  Check if we need to put up a popup.
    //

    if (!NT_SUCCESS(IoStatus->Status) && !RetryError(IoStatus->Status)) {

        //
        //  We lost writebehind data.  Try to get the filename.  If we can't,
        //  then just raise the error returned by the failing write.
        //

        POBJECT_NAME_INFORMATION FileNameInfo;
        NTSTATUS QueryStatus;
        ULONG whocares;

        FileNameInfo = ExAllocatePoolWithTag( PagedPool, 1024, 'nFcC' );

        if ( FileNameInfo ) {
            QueryStatus = ObQueryNameString( SharedCacheMap->FileObject,
                                             FileNameInfo,
                                             1024,
                                             &whocares );

            if ( !NT_SUCCESS(QueryStatus) ) {
                ExFreePool(FileNameInfo);
                FileNameInfo = NULL;
            }
        }

        //
        //  Give checked builds something to look at.  This should also be event
        //  logged for after-the-fact analysis.
        //

        KdPrint(("CACHE MANAGER: Lost delayed write FileOb %08x status %08x\n",
                 SharedCacheMap->FileObject, IoStatus->Status));

        if ( FileNameInfo ) {
            IoRaiseInformationalHardError( STATUS_LOST_WRITEBEHIND_DATA, &FileNameInfo->Name, NULL );
            ExFreePool(FileNameInfo);
        } else {
            if ( SharedCacheMap->FileObject->FileName.Length &&
                 SharedCacheMap->FileObject->FileName.MaximumLength &&
                 SharedCacheMap->FileObject->FileName.Buffer ) {

                IoRaiseInformationalHardError( STATUS_LOST_WRITEBEHIND_DATA, &SharedCacheMap->FileObject->FileName, NULL );
            }
        }

        CcLogError( SharedCacheMap->FileObject->DeviceObject,
                    IO_LOST_DELAYED_WRITE,
                    IoStatus->Status,
                    &(SharedCacheMap->FileObject->FileName) );

    //
    //  See if there are any deferred writes we can post.
    //

    } else if (!IsListEmpty(&CcDeferredWrites)) {
        CcPostDeferredWrites();
    }

    //
    //  Now acquire BcbSpinLock again to check for ValidData updates.
    //

    ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql );

    //
    //  If the current ValidDataGoal is greater than (or equal to) ValidDataLength,
    //  then we must see if we have advanced beyond the current ValidDataLength.
    //
    //  If we have NEVER written anything out from this shared cache map, then
    //  there is no need to check anything associated with valid data length
    //  here.  We will come by here again when, and if, anybody actually
    //  modifies the file and we lazy write some data.
    //

    Status = STATUS_SUCCESS;
    if (FlagOn(SharedCacheMap->Flags, LAZY_WRITE_OCCURRED) &&
        (SharedCacheMap->ValidDataGoal.QuadPart >= SharedCacheMap->ValidDataLength.QuadPart) &&
        (SharedCacheMap->ValidDataLength.QuadPart != MAXLONGLONG) &&
        (SharedCacheMap->FileSize.QuadPart != 0)) {

        LARGE_INTEGER NewValidDataLength;

        NewValidDataLength = CcGetFlushedValidData( SharedCacheMap->FileObject->SectionObjectPointer,
                                                    TRUE );

        //
        //  If a new ValidDataLength has been written, then we have to
        //  call the file system back to update it.  We must temporarily
        //  drop our global list while we do this, which is safe to do since
        //  we have not cleared WRITE_QUEUED.
        //
        //  Note we keep calling any time we wrote the last page of the file,
        //  to solve the "famous" AFS Server problem.  The file system will
        //  truncate our valid data call to whatever is currently valid.  But
        //  then if it writes a little more, we do not want to stop calling
        //  back.
        //

        if ( NewValidDataLength.QuadPart >= SharedCacheMap->ValidDataLength.QuadPart ) {

            ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );

            //
            //  Call the file system to set the new valid data.  We have
            //  no one to tell if this doesn't work.
            //

            Status = CcSetValidData( SharedCacheMap->FileObject,
                                     &NewValidDataLength );

            ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql );
            if (NT_SUCCESS(Status)) {
                SharedCacheMap->ValidDataLength = NewValidDataLength;
#ifdef TOMM
            } else if ((Status != STATUS_INSUFFICIENT_RESOURCES) && !RetryError(Status)) {
                DbgPrint("Unexpected status from CcSetValidData: %08lx, FileObject: %08lx\n",
                         Status,
                         SharedCacheMap->FileObject);
                DbgBreakPoint();
#endif // TOMM
            }
        }
    }

    ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql );

    //
    //  Show we are done.
    //

    CcAcquireMasterLock( &OldIrql );
    CcDecrementOpenCount( SharedCacheMap, 'brWF' );

    //
    //  Make an approximate guess about whether we will call CcDeleteSharedCacheMap
    //  or not to truncate the file.
    //
    //  Also do not delete the SharedCacheMap if we got an error on the ValidDataLength
    //  callback.  If we get a resource allocation failure or a retryable error (due to
    //  log file full?), we have no one to tell, so we must just loop back and try again.
    //  Of course all I/O errors are just too bad.
    //

    if ((SharedCacheMap->OpenCount == 0)

            &&

        (NT_SUCCESS(Status) || ((Status != STATUS_INSUFFICIENT_RESOURCES) && !RetryError(Status)))) {

        CcReleaseMasterLock( OldIrql );
        FsRtlAcquireFileExclusive( SharedCacheMap->FileObject );
        CcAcquireMasterLock( &OldIrql );

        //
        //  Now really see if we are to delete this SharedCacheMap.  By having released
        //  first we avoid a deadlock with the file system when the FileObject is
        //  dereferenced.  Note that CcDeleteSharedCacheMap requires that the
        //  CcMasterSpinLock already be acquired, and it releases it.
        //
        //  Note that we must retest since we dropped and reacquired the master
        //  lock.
        //

        if ((SharedCacheMap->OpenCount == 0)

                &&

            ((SharedCacheMap->DirtyPages == 0) || ((SharedCacheMap->FileSize.QuadPart == 0) &&
                                                   !FlagOn(SharedCacheMap->Flags, PIN_ACCESS)))) {

            //
            //  Make sure to drop the requeue flag in case the write hit the timeout at
            //  the same time it finished everything up.
            //

            CcDeleteSharedCacheMap( SharedCacheMap, OldIrql, TRUE );
            IoStatus->Information = 0;
            SharedCacheMap = NULL;

        } else {

            CcReleaseMasterLock( OldIrql );
            FsRtlReleaseFile( SharedCacheMap->FileObject );
            CcAcquireMasterLock( &OldIrql );
        }
    }

    //
    //  In the normal case, we just clear the flag on the way out if
    //  we will not requeue the work item.
    //

    if (SharedCacheMap != NULL) {

        if (IoStatus->Information != CC_REQUEUE) {
            ClearFlag(SharedCacheMap->Flags, WRITE_QUEUED);
        }
        CcReleaseMasterLock( OldIrql );
    }

    DebugTrace(-1, me, "CcWriteBehind->VOID\n", 0 );

    return;
}
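The deferral path at the top of this routine depends on the AcquireForLazyWrite callback that a file system registers through the CACHE_MANAGER_CALLBACKS structure passed to CcInitializeCacheMap: returning FALSE makes CcWriteBehind set STATUS_FILE_LOCK_CONFLICT and try again later. The sketch below shows a minimal callback pair under stated assumptions; MY_FCB and its Resource field are hypothetical, and a production file system would typically also manage the top-level IRP and other per-thread state here.

//
//  Illustrative sketch only: a minimal AcquireForLazyWrite /
//  ReleaseFromLazyWrite callback pair.
//

typedef struct _MY_FCB {
    ERESOURCE Resource;                  // hypothetical per-file lock
    // ... remaining fields omitted
} MY_FCB, *PMY_FCB;

BOOLEAN
MyAcquireForLazyWrite (
    IN PVOID Context,                    // the LazyWriteContext registered
    IN BOOLEAN Wait                      // with CcInitializeCacheMap
    )
{
    PMY_FCB Fcb = (PMY_FCB)Context;

    //
    //  Take the file shared, honoring Wait: if the caller cannot block
    //  and the resource is busy, decline so the lazy write is deferred,
    //  exactly as CcWriteBehind handles above.
    //

    if (!ExAcquireResourceSharedLite( &Fcb->Resource, Wait )) {
        return FALSE;
    }

    return TRUE;
}

VOID
MyReleaseFromLazyWrite (
    IN PVOID Context
    )
{
    PMY_FCB Fcb = (PMY_FCB)Context;

    ExReleaseResourceLite( &Fcb->Resource );
}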


Variable Documentation

ULONG CcMaxDirtyWrite = 0x10000

Definition at line 41 of file cachesub.c.

Referenced by CcMapAndCopy().

