Main Page | Class Hierarchy | Class List | File List | Class Members | File Members

vacbsup.c File Reference

#include "cc.h"

Go to the source code of this file.

Defines

#define me   0x000000040
#define GetVacb(SCM, OFF)
#define CcMoveVacbToReuseHead(V)
#define CcMoveVacbToReuseTail(V)
#define SizeOfVacbArray(LSZ)
#define CheckedDec(N)

Functions

VOID CcUnmapVacb (IN PVACB Vacb, IN PSHARED_CACHE_MAP SharedCacheMap, IN BOOLEAN UnmapBehind)
PVACB CcGetVacbMiss (IN PSHARED_CACHE_MAP SharedCacheMap, IN LARGE_INTEGER FileOffset, IN OUT PKIRQL OldIrql)
VOID CcCalculateVacbLevelLockCount (IN PSHARED_CACHE_MAP SharedCacheMap, IN PVACB *VacbArray, IN ULONG Level)
PVACB CcGetVacbLargeOffset (IN PSHARED_CACHE_MAP SharedCacheMap, IN LONGLONG FileOffset)
VOID CcSetVacbLargeOffset (IN PSHARED_CACHE_MAP SharedCacheMap, IN LONGLONG FileOffset, IN PVACB Vacb)
_inline VOID SetVacb (IN PSHARED_CACHE_MAP SharedCacheMap, IN LARGE_INTEGER Offset, IN PVACB Vacb)
_inline VOID ReferenceVacbLevel (IN PSHARED_CACHE_MAP SharedCacheMap, IN PVACB *VacbArray, IN ULONG Level, IN LONG Amount, IN BOOLEAN Special)
VOID CcInitializeVacbs ()
PVOID CcGetVirtualAddressIfMapped (IN PSHARED_CACHE_MAP SharedCacheMap, IN LONGLONG FileOffset, OUT PVACB *Vacb, OUT PULONG ReceivedLength)
PVOID CcGetVirtualAddress (IN PSHARED_CACHE_MAP SharedCacheMap, IN LARGE_INTEGER FileOffset, OUT PVACB *Vacb, IN OUT PULONG ReceivedLength)
VOID FASTCALL CcFreeVirtualAddress (IN PVACB Vacb)
VOID CcReferenceFileOffset (IN PSHARED_CACHE_MAP SharedCacheMap, IN LARGE_INTEGER FileOffset)
VOID CcDereferenceFileOffset (IN PSHARED_CACHE_MAP SharedCacheMap, IN LARGE_INTEGER FileOffset)
VOID CcWaitOnActiveCount (IN PSHARED_CACHE_MAP SharedCacheMap)
VOID FASTCALL CcCreateVacbArray (IN PSHARED_CACHE_MAP SharedCacheMap, IN LARGE_INTEGER NewSectionSize)
VOID CcExtendVacbArray (IN PSHARED_CACHE_MAP SharedCacheMap, IN LARGE_INTEGER NewSectionSize)
BOOLEAN FASTCALL CcUnmapVacbArray (IN PSHARED_CACHE_MAP SharedCacheMap, IN PLARGE_INTEGER FileOffset OPTIONAL, IN ULONG Length, IN BOOLEAN UnmapBehind)
ULONG CcPrefillVacbLevelZone (IN ULONG NumberNeeded, OUT PKIRQL OldIrql, IN ULONG NeedBcbListHeads)
VOID CcDrainVacbLevelZone ()
PLIST_ENTRY CcGetBcbListHeadLargeOffset (IN PSHARED_CACHE_MAP SharedCacheMap, IN LONGLONG FileOffset, IN BOOLEAN FailToSuccessor)
VOID CcAdjustVacbLevelLockCount (IN PSHARED_CACHE_MAP SharedCacheMap, IN LONGLONG FileOffset, IN LONG Adjustment)


Define Documentation

#define CcMoveVacbToReuseHead( V )
 

Value:

RemoveEntryList( &(V)->LruList ); \ InsertHeadList( &CcVacbLru, &(V)->LruList );

Definition at line 163 of file vacbsup.c.

Referenced by CcFreeVirtualAddress(), CcGetVacbMiss(), and CcUnmapVacbArray().

#define CcMoveVacbToReuseTail( V )
 

Value:

RemoveEntryList( &(V)->LruList ); \ InsertTailList( &CcVacbLru, &(V)->LruList );

Definition at line 166 of file vacbsup.c.

Referenced by CcFreeVirtualAddress(), CcGetVirtualAddress(), and CcGetVirtualAddressIfMapped().

#define CheckedDec( N )
 

Value:

{ \ ASSERT((N) != 0); \ (N) -= 1; \ }

Definition at line 181 of file vacbsup.c.

Referenced by CcFreeVirtualAddress(), and CcGetVacbMiss().

#define GetVacb( SCM, OFF )
 

Value:

( \ ((SCM)->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL) ? \ CcGetVacbLargeOffset((SCM),(OFF).QuadPart) : \ (SCM)->Vacbs[(OFF).LowPart >> VACB_OFFSET_SHIFT] \ )

Definition at line 78 of file vacbsup.c.

Referenced by CcGetVacbMiss(), CcGetVirtualAddress(), CcGetVirtualAddressIfMapped(), and CcUnmapVacbArray().

#define me   0x000000040
 

Definition at line 30 of file vacbsup.c.

#define SizeOfVacbArray( LSZ )
 

Value:

( \ ((LSZ).HighPart != 0) ? MAXULONG : \ ((LSZ).LowPart > (PREALLOCATED_VACBS * VACB_MAPPING_GRANULARITY) ? \ (((LSZ).LowPart >> VACB_OFFSET_SHIFT) * sizeof(PVACB)) : \ (PREALLOCATED_VACBS * sizeof(PVACB))) \ )

Definition at line 174 of file vacbsup.c.

Referenced by CcCreateVacbArray(), and CcExtendVacbArray().


Function Documentation

VOID CcAdjustVacbLevelLockCount( IN PSHARED_CACHE_MAP SharedCacheMap,
IN LONGLONG FileOffset,
IN LONG Adjustment )

Definition at line 2335 of file vacbsup.c.

References ASSERT, CcSetVacbLargeOffset(), FALSE, IsVacbLevelReferenced(), ReferenceVacbLevel(), TRUE, VACB_LEVEL_SHIFT, VACB_OFFSET_SHIFT, VACB_SIZE_OF_FIRST_LEVEL, and VACB_SPECIAL_DEREFERENCE.

02343 : 02344 02345 This routine may be called to adjust the lock count of the bottom Vacb level when 02346 Bcbs are inserted or deleted. If the count goes to zero, the level will be 02347 eliminated. The bottom level must exist, or we crash! 02348 02349 Arguments: 02350 02351 SharedCacheMap - Supplies the pointer to the SharedCacheMap for which the Vacb 02352 is desired. 02353 02354 FileOffset - Supplies the fileOffset corresponding to the desired Vacb. 02355 02356 Adjustment - Generally -1 or +1. 02357 02358 Return Value: 02359 02360 None. 02361 02362 Environment: 02363 02364 CcVacbSpinLock should be held on entry. 02365 02366 --*/ 02367 02368 { 02369 ULONG Level, Shift; 02370 PVACB *VacbArray; 02371 LONGLONG OriginalFileOffset = FileOffset; 02372 02373 // 02374 // Initialize variables controlling our descent into the hierarchy. 02375 // 02376 02377 Level = 0; 02378 Shift = VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT; 02379 02380 VacbArray = SharedCacheMap->Vacbs; 02381 02382 // 02383 // Caller must have verified that we have a hierarchy, otherwise this routine 02384 // would fail. 02385 // 02386 02387 ASSERT(SharedCacheMap->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL); 02388 02389 // 02390 // Loop to calculate how many levels we have and how much we have to 02391 // shift to index into the first level. 02392 // 02393 02394 do { 02395 02396 Level += 1; 02397 Shift += VACB_LEVEL_SHIFT; 02398 02399 } while (SharedCacheMap->SectionSize.QuadPart > ((LONGLONG)1 << Shift)); 02400 02401 // 02402 // Now descend the tree to the bottom level to get the caller's Vacb. 02403 // 02404 02405 Shift -= VACB_LEVEL_SHIFT; 02406 do { 02407 02408 VacbArray = (PVACB *)VacbArray[(ULONG)(FileOffset >> Shift)]; 02409 02410 Level -= 1; 02411 02412 FileOffset &= ((LONGLONG)1 << Shift) - 1; 02413 02414 Shift -= VACB_LEVEL_SHIFT; 02415 02416 } while (Level != 0); 02417 02418 // 02419 // Now we have reached the final level, do the adjustment. 
02420 // 02421 02422 ReferenceVacbLevel( SharedCacheMap, VacbArray, Level, Adjustment, FALSE ); 02423 02424 // 02425 // Now, if we decremented the count to 0, then force the collapse to happen by 02426 // upping count and resetting to NULL. Then smash OriginalFileOffset to be 02427 // the first entry so we do not recalculate! 02428 // 02429 02430 if (!IsVacbLevelReferenced( SharedCacheMap, VacbArray, Level )) { 02431 ReferenceVacbLevel( SharedCacheMap, VacbArray, Level, 1, TRUE ); 02432 OriginalFileOffset &= ~(VACB_SIZE_OF_FIRST_LEVEL - 1); 02433 CcSetVacbLargeOffset( SharedCacheMap, OriginalFileOffset, VACB_SPECIAL_DEREFERENCE ); 02434 } 02435 }

VOID CcCalculateVacbLevelLockCount( IN PSHARED_CACHE_MAP SharedCacheMap,
IN PVACB *VacbArray,
IN ULONG Level )

Definition at line 2439 of file vacbsup.c.

References _BCB::BcbLinks, CACHE_NTC_BCB, Count, FlagOn, Index, MODIFIED_WRITE_DISABLED, _BCB::NodeTypeCode, NULL, _VACB_LEVEL_REFERENCE::Reference, VACB_LAST_INDEX_FOR_LEVEL, and VacbLevelReference().

Referenced by CcExtendVacbArray(), and ReferenceVacbLevel().

02447 : 02448 02449 This routine may be called to calculate or recalculate the lock count on a 02450 given Vacb level array. It is called, for example, when we are extending a 02451 section up to the point where we activate multilevel logic and want to start 02452 keeping the count. 02453 02454 Arguments: 02455 02456 SharedCacheMap - Supplies the pointer to the SharedCacheMap for which the Vacb 02457 is desired. 02458 02459 VacbArray - The Vacb Level array to recalculate 02460 02461 Level - Supplies 0 for the bottom level, nonzero otherwise. 02462 02463 Return Value: 02464 02465 None. 02466 02467 Environment: 02468 02469 CcVacbSpinLock should be held on entry. 02470 02471 --*/ 02472 02473 { 02474 PBCB Bcb; 02475 ULONG Index; 02476 LONG Count = 0; 02477 PVACB *VacbTemp = VacbArray; 02478 PVACB_LEVEL_REFERENCE VacbReference; 02479 02480 // 02481 // First loop through to count how many Vacb pointers are in use. 02482 // 02483 02484 for (Index = 0; Index <= VACB_LAST_INDEX_FOR_LEVEL; Index++) { 02485 if (*(VacbTemp++) != NULL) { 02486 Count += 1; 02487 } 02488 } 02489 02490 // 02491 // If this is a metadata stream, we also have to count the Bcbs in the 02492 // corresponding listheads. 02493 // 02494 02495 if (FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED) && (Level == 0)) { 02496 02497 // 02498 // Pick up the Blink of the first listhead, casting it to a Bcb. 02499 // 02500 02501 Bcb = (PBCB)CONTAINING_RECORD(((PLIST_ENTRY)VacbTemp)->Blink, BCB, BcbLinks); 02502 Index = 0; 02503 02504 // 02505 // Now loop through the list. For each Bcb we see, increment the count, 02506 // and for each listhead, increment Index. We are done when we hit the 02507 // last listhead, which is actually the next listhead past the ones in this 02508 // block. 
02509 // 02510 02511 do { 02512 02513 if (Bcb->NodeTypeCode == CACHE_NTC_BCB) { 02514 Count += 1; 02515 } else { 02516 Index += 1; 02517 } 02518 02519 Bcb = (PBCB)CONTAINING_RECORD(Bcb->BcbLinks.Blink, BCB, BcbLinks); 02520 02521 } while (Index <= (VACB_LAST_INDEX_FOR_LEVEL / 2)); 02522 } 02523 02524 // 02525 // Store the count and get out... (by hand, don't touch the special count) 02526 // 02527 02528 VacbReference = VacbLevelReference( SharedCacheMap, VacbArray, Level ); 02529 VacbReference->Reference = Count; 02530 }

VOID FASTCALL CcCreateVacbArray( IN PSHARED_CACHE_MAP SharedCacheMap,
IN LARGE_INTEGER NewSectionSize )

Definition at line 1208 of file vacbsup.c.

References ASSERT, BEGIN_BCB_LIST_ARRAY, CcMaxVacbLevelsSeen, ExAllocatePoolWithTag, ExRaiseStatus(), FALSE, FlagOn, MODIFIED_WRITE_DISABLED, NonPagedPool, NULL, PAGE_SIZE, PREALLOCATED_VACBS, SIZE_PER_BCB_LIST, SizeOfVacbArray, TRUE, VACB_LEVEL_BLOCK_SIZE, VACB_LEVEL_REFERENCE, VACB_LEVEL_SHIFT, VACB_MAPPING_GRANULARITY, VACB_NUMBER_OF_LEVELS, and VACB_OFFSET_SHIFT.

Referenced by CcInitializeCacheMap().

01215 : 01216 01217 This routine must be called when a SharedCacheMap is created to create 01218 and initialize the initial Vacb array. 01219 01220 Arguments: 01221 01222 SharedCacheMap - Supplies the shared cache map for which the array is 01223 to be created. 01224 01225 NewSectionSize - Supplies the current size of the section which must be 01226 covered by the Vacb array. 01227 01228 Return Value: 01229 01230 None. 01231 01232 --*/ 01233 01234 { 01235 PVACB *NewAddresses; 01236 ULONG NewSize, SizeToAllocate; 01237 PLIST_ENTRY BcbListHead; 01238 BOOLEAN CreateBcbListHeads = FALSE, CreateReference = FALSE; 01239 01240 NewSize = SizeToAllocate = SizeOfVacbArray(NewSectionSize); 01241 01242 // 01243 // The following limit is greater than the MM limit 01244 // (i.e., MM actually only supports even smaller sections). 01245 // We have to reject the sign bit, and testing the high byte 01246 // for nonzero will surely only catch errors. 01247 // 01248 01249 if (NewSectionSize.HighPart & ~(PAGE_SIZE - 1)) { 01250 ExRaiseStatus(STATUS_SECTION_TOO_BIG); 01251 } 01252 01253 // 01254 // See if we can use the array inside the shared cache map. 01255 // 01256 01257 if (NewSize == (PREALLOCATED_VACBS * sizeof(PVACB))) { 01258 01259 NewAddresses = &SharedCacheMap->InitialVacbs[0]; 01260 01261 // 01262 // Else allocate the array. 01263 // 01264 01265 } else { 01266 01267 // 01268 // For large metadata streams, double the size to allocate 01269 // an array of Bcb listheads. Each two Vacb pointers also 01270 // gets its own Bcb listhead, thus requiring double the size. 01271 // 01272 01273 ASSERT(SIZE_PER_BCB_LIST == (VACB_MAPPING_GRANULARITY * 2)); 01274 01275 // 01276 // If this stream is larger than the size for multi-level Vacbs, 01277 // then fix the size to allocate the root. 
01278 // 01279 01280 if (NewSize > VACB_LEVEL_BLOCK_SIZE) { 01281 01282 ULONG Level = 0; 01283 ULONG Shift = VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT; 01284 01285 NewSize = SizeToAllocate = VACB_LEVEL_BLOCK_SIZE; 01286 SizeToAllocate += sizeof(VACB_LEVEL_REFERENCE); 01287 CreateReference = TRUE; 01288 01289 // 01290 // Loop to calculate how many levels we have and how much we have to 01291 // shift to index into the first level. 01292 // 01293 01294 do { 01295 01296 Level += 1; 01297 Shift += VACB_LEVEL_SHIFT; 01298 01299 } while ((NewSectionSize.QuadPart > ((LONGLONG)1 << Shift)) != 0); 01300 01301 // 01302 // Remember the maximum level ever seen (which is actually Level + 1). 01303 // 01304 01305 if (Level >= CcMaxVacbLevelsSeen) { 01306 ASSERT(Level <= VACB_NUMBER_OF_LEVELS); 01307 CcMaxVacbLevelsSeen = Level + 1; 01308 } 01309 01310 } else { 01311 01312 // 01313 // Does this stream get a Bcb Listhead array? 01314 // 01315 01316 if (FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED) && 01317 (NewSectionSize.QuadPart > BEGIN_BCB_LIST_ARRAY)) { 01318 01319 SizeToAllocate *= 2; 01320 CreateBcbListHeads = TRUE; 01321 } 01322 01323 // 01324 // Handle the boundary case by giving the proto-level a 01325 // reference count. This will allow us to simply push it 01326 // in the expansion case. In practice, due to pool granularity 01327 // this will not change the amount of space allocated 01328 // 01329 01330 if (NewSize == VACB_LEVEL_BLOCK_SIZE) { 01331 01332 SizeToAllocate += sizeof(VACB_LEVEL_REFERENCE); 01333 CreateReference = TRUE; 01334 } 01335 } 01336 01337 NewAddresses = ExAllocatePoolWithTag( NonPagedPool, SizeToAllocate, 'pVcC' ); 01338 if (NewAddresses == NULL) { 01339 SharedCacheMap->Status = STATUS_INSUFFICIENT_RESOURCES; 01340 ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES ); 01341 } 01342 } 01343 01344 // 01345 // Zero out the Vacb array and the trailing reference counts. 
01346 // 01347 01348 RtlZeroMemory( (PCHAR)NewAddresses, NewSize ); 01349 01350 if (CreateReference) { 01351 01352 SizeToAllocate -= sizeof(VACB_LEVEL_REFERENCE); 01353 RtlZeroMemory( (PCHAR)NewAddresses + SizeToAllocate, sizeof(VACB_LEVEL_REFERENCE) ); 01354 } 01355 01356 // 01357 // Loop to insert the Bcb listheads (if any) in the *descending* order 01358 // Bcb list. 01359 // 01360 01361 if (CreateBcbListHeads) { 01362 01363 for (BcbListHead = (PLIST_ENTRY)((PCHAR)NewAddresses + NewSize); 01364 BcbListHead < (PLIST_ENTRY)((PCHAR)NewAddresses + SizeToAllocate); 01365 BcbListHead++) { 01366 01367 InsertHeadList( &SharedCacheMap->BcbList, BcbListHead ); 01368 } 01369 } 01370 01371 SharedCacheMap->Vacbs = NewAddresses; 01372 SharedCacheMap->SectionSize = NewSectionSize; 01373 }

VOID CcDereferenceFileOffset( IN PSHARED_CACHE_MAP SharedCacheMap,
IN LARGE_INTEGER FileOffset )

Definition at line 1007 of file vacbsup.c.

References ASSERT, CcAcquireVacbLock, CcReleaseVacbLock, DISPATCH_LEVEL, SetVacb(), VACB_SIZE_OF_FIRST_LEVEL, and VACB_SPECIAL_DEREFERENCE.

Referenced by CcPinFileData().

01014 : 01015 01016 This routine must be called once for each call to CcReferenceFileOffset 01017 to remove the reference. 01018 01019 Arguments: 01020 01021 SharedCacheMap - Supplies a pointer to the Shared Cache Map for the file. 01022 01023 FileOffset - Supplies the desired FileOffset within the file. 01024 01025 Return Value: 01026 01027 None 01028 01029 --*/ 01030 01031 { 01032 KIRQL OldIrql; 01033 01034 ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL); 01035 01036 // 01037 // This operation only has meaning if the Vacbs are in the multilevel form. 01038 // 01039 01040 if (SharedCacheMap->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL) { 01041 01042 // 01043 // Acquire the Vacb lock to synchronize the dereference. 01044 // 01045 01046 CcAcquireVacbLock( &OldIrql ); 01047 01048 ASSERT( FileOffset.QuadPart <= SharedCacheMap->SectionSize.QuadPart ); 01049 01050 SetVacb( SharedCacheMap, FileOffset, VACB_SPECIAL_DEREFERENCE ); 01051 01052 CcReleaseVacbLock( OldIrql ); 01053 } 01054 01055 ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL); 01056 01057 return; 01058 }

VOID CcDrainVacbLevelZone( )

Definition at line 2044 of file vacbsup.c.

References CcAcquireVacbLock, CcMaxVacbLevelsSeen, CcReleaseVacbLock, CcVacbLevelEntries, CcVacbLevelFreeList, CcVacbLevelWithBcbsEntries, CcVacbLevelWithBcbsFreeList, ExFreePool(), and NULL.

Referenced by CcDeleteMbcb(), CcGetVacbMiss(), and CcUnmapVacbArray().

02049 : 02050 02051 This routine should be called any time some entries have been deallocated to 02052 the VacbLevel zone, and we want to insure the zone is returned to a normal level. 02053 02054 Arguments: 02055 02056 Return Value: 02057 02058 None. 02059 02060 Environment: 02061 02062 No spinlocks should be held upon entry. 02063 02064 --*/ 02065 02066 { 02067 KIRQL OldIrql; 02068 PVACB *NextVacbArray; 02069 02070 // 02071 // This is an unsafe loop to see if it looks like there is stuff to 02072 // clean up. 02073 // 02074 02075 while ((CcVacbLevelEntries > (CcMaxVacbLevelsSeen * 4)) || 02076 (CcVacbLevelWithBcbsEntries > 2)) { 02077 02078 // 02079 // Now go in and try to pick up one entry to free under a FastLock. 02080 // 02081 02082 NextVacbArray = NULL; 02083 CcAcquireVacbLock( &OldIrql ); 02084 if (CcVacbLevelEntries > (CcMaxVacbLevelsSeen * 4)) { 02085 NextVacbArray = CcVacbLevelFreeList; 02086 CcVacbLevelFreeList = (PVACB *)NextVacbArray[0]; 02087 CcVacbLevelEntries -= 1; 02088 } else if (CcVacbLevelWithBcbsEntries > 2) { 02089 NextVacbArray = CcVacbLevelWithBcbsFreeList; 02090 CcVacbLevelWithBcbsFreeList = (PVACB *)NextVacbArray[0]; 02091 CcVacbLevelWithBcbsEntries -= 1; 02092 } 02093 CcReleaseVacbLock( OldIrql ); 02094 02095 // 02096 // Since the loop is unsafe, we may not have gotten anything. 02097 // 02098 02099 if (NextVacbArray != NULL) { 02100 ExFreePool(NextVacbArray); 02101 } 02102 } 02103 }

VOID CcExtendVacbArray( IN PSHARED_CACHE_MAP SharedCacheMap,
IN LARGE_INTEGER NewSectionSize )

Definition at line 1377 of file vacbsup.c.

References ASSERT, BCB, BEGIN_BCB_LIST_ARRAY, CcAcquireVacbLock, CcAcquireVacbLockAtDpcLevel, CcAllocateVacbLevel(), CcCalculateVacbLevelLockCount(), CcDeallocateVacbLevel(), CcMaxVacbLevelsSeen, CcPrefillVacbLevelZone(), CcReleaseVacbLock, CcReleaseVacbLockFromDpcLevel, CcVacbLevelEntries, ExFreePool(), ExRaiseStatus(), FALSE, FlagOn, FsRtlAllocatePoolWithTag, IsVacbLevelReferenced(), MODIFIED_WRITE_DISABLED, NonPagedPool, NULL, Offset, PAGE_SIZE, ReferenceVacbLevel(), SIZE_PER_BCB_LIST, SizeOfVacbArray, TRUE, VACB_LEVEL_BLOCK_SIZE, VACB_LEVEL_SHIFT, VACB_NUMBER_OF_LEVELS, VACB_OFFSET_SHIFT, and VACB_SIZE_OF_FIRST_LEVEL.

Referenced by CcInitializeCacheMap(), and CcSetFileSizes().

01384 : 01385 01386 This routine must be called any time the section for a shared cache 01387 map is extended, in order to extend the Vacb array (if necessary). 01388 01389 Arguments: 01390 01391 SharedCacheMap - Supplies the shared cache map for which the array is 01392 to be created. 01393 01394 NewSectionSize - Supplies the new size of the section which must be 01395 covered by the Vacb array. 01396 01397 Return Value: 01398 01399 None. 01400 01401 --*/ 01402 01403 { 01404 KIRQL OldIrql; 01405 PVACB *OldAddresses; 01406 PVACB *NewAddresses; 01407 ULONG OldSize; 01408 ULONG NewSize, SizeToAllocate; 01409 LARGE_INTEGER NextLevelSize; 01410 BOOLEAN GrowingBcbListHeads = FALSE, CreateReference = FALSE; 01411 01412 // 01413 // The following limit is greater than the MM limit 01414 // (i.e., MM actually only supports even smaller sections). 01415 // We have to reject the sign bit, and testing the high byte 01416 // for nonzero will surely only catch errors. 01417 // 01418 01419 if (NewSectionSize.HighPart & ~(PAGE_SIZE - 1)) { 01420 ExRaiseStatus(STATUS_SECTION_TOO_BIG); 01421 } 01422 01423 // 01424 // See if we will be growing the Bcb ListHeads, so we can take out the 01425 // master lock if so. 01426 // 01427 01428 if (FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED) && 01429 (NewSectionSize.QuadPart > BEGIN_BCB_LIST_ARRAY)) { 01430 01431 GrowingBcbListHeads = TRUE; 01432 } 01433 01434 // 01435 // Is there any work to do? 01436 // 01437 01438 if (NewSectionSize.QuadPart > SharedCacheMap->SectionSize.QuadPart) { 01439 01440 // 01441 // Handle the growth of the first level here. 
01442 // 01443 01444 if (SharedCacheMap->SectionSize.QuadPart < VACB_SIZE_OF_FIRST_LEVEL) { 01445 01446 NextLevelSize = NewSectionSize; 01447 01448 // 01449 // Limit the growth of this level 01450 // 01451 01452 if (NextLevelSize.QuadPart >= VACB_SIZE_OF_FIRST_LEVEL) { 01453 NextLevelSize.QuadPart = VACB_SIZE_OF_FIRST_LEVEL; 01454 CreateReference = TRUE; 01455 } 01456 01457 // 01458 // N.B.: SizeOfVacbArray only calculates the size of the VACB 01459 // pointer block. We must adjust for Bcb listheads and the 01460 // multilevel reference count. 01461 // 01462 01463 NewSize = SizeToAllocate = SizeOfVacbArray(NextLevelSize); 01464 OldSize = SizeOfVacbArray(SharedCacheMap->SectionSize); 01465 01466 // 01467 // Only do something if the size is growing. 01468 // 01469 01470 if (NewSize > OldSize) { 01471 01472 // 01473 // Does this stream get a Bcb Listhead array? 01474 // 01475 01476 if (GrowingBcbListHeads) { 01477 SizeToAllocate *= 2; 01478 } 01479 01480 // 01481 // Do we need space for the reference count? 01482 // 01483 01484 if (CreateReference) { 01485 SizeToAllocate += sizeof(VACB_LEVEL_REFERENCE); 01486 } 01487 01488 NewAddresses = FsRtlAllocatePoolWithTag( NonPagedPool, SizeToAllocate, 'pVcC' ); 01489 01490 // 01491 // See if we will be growing the Bcb ListHeads, so we can take out the 01492 // master lock if so. 01493 // 01494 01495 if (GrowingBcbListHeads) { 01496 01497 ExAcquireSpinLock( &SharedCacheMap->BcbSpinLock, &OldIrql ); 01498 CcAcquireVacbLockAtDpcLevel(); 01499 01500 } else { 01501 01502 // 01503 // Acquire the spin lock to serialize with anyone who might like 01504 // to "steal" one of the mappings we are going to move. 
01505 // 01506 01507 CcAcquireVacbLock( &OldIrql ); 01508 } 01509 01510 OldAddresses = SharedCacheMap->Vacbs; 01511 if (OldAddresses != NULL) { 01512 RtlCopyMemory( NewAddresses, OldAddresses, OldSize ); 01513 } else { 01514 OldSize = 0; 01515 } 01516 01517 RtlZeroMemory( (PCHAR)NewAddresses + OldSize, NewSize - OldSize ); 01518 01519 if (CreateReference) { 01520 01521 SizeToAllocate -= sizeof(VACB_LEVEL_REFERENCE); 01522 RtlZeroMemory( (PCHAR)NewAddresses + SizeToAllocate, sizeof(VACB_LEVEL_REFERENCE) ); 01523 } 01524 01525 // 01526 // See if we have to initialize Bcb Listheads. 01527 // 01528 01529 if (GrowingBcbListHeads) { 01530 01531 LARGE_INTEGER Offset; 01532 PLIST_ENTRY BcbListHeadNew, TempEntry; 01533 01534 Offset.QuadPart = 0; 01535 BcbListHeadNew = (PLIST_ENTRY)((PCHAR)NewAddresses + NewSize ); 01536 01537 // 01538 // Handle case where the old array had Bcb Listheads. 01539 // 01540 01541 if ((SharedCacheMap->SectionSize.QuadPart > BEGIN_BCB_LIST_ARRAY) && 01542 (OldAddresses != NULL)) { 01543 01544 PLIST_ENTRY BcbListHeadOld; 01545 01546 BcbListHeadOld = (PLIST_ENTRY)((PCHAR)OldAddresses + OldSize); 01547 01548 // 01549 // Loop to remove each old listhead and insert the new one 01550 // in its place. 01551 // 01552 01553 do { 01554 TempEntry = BcbListHeadOld->Flink; 01555 RemoveEntryList( BcbListHeadOld ); 01556 InsertTailList( TempEntry, BcbListHeadNew ); 01557 Offset.QuadPart += SIZE_PER_BCB_LIST; 01558 BcbListHeadOld += 1; 01559 BcbListHeadNew += 1; 01560 } while (Offset.QuadPart < SharedCacheMap->SectionSize.QuadPart); 01561 01562 // 01563 // Otherwise, handle the case where we are adding Bcb 01564 // Listheads. 01565 // 01566 01567 } else { 01568 01569 TempEntry = SharedCacheMap->BcbList.Blink; 01570 01571 // 01572 // Loop through any/all Bcbs to insert the new listheads. 01573 // 01574 01575 while (TempEntry != &SharedCacheMap->BcbList) { 01576 01577 // 01578 // Sit on this Bcb until we have inserted all listheads 01579 // that go before it. 
01580 // 01581 01582 while (Offset.QuadPart <= ((PBCB)CONTAINING_RECORD(TempEntry, BCB, BcbLinks))->FileOffset.QuadPart) { 01583 01584 InsertHeadList(TempEntry, BcbListHeadNew); 01585 Offset.QuadPart += SIZE_PER_BCB_LIST; 01586 BcbListHeadNew += 1; 01587 } 01588 TempEntry = TempEntry->Blink; 01589 } 01590 } 01591 01592 // 01593 // Now insert the rest of the new listhead entries that were 01594 // not finished in either loop above. 01595 // 01596 01597 while (Offset.QuadPart < NextLevelSize.QuadPart) { 01598 01599 InsertHeadList(&SharedCacheMap->BcbList, BcbListHeadNew); 01600 Offset.QuadPart += SIZE_PER_BCB_LIST; 01601 BcbListHeadNew += 1; 01602 } 01603 } 01604 01605 // 01606 // These two fields must be changed while still holding the spinlock. 01607 // 01608 01609 SharedCacheMap->Vacbs = NewAddresses; 01610 SharedCacheMap->SectionSize = NextLevelSize; 01611 01612 // 01613 // Now we can free the spinlocks ahead of freeing pool. 01614 // 01615 01616 if (GrowingBcbListHeads) { 01617 CcReleaseVacbLockFromDpcLevel(); 01618 ExReleaseSpinLock( &SharedCacheMap->BcbSpinLock, OldIrql ); 01619 } else { 01620 CcReleaseVacbLock( OldIrql ); 01621 } 01622 01623 if ((OldAddresses != &SharedCacheMap->InitialVacbs[0]) && 01624 (OldAddresses != NULL)) { 01625 ExFreePool( OldAddresses ); 01626 } 01627 } 01628 01629 // 01630 // Make sure SectionSize gets updated. It is ok to fall through here 01631 // without a spinlock, so long as either Vacbs was not changed, or it 01632 // was changed together with SectionSize under the spinlock(s) above. 01633 // 01634 01635 SharedCacheMap->SectionSize = NextLevelSize; 01636 } 01637 01638 // 01639 // Handle extends up to and within multi-level Vacb arrays here. This is fairly simple. 01640 // If no additional Vacb levels are required, then there is no work to do, otherwise 01641 // we just have to push the root one or more levels linked through the first pointer 01642 // in the new root(s). 
01643 // 01644 01645 if (NewSectionSize.QuadPart > SharedCacheMap->SectionSize.QuadPart) { 01646 01647 PVACB *NextVacbArray; 01648 ULONG NewLevel; 01649 ULONG Level = 1; 01650 ULONG Shift = VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT; 01651 01652 // 01653 // Loop to calculate how many levels we currently have. 01654 // 01655 01656 while (SharedCacheMap->SectionSize.QuadPart > ((LONGLONG)1 << Shift)) { 01657 01658 Level += 1; 01659 Shift += VACB_LEVEL_SHIFT; 01660 } 01661 01662 NewLevel = Level; 01663 01664 // 01665 // Loop to calculate how many levels we need. 01666 // 01667 01668 while (((NewSectionSize.QuadPart - 1) >> Shift) != 0) { 01669 01670 NewLevel += 1; 01671 Shift += VACB_LEVEL_SHIFT; 01672 } 01673 01674 // 01675 // Now see if we have any work to do. 01676 // 01677 01678 if (NewLevel > Level) { 01679 01680 // 01681 // Remember the maximum level ever seen (which is actually NewLevel + 1). 01682 // 01683 01684 if (NewLevel >= CcMaxVacbLevelsSeen) { 01685 ASSERT(NewLevel <= VACB_NUMBER_OF_LEVELS); 01686 CcMaxVacbLevelsSeen = NewLevel + 1; 01687 } 01688 01689 // 01690 // Raise if we cannot preallocate enough buffers. 01691 // 01692 01693 if (!CcPrefillVacbLevelZone( NewLevel - Level, &OldIrql, FALSE )) { 01694 01695 ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES ); 01696 } 01697 01698 // 01699 // Now if the current Level of the file is 1, we have not been maintaining 01700 // a reference count, so we have to calculate it before pushing. In the 01701 // boundary case we have made sure that the reference space is avaliable. 01702 // 01703 01704 if (Level == 1) { 01705 01706 // 01707 // We know this is always a leaf-like level right now. 01708 // 01709 01710 CcCalculateVacbLevelLockCount( SharedCacheMap, SharedCacheMap->Vacbs, 0 ); 01711 } 01712 01713 // 01714 // Finally, if there are any active pointers in the first level, then we 01715 // have to create new levels by adding a new root enough times to create 01716 // additional levels. 
On the other hand, if the pointer count in the top 01717 // level is zero, then we must not do any pushes, because we never allow 01718 // empty leaves! 01719 // 01720 01721 if (IsVacbLevelReferenced( SharedCacheMap, SharedCacheMap->Vacbs, Level - 1 )) { 01722 01723 while (NewLevel > Level++) { 01724 01725 ASSERT(CcVacbLevelEntries != 0); 01726 NextVacbArray = CcAllocateVacbLevel(FALSE); 01727 01728 NextVacbArray[0] = (PVACB)SharedCacheMap->Vacbs; 01729 ReferenceVacbLevel( SharedCacheMap, NextVacbArray, Level, 1, FALSE ); 01730 01731 SharedCacheMap->Vacbs = NextVacbArray; 01732 } 01733 01734 } else { 01735 01736 // 01737 // We are now possesed of the additional problem that this level has no 01738 // references but may have Bcb listheads due to the boundary case where 01739 // we have expanded up to the multilevel Vacbs above. This level can't 01740 // remain at the root and needs to be destroyed. What we need to do is 01741 // replace it with one of our prefilled (non Bcb) levels and unlink the 01742 // Bcb listheads in the old one. 01743 // 01744 01745 if (Level == 1 && FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED)) { 01746 01747 PLIST_ENTRY PredecessorListHead, SuccessorListHead; 01748 01749 NextVacbArray = SharedCacheMap->Vacbs; 01750 SharedCacheMap->Vacbs = CcAllocateVacbLevel(FALSE); 01751 01752 PredecessorListHead = ((PLIST_ENTRY)((PCHAR)NextVacbArray + VACB_LEVEL_BLOCK_SIZE))->Flink; 01753 SuccessorListHead = ((PLIST_ENTRY)((PCHAR)NextVacbArray + (VACB_LEVEL_BLOCK_SIZE * 2) - sizeof(LIST_ENTRY)))->Blink; 01754 PredecessorListHead->Blink = SuccessorListHead; 01755 SuccessorListHead->Flink = PredecessorListHead; 01756 01757 CcDeallocateVacbLevel( NextVacbArray, TRUE ); 01758 } 01759 } 01760 01761 // 01762 // These two fields (Vacbs and SectionSize) must be changed while still 01763 // holding the spinlock. 
01764 // 01765 01766 SharedCacheMap->SectionSize = NewSectionSize; 01767 CcReleaseVacbLock( OldIrql ); 01768 } 01769 01770 // 01771 // Make sure SectionSize gets updated. It is ok to fall through here 01772 // without a spinlock, so long as either Vacbs was not changed, or it 01773 // was changed together with SectionSize under the spinlock(s) above. 01774 // 01775 01776 SharedCacheMap->SectionSize = NewSectionSize; 01777 } 01778 } 01779 }

VOID FASTCALL CcFreeVirtualAddress( IN PVACB Vacb )

Definition at line 862 of file vacbsup.c.

References CcAcquireVacbLock, CcMoveVacbToReuseHead, CcMoveVacbToReuseTail, CcReleaseVacbLock, CheckedDec, FALSE, KeSetEvent(), NULL, _SHARED_CACHE_MAP::VacbActiveCount, and _SHARED_CACHE_MAP::WaitOnActiveCount.

Referenced by CcCopyRead(), CcDeleteSharedCacheMap(), CcFastCopyRead(), CcFlushCache(), CcFreeActiveVacb(), CcMapAndCopy(), CcMdlRead(), CcPerformReadAhead(), CcPinFileData(), CcPinMappedData(), CcPrepareMdlWrite(), CcPurgeAndClearCacheSection(), and CcUnpinFileData().

00868 : 00869 00870 This routine must be called once for each call to CcGetVirtualAddress 00871 to free that virtual address. 00872 00873 Arguments: 00874 00875 Vacb - Supplies the Vacb which was returned from CcGetVirtualAddress. 00876 00877 Return Value: 00878 00879 None. 00880 00881 --*/ 00882 00883 { 00884 KIRQL OldIrql; 00885 PSHARED_CACHE_MAP SharedCacheMap = Vacb->SharedCacheMap; 00886 00887 CcAcquireVacbLock( &OldIrql ); 00888 00889 CheckedDec(Vacb->Overlay.ActiveCount); 00890 00891 // 00892 // If the count goes to zero, then we want to decrement the global 00893 // Active count. 00894 // 00895 00896 if (Vacb->Overlay.ActiveCount == 0) { 00897 00898 // 00899 // If the SharedCacheMap address is not NULL, then this one is 00900 // in use by a shared cache map, and we have to decrement his 00901 // count and see if anyone is waiting. 00902 // 00903 00904 if (SharedCacheMap != NULL) { 00905 00906 CheckedDec(SharedCacheMap->VacbActiveCount); 00907 00908 // 00909 // If there is someone waiting for this count to go to zero, 00910 // wake them here. 00911 // 00912 00913 if (SharedCacheMap->WaitOnActiveCount != NULL) { 00914 KeSetEvent( SharedCacheMap->WaitOnActiveCount, 0, FALSE ); 00915 } 00916 00917 // 00918 // Go to the back of the LRU to save this range for a bit 00919 // 00920 00921 CcMoveVacbToReuseTail( Vacb ); 00922 00923 } else { 00924 00925 // 00926 // This range is no longer referenced, so make it avaliable 00927 // 00928 00929 CcMoveVacbToReuseHead( Vacb ); 00930 } 00931 00932 } else { 00933 00934 // 00935 // This range is still in use, so move it away from the front 00936 // so that it doesn't consume cycles being checked. 00937 // 00938 00939 CcMoveVacbToReuseTail( Vacb ); 00940 } 00941 00942 CcReleaseVacbLock( OldIrql ); 00943 }

//
// Defined at line 2107 of vacbsup.c.
// Referenced by: CcSetVacbLargeOffset().
//

PLIST_ENTRY
CcGetBcbListHeadLargeOffset (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LONGLONG FileOffset,
    IN BOOLEAN FailToSuccessor
    )

/*++

Routine Description:

    This routine may be called to return the Bcb listhead for the specified FileOffset.
    It should only be called if the SectionSize is greater than VACB_SIZE_OF_FIRST_LEVEL.

Arguments:

    SharedCacheMap - Supplies the pointer to the SharedCacheMap for which the listhead
        is desired.

    FileOffset - Supplies the fileOffset corresponding to the desired listhead.

    FailToSuccessor - Instructs whether not finding the exact listhead should cause us to
        return the predecessor or successor Bcb listhead.

Return Value:

    Returns the desired Listhead pointer.  If the desired listhead does not actually exist
    yet, then it returns the appropriate listhead.

Environment:

    The BcbSpinlock should be held on entry.

--*/

{
    ULONG Level, Shift;
    PVACB *VacbArray, *NextVacbArray;
    ULONG Index;

    // Per-level backtracking state: the index and array visited at each level
    // of the descent, so we can pop back up when a subtree is missing.

    ULONG SavedIndexes[VACB_NUMBER_OF_LEVELS];
    PVACB *SavedVacbArrays[VACB_NUMBER_OF_LEVELS];
    ULONG SavedLevels = 0;

    //
    // Initialize variables controlling our descent into the hierarchy.
    //

    Level = 0;
    Shift = VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT;
    VacbArray = SharedCacheMap->Vacbs;

    //
    // Caller must have verified that we have a hierarchy, otherwise this routine
    // would fail.
    //

    ASSERT(SharedCacheMap->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL);

    //
    // Loop to calculate how many levels we have and how much we have to
    // shift to index into the first level.
    //

    do {

        Level += 1;
        Shift += VACB_LEVEL_SHIFT;

    } while (SharedCacheMap->SectionSize.QuadPart > ((LONGLONG)1 << Shift));

    //
    // Our caller could be asking for an offset off the end of section size, so if he
    // is actually off the size of the level, then return the main listhead.
    //

    if (FileOffset >= ((LONGLONG)1 << Shift)) {
        return &SharedCacheMap->BcbList;
    }

    //
    // Now descend the tree to the bottom level to get the caller's Bcb ListHead.
    //

    Shift -= VACB_LEVEL_SHIFT;
    do {

        //
        // Decrement back to the level that describes the size we are within.
        //

        Level -= 1;

        //
        // Calculate the index into the Vacb block for this level.
        //

        Index = (ULONG)(FileOffset >> Shift);
        ASSERT(Index <= VACB_LAST_INDEX_FOR_LEVEL);

        //
        // Get block address for next level.
        //

        NextVacbArray = (PVACB *)VacbArray[Index];

        //
        // If it is NULL then we have to go find the highest Bcb or listhead which
        // comes before the guy we are looking for, i.e., its predecessor.
        //

        if (NextVacbArray == NULL) {

            //
            // Back up to look for the highest guy earlier in this tree, i.e., the
            // predecessor listhead.
            //

            while (TRUE) {

                //
                // Scan, if we can, in the current array for a non-null index.
                //

                if (FailToSuccessor) {

                    if (Index != VACB_LAST_INDEX_FOR_LEVEL) {

                        while ((Index != VACB_LAST_INDEX_FOR_LEVEL) && (VacbArray[++Index] == NULL)) {
                            continue;
                        }

                        //
                        // If we found a non-null index, get out and try to return the
                        // listhead.
                        //

                        if ((NextVacbArray = (PVACB *)VacbArray[Index]) != NULL) {
                            break;
                        }
                    }

                } else {

                    if (Index != 0) {

                        while ((Index != 0) && (VacbArray[--Index] == NULL)) {
                            continue;
                        }

                        //
                        // If we found a non-null index, get out and try to return the
                        // listhead.
                        //

                        if ((NextVacbArray = (PVACB *)VacbArray[Index]) != NULL) {
                            break;
                        }
                    }
                }

                //
                // If there are no saved levels yet, then there is no predecessor or
                // successor - it is the main listhead.
                //

                if (SavedLevels == 0) {
                    return &SharedCacheMap->BcbList;
                }

                //
                // Otherwise, we can pop up a level in the tree and start scanning
                // from that guy for a path to the right listhead.
                //

                Level += 1;
                Index = SavedIndexes[--SavedLevels];
                VacbArray = SavedVacbArrays[SavedLevels];
            }

            //
            // We have backed up in the hierarchy, so now we are just looking for the
            // highest/lowest guy in the level we want, i.e., the level-linking listhead.
            // So smash FileOffset accordingly (we mask the high bits out anyway).
            //

            if (FailToSuccessor) {
                FileOffset = 0;
            } else {
                FileOffset = MAXLONGLONG;
            }
        }

        //
        // We save Index and VacbArray at each level, for the case that we
        // have to walk back up the tree to find a predecessor.
        //

        SavedIndexes[SavedLevels] = Index;
        SavedVacbArrays[SavedLevels] = VacbArray;
        SavedLevels += 1;

        //
        // Now make this one our current pointer, and mask away the extraneous high-order
        // FileOffset bits for this level.
        //

        VacbArray = NextVacbArray;
        FileOffset &= ((LONGLONG)1 << Shift) - 1;
        Shift -= VACB_LEVEL_SHIFT;

        //
        // Loop until we hit the bottom level.
        //

    } while (Level != 0);

    //
    // Now calculate the index for the bottom level and return the appropriate listhead.
    // (The normal Vacb index indexes to a pointer to a Vacb for a .25MB view, so dropping
    // the low bit gets you to the even-indexed Vacb pointer which is one block size below
    // the two-pointer listhead for the Bcbs for that .5MB range...)
    //

    Index = (ULONG)(FileOffset >> Shift);
    return (PLIST_ENTRY)((PCHAR)&VacbArray[Index & ~1] + VACB_LEVEL_BLOCK_SIZE);
}

//
// Defined at line 2534 of vacbsup.c.
// Referenced by: SetVacb().
//

PVACB
CcGetVacbLargeOffset (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LONGLONG FileOffset
    )

/*++

Routine Description:

    This routine may be called to return the Vacb for the specified FileOffset.
    It should only be called if the SectionSize is greater than VACB_SIZE_OF_FIRST_LEVEL.

Arguments:

    SharedCacheMap - Supplies the pointer to the SharedCacheMap for which the Vacb
        is desired.

    FileOffset - Supplies the fileOffset corresponding to the desired Vacb.

Return Value:

    Returns the desired Vacb pointer or NULL if there is none.

Environment:

    CcVacbSpinLock should be held on entry.

--*/

{
    ULONG Level, Shift;
    PVACB *VacbArray;
    PVACB Vacb;

    //
    // Initialize variables controlling our descent into the hierarchy.
    //

    Level = 0;
    Shift = VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT;
    VacbArray = SharedCacheMap->Vacbs;

    //
    // Caller must have verified that we have a hierarchy, otherwise this routine
    // would fail.
    //

    ASSERT(SharedCacheMap->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL);

    //
    // Loop to calculate how many levels we have and how much we have to
    // shift to index into the first level.
    //

    do {

        Level += 1;
        Shift += VACB_LEVEL_SHIFT;

    } while (SharedCacheMap->SectionSize.QuadPart > ((LONGLONG)1 << Shift));

    //
    // Now descend the tree to the bottom level to get the caller's Vacb.
    // Intermediate-level entries are pointers to the next level's array;
    // only the bottom-level entry is an actual Vacb.
    //

    Shift -= VACB_LEVEL_SHIFT;
    while (((Vacb = (PVACB)VacbArray[FileOffset >> Shift]) != NULL) && (Level != 0)) {

        Level -= 1;

        VacbArray = (PVACB *)Vacb;
        FileOffset &= ((LONGLONG)1 << Shift) - 1;

        Shift -= VACB_LEVEL_SHIFT;
    }

    //
    // If the Vacb we exited with is not NULL, we want to make sure it looks OK.
    //

    ASSERT(Vacb == NULL || ((Vacb >= CcVacbs) && (Vacb < CcBeyondVacbs)));

    return Vacb;
}

//
// Defined at line 401 of vacbsup.c.
// Referenced by: CcGetVirtualAddress().
//

PVACB
CcGetVacbMiss (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LARGE_INTEGER FileOffset,
    IN OUT PKIRQL OldIrql
    )

/*++

Routine Description:

    This is the main routine for Vacb management.  It may be called to acquire
    a virtual address for a given file offset.  If the desired file offset is
    already mapped, this routine does very little work before returning with
    the desired virtual address and Vacb pointer (which must be supplied to
    free the mapping).

    If the desired virtual address is not currently mapped, then this routine
    claims a Vacb from the tail of the Vacb LRU to reuse its mapping.  This Vacb
    is then unmapped if necessary (normally not required), and mapped to the
    desired address.

    NOTE(review): the victim scan below actually starts at the head (front) of
    CcVacbLru; "tail" above refers to the least-recently-used end - confirm
    terminology against the CcMoveVacbToReuseHead/Tail macros.

Arguments:

    SharedCacheMap - Supplies a pointer to the Shared Cache Map for the file.

    FileOffset - Supplies the desired FileOffset within the file.

    OldIrql - Pointer to the OldIrql variable in the caller

Return Value:

    The Vacb.

--*/

{
    PSHARED_CACHE_MAP OldSharedCacheMap;
    PVACB Vacb, TempVacb;
    LARGE_INTEGER MappedLength;
    LARGE_INTEGER NormalOffset;
    NTSTATUS Status;
    ULONG ActivePage;
    ULONG PageIsDirty;
    PVACB ActiveVacb = NULL;
    ULONG VacbOffset = FileOffset.LowPart & (VACB_MAPPING_GRANULARITY - 1);

    // Round the requested offset down to a view boundary.

    NormalOffset = FileOffset;
    NormalOffset.LowPart -= VacbOffset;

    //
    // For files that are not open for random access, we assume sequential
    // access and periodically unmap unused views behind us as we go, to
    // keep from hogging memory.
    //
    // We used to only do this for pure FO_SEQUENTIAL_ONLY access.  The
    // sequential flag still has an effect (to put the pages at the front
    // of the standby lists) but we intend for the majority of the file
    // cache to live on the standby and are willing to take transition
    // faults to bring it back.  Granted, this exacerbates the problem that
    // it is hard to figure out how big the filecache really is since even
    // less of it is going to be mapped at any given time.  It may also
    // promote the synchronization bottlenecks in view mapping (MmPfnLock)
    // to the forefront when significant view thrashing occurs.
    //
    // This isn't as bad as it seems.  When we see access take a view miss,
    // it is really likely that it is a result of sequential access.  As long
    // as the pages go onto the back of the standby, they'll live for a while.
    // The problem we're dealing with here is that the cache can be filled at
    // high speed, but the working set manager can't possibly trim it as fast,
    // intelligently, while we have a pretty good guess where the candidate
    // pages should come from.  We can't let the filecache size make large
    // excursions, or we'll kick out a lot of valuable pages in the process.
    //

    if (!FlagOn(SharedCacheMap->Flags, RANDOM_ACCESS_SEEN) &&
        ((NormalOffset.LowPart & (SEQUENTIAL_MAP_LIMIT - 1)) == 0) &&
        (NormalOffset.QuadPart >= (SEQUENTIAL_MAP_LIMIT * 2))) {

        //
        // Use MappedLength as a scratch variable to form the offset
        // to start unmapping.  We are not synchronized with these past
        // views, so it is possible that CcUnmapVacbArray will kick out
        // early when it sees an active view.  That is why we go back
        // twice the distance, and effectively try to unmap everything
        // twice.  The second time should normally do it.  If the file
        // is truly sequential only, then the only collision expected
        // might be the previous view if we are being called from readahead,
        // or there is a small chance that we can collide with the
        // Lazy Writer during the small window where he briefly maps
        // the file to push out the dirty bits.
        //

        CcReleaseVacbLock( *OldIrql );
        MappedLength.QuadPart = NormalOffset.QuadPart - (SEQUENTIAL_MAP_LIMIT * 2);
        CcUnmapVacbArray( SharedCacheMap, &MappedLength, (SEQUENTIAL_MAP_LIMIT * 2), TRUE );
        CcAcquireVacbLock( OldIrql );
    }

    //
    // Scan from the front of the lru for the next victim Vacb
    //

    Vacb = CONTAINING_RECORD( CcVacbLru.Flink, VACB, LruList );

    while (TRUE) {

        //
        // If this guy is not active, break out and use him.  Also, if
        // it is an Active Vacb, nuke it now, because the reader may be idle and we
        // want to clean up.
        //

        OldSharedCacheMap = Vacb->SharedCacheMap;
        if ((Vacb->Overlay.ActiveCount == 0) ||
            ((ActiveVacb == NULL) &&
             (OldSharedCacheMap != NULL) &&
             (OldSharedCacheMap->ActiveVacb == Vacb))) {

            //
            // The normal case is that the Vacb is no longer mapped
            // and we can just get out and use it, however, here we
            // handle the case where it is mapped.
            //

            if (Vacb->BaseAddress != NULL) {

                //
                // If this Vacb is active, it must be the ActiveVacb.
                //

                if (Vacb->Overlay.ActiveCount != 0) {

                    //
                    // Get the active Vacb.
                    //

                    GetActiveVacbAtDpcLevel( Vacb->SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );

                //
                // Otherwise we will break out and use this Vacb.  If it
                // is still mapped we can now safely increment the open
                // count.
                //

                } else {

                    //
                    // Note that if the SharedCacheMap is currently
                    // being deleted, we need to skip over
                    // it, otherwise we will become the second
                    // deleter.  CcDeleteSharedCacheMap clears the
                    // pointer in the SectionObjectPointer.
                    //

                    CcAcquireMasterLockAtDpcLevel();
                    if (Vacb->SharedCacheMap->FileObject->SectionObjectPointer->SharedCacheMap ==
                        Vacb->SharedCacheMap) {

                        CcIncrementOpenCount( Vacb->SharedCacheMap, 'mvGS' );
                        CcReleaseMasterLockFromDpcLevel();
                        break;
                    }
                    CcReleaseMasterLockFromDpcLevel();
                }
            } else {
                break;
            }
        }

        //
        // Advance to the next guy if we haven't scanned
        // the entire list.
        //

        if (Vacb->LruList.Flink != &CcVacbLru) {

            Vacb = CONTAINING_RECORD( Vacb->LruList.Flink, VACB, LruList );

        } else {

            CcReleaseVacbLock( *OldIrql );

            //
            // If we found an active vacb, then free it and go back and
            // try again.  Else it's time to bail.
            //

            if (ActiveVacb != NULL) {
                CcFreeActiveVacb( ActiveVacb->SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );
                ActiveVacb = NULL;

                //
                // Reacquire spinlocks to loop back and position ourselves at the head
                // of the LRU for the next pass.
                //

                CcAcquireVacbLock( OldIrql );

                Vacb = CONTAINING_RECORD( CcVacbLru.Flink, VACB, LruList );

            } else {
                ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
            }
        }
    }

    //
    // Unlink it from the other SharedCacheMap, so the other
    // guy will not try to use it when we free the spin lock.
    //

    if (Vacb->SharedCacheMap != NULL) {

        OldSharedCacheMap = Vacb->SharedCacheMap;
        SetVacb( OldSharedCacheMap, Vacb->Overlay.FileOffset, NULL );
        Vacb->SharedCacheMap = NULL;
    }

    //
    // Mark it in use so no one else will muck with it after
    // we release the spin lock.
    //

    Vacb->Overlay.ActiveCount = 1;
    SharedCacheMap->VacbActiveCount += 1;

    CcReleaseVacbLock( *OldIrql );

    //
    // If the Vacb is already mapped, then unmap it.
    //

    if (Vacb->BaseAddress != NULL) {

        //
        // Check to see if we need to drain the zone.
        //

        CcDrainVacbLevelZone();

        CcUnmapVacb( Vacb, OldSharedCacheMap, FALSE );

        //
        // Now we can decrement the open count as we normally
        // do, possibly deleting the guy.
        //

        CcAcquireMasterLock( OldIrql );

        //
        // Now release our open count.
        //

        CcDecrementOpenCount( OldSharedCacheMap, 'mvGF' );

        if ((OldSharedCacheMap->OpenCount == 0) &&
            !FlagOn(OldSharedCacheMap->Flags, WRITE_QUEUED) &&
            (OldSharedCacheMap->DirtyPages == 0)) {

            //
            // Move to the dirty list.
            //

            RemoveEntryList( &OldSharedCacheMap->SharedCacheMapLinks );
            InsertTailList( &CcDirtySharedCacheMapList.SharedCacheMapLinks,
                            &OldSharedCacheMap->SharedCacheMapLinks );

            //
            // Make sure the Lazy Writer will wake up, because we
            // want him to delete this SharedCacheMap.
            //

            LazyWriter.OtherWork = TRUE;
            if (!LazyWriter.ScanActive) {
                CcScheduleLazyWriteScan();
            }
        }

        CcReleaseMasterLock( *OldIrql );
    }

    //
    // Use try-finally to return this guy to the list if we get an
    // exception.
    //

    try {

        //
        // Assume we are mapping to the end of the section, but
        // reduce to our normal mapping granularity if the section
        // is too large.
        //

        MappedLength.QuadPart = SharedCacheMap->SectionSize.QuadPart - NormalOffset.QuadPart;

        if ((MappedLength.HighPart != 0) ||
            (MappedLength.LowPart > VACB_MAPPING_GRANULARITY)) {

            MappedLength.LowPart = VACB_MAPPING_GRANULARITY;
        }

        //
        // Now map this one in the system cache.
        //

        DebugTrace( 0, mm, "MmMapViewInSystemCache:\n", 0 );
        DebugTrace( 0, mm, "    Section = %08lx\n", SharedCacheMap->Section );
        DebugTrace2(0, mm, "    Offset = %08lx, %08lx\n",
                    NormalOffset.LowPart,
                    NormalOffset.HighPart );
        DebugTrace( 0, mm, "    ViewSize = %08lx\n", MappedLength.LowPart );

        Status =
            MmMapViewInSystemCache( SharedCacheMap->Section,
                                    &Vacb->BaseAddress,
                                    &NormalOffset,
                                    &MappedLength.LowPart );

        DebugTrace( 0, mm, "    <BaseAddress = %08lx\n", Vacb->BaseAddress );
        DebugTrace( 0, mm, "    <ViewSize = %08lx\n", MappedLength.LowPart );

        if (!NT_SUCCESS( Status )) {

            DebugTrace( 0, 0, "Error from Map, Status = %08lx\n", Status );

            ExRaiseStatus( FsRtlNormalizeNtstatus( Status,
                                                   STATUS_UNEXPECTED_MM_MAP_ERROR ));
        }

    } finally {

        //
        // Take this opportunity to free the active vacb.
        //

        if (ActiveVacb != NULL) {

            CcFreeActiveVacb( ActiveVacb->SharedCacheMap, ActiveVacb, ActivePage, PageIsDirty );
        }

        //
        // On abnormal termination, get this guy back in the list.
        //

        if (AbnormalTermination()) {

            CcAcquireVacbLock( OldIrql );

            //
            // This is like the unlucky case below.  Just back out the stuff
            // we did and put the guy at the tail of the list.  Basically
            // only the Map should fail, and we clear BaseAddress accordingly.
            //

            Vacb->BaseAddress = NULL;

            CheckedDec(Vacb->Overlay.ActiveCount);
            CheckedDec(SharedCacheMap->VacbActiveCount);

            //
            // If there is someone waiting for this count to go to zero,
            // wake them here.
            //

            if (SharedCacheMap->WaitOnActiveCount != NULL) {
                KeSetEvent( SharedCacheMap->WaitOnActiveCount, 0, FALSE );
            }

            CcReleaseVacbLock( *OldIrql );
        }
    }

    //
    // Make sure the zone contains the worst case number of entries.
    //

    if (SharedCacheMap->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL) {

        //
        // Raise if we cannot preallocate enough buffers.
        //

        if (!CcPrefillVacbLevelZone( CcMaxVacbLevelsSeen - 1,
                                     OldIrql,
                                     FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED) )) {

            ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
        }

    } else {

        CcAcquireVacbLock( OldIrql );
    }

    //
    // Finish filling in the Vacb, and store its address in the array in
    // the Shared Cache Map.  (We have to rewrite the ActiveCount
    // since it is overlaid.)  To do this we must reacquire the
    // spin lock one more time.  Note we have to check for the unusual
    // case that someone beat us to mapping this view, since we had to
    // drop the spin lock.
    //

    if ((TempVacb = GetVacb( SharedCacheMap, NormalOffset )) == NULL) {

        Vacb->SharedCacheMap = SharedCacheMap;
        Vacb->Overlay.FileOffset = NormalOffset;
        Vacb->Overlay.ActiveCount = 1;

        SetVacb( SharedCacheMap, NormalOffset, Vacb );

    //
    // This is the unlucky case where we collided with someone else
    // trying to map the same view.  He can get in because we dropped
    // the spin lock above.  Rather than allocating events and making
    // someone wait, considering this case is fairly unlikely, we just
    // dump this one at the head of the LRU and use the one from the
    // guy who beat us.
    //

    } else {

        //
        // Now we have to increment all of the counts for the one that
        // was already there, then ditch the one we had.
        //

        if (TempVacb->Overlay.ActiveCount == 0) {
            SharedCacheMap->VacbActiveCount += 1;
        }

        TempVacb->Overlay.ActiveCount += 1;

        //
        // Now unmap the one we mapped and proceed with the other Vacb.
        // On this path we have to release the spinlock to do the unmap,
        // and then reacquire the spinlock before cleaning up.
        //

        CcReleaseVacbLock( *OldIrql );

        CcUnmapVacb( Vacb, SharedCacheMap, FALSE );

        CcAcquireVacbLock( OldIrql );
        CheckedDec(Vacb->Overlay.ActiveCount);
        CheckedDec(SharedCacheMap->VacbActiveCount);
        Vacb->SharedCacheMap = NULL;

        CcMoveVacbToReuseHead( Vacb );

        Vacb = TempVacb;
    }

    return Vacb;
}

//
// Defined at line 310 of vacbsup.c.
// Referenced by: CcCopyRead(), CcFastCopyRead(), CcMapAndCopy(), CcMapData(),
// CcMdlRead(), CcPerformReadAhead(), CcPinFileData(), CcPrepareMdlWrite(),
// and CcPurgeAndClearCacheSection().
//

PVOID
CcGetVirtualAddress (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LARGE_INTEGER FileOffset,
    OUT PVACB *Vacb,
    IN OUT PULONG ReceivedLength
    )

/*++

Routine Description:

    This is the main routine for Vacb management.  It may be called to acquire
    a virtual address for a given file offset.  If the desired file offset is
    already mapped, this routine does very little work before returning with
    the desired virtual address and Vacb pointer (which must be supplied to
    free the mapping).

    If the desired virtual address is not currently mapped, then this routine
    claims a Vacb from the tail of the Vacb LRU to reuse its mapping.  This Vacb
    is then unmapped if necessary (normally not required), and mapped to the
    desired address.

Arguments:

    SharedCacheMap - Supplies a pointer to the Shared Cache Map for the file.

    FileOffset - Supplies the desired FileOffset within the file.

    Vacb - Returns a Vacb pointer which must be supplied later to free
        this virtual address.

    ReceivedLength - Returns the number of bytes which are contiguously
        mapped starting at the virtual address returned.

Return Value:

    The virtual address at which the desired data is mapped.

--*/

{
    KIRQL OldIrql;
    PVACB TempVacb;
    ULONG VacbOffset = FileOffset.LowPart & (VACB_MAPPING_GRANULARITY - 1);

    ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);

    //
    // Acquire the Vacb lock to see if the desired offset is already mapped.
    //

    CcAcquireVacbLock( &OldIrql );

    ASSERT( FileOffset.QuadPart <= SharedCacheMap->SectionSize.QuadPart );

    if ((TempVacb = GetVacb( SharedCacheMap, FileOffset )) == NULL) {

        // Miss: claim and map a Vacb (may drop and reacquire the lock).

        TempVacb = CcGetVacbMiss( SharedCacheMap, FileOffset, &OldIrql );

    } else {

        // Hit: just take an active reference on the existing mapping.

        if (TempVacb->Overlay.ActiveCount == 0) {
            SharedCacheMap->VacbActiveCount += 1;
        }

        TempVacb->Overlay.ActiveCount += 1;
    }

    //
    // Move this range away from the front to avoid wasting cycles
    // looking at it for reuse.
    //

    CcMoveVacbToReuseTail( TempVacb );

    CcReleaseVacbLock( OldIrql );

    //
    // Now form all outputs.
    //

    *Vacb = TempVacb;
    *ReceivedLength = VACB_MAPPING_GRANULARITY - VacbOffset;

    ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);

    return (PVOID)((PCHAR)TempVacb->BaseAddress + VacbOffset);
}

//
// Defined at line 230 of vacbsup.c.
// Referenced by: CcFlushCache().
//

PVOID
CcGetVirtualAddressIfMapped (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LONGLONG FileOffset,
    OUT PVACB *Vacb,
    OUT PULONG ReceivedLength
    )

/*++

Routine Description:

    This routine returns a virtual address for the specified FileOffset,
    iff it is mapped.  Otherwise, it informs the caller that the specified
    virtual address was not mapped.  In the latter case, it still returns
    a ReceivedLength, which may be used to advance to the next view boundary.

Arguments:

    SharedCacheMap - Supplies a pointer to the Shared Cache Map for the file.

    FileOffset - Supplies the desired FileOffset within the file.

    Vacb - Returns a Vacb pointer which must be supplied later to free
        this virtual address, or NULL if not mapped.

    ReceivedLength - Returns the number of bytes to the next view boundary,
        whether the desired file offset is mapped or not.

Return Value:

    The virtual address at which the desired data is mapped, or NULL if it
    is not mapped.

--*/

{
    KIRQL OldIrql;
    ULONG VacbOffset = (ULONG)FileOffset & (VACB_MAPPING_GRANULARITY - 1);
    PVOID Value = NULL;

    ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);

    //
    // Generate ReceivedLength return right away.
    //

    *ReceivedLength = VACB_MAPPING_GRANULARITY - VacbOffset;

    //
    // Acquire the Vacb lock to see if the desired offset is already mapped.
    //

    CcAcquireVacbLock( &OldIrql );

    ASSERT( FileOffset <= SharedCacheMap->SectionSize.QuadPart );

    if ((*Vacb = GetVacb( SharedCacheMap, *(PLARGE_INTEGER)&FileOffset )) != NULL) {

        if ((*Vacb)->Overlay.ActiveCount == 0) {
            SharedCacheMap->VacbActiveCount += 1;
        }

        (*Vacb)->Overlay.ActiveCount += 1;

        //
        // Move this range away from the front to avoid wasting cycles
        // looking at it for reuse.
        //

        CcMoveVacbToReuseTail( *Vacb );

        Value = (PVOID)((PCHAR)(*Vacb)->BaseAddress + VacbOffset);
    }

    CcReleaseVacbLock( OldIrql );
    return Value;
}

//
// Defined at line 188 of vacbsup.c.
// Referenced by: CcInitializeCacheManager().
//

VOID
CcInitializeVacbs (
    )

/*++

Routine Description:

    This routine must be called during Cache Manager initialization to
    initialize the Virtual Address Control Block structures.

Arguments:

    None.

Return Value:

    None.

--*/

{
    ULONG VacbBytes;
    PVACB NextVacb;

    // Size the Vacb array from the system cache size; the "- 2" reserves
    // slack at the end of the cache address space.

    CcNumberVacbs = (MmSizeOfSystemCacheInPages >> (VACB_OFFSET_SHIFT - PAGE_SHIFT)) - 2;
    VacbBytes = CcNumberVacbs * sizeof(VACB);

    KeInitializeSpinLock( &CcVacbSpinLock );
    CcVacbs = (PVACB)FsRtlAllocatePoolWithTag( NonPagedPool, VacbBytes, 'aVcC' );
    CcBeyondVacbs = (PVACB)((PCHAR)CcVacbs + VacbBytes);
    RtlZeroMemory( CcVacbs, VacbBytes );

    // All Vacbs start out unmapped and unreferenced, so every one goes on
    // the reuse LRU.

    InitializeListHead( &CcVacbLru );

    for (NextVacb = CcVacbs; NextVacb < CcBeyondVacbs; NextVacb++) {

        InsertTailList( &CcVacbLru, &NextVacb->LruList );
    }
}

//
// Defined at line 1931 of vacbsup.c.
// Referenced by: CcExtendVacbArray(), CcGetVacbMiss(), CcReferenceFileOffset(),
// and CcSetDirtyInMask().
//

ULONG
CcPrefillVacbLevelZone (
    IN ULONG NumberNeeded,
    OUT PKIRQL OldIrql,
    IN ULONG NeedBcbListHeads
    )

/*++

Routine Description:

    This routine may be called to prefill the VacbLevelZone with the number of
    entries required, and return with CcVacbSpinLock acquired.  This approach is
    taken so that the pool allocations and RtlZeroMemory calls can occur without
    holding any spinlock, yet the caller may proceed to perform a single indivisible
    operation without error handling, since there is a guaranteed minimum number of
    entries in the zone.

Arguments:

    NumberNeeded - Number of VacbLevel entries needed, not counting the possible
        one with Bcb listheads.

    OldIrql - supplies a pointer to where OldIrql should be returned upon acquiring
        the spinlock.

    NeedBcbListHeads - Supplies true if a level is also needed which contains listheads.

Return Value:

    FALSE if the buffers could not be preallocated, TRUE otherwise.

Environment:

    No spinlocks should be held upon entry.

--*/

{
    PVACB *NextVacbArray;

    CcAcquireVacbLock( OldIrql );

    //
    // Loop until there are enough entries, else raise...
    //

    while ((NumberNeeded > CcVacbLevelEntries) ||
           (NeedBcbListHeads && (CcVacbLevelWithBcbsFreeList == NULL))) {

        //
        // Else release the spinlock so we can do the allocate/zero.
        //

        CcReleaseVacbLock( *OldIrql );

        //
        // First handle the case where we need a VacbListHead with Bcb Listheads.
        // The pointer test is unsafe but see below.
        //

        if (NeedBcbListHeads && (CcVacbLevelWithBcbsFreeList == NULL)) {

            //
            // Allocate and initialize the Vacb block for this level, and store its pointer
            // back into our parent.  We do not zero the listhead area.
            //

            NextVacbArray =
                (PVACB *)ExAllocatePoolWithTag( NonPagedPool, (VACB_LEVEL_BLOCK_SIZE * 2) + sizeof(VACB_LEVEL_REFERENCE), 'lVcC' );

            if (NextVacbArray == NULL) {
                // NOTE(review): returning here leaves CcVacbSpinLock released,
                // unlike the success path - callers must check the return value.
                return FALSE;
            }

            RtlZeroMemory( (PCHAR)NextVacbArray, VACB_LEVEL_BLOCK_SIZE );
            RtlZeroMemory( (PCHAR)NextVacbArray + (VACB_LEVEL_BLOCK_SIZE * 2), sizeof(VACB_LEVEL_REFERENCE) );

            CcAcquireVacbLock( OldIrql );

            // Push onto the with-Bcbs free list (first slot doubles as the link).

            NextVacbArray[0] = (PVACB)CcVacbLevelWithBcbsFreeList;
            CcVacbLevelWithBcbsFreeList = NextVacbArray;
            CcVacbLevelWithBcbsEntries += 1;

        } else {

            //
            // Allocate and initialize the Vacb block for this level, and store its pointer
            // back into our parent.
            //

            NextVacbArray =
                (PVACB *)ExAllocatePoolWithTag( NonPagedPool, VACB_LEVEL_BLOCK_SIZE + sizeof(VACB_LEVEL_REFERENCE), 'lVcC' );

            if (NextVacbArray == NULL) {
                return FALSE;
            }

            RtlZeroMemory( (PCHAR)NextVacbArray, VACB_LEVEL_BLOCK_SIZE + sizeof(VACB_LEVEL_REFERENCE) );

            CcAcquireVacbLock( OldIrql );

            NextVacbArray[0] = (PVACB)CcVacbLevelFreeList;
            CcVacbLevelFreeList = NextVacbArray;
            CcVacbLevelEntries += 1;
        }
    }

    return TRUE;
}

VOID
CcReferenceFileOffset (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LARGE_INTEGER FileOffset
    )

/*++

Routine Description:

    This is a special form of reference that insures that the multi-level
    Vacb structures are expanded to cover a given file offset.

Arguments:

    SharedCacheMap - Supplies a pointer to the Shared Cache Map for the file.

    FileOffset - Supplies the desired FileOffset within the file.

Return Value:

    None

Environment:

    Must be called below DISPATCH_LEVEL; acquires (via CcPrefillVacbLevelZone)
    and releases CcVacbSpinLock internally.

--*/

{
    KIRQL OldIrql;

    ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);

    //
    //  This operation only has meaning if the Vacbs are in the multilevel form.
    //

    if (SharedCacheMap->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL) {

        //
        //  Prefill the level zone so that we can expand the tree if required.
        //  On success this returns with CcVacbSpinLock held.
        //

        if (!CcPrefillVacbLevelZone( CcMaxVacbLevelsSeen - 1,
                                     &OldIrql,
                                     FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED) )) {

            ExRaiseStatus( STATUS_INSUFFICIENT_RESOURCES );
        }

        ASSERT( FileOffset.QuadPart <= SharedCacheMap->SectionSize.QuadPart );

        //
        //  Store a special reference at this offset so the intermediate tree
        //  levels covering it are built and pinned.
        //

        SetVacb( SharedCacheMap, FileOffset, VACB_SPECIAL_REFERENCE );

        CcReleaseVacbLock( OldIrql );
    }

    ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);

    return;
}

VOID
CcSetVacbLargeOffset (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LONGLONG FileOffset,
    IN PVACB Vacb
    )

/*++

Routine Description:

    This routine may be called to set the specified Vacb pointer for the specified FileOffset.
    It should only be called if the SectionSize is greater than VACB_SIZE_OF_FIRST_LEVEL.

    For non-null Vacb, intermediate Vacb levels will be added as necessary, and if the lowest
    level has Bcb listheads, these will also be added.  For this case the caller must acquire
    the spinlock by calling CcPrefillVacbLevelZone specifying the worst-case number of levels
    required.

    For a null Vacb pointer, the tree is pruned of all Vacb levels that go empty.  If the lowest
    level has Bcb listheads, then they are removed.  The caller should subsequently call
    CcDrainVacbLevelZone once the spinlock is released to actually free some of this zone to the
    pool.

Arguments:

    SharedCacheMap - Supplies the pointer to the SharedCacheMap for which the Vacb
                     is desired.

    FileOffset - Supplies the fileOffset corresponding to the desired Vacb.

    Vacb - Supplies the Vacb pointer to store, NULL to erase (and prune empty
           levels), or one of the VACB_SPECIAL_* codes to adjust the special
           reference count of the level.

Return Value:

    None

Environment:

    CcVacbSpinLock should be held on entry.

--*/

{
    ULONG Level, Shift;
    PVACB *VacbArray, *NextVacbArray;
    ULONG Index;

    //  Per-level path back to the root, used when collapsing empty levels.
    ULONG SavedIndexes[VACB_NUMBER_OF_LEVELS];
    PVACB *SavedVacbArrays[VACB_NUMBER_OF_LEVELS];
    PLIST_ENTRY PredecessorListHead, SuccessorListHead, CurrentListHead;
    BOOLEAN AllocatingBcbListHeads, Special = FALSE;
    LONGLONG OriginalFileOffset = FileOffset;
    ULONG SavedLevels = 0;

    //
    //  Initialize variables controlling our descent into the hierarchy.
    //

    Level = 0;
    Shift = VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT;
    VacbArray = SharedCacheMap->Vacbs;

    //
    //  Caller must have verified that we have a hierarchy, otherwise this routine
    //  would fail.
    //

    ASSERT(SharedCacheMap->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL);

    //
    //  Loop to calculate how many levels we have and how much we have to
    //  shift to index into the first level.
    //

    do {

        Level += 1;
        Shift += VACB_LEVEL_SHIFT;

    } while (SharedCacheMap->SectionSize.QuadPart > ((LONGLONG)1 << Shift));

    //
    //  Now descend the tree to the bottom level to set the caller's Vacb.
    //

    Shift -= VACB_LEVEL_SHIFT;
    do {

        //
        //  Decrement back to the level that describes the size we are within.
        //

        Level -= 1;

        //
        //  Calculate the index into the Vacb block for this level.
        //

        Index = (ULONG)(FileOffset >> Shift);
        ASSERT(Index <= VACB_LAST_INDEX_FOR_LEVEL);

        //
        //  We save Index and VacbArray at each level, for the case that we
        //  are collapsing and deallocating blocks below.
        //

        SavedIndexes[SavedLevels] = Index;
        SavedVacbArrays[SavedLevels] = VacbArray;
        SavedLevels += 1;

        //
        //  Get block address for next level.
        //

        NextVacbArray = (PVACB *)VacbArray[Index];

        //
        //  If it is NULL then we have to allocate the next level to fill it in.
        //

        if (NextVacbArray == NULL) {

            //
            //  We better not be thinking we're dereferencing a level if the level
            //  doesn't currently exist.
            //

            ASSERT( Vacb != VACB_SPECIAL_DEREFERENCE );

            //  Bcb listheads only exist at the bottom (Level 0) for
            //  modified-no-write files.

            AllocatingBcbListHeads = FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED) && (Level == 0);

            //
            //  This is only valid if we are setting a nonzero pointer!
            //

            ASSERT(Vacb != NULL);

            //  The zone was prefilled by the caller, so this cannot fail here.

            NextVacbArray = CcAllocateVacbLevel(AllocatingBcbListHeads);

            //
            //  If we allocated Bcb Listheads, we must link them in.
            //

            if (AllocatingBcbListHeads) {

                ULONG i;

                //
                //  Find our predecessor.
                //

                PredecessorListHead = CcGetBcbListHeadLargeOffset( SharedCacheMap, OriginalFileOffset, FALSE );

                //
                //  If he is followed by any Bcbs, they "belong" to him, and we have to
                //  skip over them.
                //

                while (((PBCB)CONTAINING_RECORD(PredecessorListHead->Blink, BCB, BcbLinks))->NodeTypeCode ==
                       CACHE_NTC_BCB) {
                    PredecessorListHead = (PLIST_ENTRY)PredecessorListHead->Blink;
                }

                //
                //  Point to the first newly allocated listhead.  The listheads
                //  live in the second VACB_LEVEL_BLOCK_SIZE bytes of the block.
                //

                CurrentListHead = (PLIST_ENTRY)((PCHAR)NextVacbArray + VACB_LEVEL_BLOCK_SIZE);

                //
                //  Link first new listhead to predecessor.
                //

                SuccessorListHead = PredecessorListHead->Blink;
                PredecessorListHead->Blink = CurrentListHead;
                CurrentListHead->Flink = PredecessorListHead;

                //
                //  Now loop to link all of the new listheads together.
                //

                for (i = 0; i < ((VACB_LEVEL_BLOCK_SIZE / sizeof(LIST_ENTRY) - 1)); i++) {

                    CurrentListHead->Blink = CurrentListHead + 1;
                    CurrentListHead += 1;
                    CurrentListHead->Flink = CurrentListHead - 1;
                }

                //
                //  Finally link the last new listhead to the successor.
                //

                CurrentListHead->Blink = SuccessorListHead;
                SuccessorListHead->Flink = CurrentListHead;
            }

            VacbArray[Index] = (PVACB)NextVacbArray;

            //
            //  Increment the reference count.  Note that Level right now properly indicates
            //  what level NextVacbArray is at, not VacbArray.
            //

            ReferenceVacbLevel( SharedCacheMap, VacbArray, Level + 1, 1, FALSE );
        }

        //
        //  Now make this one our current pointer, and mask away the extraneous high-order
        //  FileOffset bits for this level and reduce the shift count.
        //

        VacbArray = NextVacbArray;
        FileOffset &= ((LONGLONG)1 << Shift) - 1;
        Shift -= VACB_LEVEL_SHIFT;

        //
        //  Loop until we hit the bottom level.
        //

    } while (Level != 0);

    if (Vacb < VACB_SPECIAL_FIRST_VALID) {

        //
        //  Now calculate the index for the bottom level and store the caller's Vacb pointer.
        //

        Index = (ULONG)(FileOffset >> Shift);
        VacbArray[Index] = Vacb;

    //
    //  Handle the special actions.
    //

    } else {

        Special = TRUE;

        //
        //  Induce the dereference.
        //

        if (Vacb == VACB_SPECIAL_DEREFERENCE) {

            Vacb = NULL;
        }
    }

    //
    //  If he is storing a nonzero pointer, just reference the level.
    //

    if (Vacb != NULL) {

        ASSERT( !(Special && Level != 0) );

        ReferenceVacbLevel( SharedCacheMap, VacbArray, Level, 1, Special );

    //
    //  Otherwise we are storing a NULL pointer, and we have to see if we can collapse
    //  the tree by deallocating empty blocks of pointers.
    //

    } else {

        //
        //  Loop until doing all possible collapse except for the top level.
        //

        while (TRUE) {

            ReferenceVacbLevel( SharedCacheMap, VacbArray, Level, -1, Special );

            //
            //  If this was a special dereference, then recognize that this was
            //  the only one.  The rest, as we tear up the tree, are regular
            //  (calculable) references.
            //

            Special = FALSE;

            //
            //  Now, if we have an empty block (other than the top one), then we should free the
            //  block and keep looping.
            //

            if (!IsVacbLevelReferenced( SharedCacheMap, VacbArray, Level ) && (SavedLevels != 0)) {

                SavedLevels -= 1;

                //
                //  First see if we have Bcb Listheads to delete and if so, we have to unlink
                //  the whole block first.
                //

                AllocatingBcbListHeads = FALSE;
                if ((Level++ == 0) && FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED)) {

                    AllocatingBcbListHeads = TRUE;

                    //  Unlink the entire span of embedded listheads from the
                    //  global Bcb list by joining their outside neighbors.

                    PredecessorListHead = ((PLIST_ENTRY)((PCHAR)VacbArray + VACB_LEVEL_BLOCK_SIZE))->Flink;
                    SuccessorListHead = ((PLIST_ENTRY)((PCHAR)VacbArray + (VACB_LEVEL_BLOCK_SIZE * 2) - sizeof(LIST_ENTRY)))->Blink;
                    PredecessorListHead->Blink = SuccessorListHead;
                    SuccessorListHead->Flink = PredecessorListHead;
                }

                //
                //  Free the unused block and then pick up the saved parent pointer array and
                //  index and erase the pointer to this block.
                //

                CcDeallocateVacbLevel( VacbArray, AllocatingBcbListHeads );
                Index = SavedIndexes[SavedLevels];
                VacbArray = SavedVacbArrays[SavedLevels];
                VacbArray[Index] = NULL;

            //
            //  No more collapsing if we hit a block that still has pointers, or we hit the root.
            //

            } else {
                break;
            }
        }
    }
}

VOID
CcUnmapVacb (
    IN PVACB Vacb,
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN BOOLEAN UnmapBehind
    )

/*++

Routine Description:

    This routine may be called to unmap a previously mapped Vacb, and
    clear its BaseAddress field.

Arguments:

    Vacb - Supplies the Vacb which was returned from CcGetVirtualAddress.

    SharedCacheMap - Supplies the Shared Cache Map whose section the Vacb
                     is mapped into.

    UnmapBehind - If this is a result of our unmap behind logic (the
                  only case in which we pay attention to sequential hints)

Return Value:

    None.

--*/

{
    //
    //  Make sure it is mapped.
    //

    ASSERT(SharedCacheMap != NULL);
    ASSERT(Vacb->BaseAddress != NULL);

    //
    //  Call MM to unmap it.  The unmap-behind hint is only passed through
    //  to MM when the file has shown strictly sequential-only access.
    //

    DebugTrace( 0, mm, "MmUnmapViewInSystemCache:\n", 0 );
    DebugTrace( 0, mm, " BaseAddress = %08lx\n", Vacb->BaseAddress );

    MmUnmapViewInSystemCache( Vacb->BaseAddress,
                              SharedCacheMap->Section,
                              UnmapBehind &&
                              FlagOn(SharedCacheMap->Flags, ONLY_SEQUENTIAL_ONLY_SEEN) );

    Vacb->BaseAddress = NULL;
}

BOOLEAN
FASTCALL
CcUnmapVacbArray (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    IN BOOLEAN UnmapBehind
    )

/*++

Routine Description:

    This routine must be called to do any unmapping and associated
    cleanup for a shared cache map, just before it is deleted.

Arguments:

    SharedCacheMap - Supplies a pointer to the shared cache map
                     which is about to be deleted.

    FileOffset - If supplied, only unmap the specified offset and length

    Length - Completes range to unmap if FileOffset specified.  If FileOffset
             is specified, Length of 0 means unmap to the end of the section.

    UnmapBehind - If this is a result of our unmap behind logic

Return Value:

    FALSE -- if the unmap was not done due to an active vacb
    TRUE -- if the unmap was done

--*/

{
    PVACB Vacb;
    KIRQL OldIrql;
    LARGE_INTEGER StartingFileOffset = {0,0};
    LARGE_INTEGER EndingFileOffset = SharedCacheMap->SectionSize;

    //
    //  We could be just cleaning up for error recovery.
    //

    if (SharedCacheMap->Vacbs == NULL) {
        return TRUE;
    }

    //
    //  See if a range was specified.  Align the start to a VACB boundary so it
    //  works in the loop below.
    //

    if (ARGUMENT_PRESENT(FileOffset)) {
        StartingFileOffset.QuadPart = ((FileOffset->QuadPart) & (~((LONGLONG)VACB_MAPPING_GRANULARITY - 1)));
        if (Length != 0) {

            EndingFileOffset.QuadPart = FileOffset->QuadPart + Length;

        }
    }

    //
    //  Acquire the spin lock to serialize access to the Vacb array.
    //

    CcAcquireVacbLock( &OldIrql );

    while (StartingFileOffset.QuadPart < EndingFileOffset.QuadPart) {

        //
        //  Note that the caller with an explicit range may be off the
        //  end of the section (example CcPurgeCacheSection for cache
        //  coherency).  That is the reason for the first part of the
        //  test below.
        //

        if ((StartingFileOffset.QuadPart < SharedCacheMap->SectionSize.QuadPart) &&
            ((Vacb = GetVacb( SharedCacheMap, StartingFileOffset )) != NULL)) {

            //
            //  Return here if we are unlucky and see an active
            //  Vacb.  It could be Purge calling, and the Lazy Writer
            //  may have done a CcGetVirtualAddressIfMapped!
            //

            if (Vacb->Overlay.ActiveCount != 0) {

                CcReleaseVacbLock( OldIrql );
                return FALSE;
            }

            //
            //  Unlink it from the other SharedCacheMap, so the other
            //  guy will not try to use it when we free the spin lock.
            //

            SetVacb( SharedCacheMap, StartingFileOffset, NULL );
            Vacb->SharedCacheMap = NULL;

            //
            //  Increment the open count so that no one else will
            //  try to unmap or reuse until we are done.
            //

            Vacb->Overlay.ActiveCount += 1;

            //
            //  Release the spin lock.
            //

            CcReleaseVacbLock( OldIrql );

            //
            //  Unmap and free it if we really got it above.
            //

            CcUnmapVacb( Vacb, SharedCacheMap, UnmapBehind );

            //
            //  Reacquire the spin lock so that we can decrement the count.
            //

            CcAcquireVacbLock( &OldIrql );
            Vacb->Overlay.ActiveCount -= 1;

            //
            //  Place this VACB at the head of the LRU
            //

            CcMoveVacbToReuseHead( Vacb );
        }

        StartingFileOffset.QuadPart = StartingFileOffset.QuadPart + VACB_MAPPING_GRANULARITY;
    }

    CcReleaseVacbLock( OldIrql );

    //
    //  Free any Vacb level blocks that went empty while we held the lock.
    //

    CcDrainVacbLevelZone();

    return TRUE;
}

VOID
CcWaitOnActiveCount (
    IN PSHARED_CACHE_MAP SharedCacheMap
    )

/*++

Routine Description:

    This routine may be called to wait for outstanding mappings for
    a given SharedCacheMap to go inactive.  It is intended to be called
    from CcUninitializeCacheMap, which is called by the file systems
    during cleanup processing.  In that case this routine only has to
    wait if the user closed a handle without waiting for all I/Os on the
    handle to complete.

    This routine returns each time the active count is decremented.  The
    caller must recheck his wait conditions on return, either waiting for
    the ActiveCount to go to 0, or for specific views to go inactive
    (CcPurgeCacheSection case).

Arguments:

    SharedCacheMap - Supplies the Shared Cache Map on whose VacbActiveCount
                     we wish to wait.

Return Value:

    None.

--*/

{
    KIRQL OldIrql;
    PKEVENT Event;

    //
    //  In the unusual case that we get a cleanup while I/O is still going
    //  on, we can wait here.  The caller must test the count for nonzero
    //  before calling this routine.
    //
    //  Since we are being called from cleanup, we cannot afford to
    //  fail here.
    //

    CcAcquireVacbLock( &OldIrql );

    //
    //  It is possible that the count went to zero before we acquired the
    //  spinlock, so we must handle two cases here.
    //

    if (SharedCacheMap->VacbActiveCount != 0) {

        if ((Event = SharedCacheMap->WaitOnActiveCount) == NULL) {

            //
            //  If the local (preallocated) event is not being used then we
            //  take it, swapping NULL into its slot so no one else can.
            //

            Event = InterlockedExchangePointer( &SharedCacheMap->LocalEvent, NULL );

            if (Event == NULL) {

                //
                //  Last resort: allocate from must-succeed pool, since (as
                //  noted above) we cannot afford to fail here.
                //

                Event = (PKEVENT)ExAllocatePoolWithTag( NonPagedPoolMustSucceed,
                                                        sizeof(KEVENT),
                                                        'vEcC' );
            }
        }

        KeInitializeEvent( Event,
                           NotificationEvent,
                           FALSE );

        SharedCacheMap->WaitOnActiveCount = Event;

        CcReleaseVacbLock( OldIrql );

        KeWaitForSingleObject( Event,
                               Executive,
                               KernelMode,
                               FALSE,
                               (PLARGE_INTEGER)NULL);
    } else {

        CcReleaseVacbLock( OldIrql );
    }
}

_inline
VOID
ReferenceVacbLevel (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN PVACB *VacbArray,
    IN ULONG Level,
    IN LONG Amount,
    IN BOOLEAN Special
    )

/*++

Routine Description:

    Adjust the reference count of a Vacb level block by a signed Amount,
    updating either the regular or the special reference counter.

Arguments:

    SharedCacheMap - Supplies the Shared Cache Map owning the Vacb level.

    VacbArray - Supplies the Vacb level block whose reference is adjusted.

    Level - Supplies the level number of VacbArray within the tree.

    Amount - Supplies the signed adjustment; negative amounts must not
             drive the selected counter below zero (asserted).

    Special - Supplies TRUE to adjust the special reference counter,
              FALSE for the regular one.

Return Value:

    None.

--*/

{
    PVACB_LEVEL_REFERENCE VacbReference = VacbLevelReference( SharedCacheMap, VacbArray, Level );

    ASSERT( Amount > 0 ||
            (!Special && VacbReference->Reference >= (0 - Amount)) ||
            ( Special && VacbReference->SpecialReference >= (0 - Amount)));

    if (Special) {
        VacbReference->SpecialReference += Amount;
    } else {
        VacbReference->Reference += Amount;
    }

#ifdef VACB_DBG
    //
    //  For debugging purposes, we can assert that the regular reference count
    //  corresponds to the population of the level.
    //

    {
        LONG Current = VacbReference->Reference;
        CcCalculateVacbLevelLockCount( SharedCacheMap, VacbArray, Level );
        ASSERT( Current == VacbReference->Reference );
    }
#endif // VACB_DBG
}

_inline
VOID
SetVacb (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LARGE_INTEGER Offset,
    IN PVACB Vacb
    )

/*++

Routine Description:

    Store a Vacb pointer (or special reference code) for the given Offset,
    dispatching to CcSetVacbLargeOffset for multi-level (large-section) maps
    or writing the flat first-level array directly otherwise.  Special codes
    (>= VACB_SPECIAL_FIRST_VALID) are meaningless for the flat form and are
    ignored there.

Arguments:

    SharedCacheMap - Supplies the Shared Cache Map to store into.

    Offset - Supplies the file offset whose Vacb slot is being set.

    Vacb - Supplies the Vacb pointer, NULL, or a VACB_SPECIAL_* code.

Return Value:

    None.

--*/

{
    if (SharedCacheMap->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL) {
        CcSetVacbLargeOffset(SharedCacheMap, Offset.QuadPart, Vacb);
#ifdef VACB_DBG
        ASSERT(Vacb >= VACB_SPECIAL_FIRST_VALID || CcGetVacbLargeOffset(SharedCacheMap, Offset.QuadPart) == Vacb);
#endif // VACB_DBG
    } else if (Vacb < VACB_SPECIAL_FIRST_VALID) {
        SharedCacheMap->Vacbs[Offset.LowPart >> VACB_OFFSET_SHIFT] = Vacb;
    }
#ifdef VACB_DBG
    //
    //  Note, we need a new field if we turn this check on again - ReservedForAlignment
    //  has been stolen for other purposes.
    //

    if (Vacb < VACB_SPECIAL_FIRST_VALID) {
        if (Vacb != NULL) {
            SharedCacheMap->ReservedForAlignment++;
        } else {
            SharedCacheMap->ReservedForAlignment--;
        }
    }
    ASSERT((SharedCacheMap->SectionSize.QuadPart <= VACB_SIZE_OF_FIRST_LEVEL) ||
           (SharedCacheMap->ReservedForAlignment == 0) ||
           IsVacbLevelReferenced( SharedCacheMap, SharedCacheMap->Vacbs, 1 ));
#endif // VACB_DBG
}


Generated on Sat May 15 19:46:06 2004 for test by doxygen 1.3.7