
lazyrite.c File Reference

#include "cc.h"


Defines

#define BugCheckFileId   (CACHE_BUG_CHECK_LAZYRITE)
#define me   0x00000020

Functions

PWORK_QUEUE_ENTRY CcReadWorkQueue ()
VOID CcLazyWriteScan ()
VOID CcScheduleLazyWriteScan ()
VOID CcScanDpc (IN PKDPC Dpc, IN PVOID DeferredContext, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
NTSTATUS CcWaitForCurrentLazyWriterActivity ()
LONG CcExceptionFilter (IN NTSTATUS ExceptionCode)
VOID FASTCALL CcPostWorkQueue (IN PWORK_QUEUE_ENTRY WorkQueueEntry, IN PLIST_ENTRY WorkQueue)
VOID CcWorkerThread (PVOID ExWorkQueueItem)


Define Documentation

#define BugCheckFileId   (CACHE_BUG_CHECK_LAZYRITE)
 

Definition at line 27 of file lazyrite.c.

#define me   0x00000020
 

Definition at line 33 of file lazyrite.c.


Function Documentation

LONG CcExceptionFilter (IN NTSTATUS ExceptionCode)
 

Definition at line 619 of file lazyrite.c.

References DebugTrace, EXCEPTION_CONTINUE_SEARCH, EXCEPTION_EXECUTE_HANDLER, and FsRtlIsNtstatusExpected().

Referenced by CcAcquireByteRangeForWrite(), CcFlushCache(), CcLazyWriteScan(), and CcWorkerThread().

Routine Description:

    This is the standard exception filter for worker threads which simply
    calls an FsRtl routine to see if an expected status is being raised.
    If so, the exception is handled, else we bug check.

Arguments:

    ExceptionCode - the exception code which was raised.

Return Value:

    EXCEPTION_EXECUTE_HANDLER if expected, else a Bug Check occurs.

--*/

{
    DebugTrace(0, 0, "CcExceptionFilter %08lx\n", ExceptionCode);

    if (FsRtlIsNtstatusExpected( ExceptionCode )) {

        return EXCEPTION_EXECUTE_HANDLER;

    } else {

        return EXCEPTION_CONTINUE_SEARCH;
    }
}
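For orientation, here is a minimal sketch of how this filter is meant to be used, mirroring the try/except pattern that CcLazyWriteScan and CcWorkerThread use elsewhere on this page. DoSomeCacheWork is a hypothetical placeholder, not a routine from this file.

VOID
SketchWorkerBody (
    VOID
    )
{
    try {

        DoSomeCacheWork();      // hypothetical worker body

    } except( CcExceptionFilter( GetExceptionCode() )) {

        //
        //  Only expected NTSTATUS values get here; anything unexpected
        //  returns EXCEPTION_CONTINUE_SEARCH from the filter and is not
        //  handled at this level.
        //

        NOTHING;
    }
}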

VOID CcLazyWriteScan ()
 

Definition at line 221 of file lazyrite.c.

References CcAcquireMasterLock, CcAllocateWorkQueueEntry, CcBugCheck, CcCanIWrite(), CcCapturedSystemSize, CcDeferredWrites, CcDirtyPagesLastScan, CcDirtyPageTarget, CcExceptionFilter(), CcLazyWriterCursor, CcPagesWrittenLastTime, CcPagesYetToWrite, CcPostDeferredWrites(), CcPostTickWorkQueue, CcPostWorkQueue(), CcRegularWorkQueue, CcReleaseMasterLock, CcScheduleLazyWriteScan(), CcTotalDirtyPages, ClearFlag, DebugTrace, _SHARED_CACHE_MAP::DirtyPages, FALSE, _SHARED_CACHE_MAP::FileObject, _SHARED_CACHE_MAP::FileSize, FlagOn, _FILE_OBJECT::Flags, _SHARED_CACHE_MAP::Flags, FO_TEMPORARY_FILE, _WORK_QUEUE_ENTRY::Function, IS_CURSOR, LAZY_WRITER_MAX_AGE_TARGET, _SHARED_CACHE_MAP::LazyWritePassCount, LazyWriter, MAX_WRITE_BEHIND, me, MmSmallSystem, MODIFIED_WRITE_DISABLED, NULL, _SHARED_CACHE_MAP::OpenCount, _LAZY_WRITER::OtherWork, PAGE_SIZE, _SHARED_CACHE_MAP::PagesToWrite, _WORK_QUEUE_ENTRY::Parameters, _LAZY_WRITER::ScanActive, SetFlag, _SHARED_CACHE_MAP::SharedCacheMapLinks, _SHARED_CACHE_MAP_LIST_CURSOR::SharedCacheMapLinks, TRUE, WRITE_CHARGE_THRESHOLD, WRITE_QUEUED, and WriteBehind.

Referenced by CcWorkerThread().

Routine Description:

    This routine implements the Lazy Writer scan for dirty data to flush
    or any other work to do (lazy close). This routine is scheduled by
    calling CcScheduleLazyWriteScan.

Arguments:

    None.

Return Value:

    None.

--*/

{
    ULONG PagesToWrite, ForegroundRate, EstimatedDirtyNextInterval;
    PSHARED_CACHE_MAP SharedCacheMap, FirstVisited;
    KIRQL OldIrql;
    ULONG LoopsWithLockHeld = 0;
    BOOLEAN AlreadyMoved = FALSE;

    LIST_ENTRY PostTickWorkQueue;

    //
    //  Top of Lazy Writer scan.
    //

    try {

        //
        //  If there is no work to do, then we will go inactive, and return.
        //

        CcAcquireMasterLock( &OldIrql );

        if ((CcTotalDirtyPages == 0) && !LazyWriter.OtherWork) {

            //
            //  Sleep if there are no deferred writes. It is important to check
            //  proactively because writes may be blocked for reasons external
            //  to the cache manager. The lazy writer must keep poking since it
            //  may have no bytes to write itself.
            //

            if (IsListEmpty(&CcDeferredWrites)) {

                LazyWriter.ScanActive = FALSE;
                CcReleaseMasterLock( OldIrql );

            } else {

                CcReleaseMasterLock( OldIrql );

                //
                //  Check for writes and schedule the next scan.
                //

                CcPostDeferredWrites();
                CcScheduleLazyWriteScan();
            }

            return;
        }

        //
        //  Pull out the post tick workitems for this pass. It is important that
        //  we are doing this at the top since more could be queued as we rummage
        //  for work to do. Post tick workitems are guaranteed to occur after all
        //  work generated in a complete scan.
        //

        InitializeListHead( &PostTickWorkQueue );
        while (!IsListEmpty( &CcPostTickWorkQueue )) {

            PLIST_ENTRY Entry = RemoveHeadList( &CcPostTickWorkQueue );
            InsertTailList( &PostTickWorkQueue, Entry );
        }

        //
        //  Calculate the next sweep time stamp, then update all relevant fields for
        //  the next time around. Also we can clear the OtherWork flag.
        //

        LazyWriter.OtherWork = FALSE;

        //
        //  Assume we will write our usual fraction of dirty pages. Do not do the
        //  divide if there are not enough dirty pages, or else we will never write
        //  the last few pages.
        //

        PagesToWrite = CcTotalDirtyPages;
        if (PagesToWrite > LAZY_WRITER_MAX_AGE_TARGET) {
            PagesToWrite /= LAZY_WRITER_MAX_AGE_TARGET;
        }

        //
        //  Estimate the rate of dirty pages being produced in the foreground.
        //  This is the total number of dirty pages now plus the number of dirty
        //  pages we scheduled to write last time, minus the number of dirty
        //  pages we have now. Throw out any cases which would not produce a
        //  positive rate.
        //

        ForegroundRate = 0;

        if ((CcTotalDirtyPages + CcPagesWrittenLastTime) > CcDirtyPagesLastScan) {
            ForegroundRate = (CcTotalDirtyPages + CcPagesWrittenLastTime) -
                             CcDirtyPagesLastScan;
        }

        //
        //  If we estimate that we will exceed our dirty page target by the end
        //  of this interval, then we must write more. Try to arrive on target.
        //

        EstimatedDirtyNextInterval = CcTotalDirtyPages - PagesToWrite + ForegroundRate;

        if (EstimatedDirtyNextInterval > CcDirtyPageTarget) {
            PagesToWrite += EstimatedDirtyNextInterval - CcDirtyPageTarget;
        }

        //
        //  Now save away the number of dirty pages and the number of pages we
        //  just calculated to write.
        //

        CcDirtyPagesLastScan = CcTotalDirtyPages;
        CcPagesYetToWrite = CcPagesWrittenLastTime = PagesToWrite;

        //
        //  Loop to flush enough Shared Cache Maps to write the number of pages
        //  we just calculated.
        //

        SharedCacheMap = CONTAINING_RECORD( CcLazyWriterCursor.SharedCacheMapLinks.Flink,
                                            SHARED_CACHE_MAP,
                                            SharedCacheMapLinks );

        DebugTrace( 0, me, "Start of Lazy Writer Scan\n", 0 );

        //
        //  Normally we would just like to visit every Cache Map once on each scan,
        //  so the scan will terminate normally when we return to FirstVisited. But
        //  on the off chance that FirstVisited gets deleted, we are guaranteed to stop
        //  when we get back to our own listhead.
        //

        FirstVisited = NULL;
        while ((SharedCacheMap != FirstVisited) &&
               (&SharedCacheMap->SharedCacheMapLinks != &CcLazyWriterCursor.SharedCacheMapLinks)) {

            if (FirstVisited == NULL) {
                FirstVisited = SharedCacheMap;
            }

            //
            //  Skip the SharedCacheMap if a write behind request is
            //  already queued, write behind has been disabled, or
            //  if there is no work to do (either dirty data to be written
            //  or a delete is required).
            //
            //  Note that for streams where modified writing is disabled, we
            //  need to take out Bcbs exclusive, which serializes with foreground
            //  activity. Therefore we use a special counter in the SharedCacheMap
            //  to only service these once every n intervals.
            //
            //  Skip temporary files unless we currently could not write as many
            //  bytes as we might charge some hapless thread for throttling.
            //
            //  When considering lazy closes, decline to work on SharedCacheMaps
            //  that would require additional IO already prohibited by our normal
            //  tests.
            //

            if (!FlagOn(SharedCacheMap->Flags, WRITE_QUEUED | IS_CURSOR)

                    &&

                (((PagesToWrite != 0) && (SharedCacheMap->DirtyPages != 0) &&
                  (((++SharedCacheMap->LazyWritePassCount & 0xF) == 0) ||
                   !FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED) ||
                   (CcCapturedSystemSize == MmSmallSystem) ||
                   (SharedCacheMap->DirtyPages >= (4 * (MAX_WRITE_BEHIND / PAGE_SIZE)))) &&
                  (!FlagOn(SharedCacheMap->FileObject->Flags, FO_TEMPORARY_FILE) ||
                   !CcCanIWrite(SharedCacheMap->FileObject, WRITE_CHARGE_THRESHOLD, FALSE, MAXUCHAR)))

                    ||

                 ((SharedCacheMap->OpenCount == 0) &&
                  ((SharedCacheMap->DirtyPages == 0) ||
                   (SharedCacheMap->FileSize.QuadPart == 0))))) {

                PWORK_QUEUE_ENTRY WorkQueueEntry;

                //
                //  If this is a metadata stream with at least 4 times
                //  the maximum write behind I/O size, then let's tell
                //  this guy to write 1/8 of his dirty data on this pass
                //  so it doesn't build up.
                //
                //  Else assume we can write everything (PagesToWrite only affects
                //  metadata streams - otherwise writing is controlled by the Mbcb).
                //

                SharedCacheMap->PagesToWrite = SharedCacheMap->DirtyPages;

                if (FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED) &&
                    (SharedCacheMap->PagesToWrite >= (4 * (MAX_WRITE_BEHIND / PAGE_SIZE))) &&
                    (CcCapturedSystemSize != MmSmallSystem)) {

                    SharedCacheMap->PagesToWrite /= 8;
                }

                //
                //  If still searching for pages to write, adjust our targets.
                //

                if (!AlreadyMoved) {

                    //
                    //  See if he exhausts the number of pages to write. (We
                    //  keep going in case there are any closes to do.)
                    //

                    if (SharedCacheMap->PagesToWrite >= PagesToWrite) {

                        //
                        //  If we met our write quota on a given SharedCacheMap, then make sure
                        //  we start at him on the next scan, unless it is a metadata stream.
                        //

                        RemoveEntryList( &CcLazyWriterCursor.SharedCacheMapLinks );

                        //
                        //  For metadata streams, set up to resume on the next stream on the
                        //  next scan.
                        //

                        if (FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED)) {
                            InsertHeadList( &SharedCacheMap->SharedCacheMapLinks, &CcLazyWriterCursor.SharedCacheMapLinks );

                        //
                        //  For other streams, set up to resume on the same stream on the
                        //  next scan.
                        //

                        } else {
                            InsertTailList( &SharedCacheMap->SharedCacheMapLinks, &CcLazyWriterCursor.SharedCacheMapLinks );
                        }

                        PagesToWrite = 0;
                        AlreadyMoved = TRUE;

                    } else {

                        PagesToWrite -= SharedCacheMap->PagesToWrite;
                    }
                }

                //
                //  Otherwise show we are actively writing, and keep it in the dirty
                //  list.
                //

                SetFlag(SharedCacheMap->Flags, WRITE_QUEUED);
                SharedCacheMap->DirtyPages += 1;

                CcReleaseMasterLock( OldIrql );

                //
                //  Queue the request to do the work to a worker thread.
                //

                WorkQueueEntry = CcAllocateWorkQueueEntry();

                //
                //  If we failed to allocate a WorkQueueEntry, things must
                //  be in pretty bad shape. However, all we have to do is
                //  break out of our current loop, and try to go back and
                //  delay a while. Even if the current guy should have gone
                //  away when we clear WRITE_QUEUED, we will find him again
                //  in the LW scan.
                //

                if (WorkQueueEntry == NULL) {

                    CcAcquireMasterLock( &OldIrql );
                    ClearFlag(SharedCacheMap->Flags, WRITE_QUEUED);
                    SharedCacheMap->DirtyPages -= 1;
                    break;
                }

                WorkQueueEntry->Function = (UCHAR)WriteBehind;
                WorkQueueEntry->Parameters.Write.SharedCacheMap = SharedCacheMap;

                //
                //  Post it to the regular work queue.
                //

                CcAcquireMasterLock( &OldIrql );
                SharedCacheMap->DirtyPages -= 1;
                CcPostWorkQueue( WorkQueueEntry, &CcRegularWorkQueue );

                LoopsWithLockHeld = 0;

            //
            //  Make sure we occasionally drop the lock. Set WRITE_QUEUED
            //  to keep the guy from going away.
            //

            } else if ((++LoopsWithLockHeld >= 20) &&
                       !FlagOn(SharedCacheMap->Flags, WRITE_QUEUED | IS_CURSOR)) {

                SetFlag(SharedCacheMap->Flags, WRITE_QUEUED);
                SharedCacheMap->DirtyPages += 1;
                CcReleaseMasterLock( OldIrql );
                LoopsWithLockHeld = 0;
                CcAcquireMasterLock( &OldIrql );
                ClearFlag(SharedCacheMap->Flags, WRITE_QUEUED);
                SharedCacheMap->DirtyPages -= 1;
            }

            //
            //  Now loop back.
            //

            SharedCacheMap =
                CONTAINING_RECORD( SharedCacheMap->SharedCacheMapLinks.Flink,
                                   SHARED_CACHE_MAP,
                                   SharedCacheMapLinks );
        }

        DebugTrace( 0, me, "End of Lazy Writer Scan\n", 0 );

        //
        //  Queue up our post tick workitems for this pass.
        //

        while (!IsListEmpty( &PostTickWorkQueue )) {

            PLIST_ENTRY Entry = RemoveHeadList( &PostTickWorkQueue );
            CcPostWorkQueue( CONTAINING_RECORD( Entry, WORK_QUEUE_ENTRY, WorkQueueLinks ),
                             &CcRegularWorkQueue );
        }

        //
        //  Now we can release the global list and loop back, perchance to sleep.
        //

        CcReleaseMasterLock( OldIrql );

        //
        //  Once again we need to give the deferred writes a poke. We can have all dirty
        //  pages on disable_write_behind files but also have an external condition that
        //  caused the cached IO to be deferred. If so, this serves as our only chance to
        //  issue it when the condition clears.
        //
        //  Case hit on ForrestF's 5gb Alpha, 1/12/99.
        //

        if (!IsListEmpty(&CcDeferredWrites)) {

            CcPostDeferredWrites();
        }

        //
        //  Now go ahead and schedule the next scan.
        //

        CcScheduleLazyWriteScan();

    //
    //  Basically, the Lazy Writer thread should never get an exception,
    //  so we put a try-except around it that bug checks one way or the other.
    //  Better we bug check here than worry about what happens if we let one
    //  get by.
    //

    } except( CcExceptionFilter( GetExceptionCode() )) {

        CcBugCheck( GetExceptionCode(), 0, 0 );
    }
}
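To make the write-pacing arithmetic above concrete, the following worked example uses assumed values for LAZY_WRITER_MAX_AGE_TARGET and CcDirtyPageTarget; both constants are defined elsewhere and their real values are not shown on this page.

//
//  Illustrative, assumed values only:
//      LAZY_WRITER_MAX_AGE_TARGET = 8       (hypothetical)
//      CcDirtyPageTarget          = 1000    (hypothetical)
//
//  Suppose a scan begins with CcTotalDirtyPages = 1600,
//  CcPagesWrittenLastTime = 150, and CcDirtyPagesLastScan = 1500.
//
//      PagesToWrite               = 1600 / 8            = 200
//      ForegroundRate             = (1600 + 150) - 1500 = 250
//      EstimatedDirtyNextInterval = 1600 - 200 + 250    = 1650
//
//  Since 1650 exceeds the 1000-page target, the scan adds the difference:
//
//      PagesToWrite += 1650 - 1000, giving 850 pages for this pass.
//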

VOID FASTCALL CcPostWorkQueue (IN PWORK_QUEUE_ENTRY WorkQueueEntry, IN PLIST_ENTRY WorkQueue)
 

Definition at line 662 of file lazyrite.c.

References ASSERT, CcIdleWorkerThreadList, CcNumberActiveWorkerThreads, CcQueueThrottle, CcWorkQueueSpinlock, CriticalWorkQueue, DebugTrace, ExQueueWorkItem(), List, me, and NULL.

Referenced by CcLazyWriteScan(), CcScanDpc(), and CcScheduleReadAhead().

Routine Description:

    This routine queues a WorkQueueEntry, which has been allocated and
    initialized by the caller, to the WorkQueue for FIFO processing by
    the work threads.

Arguments:

    WorkQueueEntry - supplies a pointer to the entry to queue

Return Value:

    None

--*/

{
    KIRQL OldIrql;
    PLIST_ENTRY WorkerThreadEntry = NULL;

    ASSERT(FIELD_OFFSET(WORK_QUEUE_ITEM, List) == 0);

    DebugTrace(+1, me, "CcPostWorkQueue:\n", 0 );
    DebugTrace( 0, me, "    WorkQueueEntry = %08lx\n", WorkQueueEntry );

    //
    //  Queue the entry to the respective work queue.
    //

    ExAcquireFastLock( &CcWorkQueueSpinlock, &OldIrql );
    InsertTailList( WorkQueue, &WorkQueueEntry->WorkQueueLinks );

    //
    //  Now, if we aren't throttled and have any more idle threads we can
    //  use, activate one.
    //

    if (!CcQueueThrottle && !IsListEmpty(&CcIdleWorkerThreadList)) {
        WorkerThreadEntry = RemoveHeadList( &CcIdleWorkerThreadList );
        CcNumberActiveWorkerThreads += 1;
    }
    ExReleaseFastLock( &CcWorkQueueSpinlock, OldIrql );

    if (WorkerThreadEntry != NULL) {

        //
        //  I had to peek in the sources to verify that this routine
        //  is a noop if the Flink is not NULL. Sheeeeit!
        //

        ((PWORK_QUEUE_ITEM)WorkerThreadEntry)->List.Flink = NULL;
        ExQueueWorkItem( (PWORK_QUEUE_ITEM)WorkerThreadEntry, CriticalWorkQueue );
    }

    //
    //  And return to our caller
    //

    DebugTrace(-1, me, "CcPostWorkQueue -> VOID\n", 0 );

    return;
}
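As a usage sketch, callers such as CcScanDpc follow an allocate/initialize/post pattern along these lines; the error handling here is simplified for illustration.

VOID
SketchPostScan (
    VOID
    )
{
    PWORK_QUEUE_ENTRY WorkQueueEntry;

    WorkQueueEntry = CcAllocateWorkQueueEntry();

    if (WorkQueueEntry != NULL) {

        //
        //  Fill in the function code, then post the entry for FIFO
        //  processing by the worker threads.
        //

        WorkQueueEntry->Function = (UCHAR)LazyWriteScan;

        CcPostWorkQueue( WorkQueueEntry, &CcRegularWorkQueue );
    }
}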

PWORK_QUEUE_ENTRY CcReadWorkQueue ()
 

VOID CcScanDpc (IN PKDPC Dpc, IN PVOID DeferredContext, IN PVOID SystemArgument1, IN PVOID SystemArgument2)
 

Definition at line 99 of file lazyrite.c.

References CcAllocateWorkQueueEntry, CcPostWorkQueue(), CcRegularWorkQueue, FALSE, _WORK_QUEUE_ENTRY::Function, LazyWriter, LazyWriteScan, NULL, and _LAZY_WRITER::ScanActive.

Referenced by CcInitializeCacheManager().

Routine Description:

    This is the Dpc routine which runs when the scan timer goes off. It
    simply posts an element for an Ex Worker thread to do the scan.

Arguments:

    (All are ignored)

Return Value:

    None.

--*/

{
    PWORK_QUEUE_ENTRY WorkQueueEntry;

    UNREFERENCED_PARAMETER(Dpc);
    UNREFERENCED_PARAMETER(DeferredContext);
    UNREFERENCED_PARAMETER(SystemArgument1);
    UNREFERENCED_PARAMETER(SystemArgument2);

    WorkQueueEntry = CcAllocateWorkQueueEntry();

    //
    //  If we failed to allocate a WorkQueueEntry, things must
    //  be in pretty bad shape. However, all we have to do is
    //  say we are not active, and wait for another event to
    //  wake things up again.
    //

    if (WorkQueueEntry == NULL) {

        LazyWriter.ScanActive = FALSE;

    } else {

        //
        //  Otherwise post a work queue entry to do the scan.
        //

        WorkQueueEntry->Function = (UCHAR)LazyWriteScan;

        CcPostWorkQueue( WorkQueueEntry, &CcRegularWorkQueue );
    }
}

VOID CcScheduleLazyWriteScan ()
 

Definition at line 49 of file lazyrite.c.

References CcFirstDelay, CcIdleDelay, KeSetTimer(), LazyWriter, _LAZY_WRITER::ScanActive, _LAZY_WRITER::ScanDpc, _LAZY_WRITER::ScanTimer, and TRUE.

Referenced by CcDeferWrite(), CcFlushCache(), CcGetFlushedValidData(), CcGetVacbMiss(), CcInitializeCacheMap(), CcLazyWriteScan(), CcMdlWriteComplete2(), CcPerformReadAhead(), CcPurgeCacheSection(), CcSetDirtyInMask(), CcSetDirtyPinnedData(), CcSetFileSizes(), CcUninitializeCacheMap(), CcWaitForCurrentLazyWriterActivity(), and CcZeroEndOfLastPage().

Routine Description:

    This routine may be called to schedule the next lazy writer scan,
    during which lazy write and lazy close activity is posted to other
    worker threads. Callers should acquire the lazy writer spin lock
    to see if the scan is currently active, and then call this routine
    still holding the spin lock if not. One special call is used at
    the end of the lazy write scan to propagate lazy write active once
    we go active. This call is "the" scan thread, and it can therefore
    safely schedule the next scan without taking out the spin lock.

Arguments:

    None

Return Value:

    None.

--*/

{
    //
    //  It is important to set the active flag TRUE first for the propagate
    //  case, because it is conceivable that once the timer is set, another
    //  thread could actually run and make the scan go idle before we then
    //  jam the flag TRUE.
    //
    //  When going from idle to active, we delay a little longer to let the
    //  app finish saving its file.
    //

    if (LazyWriter.ScanActive) {

        KeSetTimer( &LazyWriter.ScanTimer, CcIdleDelay, &LazyWriter.ScanDpc );

    } else {

        LazyWriter.ScanActive = TRUE;
        KeSetTimer( &LazyWriter.ScanTimer, CcFirstDelay, &LazyWriter.ScanDpc );
    }
}
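The calling convention described above (check ScanActive while holding the lock, then schedule while still holding it) looks roughly like the following fragment, which mirrors the code in CcWaitForCurrentLazyWriterActivity below.

    KIRQL OldIrql;

    CcAcquireMasterLock( &OldIrql );

    LazyWriter.OtherWork = TRUE;            //  tell the scan there is work to pick up

    if (!LazyWriter.ScanActive) {

        //
        //  No scan is outstanding, so schedule one while still holding
        //  the lock, as the Routine Description requires.
        //

        CcScheduleLazyWriteScan();
    }

    CcReleaseMasterLock( OldIrql );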

NTSTATUS CcWaitForCurrentLazyWriterActivity ()
 

Definition at line 158 of file lazyrite.c.

References CcAcquireMasterLock, CcAllocateWorkQueueEntry, CcPostTickWorkQueue, CcReleaseMasterLock, CcScheduleLazyWriteScan(), Event(), EventSet, Executive, FALSE, _WORK_QUEUE_ENTRY::Function, KeInitializeEvent, KernelMode, KeWaitForSingleObject(), LazyWriter, NULL, _LAZY_WRITER::OtherWork, _WORK_QUEUE_ENTRY::Parameters, _LAZY_WRITER::ScanActive, TRUE, and _WORK_QUEUE_ENTRY::WorkQueueLinks.

Referenced by UdfLockVolumeInternal().

Routine Description:

    This routine allows a thread to receive notification when the current tick
    of lazy writer work has completed. It must not be called within a lazy
    writer workitem! The caller must not be holding synchronization that could
    block a Cc workitem!

    In particular, this lets a caller ensure that all available lazy closes at
    the time of the call have completed.

Arguments:

    None.

Return Value:

    Final result of the wait.

--*/

{
    KIRQL OldIrql;
    KEVENT Event;
    PWORK_QUEUE_ENTRY WorkQueueEntry;

    WorkQueueEntry = CcAllocateWorkQueueEntry();

    if (WorkQueueEntry == NULL) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    WorkQueueEntry->Function = (UCHAR)EventSet;
    KeInitializeEvent( &Event, NotificationEvent, FALSE );
    WorkQueueEntry->Parameters.Event.Event = &Event;

    //
    //  Add this to the post-tick work queue and wake the lazy writer for it.
    //  The lazy writer will add this to the end of the next batch of work
    //  he issues.
    //

    CcAcquireMasterLock( &OldIrql );

    InsertTailList( &CcPostTickWorkQueue, &WorkQueueEntry->WorkQueueLinks );

    LazyWriter.OtherWork = TRUE;
    if (!LazyWriter.ScanActive) {
        CcScheduleLazyWriteScan();
    }

    CcReleaseMasterLock( OldIrql );

    return KeWaitForSingleObject( &Event, Executive, KernelMode, FALSE, NULL );
}
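A caller that wants to be sure all lazy closes queued so far have completed might use the routine as in this sketch, assuming it runs outside any Cc workitem and without holding locks a workitem might need.

    NTSTATUS Status;

    //
    //  Wait for the current tick of lazy writer work, including any lazy
    //  closes already queued, to finish before proceeding.
    //

    Status = CcWaitForCurrentLazyWriterActivity();

    if (!NT_SUCCESS( Status )) {

        return Status;      //  e.g. STATUS_INSUFFICIENT_RESOURCES
    }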

VOID CcWorkerThread (PVOID ExWorkQueueItem)
 

Definition at line 738 of file lazyrite.c.

References ASSERT, CC_REQUEUE, CcDeferredWrites, CcExceptionFilter(), CcExpressWorkQueue, CcFreeWorkQueueEntry, CcIdleWorkerThreadList, CcLazyWriteScan(), CcNumberActiveWorkerThreads, CcPerformReadAhead(), CcQueueThrottle, CcRegularWorkQueue, CcTotalDirtyPages, CcWorkQueueSpinlock, CcWriteBehind(), DebugTrace, EventSet, FALSE, _WORK_QUEUE_ENTRY::Function, KeSetEvent(), LazyWriteScan, List, me, NT_SUCCESS, _WORK_QUEUE_ENTRY::Parameters, ReadAhead, TRUE, _WORK_QUEUE_ENTRY::WorkQueueLinks, and WriteBehind.

Referenced by CcInitializeCacheManager().

Routine Description:

    This is the worker thread routine for processing cache manager work queue
    entries.

Arguments:

    ExWorkQueueItem - The work item used for this thread

Return Value:

    None

--*/

{
    KIRQL OldIrql;
    PLIST_ENTRY WorkQueue;
    PWORK_QUEUE_ENTRY WorkQueueEntry;
    BOOLEAN RescanOk = FALSE;
    BOOLEAN DropThrottle = FALSE;
    IO_STATUS_BLOCK IoStatus;

    IoStatus.Status = STATUS_SUCCESS;
    IoStatus.Information = 0;

    ASSERT(FIELD_OFFSET(WORK_QUEUE_ENTRY, WorkQueueLinks) == 0);

    while (TRUE) {

        ExAcquireFastLock( &CcWorkQueueSpinlock, &OldIrql );

        //
        //  If we just processed a throttled operation, drop the flag.
        //

        if (DropThrottle) {

            DropThrottle = CcQueueThrottle = FALSE;
        }

        //
        //  On requeue, push at end of the regular queue and clear hint.
        //

        if (IoStatus.Information == CC_REQUEUE) {

            InsertTailList( WorkQueue, &WorkQueueEntry->WorkQueueLinks );
            IoStatus.Information = 0;
        }

        //
        //  First see if there is something in the express queue.
        //

        if (!IsListEmpty(&CcExpressWorkQueue)) {
            WorkQueue = &CcExpressWorkQueue;

        //
        //  If there was nothing there, then try the regular queue.
        //

        } else if (!IsListEmpty(&CcRegularWorkQueue)) {
            WorkQueue = &CcRegularWorkQueue;

        //
        //  Else we can break and go idle.
        //

        } else {

            break;
        }

        WorkQueueEntry = CONTAINING_RECORD( WorkQueue->Flink, WORK_QUEUE_ENTRY, WorkQueueLinks );

        //
        //  If this is an EventSet, throttle down to a single thread to be sure
        //  that this event fires after all preceding workitems have completed.
        //

        if (WorkQueueEntry->Function == EventSet && CcNumberActiveWorkerThreads > 1) {

            CcQueueThrottle = TRUE;
            break;
        }

        //
        //  Pop the workitem off: we will execute it now.
        //

        RemoveHeadList( WorkQueue );

        ExReleaseFastLock( &CcWorkQueueSpinlock, OldIrql );

        //
        //  Process the entry within a try-except clause, so that any errors
        //  will cause us to continue after the called routine has unwound.
        //

        try {

            switch (WorkQueueEntry->Function) {

            //
            //  Perform read ahead
            //

            case ReadAhead:

                DebugTrace( 0, me, "CcWorkerThread Read Ahead FileObject = %08lx\n",
                            WorkQueueEntry->Parameters.Read.FileObject );

                CcPerformReadAhead( WorkQueueEntry->Parameters.Read.FileObject );

                break;

            //
            //  Perform write behind
            //

            case WriteBehind:

                DebugTrace( 0, me, "CcWorkerThread WriteBehind SharedCacheMap = %08lx\n",
                            WorkQueueEntry->Parameters.Write.SharedCacheMap );

                CcWriteBehind( WorkQueueEntry->Parameters.Write.SharedCacheMap, &IoStatus );
                RescanOk = (BOOLEAN)NT_SUCCESS(IoStatus.Status);
                break;

            //
            //  Perform set event
            //

            case EventSet:

                DebugTrace( 0, me, "CcWorkerThread SetEvent Event = %08lx\n",
                            WorkQueueEntry->Parameters.Event.Event );

                KeSetEvent( WorkQueueEntry->Parameters.Event.Event, 0, FALSE );
                DropThrottle = TRUE;
                break;

            //
            //  Perform Lazy Write Scan
            //

            case LazyWriteScan:

                DebugTrace( 0, me, "CcWorkerThread Lazy Write Scan\n", 0 );

                CcLazyWriteScan();
                break;
            }

        }
        except( CcExceptionFilter( GetExceptionCode() )) {

            NOTHING;
        }

        //
        //  If not a requeue request, free the workitem.
        //

        if (IoStatus.Information != CC_REQUEUE) {

            CcFreeWorkQueueEntry( WorkQueueEntry );
        }
    }

    //
    //  No more work. Requeue our worker thread entry and get out.
    //

    InsertTailList( &CcIdleWorkerThreadList,
                    &((PWORK_QUEUE_ITEM)ExWorkQueueItem)->List );
    CcNumberActiveWorkerThreads -= 1;

    ExReleaseFastLock( &CcWorkQueueSpinlock, OldIrql );

    if (!IsListEmpty(&CcDeferredWrites) && (CcTotalDirtyPages >= 20) && RescanOk) {
        CcLazyWriteScan();
    }

    return;
}


Generated on Sat May 15 19:44:29 2004 for test by doxygen 1.3.7