Main Page | Class Hierarchy | Class List | File List | Class Members | File Members

cc.h

Go to the documentation of this file.
/*++

Copyright (c) 1990  Microsoft Corporation

Module Name:

    cc.h

Abstract:

    This module is a header file for the Memory Management based cache
    management routines for the common Cache subsystem.

Author:

    Tom Miller      [TomM]      4-May-1990

Revision History:

--*/

#ifndef _CCh_
#define _CCh_

#include <ntos.h>
#include <NtIoLogc.h>

#ifdef MEMPRINT
#include <memprint.h>
#endif

//
//  Define macros to acquire and release cache manager locks.
//
//  On Alpha and x86 the two global Cache Manager locks (the "master"
//  lock and the Vacb lock) are queued spinlocks, acquired through the
//  per-processor lock queue entries LockQueueMasterLock and
//  LockQueueVacbLock.  On all other platforms they fall back to the
//  classic global spinlocks CcMasterSpinLock and CcVacbSpinLock.
//
//  The ...AtDpcLevel / ...FromDpcLevel forms assume the caller is
//  already running at or above DISPATCH_LEVEL and therefore do not
//  save or restore IRQL.
//

#if defined(_ALPHA_) || defined(_X86_)

#define CcAcquireMasterLock( OldIrql ) \
    *( OldIrql ) = KeAcquireQueuedSpinLock( LockQueueMasterLock )

#define CcReleaseMasterLock( OldIrql ) \
    KeReleaseQueuedSpinLock( LockQueueMasterLock, OldIrql )

#define CcAcquireMasterLockAtDpcLevel() \
    KiAcquireQueuedSpinLock( &KeGetCurrentPrcb()->LockQueue[LockQueueMasterLock] )

#define CcReleaseMasterLockFromDpcLevel() \
    KiReleaseQueuedSpinLock( &KeGetCurrentPrcb()->LockQueue[LockQueueMasterLock] )

#define CcAcquireVacbLock( OldIrql ) \
    *( OldIrql ) = KeAcquireQueuedSpinLock( LockQueueVacbLock )

#define CcReleaseVacbLock( OldIrql ) \
    KeReleaseQueuedSpinLock( LockQueueVacbLock, OldIrql )

#define CcAcquireVacbLockAtDpcLevel() \
    KiAcquireQueuedSpinLock( &KeGetCurrentPrcb()->LockQueue[LockQueueVacbLock] )

#define CcReleaseVacbLockFromDpcLevel() \
    KiReleaseQueuedSpinLock( &KeGetCurrentPrcb()->LockQueue[LockQueueVacbLock] )

#else

#define CcAcquireMasterLock( OldIrql ) \
    ExAcquireSpinLock( &CcMasterSpinLock, OldIrql )

#define CcReleaseMasterLock( OldIrql ) \
    ExReleaseSpinLock( &CcMasterSpinLock, OldIrql )

#define CcAcquireMasterLockAtDpcLevel() \
    ExAcquireSpinLockAtDpcLevel( &CcMasterSpinLock )

#define CcReleaseMasterLockFromDpcLevel() \
    ExReleaseSpinLockFromDpcLevel( &CcMasterSpinLock )

#define CcAcquireVacbLock( OldIrql ) \
    ExAcquireSpinLock( &CcVacbSpinLock, OldIrql )

#define CcReleaseVacbLock( OldIrql ) \
    ExReleaseSpinLock( &CcVacbSpinLock, OldIrql )

#define CcAcquireVacbLockAtDpcLevel() \
    ExAcquireSpinLockAtDpcLevel( &CcVacbSpinLock )

#define CcReleaseVacbLockFromDpcLevel() \
    ExReleaseSpinLockFromDpcLevel( &CcVacbSpinLock )

#endif

//
//  This turns on the Bcb list debugging in a debug system.  Set value
//  to 0 to turn off.
//
//  **** Note it must currently be turned off because the routines in
//       pinsup.c that manipulate this list need to be changed to do the
//       right thing for Obcbs.  Right now they screw up by inserting Obcbs
//       (which may not be large enough among other things) into the global
//       list.  Ideally each place gets some code to insert the underlying
//       Bcbs into the list if they are not already there.
//

#if DBG
#define LIST_DBG 0
#endif

#include <FsRtl.h>
#ifndef _USERKDX_ // Including stdlib.h build breaks ntos\w32\ntuser\kdexts\kd (!dso)
#include <stdlib.h>
#endif
#include <string.h>
#include <limits.h>

//
//  Tag all of our allocations if tagging is turned on.  All Cache
//  Manager pool allocations carry the ' cC' pool tag.
//

#undef FsRtlAllocatePool
#undef FsRtlAllocatePoolWithQuota

#define FsRtlAllocatePool(a,b) FsRtlAllocatePoolWithTag(a,b,' cC')
#define FsRtlAllocatePoolWithQuota(a,b) FsRtlAllocatePoolWithQuotaTag(a,b,' cC')

#undef ExAllocatePool
#undef ExAllocatePoolWithQuota

#define ExAllocatePool(a,b) ExAllocatePoolWithTag(a,b,' cC')
#define ExAllocatePoolWithQuota(a,b) ExAllocatePoolWithQuotaTag(a,b,' cC')

//
//  Peek at number of available pages (maintained by Memory Management).
//

extern PFN_COUNT MmAvailablePages;

#if DBG
// #define MIPS_PREFILL 0
#endif

#ifdef MIPS
#ifdef MIPS_PREFILL
VOID
KeSweepDcache (
    IN BOOLEAN AllProcessors
    );
#endif
#endif

//
//  Define our node type codes.  These are stored in the NodeTypeCode
//  field of each structure below so debuggers and asserts can identify
//  which structure they are looking at.
//

#define CACHE_NTC_SHARED_CACHE_MAP       (0x2FF)
#define CACHE_NTC_PRIVATE_CACHE_MAP      (0x2FE)
#define CACHE_NTC_BCB                    (0x2FD)
#define CACHE_NTC_DEFERRED_WRITE         (0x2FC)
#define CACHE_NTC_MBCB                   (0x2FB)
#define CACHE_NTC_OBCB                   (0x2FA)
#define CACHE_NTC_MBCB_GRANDE            (0x2F9)

//
//  The following definitions are used to generate meaningful blue bugcheck
//  screens.  On a bugcheck the file system can output 4 ulongs of useful
//  information.  The first ulong will have encoded in it a source file id
//  (in the high word) and the line number of the bugcheck (in the low word).
//  The other values can be whatever the caller of the bugcheck routine deems
//  necessary.
//
//  Each individual file that calls bugcheck needs to have defined at the
//  start of the file a constant called BugCheckFileId with one of the
//  CACHE_BUG_CHECK_ values defined below and then use CcBugCheck to bugcheck
//  the system.
//

#define CACHE_BUG_CHECK_CACHEDAT         (0x00010000)
#define CACHE_BUG_CHECK_CACHESUB         (0x00020000)
#define CACHE_BUG_CHECK_COPYSUP          (0x00030000)
#define CACHE_BUG_CHECK_FSSUP            (0x00040000)
#define CACHE_BUG_CHECK_LAZYRITE         (0x00050000)
#define CACHE_BUG_CHECK_LOGSUP           (0x00060000)
#define CACHE_BUG_CHECK_MDLSUP           (0x00070000)
#define CACHE_BUG_CHECK_PINSUP           (0x00080000)
#define CACHE_BUG_CHECK_VACBSUP          (0x00090000)

#define CcBugCheck(A,B,C) { KeBugCheckEx(CACHE_MANAGER, BugCheckFileId | __LINE__, A, B, C ); }

//
//  Define maximum View Size.  (These constants are currently so chosen so
//  as to be exactly a page worth of PTEs.)
//

#define DEFAULT_CREATE_MODULO            ((ULONG)(0x00100000))
#define DEFAULT_EXTEND_MODULO            ((ULONG)(0x00100000))

//
//  For non FO_RANDOM_ACCESS files, define how far we go before unmapping
//  views.
//

#define SEQUENTIAL_MAP_LIMIT             ((ULONG)(0x00080000))

//
//  Define some constants to drive read ahead and write behind.
//

//
//  Set max read ahead.  Even though some drivers, such as AT, break up
//  transfers >= 128kb, we need to permit enough readahead to satisfy
//  plausible cached read operation while preventing denial of service
//  attacks.
//
//  This value used to be set to 64k.  When doing cached reads in larger
//  units (128k), we would never be bringing in enough data to keep the
//  user from blocking.  8mb is arbitrarily chosen to be greater than
//  plausible RAID bandwidth and user operation size by a factor of 3-4.
//

#define MAX_READ_AHEAD                   (8 * 1024 * 1024)

//
//  Set maximum write behind / lazy write (most drivers break up transfers >= 64kb)
//

#define MAX_WRITE_BEHIND                 (MM_MAXIMUM_DISK_IO_SIZE)

//
//  Set a throttle for charging a given write against the total number of dirty
//  pages in the system, for the purpose of seeing when we should invoke write
//  throttling.
//
//  This must be the same as the throttle used for seeing when we must flush
//  temporary files in the lazy writer.  On the back of the envelope, here
//  is why:
//
//      RDP = Regular File Dirty Pages
//      TDP = Temporary File Dirty Pages
//      CWT = Charged Write Throttle
//            -> the maximum we will charge a user with when we see if
//               he should be throttled
//      TWT = Temporary Write Throttle
//            -> if we can't write this many pages, we must write temp data
//      DPT = Dirty Page Threshold
//            -> the limit when write throttling kicks in
//
//      PTD = Pages To Dirty
//      CDP = Charged Dirty Pages
//
//  Now, CDP = Min( PTD, CWT ).
//
//  Excluding other effects, we throttle when:
//      #0  (RDP + TDP) + CDP >= DPT
//
//  To write temporary data, we must cause:
//      #1  (RDP + TDP) + TWT >= DPT
//
//  To release the throttle, we must eventually cause:
//      #2  (RDP + TDP) + CDP < DPT
//
//  Now, imagine TDP >> RDP (perhaps RDP == 0) and CDP == CWT for a particular
//  throttled write.
//
//  If CWT > TWT, as we drive RDP to zero (we never defer writing regular
//  data except for hotspots or other very temporary conditions), it is clear
//  that we may never trigger the writing of temporary data (#1) but also
//  never release the throttle (#2).  Simply, we would be willing to charge
//  for more dirty pages than we would be willing to guarantee are available
//  to dirty.  Hence, potential deadlock.
//
//  CWT < TWT I leave aside for the moment.  This would mean we try not to
//  allow temporary data to accumulate to the point that writes throttle as
//  a result.  Perhaps this would even be better than CWT == TWT.
//
//  It is legitimate to ask if throttling temporary data writes should be relaxed
//  if we see a large amount of dirty temp data accumulate (and it would be very
//  easy to keep track of this).  I don't claim to know the best answer to this,
//  but for now the attempt to avoid temporary data writes at all costs still
//  fits the reasonable operation mix, and we will only penalize the outside
//  oddcase with a little more throttle/release.
//

#define WRITE_CHARGE_THRESHOLD           (64 * PAGE_SIZE)

//
//  Define constants to control zeroing of file data: one constant to control
//  how much data we will actually zero ahead in the cache, and another to
//  control what the maximum transfer size is that we will use to write zeros.
//

#define MAX_ZERO_TRANSFER                (PAGE_SIZE * 128)
#define MIN_ZERO_TRANSFER                (0x10000)
#define MAX_ZEROS_IN_CACHE               (0x10000)

//
//  Definitions for multi-level Vacb structure.  The primary definition is the
//  VACB_LEVEL_SHIFT.  In a multi-level Vacb structure, each level in the tree
//  of pointers has 2 ** VACB_LEVEL_SHIFT pointers.
//
//  For test, this value may be set as low as 4 (no lower); a value of 10
//  corresponds to a convenient block size of 4KB.  (If set to 2,
//  CcExtendVacbArray will try to "push" the Vacb array allocated within the
//  SharedCacheMap, and later someone will try to deallocate the middle of
//  the SharedCacheMap.  At 3, the MBCB_BITMAP_BLOCK_SIZE is larger than
//  MBCB_BITMAP_BLOCK_SIZE.)
//
//  NOTE(review): the sentence above compares MBCB_BITMAP_BLOCK_SIZE with
//  itself, which cannot be what was meant - presumably one side should be
//  the initial bitmap space embedded in the Mbcb (see
//  MBCB_BITMAP_INITIAL_SIZE below); confirm against the original sources.
//
//  There is a bit of a trick as we make the jump to the multilevel structure
//  in that we need a real fixed reference count.
//

#define VACB_LEVEL_SHIFT                 (7)

//
//  This is how many bytes of pointers are at each level.  This is the size
//  for both the Vacb array and (optional) Bcb listheads.  It does not
//  include the reference block.
//

#define VACB_LEVEL_BLOCK_SIZE            ((1 << VACB_LEVEL_SHIFT) * sizeof(PVOID))

//
//  This is the last index for a level.
//

#define VACB_LAST_INDEX_FOR_LEVEL        ((1 << VACB_LEVEL_SHIFT) - 1)

//
//  This is the size of file which can be handled in a single level.
//

#define VACB_SIZE_OF_FIRST_LEVEL         (1 << (VACB_OFFSET_SHIFT + VACB_LEVEL_SHIFT))

//
//  This is the maximum number of levels it takes to support 63-bits.  It is
//  used for routines that must remember a path.
//

#define VACB_NUMBER_OF_LEVELS            (((63 - VACB_OFFSET_SHIFT)/VACB_LEVEL_SHIFT) + 1)

//
//  Define the reference structure for multilevel Vacb trees.
//

typedef struct _VACB_LEVEL_REFERENCE {

    //
    //  Regular reference count on this level.
    //

    LONG Reference;

    //
    //  Count of special references (see VACB_SPECIAL_REFERENCE /
    //  VACB_SPECIAL_DEREFERENCE below).
    //

    LONG SpecialReference;

} VACB_LEVEL_REFERENCE, *PVACB_LEVEL_REFERENCE;

//
//  Define the size of a bitmap allocated for a bitmap range, in bytes.
//

#define MBCB_BITMAP_BLOCK_SIZE           (VACB_LEVEL_BLOCK_SIZE)

//
//  Define how many bytes of a file are covered by an Mbcb bitmap range,
//  at a bit for each page.
//

#define MBCB_BITMAP_RANGE                (MBCB_BITMAP_BLOCK_SIZE * 8 * PAGE_SIZE)

//
//  Define the initial size of the Mbcb bitmap that is self-contained in
//  the Mbcb.
//

#define MBCB_BITMAP_INITIAL_SIZE         (2 * sizeof(BITMAP_RANGE))

//
//  Define constants controlling when the Bcb list is broken into a
//  pendaflex-style array of listheads, and how the correct listhead
//  is found.  Begin when file size exceeds 2MB, and cover 512KB per
//  listhead.  At 512KB per listhead, the BcbListArray is the same
//  size as the Vacb array, i.e., it doubles the size.
//
//  The code handling these Bcb lists in the Vacb package contains
//  assumptions that the size is the same as that of the Vacb pointers.
//  Future work could undo this, but until then the size and shift
//  below cannot change.  There really isn't a good reason to want to
//  anyway.
//
//  Note that by definition a flat vacb array cannot fail to find an
//  exact match when searching for the listhead - this is only a
//  complication of the sparse structure.
//

#define BEGIN_BCB_LIST_ARRAY             (0x200000)
#define SIZE_PER_BCB_LIST                (VACB_MAPPING_GRANULARITY * 2)
#define BCB_LIST_SHIFT                   (VACB_OFFSET_SHIFT + 1)

//
//  Return the Bcb listhead to use for file offset OFF in SharedCacheMap
//  SCM.  Small files, and files without MODIFIED_WRITE_DISABLED, use the
//  single embedded BcbList.  Larger flat files index into listheads stored
//  behind the Vacb pointer array; files beyond VACB_SIZE_OF_FIRST_LEVEL
//  go through CcGetBcbListHeadLargeOffset.
//
//  NOTE(review): FAILSUCC is simply passed through to
//  CcGetBcbListHeadLargeOffset; its exact semantics (fail vs. return the
//  closest listhead on a miss) must be confirmed against that routine.
//

#define GetBcbListHead(SCM,OFF,FAILSUCC) (                                                  \
    (((SCM)->SectionSize.QuadPart > BEGIN_BCB_LIST_ARRAY) &&                                \
     FlagOn((SCM)->Flags, MODIFIED_WRITE_DISABLED)) ?                                       \
      (((SCM)->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL) ?                           \
        CcGetBcbListHeadLargeOffset((SCM),(OFF),(FAILSUCC)) :                               \
          (((OFF) >= (SCM)->SectionSize.QuadPart) ? &(SCM)->BcbList :                       \
            ((PLIST_ENTRY)((SCM)->Vacbs) + (((SCM)->SectionSize.QuadPart + (OFF)) >> BCB_LIST_SHIFT)))) : \
    &(SCM)->BcbList                                                                         \
)

//
//  Macros to lock/unlock a Vacb level as Bcbs are inserted/deleted.
//
//  N.B.  The flag test references a variable literally named
//  SharedCacheMap in the caller's scope rather than the SCM parameter, so
//  these macros only work where the argument passed for SCM is in fact a
//  local called SharedCacheMap.
//

#define CcLockVacbLevel(SCM,OFF) {                                           \
    if (((SCM)->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL) &&          \
        FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED)) {            \
        CcAdjustVacbLevelLockCount((SCM),(OFF), +1);}                        \
}

#define CcUnlockVacbLevel(SCM,OFF) {                                         \
    if (((SCM)->SectionSize.QuadPart > VACB_SIZE_OF_FIRST_LEVEL) &&          \
        FlagOn(SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED)) {            \
        CcAdjustVacbLevelLockCount((SCM),(OFF), -1);}                        \
}

//
//  NOISE_BITS defines how many bits are masked off when testing for
//  sequential reads.  This allows the reader to skip up to 7 bytes
//  for alignment purposes, and we still consider the next read to be
//  sequential.  Starting and ending addresses are masked by this pattern
//  before comparison.
//

#define NOISE_BITS                       (0x7)

//
//  Define some constants to drive the Lazy Writer.  (The delays are
//  presumably in the standard kernel timer unit of 100ns, i.e. one
//  second and 0.1 second - confirm at the point of use.)
//

#define LAZY_WRITER_IDLE_DELAY           ((LONG)(10000000))
#define LAZY_WRITER_COLLISION_DELAY      ((LONG)(1000000))

//
//  The following target should best be a power of 2
//

#define LAZY_WRITER_MAX_AGE_TARGET       ((ULONG)(8))

//
//  Requeue information hint for the lazy writer.
//

#define CC_REQUEUE 35422

//
//  The global Cache Manager debug level variable, its values are:
//
//      0x00000000      Always gets printed (used when about to bug check)
//
//      0x00000001      FsSup
//      0x00000002      CacheSub
//      0x00000004      CopySup
//      0x00000008      PinSup
//
//      0x00000010      MdlSup
//      0x00000020      LazyRite
//      0x00000040
//      0x00000080
//
//      0x00000100      Trace all Mm calls
//

#define mm (0x100)

//
//  Miscellaneous support macros.
//
//      ULONG
//      FlagOn (
//          IN ULONG Flags,
//          IN ULONG SingleFlag
//          );
//
//      BOOLEAN
//      BooleanFlagOn (
//          IN ULONG Flags,
//          IN ULONG SingleFlag
//          );
//
//      VOID
//      SetFlag (
//          IN ULONG Flags,
//          IN ULONG SingleFlag
//          );
//
//      VOID
//      ClearFlag (
//          IN ULONG Flags,
//          IN ULONG SingleFlag
//          );
//
//      ULONG
//      QuadAlign (
//          IN ULONG Pointer
//          );
//

#define FlagOn(F,SF) (                             \
    (((F) & (SF)))                                 \
)

#define BooleanFlagOn(F,SF) (                      \
    (BOOLEAN)(((F) & (SF)) != 0)                   \
)

#define SetFlag(F,SF) {                            \
    (F) |= (SF);                                   \
}

#define ClearFlag(F,SF) {                          \
    (F) &= ~(SF);                                  \
}

//
//  Round up to the next multiple of 8 (quadword alignment).
//

#define QuadAlign(P) (                             \
    ((((P)) + 7) & (-8))                           \
)

//
//  Turn on pseudo-asserts if CC_FREE_ASSERTS is defined.  This gives free
//  (non-DBG) builds ASSERT/ASSERTMSG definitions that print the failure
//  and break into the debugger instead of compiling away.
//

#if (!DBG && defined( CC_FREE_ASSERTS ))
#undef ASSERT
#undef ASSERTMSG
#define ASSERT(exp) \
    ((exp) ? TRUE : \
             (DbgPrint( "%s:%d %s\n",__FILE__,__LINE__,#exp ), \
              DbgBreakPoint(), \
              TRUE))
#define ASSERTMSG(msg,exp) \
    ((exp) ? TRUE : \
             (DbgPrint( "%s:%d %s %s\n",__FILE__,__LINE__,msg,#exp ), \
              DbgBreakPoint(), \
              TRUE))
#endif

//
//  Small circular log used on DANLO builds to instrument actions against
//  a SharedCacheMap (see OpenCountLog below).  On other builds CcAddToLog
//  expands to nothing.
//

#if DANLO
typedef struct _CC_LOG_ENTRY {
    ULONG Action;
    ULONG Reason;
} CC_LOG_ENTRY;

typedef struct _CC_LOG {

    //  Index of the most recently written entry; wraps at Size.
    USHORT Current;
    USHORT Size;
    CC_LOG_ENTRY Log[48];
} CC_LOG;

//
//  Advance the cursor (wrapping at Size) and record an Action/Reason pair.
//

#define CcAddToLog( LOG, ACTION, REASON ) {                  \
    (LOG)->Current += 1;                                     \
    if ((LOG)->Current == (LOG)->Size) {                     \
        (LOG)->Current = 0;                                  \
    }                                                        \
    (LOG)->Log[(LOG)->Current].Action = (ACTION);            \
    (LOG)->Log[(LOG)->Current].Reason = (REASON);            \
}
#else
#define CcAddToLog( LOG, ACTION, REASON )
#endif

//
//  Define the Virtual Address Control Block, which controls all mapping
//  performed by the Cache Manager.
//

//
//  First some constants
//

#define PREALLOCATED_VACBS               (4)

//
//  Virtual Address Control Block
//

typedef struct _VACB {

    //
    //  Base Address for this control block.
    //

    PVOID BaseAddress;

    //
    //  Pointer to the Shared Cache Map using this Vacb.
    //

    struct _SHARED_CACHE_MAP *SharedCacheMap;

    //
    //  Overlay for remembering mapped offset within the Shared Cache Map,
    //  and the count of the number of times this Vacb is in use.
    //

    union {

        //
        //  File Offset within Shared Cache Map
        //

        LARGE_INTEGER FileOffset;

        //
        //  Count of number of times this Vacb is in use.  The size of this
        //  count is calculated to be adequate, while never large enough to
        //  overwrite nonzero bits of the FileOffset, which is a multiple
        //  of VACB_MAPPING_GRANULARITY.
        //

        USHORT ActiveCount;

    } Overlay;

    //
    //  Entry for the VACB reuse list
    //

    LIST_ENTRY LruList;

} VACB, *PVACB;

//
//  These define special flag values that are overloaded as PVACB.  They cause
//  certain special behavior, currently only in the case of multilevel structures.
//

#define VACB_SPECIAL_REFERENCE           ((PVACB) ~0)
#define VACB_SPECIAL_DEREFERENCE         ((PVACB) ~1)

#define VACB_SPECIAL_FIRST_VALID         VACB_SPECIAL_DEREFERENCE

//
//  The Private Cache Map is a structure pointed to by the File Object,
//  whenever a file is opened with caching enabled (default).
//

typedef struct _PRIVATE_CACHE_MAP {

    //
    //  Type and size of this record
    //

    CSHORT NodeTypeCode;
    CSHORT NodeByteSize;

    //
    //  Pointer to FileObject for this PrivateCacheMap.
    //

    PFILE_OBJECT FileObject;

    //
    //  READ AHEAD CONTROL
    //
    //  Read ahead history for determining when read ahead might be
    //  beneficial.
    //

    LARGE_INTEGER FileOffset1;
    LARGE_INTEGER BeyondLastByte1;

    LARGE_INTEGER FileOffset2;
    LARGE_INTEGER BeyondLastByte2;

    //
    //  Current read ahead requirements.
    //
    //  Array element 0 is optionally used for recording remaining bytes
    //  required for satisfying a large Mdl read.
    //
    //  Array element 1 is used for predicted read ahead.
    //

    LARGE_INTEGER ReadAheadOffset[2];
    ULONG ReadAheadLength[2];

    //
    //  SpinLock controlling access to following fields
    //

    KSPIN_LOCK ReadAheadSpinLock;

    //
    //  Read Ahead mask formed from Read Ahead granularity - 1
    //

    ULONG ReadAheadMask;

    //
    //  Links for list of all PrivateCacheMaps linked to the same
    //  SharedCacheMap.
    //

    LIST_ENTRY PrivateLinks;

    //
    //  This flag says read ahead is currently active, which means either
    //  a file system call to CcReadAhead is still determining if the
    //  desired data is already resident, or else a request to do read ahead
    //  has been queued to a worker thread.
    //

    BOOLEAN ReadAheadActive;

    //
    //  Flag to say whether read ahead is currently enabled for this
    //  FileObject/PrivateCacheMap.  It is enabled on read misses and
    //  disabled on read ahead hits.  Initially disabled.
    //

    BOOLEAN ReadAheadEnabled;

} PRIVATE_CACHE_MAP;

typedef PRIVATE_CACHE_MAP *PPRIVATE_CACHE_MAP;

//
//  The Shared Cache Map is a per-file structure pointed to indirectly by
//  each File Object.  The File Object points to a pointer in a single
//  FS-private structure for the file (Fcb).  The SharedCacheMap maps the
//  first part of the file for common access by all callers.
//

typedef struct _SHARED_CACHE_MAP {

    //
    //  Type and size of this record
    //

    CSHORT NodeTypeCode;
    CSHORT NodeByteSize;

    //
    //  Number of times this file has been opened cached.
    //

    ULONG OpenCount;

    //
    //  Actual size of file, primarily for restricting Read Ahead.  Initialized
    //  on creation and maintained by extend and truncate operations.
    //
    //  NOTE:   This field may never be moved, thanks to the late DavidGoe,
    //          who should have written this comment himself :-(  cache.h
    //          exports a macro which "knows" that FileSize is the second
    //          longword in the Cache Map!
    //

    LARGE_INTEGER FileSize;

    //
    //  Bcb Listhead.  The BcbList is ordered by descending
    //  FileOffsets, to optimize misses in the sequential I/O case.
    //  Synchronized by the BcbSpinLock.
    //

    LIST_ENTRY BcbList;

    //
    //  Size of section created.
    //

    LARGE_INTEGER SectionSize;

    //
    //  ValidDataLength for file, as currently stored by the file system.
    //  Synchronized by the BcbSpinLock or exclusive access by FileSystem.
    //

    LARGE_INTEGER ValidDataLength;

    //
    //  Goal for ValidDataLength, when current dirty data is written.
    //  Synchronized by the BcbSpinLock or exclusive access by FileSystem.
    //

    LARGE_INTEGER ValidDataGoal;

    //
    //  Pointer to a contiguous array of Vacb pointers which control mapping
    //  to this file, along with Vacbs (currently) for a 1MB file.
    //  Synchronized by CcVacbSpinLock.
    //

    PVACB InitialVacbs[PREALLOCATED_VACBS];
    PVACB * Vacbs;

    //
    //  Referenced pointer to original File Object on which the SharedCacheMap
    //  was created.
    //

    PFILE_OBJECT FileObject;

    //
    //  Describe Active Vacb and Page for copysup optimizations.
    //

    volatile PVACB ActiveVacb;
    ULONG ActivePage;

    //
    //  Virtual address needing zero to end of page
    //

    volatile PVOID NeedToZero;
    ULONG NeedToZeroPage;

    //
    //  Fields for synchronizing on active requests.
    //

    KSPIN_LOCK ActiveVacbSpinLock;
    ULONG VacbActiveCount;

    //
    //  THE NEXT TWO FIELDS MUST BE ADJACENT, TO SUPPORT
    //  SHARED_CACHE_MAP_LIST_CURSOR!
    //
    //  Links for Global SharedCacheMap List
    //

    LIST_ENTRY SharedCacheMapLinks;

    //
    //  Shared Cache Map flags (defined below)
    //

    ULONG Flags;

    //
    //  Mask Bcb for this SharedCacheMap, if there is one.
    //  Synchronized by the BcbSpinLock.
    //

    struct _MBCB *Mbcb;

    //
    //  Number of dirty pages in this SharedCacheMap.  Used to trigger
    //  write behind.  Synchronized by CcMasterSpinLock.
    //

    ULONG DirtyPages;

    //
    //  Pointer to the common Section Object used by the file system.
    //

    PVOID Section;

    //
    //  Status variable set by creator of SharedCacheMap
    //

    NTSTATUS Status;

    //
    //  This event pointer is used to handle creation collisions.
    //  If a second thread tries to call CcInitializeCacheMap for the
    //  same file, while BeingCreated (below) is TRUE, then that thread
    //  will allocate an event store it here (if not already allocated),
    //  and wait on it.  The first creator will set this event when it
    //  is done.  The event is not deleted until CcUninitializeCacheMap
    //  is called, to avoid possible race conditions.  (Note that normally
    //  the event never has to be allocated.)
    //

    PKEVENT CreateEvent;

    //
    //  This points to an event used to wait for active count to go to zero
    //

    PKEVENT WaitOnActiveCount;

    //
    //  These two fields control the writing of large metadata
    //  streams.  The first field gives a target for the current
    //  flush interval, and the second field stores the end of
    //  the last flush that occurred on this file.
    //

    ULONG PagesToWrite;
    LONGLONG BeyondLastFlush;

#if 0
    //
    //  This records where the last view miss occurred in the file.
    //

    LARGE_INTEGER LastViewMiss;
#endif

    //
    //  Pointer to structure of routines used by the Lazy Writer to Acquire
    //  and Release the file for Lazy Write and Close, to avoid deadlocks,
    //  and the context to call them with.
    //

    PCACHE_MANAGER_CALLBACKS Callbacks;

    PVOID LazyWriteContext;

    //
    //  Listhead of all PrivateCacheMaps linked to this SharedCacheMap.
    //

    LIST_ENTRY PrivateList;

    //
    //  Log handle specified for this shared cache map, for support of routines
    //  in logsup.c
    //

    PVOID LogHandle;

    //
    //  Callback routine specified for flushing to Lsn.
    //

    PFLUSH_TO_LSN FlushToLsnRoutine;

    //
    //  Dirty Page Threshold for this stream
    //

    ULONG DirtyPageThreshold;

    //
    //  Lazy Writer pass count.  Used by the Lazy Writer for
    //  no modified write streams, which are not serviced on
    //  every pass in order to avoid contention with foreground
    //  activity.
    //

    ULONG LazyWritePassCount;

    //
    //  This event pointer is used to allow a file system to be notified
    //  when a shared cache map is deleted.
    //
    //  This has to be provided here because the cache manager may decide to
    //  "Lazy Delete" the shared cache map, and some network file systems
    //  will want to know when the lazy delete completes.
    //

    PCACHE_UNINITIALIZE_EVENT UninitializeEvent;

    //
    //  This Vacb pointer is needed for keeping the NeedToZero virtual address
    //  valid.
    //

    PVACB NeedToZeroVacb;

    //
    //  Spinlock for synchronizing the Mbcb and Bcb lists - must be acquired
    //  before CcMasterSpinLock.  This spinlock also synchronizes ValidDataGoal
    //  and ValidDataLength, as described above.
    //

    KSPIN_LOCK BcbSpinLock;

    //
    //  This is a scratch event which can be used either for
    //  a CreateEvent or a WaitOnActiveCount event.  It is
    //  difficult to share this event, because of the very
    //  careful semantics by which they are cleared.  On the
    //  other hand, both events are relatively rarely used
    //  (especially the CreateEvent), so it will be rare that
    //  we will actually use both for the same file, and have
    //  to allocate one.
    //

    PKEVENT LocalEvent;
    KEVENT Event;

    //
    //  Preallocate one PrivateCacheMap to reduce pool allocations.
    //

    PRIVATE_CACHE_MAP PrivateCacheMap;

#if DANLO
    //
    //  Instrument reasons for OpenCount
    //

    CC_LOG OpenCountLog;
#endif

} SHARED_CACHE_MAP;

typedef SHARED_CACHE_MAP *PSHARED_CACHE_MAP;

//
//  OpenCount log Reasons/Actions.
//
//  N.B.  The REASON argument is only consumed by CcAddToLog, which
//  expands to nothing on non-DANLO builds, so the OpenCountLog field
//  (which exists only #if DANLO) is never referenced there.
//

#define CcIncrementOpenCount( SCM, REASON ) {                        \
    (SCM)->OpenCount += 1;                                           \
    if (REASON != 0) {                                               \
        CcAddToLog( &(SCM)->OpenCountLog, REASON, 1 );               \
    }                                                                \
}

#define CcDecrementOpenCount( SCM, REASON ) {                        \
    (SCM)->OpenCount -= 1;                                           \
    if (REASON != 0) {                                               \
        CcAddToLog( &(SCM)->OpenCountLog, REASON, -1 );              \
    }                                                                \
}

//
//  Shared Cache Map Flags
//

//
//  Read ahead has been disabled on this file.
//

#define DISABLE_READ_AHEAD               0x0001

//
//  Write behind has been disabled on this file.
//

#define DISABLE_WRITE_BEHIND             0x0002

//
//  This flag indicates whether CcInitializeCacheMap was called with
//  PinAccess = TRUE.
//

#define PIN_ACCESS                       0x0004

//
//  This flag indicates that a truncate is required when OpenCount
//  goes to 0.
//

#define TRUNCATE_REQUIRED                0x0010

//
//  This flag indicates that a LazyWrite request is queued.
//

#define WRITE_QUEUED                     0x0020

//
//  This flag indicates that we have never seen anyone cache
//  the file except for with FO_SEQUENTIAL_ONLY, so we should
//  tell MM to quickly dump pages when we unmap.
//

#define ONLY_SEQUENTIAL_ONLY_SEEN        0x0040

//
//  Active Page is locked
//

#define ACTIVE_PAGE_IS_DIRTY             0x0080

//
//  Flag to say that a create is in progress.
//

#define BEING_CREATED                    0x0100

//
//  Flag to say that modified write was disabled on the section.
//

#define MODIFIED_WRITE_DISABLED          0x0200

//
//  Flag that indicates if a lazy write ever occurred on this file.
//

#define LAZY_WRITE_OCCURRED              0x0400

//
//  Flag that indicates this structure is only a cursor, only the
//  SharedCacheMapLinks and Flags are valid!
//

#define IS_CURSOR                        0x0800

//
//  Flag that indicates that we have seen someone cache this file
//  and specify FO_RANDOM_ACCESS.  This will deactivate our cache
//  working set trim assist.
//

#define RANDOM_ACCESS_SEEN               0x1000

//
//  Cursor structure for traversing the SharedCacheMap lists.  Anyone
//  scanning these lists must verify that the IS_CURSOR flag is clear
//  before looking at other SharedCacheMap fields.
//

typedef struct _SHARED_CACHE_MAP_LIST_CURSOR {

    //
    //  Links for Global SharedCacheMap List
    //

    LIST_ENTRY SharedCacheMapLinks;

    //
    //  Shared Cache Map flags, IS_CURSOR must be set.
    //

    ULONG Flags;

} SHARED_CACHE_MAP_LIST_CURSOR, *PSHARED_CACHE_MAP_LIST_CURSOR;

#ifndef KDEXT
//
//  Bitmap Range structure.  For small files there is just one embedded in the
//  Mbcb.  For large files there may be many of these linked to the Mbcb.
//

typedef struct _BITMAP_RANGE {

    //
    //  Links for the list of bitmap ranges off the Mbcb.
    //

    LIST_ENTRY Links;

    //
    //  Base page (FileOffset / PAGE_SIZE) represented by this range.
    //  (Size is a fixed maximum.)
    //

    LONGLONG BasePage;

    //
    //  First and Last dirty pages relative to the BasePage.
    //

    ULONG FirstDirtyPage;
    ULONG LastDirtyPage;

    //
    //  Number of dirty pages in this range.
    //

    ULONG DirtyPages;

    //
    //  Pointer to the bitmap for this range.
    //

    PULONG Bitmap;

} BITMAP_RANGE, *PBITMAP_RANGE;
#endif

//
//  This structure is a "mask" Bcb.  For fast simple write operations,
//  a mask Bcb is used so that we basically only have to set bits to remember
//  where the dirty data is.
//

typedef struct _MBCB {

    //
    //  Type and size of this record
    //

    CSHORT NodeTypeCode;
    CSHORT NodeIsInZone;

    //
    //  This field is used as a scratch area for the Lazy Writer to
    //  guide how much he will write each time he wakes up.
    //

    ULONG PagesToWrite;

    //
    //  Number of dirty pages (set bits) in the bitmap below.
    //

    ULONG DirtyPages;

    //
    //  Reserved for alignment.
    //

    ULONG Reserved;

    //
    //  ListHead of Bitmap ranges.
    //

    LIST_ENTRY BitmapRanges;

    //
    //  This is a hint on where to resume writing, since we will not
    //  always write all of the dirty data at once.
    //

    LONGLONG ResumeWritePage;

    //
    //  Initial three embedded Bitmap ranges.  For a file up to 2MB, only the
    //  first range is used, and the rest of the Mbcb contains bits for 2MB of
    //  dirty pages (4MB on Alpha).  For larger files, all three ranges may
    //  be used to describe external bitmaps.
    //

    BITMAP_RANGE BitmapRange1;
    BITMAP_RANGE BitmapRange2;
    BITMAP_RANGE BitmapRange3;

} MBCB;

typedef MBCB *PMBCB;

//
//  This is the Buffer Control Block structure for representing data which
//  is "pinned" in memory by one or more active requests and/or dirty.  This
//  structure is created the first time that a call to CcPinFileData specifies
//  a particular integral range of pages.  It is deallocated whenever the Pin
//  Count reaches 0 and the Bcb is not Dirty.
//
//  NOTE:  The first four fields must be the same as the PUBLIC_BCB.
//

typedef struct _BCB {

    //
    //  Type and size of this record
    //

    CSHORT NodeTypeCode;
    CSHORT NodeIsInZone;

    //
    //  Byte FileOffset and length of entire buffer
    //

    ULONG ByteLength;
    LARGE_INTEGER FileOffset;

    //
    //  Links for BcbList in SharedCacheMap
    //

    LIST_ENTRY BcbLinks;

    //
    //  Byte FileOffset of last byte in buffer (used for searching)
    //

    LARGE_INTEGER BeyondLastByte;

    //
    //  Oldest Lsn (if specified) when this buffer was set dirty.
    //

    LARGE_INTEGER OldestLsn;

    //
    //  Most recent Lsn specified when this buffer was set dirty.
    //  The FlushToLsnRoutine is called with this Lsn.
    //

    LARGE_INTEGER NewestLsn;

    //
    //  Pointer to Vacb via which this Bcb is mapped.
    //

    PVACB Vacb;

    //
    //  Links and caller addresses for the global Bcb list (for debug only)
    //

#if LIST_DBG
    LIST_ENTRY CcBcbLinks;
    PVOID CallerAddress;
    PVOID CallersCallerAddress;
#endif

    //
    //  Count of threads actively using this Bcb to process a request.
    //  This must be manipulated under protection of the BcbListSpinLock
    //  in the SharedCacheMap.
    //

    ULONG PinCount;

    //
    //  Resource to synchronize buffer access.  Pinning Readers and all Writers
    //  of the described buffer take out shared access (synchronization of
    //  buffer modifications is strictly up to the caller).  Note that pinning
    //  readers do not declare if they are going to modify the buffer or not.
    //  Anyone writing to disk takes out exclusive access, to prevent the buffer
    //  from changing while it is being written out.
    //

    ERESOURCE Resource;

    //
    //  Pointer to SharedCacheMap for this Bcb.
    //

    PSHARED_CACHE_MAP SharedCacheMap;

    //
    //  This is the Base Address at which the buffer can be seen in
    //  system space.  All access to buffer data should go through this
    //  address.
    //

    PVOID BaseAddress;

    //
    //  Flags
    //

    BOOLEAN Dirty;

} BCB;

#ifndef KDEXT
typedef BCB *PBCB;
#endif

//
//  This is the Overlap Buffer Control Block structure for representing data which
//  is "pinned" in memory and must be represented by multiple Bcbs due to overlaps.
//
//  NOTE:  The first four fields must be the same as the PUBLIC_BCB.
//

typedef struct _OBCB {

    //
    //  Type and size of this record
    //

    CSHORT NodeTypeCode;
    CSHORT NodeByteSize;

    //
    //  Byte FileOffset and length of entire buffer
    //

    ULONG ByteLength;
    LARGE_INTEGER FileOffset;

    //
    //  Vector of Bcb pointers.
    //

    PBCB Bcbs[ANYSIZE_ARRAY];

} OBCB;

typedef OBCB *POBCB;

//
//  Struct for remembering deferred writes for later posting.
//

typedef struct _DEFERRED_WRITE {

    //
    //  Type and size of this record
    //

    CSHORT NodeTypeCode;
    CSHORT NodeByteSize;

    //
    //  The file to be written.
    //

    PFILE_OBJECT FileObject;

    //
    //  Number of bytes the caller intends to write
    //

    ULONG BytesToWrite;

    //
    //  Links for the deferred write queue.
    //

    LIST_ENTRY DeferredWriteLinks;

    //
    //  If this event pointer is not NULL, then this event will
    //  be signalled when the write is ok, rather than calling
    //  the PostRoutine below.
    //

    PKEVENT Event;

    //
    //  The posting routine and its parameters
    //

    PCC_POST_DEFERRED_WRITE PostRoutine;
    PVOID Context1;
    PVOID Context2;

    //  NOTE(review): presumably limits the throttle test to the count of
    //  modified pages only - confirm against CcDeferWrite's callers.
    BOOLEAN LimitModifiedPages;

} DEFERRED_WRITE, *PDEFERRED_WRITE;

//
//  Struct controlling the Lazy Writer algorithms
//

typedef struct _LAZY_WRITER {

    //
    //  A few Mm routines still require a process.
    //

    PEPROCESS OurProcess;

    //
    //  Work queue.
    //

    LIST_ENTRY WorkQueue;

    //
    //  Zone for Bcbs.
    //

    ZONE_HEADER BcbZone;

    //
    //  Dpc and Timer Structures used for activating periodic scan when active.
    //

    KDPC ScanDpc;
    KTIMER ScanTimer;

    //
    //  Boolean to say whether Lazy Writer scan is active or not.
    //

    BOOLEAN ScanActive;

    //
    //  Boolean indicating if there is any other reason for Lazy Writer to
    //  wake up.
    //

    BOOLEAN OtherWork;

} LAZY_WRITER;

#ifndef KDEXT
//
//  Work queue entry for the worker threads, with an enumerated
//  function code.
//

typedef enum _WORKER_FUNCTION {
    Noop = 0,
    ReadAhead,
    WriteBehind,
    LazyWriteScan,
    EventSet
} WORKER_FUNCTION;
#endif

typedef struct _WORK_QUEUE_ENTRY {

    //
    //  List entry for our work queues.
    //

    LIST_ENTRY WorkQueueLinks;

    //
    //  Define a union to contain function-specific parameters.
    //

    union {

        //
        //  Read parameters (for read ahead)
        //

        struct {
            PFILE_OBJECT FileObject;
        } Read;

        //
        //  Write parameters (for write behind)
        //

        struct {
            PSHARED_CACHE_MAP SharedCacheMap;
        } Write;

        //
        //  Set event parameters (for queue checks)
        //

        struct {
            PKEVENT Event;
        } Event;

    } Parameters;

    //
    //  Function code for this entry (a WORKER_FUNCTION value):
    //

    UCHAR Function;

} WORK_QUEUE_ENTRY, *PWORK_QUEUE_ENTRY;

//
//  This is a structure appended to the end of an MDL
//

typedef struct _MDL_WRITE {

    //
    //  This field is for the use of the Server to stash anything interesting
    //

    PVOID ServerContext;

    //
    //  This is the resource to release when the write is complete.
    //

    PERESOURCE Resource;

    //
    //  This is the calling thread, i.e. the thread that must release
    //  the resource.
    //

    ERESOURCE_THREAD Thread;

    //
    //  This links all the pending MDLs through the shared cache map.
    //

    LIST_ENTRY MdlLinks;

} MDL_WRITE, *PMDL_WRITE;

//
//  Common Private routine definitions for the Cache Manager
//

//
//  Detach the active Vacb (if any) from the SharedCacheMap under its fast
//  lock.  On return (V) is the detached Vacb or NULL; when non-NULL, (P)
//  receives the active page and (D) receives the ACTIVE_PAGE_IS_DIRTY state
//  of the Flags word.  (P) and (D) are untouched when there was no active
//  Vacb.
//

#define GetActiveVacb(SCM,IRQ,V,P,D) {                                  \
    ExAcquireFastLock(&(SCM)->ActiveVacbSpinLock, &(IRQ));              \
    (V) = (SCM)->ActiveVacb;                                            \
    if ((V) != NULL) {                                                  \
        (P) = (SCM)->ActivePage;                                        \
        (SCM)->ActiveVacb = NULL;                                       \
        (D) = (SCM)->Flags & ACTIVE_PAGE_IS_DIRTY;                      \
    }                                                                   \
    ExReleaseFastLock(&(SCM)->ActiveVacbSpinLock, (IRQ));               \
}

//
//  Same as GetActiveVacb, for callers already running at DPC level.
//

#define GetActiveVacbAtDpcLevel(SCM,V,P,D) {                            \
    ExAcquireSpinLockAtDpcLevel(&(SCM)->ActiveVacbSpinLock);            \
    (V) = (SCM)->ActiveVacb;                                            \
    if ((V) != NULL) {                                                  \
        (P) = (SCM)->ActivePage;                                        \
        (SCM)->ActiveVacb = NULL;                                       \
        (D) = (SCM)->Flags & ACTIVE_PAGE_IS_DIRTY;                      \
    }                                                                   \
    ExReleaseSpinLockFromDpcLevel(&(SCM)->ActiveVacbSpinLock);          \
}

//
//  When setting dirty, when we set ACTIVE_PAGE_IS_DIRTY the first time,
//  we increment the dirty counts, and they never get decremented until
//  CcFreeActiveVacb.  If we are trying to set and there is already an
//  active Vacb *or* we are trying to set a clean one and the flag above
//  is set, we do not allow it, and we just free the vacb (we only want
//  to handle the clean transition in one place).
//
//  MP & UP cases are separately defined, because I do not trust the compiler
//  to otherwise generate the optimal UP code.
//

//
//  In the MP case, we test if we are setting the page dirty, because then
//  we must acquire CcMasterSpinLock to diddle CcDirtyPages.
01615 // 01616 01617 #if !defined(NT_UP) \ 01618 01619 #define SetActiveVacb(SCM,IRQ,V,P,D) { \ 01620 if (D) { \ 01621 CcAcquireMasterLock(&(IRQ)); \ 01622 ExAcquireSpinLockAtDpcLevel(&(SCM)->ActiveVacbSpinLock); \ 01623 } else { \ 01624 ExAcquireSpinLock(&(SCM)->ActiveVacbSpinLock, &(IRQ)); \ 01625 } \ 01626 do { \ 01627 if ((SCM)->ActiveVacb == NULL) { \ 01628 if (((SCM)->Flags & ACTIVE_PAGE_IS_DIRTY) != (D)) { \ 01629 if (D) { \ 01630 (SCM)->ActiveVacb = (V); \ 01631 (SCM)->ActivePage = (P); \ 01632 (V) = NULL; \ 01633 SetFlag((SCM)->Flags, ACTIVE_PAGE_IS_DIRTY); \ 01634 CcTotalDirtyPages += 1; \ 01635 (SCM)->DirtyPages += 1; \ 01636 if ((SCM)->DirtyPages == 1) { \ 01637 PLIST_ENTRY Blink; \ 01638 PLIST_ENTRY Entry; \ 01639 PLIST_ENTRY Flink; \ 01640 PLIST_ENTRY Head; \ 01641 Entry = &(SCM)->SharedCacheMapLinks; \ 01642 Blink = Entry->Blink; \ 01643 Flink = Entry->Flink; \ 01644 Blink->Flink = Flink; \ 01645 Flink->Blink = Blink; \ 01646 Head = &CcDirtySharedCacheMapList.SharedCacheMapLinks; \ 01647 Blink = Head->Blink; \ 01648 Entry->Flink = Head; \ 01649 Entry->Blink = Blink; \ 01650 Blink->Flink = Entry; \ 01651 Head->Blink = Entry; \ 01652 if (!LazyWriter.ScanActive) { \ 01653 LazyWriter.ScanActive = TRUE; \ 01654 ExReleaseSpinLockFromDpcLevel(&(SCM)->ActiveVacbSpinLock); \ 01655 CcReleaseMasterLock((IRQ)); \ 01656 KeSetTimer( &LazyWriter.ScanTimer, \ 01657 CcFirstDelay, \ 01658 &LazyWriter.ScanDpc ); \ 01659 break; \ 01660 } \ 01661 } \ 01662 } \ 01663 } else { \ 01664 (SCM)->ActiveVacb = (V); \ 01665 (SCM)->ActivePage = (P); \ 01666 (V) = NULL; \ 01667 } \ 01668 } \ 01669 if (D) { \ 01670 ExReleaseSpinLockFromDpcLevel(&(SCM)->ActiveVacbSpinLock); \ 01671 CcReleaseMasterLock((IRQ)); \ 01672 } else { \ 01673 ExReleaseSpinLock(&(SCM)->ActiveVacbSpinLock, (IRQ)); \ 01674 } \ 01675 if ((V) != NULL) { \ 01676 CcFreeActiveVacb( (SCM), (V), (P), (D)); \ 01677 } \ 01678 } while (FALSE); \ 01679 } 01680 01681 // 01682 // In the UP case, any FastLock will do, so we 
just use the ActiveVacb lock, and do not 01683 // explicitly acquire CcMasterSpinLock. 01684 // 01685 01686 #else 01687 01688 #define SetActiveVacb(SCM,IRQ,V,P,D) { \ 01689 ExAcquireFastLock(&(SCM)->ActiveVacbSpinLock, &(IRQ)); \ 01690 do { \ 01691 if ((SCM)->ActiveVacb == NULL) { \ 01692 if (((SCM)->Flags & ACTIVE_PAGE_IS_DIRTY) != (D)) { \ 01693 if (D) { \ 01694 (SCM)->ActiveVacb = (V); \ 01695 (SCM)->ActivePage = (P); \ 01696 (V) = NULL; \ 01697 SetFlag((SCM)->Flags, ACTIVE_PAGE_IS_DIRTY); \ 01698 CcTotalDirtyPages += 1; \ 01699 (SCM)->DirtyPages += 1; \ 01700 if ((SCM)->DirtyPages == 1) { \ 01701 PLIST_ENTRY Blink; \ 01702 PLIST_ENTRY Entry; \ 01703 PLIST_ENTRY Flink; \ 01704 PLIST_ENTRY Head; \ 01705 Entry = &(SCM)->SharedCacheMapLinks; \ 01706 Blink = Entry->Blink; \ 01707 Flink = Entry->Flink; \ 01708 Blink->Flink = Flink; \ 01709 Flink->Blink = Blink; \ 01710 Head = &CcDirtySharedCacheMapList.SharedCacheMapLinks; \ 01711 Blink = Head->Blink; \ 01712 Entry->Flink = Head; \ 01713 Entry->Blink = Blink; \ 01714 Blink->Flink = Entry; \ 01715 Head->Blink = Entry; \ 01716 if (!LazyWriter.ScanActive) { \ 01717 LazyWriter.ScanActive = TRUE; \ 01718 ExReleaseFastLock(&(SCM)->ActiveVacbSpinLock, (IRQ)); \ 01719 KeSetTimer( &LazyWriter.ScanTimer, \ 01720 CcFirstDelay, \ 01721 &LazyWriter.ScanDpc ); \ 01722 break; \ 01723 } \ 01724 } \ 01725 } \ 01726 } else { \ 01727 (SCM)->ActiveVacb = (V); \ 01728 (SCM)->ActivePage = (P); \ 01729 (V) = NULL; \ 01730 } \ 01731 } \ 01732 ExReleaseFastLock(&(SCM)->ActiveVacbSpinLock, (IRQ)); \ 01733 if ((V) != NULL) { \ 01734 CcFreeActiveVacb( (SCM), (V), (P), (D)); \ 01735 } \ 01736 } while (FALSE); \ 01737 } 01738 01739 #endif 01740 01741 VOID 01742 CcPostDeferredWrites ( 01743 ); 01744 01745 BOOLEAN 01746 CcPinFileData ( 01747 IN PFILE_OBJECT FileObject, 01748 IN PLARGE_INTEGER FileOffset, 01749 IN ULONG Length, 01750 IN BOOLEAN ReadOnly, 01751 IN BOOLEAN WriteOnly, 01752 IN ULONG Flags, 01753 OUT PBCB *Bcb, 01754 OUT PVOID 
        *BaseAddress,
    OUT PLARGE_INTEGER BeyondLastByte
    );

//
//  Actions for CcUnpinFileData.
//

typedef enum {
    UNPIN,
    UNREF,
    SET_CLEAN
} UNMAP_ACTIONS;

VOID
FASTCALL
CcUnpinFileData (
    IN OUT PBCB Bcb,
    IN BOOLEAN ReadOnly,
    IN UNMAP_ACTIONS UnmapAction
    );

VOID
FASTCALL
CcDeallocateBcb (
    IN PBCB Bcb
    );

VOID
FASTCALL
CcPerformReadAhead (
    IN PFILE_OBJECT FileObject
    );

//
//  Record a dirty range in the file's mask Bcb (Mbcb bitmap ranges).
//

VOID
CcSetDirtyInMask (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length
    );

VOID
FASTCALL
CcWriteBehind (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN PIO_STATUS_BLOCK IoStatus
    );

//
//  ZeroFlags values for CcMapAndRead / CcMapAndCopy, selecting which pages
//  of the range may simply be zeroed rather than read.
//

#define ZERO_FIRST_PAGE                  1
#define ZERO_MIDDLE_PAGES                2
#define ZERO_LAST_PAGE                   4

BOOLEAN
CcMapAndRead(
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN ULONG ZeroFlags,
    IN BOOLEAN Wait,
    IN PVOID BaseAddress
    );

//
//  Release an active Vacb captured via GetActiveVacb, settling the
//  dirty-page state charged by SetActiveVacb.
//

VOID
CcFreeActiveVacb (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN PVACB ActiveVacb OPTIONAL,
    IN ULONG ActivePage,
    IN ULONG PageIsDirty
    );

VOID
CcMapAndCopy(
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN PVOID UserBuffer,
    IN PLARGE_INTEGER FileOffset,
    IN ULONG Length,
    IN ULONG ZeroFlags,
    IN BOOLEAN WriteThrough
    );

//
//  Dpc routine driven by LazyWriter.ScanTimer.
//

VOID
CcScanDpc (
    IN PKDPC Dpc,
    IN PVOID DeferredContext,
    IN PVOID SystemArgument1,
    IN PVOID SystemArgument2
    );

VOID
CcScheduleLazyWriteScan (
    );

VOID
CcStartLazyWriter (
    IN PVOID NotUsed
    );

//
//  Lookaside-list allocator/deallocator for WORK_QUEUE_ENTRYs.
//

#define CcAllocateWorkQueueEntry() \
    (PWORK_QUEUE_ENTRY)ExAllocateFromPPNPagedLookasideList(LookasideTwilightList)

#define CcFreeWorkQueueEntry(_entry_) \
    ExFreeToPPNPagedLookasideList(LookasideTwilightList, (_entry_))

VOID
FASTCALL
CcPostWorkQueue (
    IN PWORK_QUEUE_ENTRY WorkQueueEntry,
    IN PLIST_ENTRY WorkQueue
    );

VOID
CcWorkerThread (
    PVOID ExWorkQueueItem
    );

VOID
FASTCALL
CcDeleteSharedCacheMap (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN KIRQL ListIrql,
    IN ULONG ReleaseFile
    );

//
//  This exception filter handles STATUS_IN_PAGE_ERROR correctly
//

LONG
CcCopyReadExceptionFilter(
    IN PEXCEPTION_POINTERS ExceptionPointer,
    IN PNTSTATUS ExceptionCode
    );

//
//  Exception filter for Worker Threads in lazyrite.c
//

LONG
CcExceptionFilter (
    IN NTSTATUS ExceptionCode
    );

#ifdef CCDBG
VOID
CcDump (
    IN PVOID Ptr
    );
#endif

//
//  Vacb routines
//

VOID
CcInitializeVacbs(
    );

PVOID
CcGetVirtualAddressIfMapped (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LONGLONG FileOffset,
    OUT PVACB *Vacb,
    OUT PULONG ReceivedLength
    );

PVOID
CcGetVirtualAddress (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LARGE_INTEGER FileOffset,
    OUT PVACB *Vacb,
    OUT PULONG ReceivedLength
    );

VOID
FASTCALL
CcFreeVirtualAddress (
    IN PVACB Vacb
    );

VOID
CcReferenceFileOffset (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LARGE_INTEGER FileOffset
    );

VOID
CcDereferenceFileOffset (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LARGE_INTEGER FileOffset
    );

VOID
CcWaitOnActiveCount (
    IN PSHARED_CACHE_MAP SharedCacheMap
    );

VOID
FASTCALL
CcCreateVacbArray (
    IN PSHARED_CACHE_MAP
        SharedCacheMap,
    IN LARGE_INTEGER NewSectionSize
    );

VOID
CcExtendVacbArray (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LARGE_INTEGER NewSectionSize
    );

BOOLEAN
FASTCALL
CcUnmapVacbArray (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN PLARGE_INTEGER FileOffset OPTIONAL,
    IN ULONG Length,
    IN BOOLEAN UnmapBehind
    );

VOID
CcAdjustVacbLevelLockCount (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LONGLONG FileOffset,
    IN LONG Adjustment
    );

PLIST_ENTRY
CcGetBcbListHeadLargeOffset (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN LONGLONG FileOffset,
    IN BOOLEAN FailToSuccessor
    );

//
//  Ensure the Vacb-level zone holds at least NumberNeeded free blocks;
//  used before CcAllocateVacbLevel, which pops the free list unchecked.
//

ULONG
CcPrefillVacbLevelZone (
    IN ULONG NumberNeeded,
    OUT PKIRQL OldIrql,
    IN ULONG NeedBcbListHeads
    );

VOID
CcDrainVacbLevelZone (
    );

//
//  Define references to global data
//

extern KSPIN_LOCK CcMasterSpinLock;
extern KSPIN_LOCK CcBcbSpinLock;
extern LIST_ENTRY CcCleanSharedCacheMapList;
extern SHARED_CACHE_MAP_LIST_CURSOR CcDirtySharedCacheMapList;
extern SHARED_CACHE_MAP_LIST_CURSOR CcLazyWriterCursor;
extern NPAGED_LOOKASIDE_LIST CcTwilightLookasideList;
extern KSPIN_LOCK CcWorkQueueSpinlock;
extern ULONG CcNumberWorkerThreads;
extern ULONG CcNumberActiveWorkerThreads;
extern LIST_ENTRY CcIdleWorkerThreadList;
extern LIST_ENTRY CcExpressWorkQueue;
extern LIST_ENTRY CcRegularWorkQueue;
extern LIST_ENTRY CcPostTickWorkQueue;
extern BOOLEAN CcQueueThrottle;
extern ULONG CcIdleDelayTick;
extern LARGE_INTEGER CcNoDelay;
extern LARGE_INTEGER CcFirstDelay;
extern LARGE_INTEGER CcIdleDelay;
extern LARGE_INTEGER CcCollisionDelay;
extern LARGE_INTEGER CcTargetCleanDelay;
extern LAZY_WRITER LazyWriter;
extern KSPIN_LOCK CcVacbSpinLock;
extern ULONG CcNumberVacbs;
extern PVACB CcVacbs;
extern PVACB CcBeyondVacbs;
extern LIST_ENTRY CcVacbLru;
extern KSPIN_LOCK CcDeferredWriteSpinLock;
extern LIST_ENTRY CcDeferredWrites;
extern ULONG CcDirtyPageThreshold;
extern ULONG CcDirtyPageTarget;
extern ULONG CcDirtyPagesLastScan;
extern ULONG CcPagesYetToWrite;
extern ULONG CcPagesWrittenLastTime;
extern ULONG CcAvailablePagesThreshold;
extern ULONG CcTotalDirtyPages;
extern ULONG CcTune;
extern LONG CcAggressiveZeroCount;
extern LONG CcAggressiveZeroThreshold;
extern ULONG CcLazyWriteHotSpots;
extern MM_SYSTEMSIZE CcCapturedSystemSize;
extern ULONG CcMaxVacbLevelsSeen;
extern ULONG CcVacbLevelEntries;
extern PVACB *CcVacbLevelFreeList;
extern ULONG CcVacbLevelWithBcbsEntries;
extern PVACB *CcVacbLevelWithBcbsFreeList;

//
//  Macros for allocating and deallocating Vacb levels - CcVacbSpinLock must
//  be acquired.
//

//
//  Pop one Vacb-level block from the appropriate free list.  The free-list
//  head is dereferenced unconditionally, so the caller must hold
//  CcVacbSpinLock and must have guaranteed a nonempty list (see
//  CcPrefillVacbLevelZone).
//

_inline PVACB *CcAllocateVacbLevel (
    IN BOOLEAN AllocatingBcbListHeads
    )

{
    PVACB *ReturnEntry;

    if (AllocatingBcbListHeads) {
        ReturnEntry = CcVacbLevelWithBcbsFreeList;
        CcVacbLevelWithBcbsFreeList = (PVACB *)*ReturnEntry;
        CcVacbLevelWithBcbsEntries -= 1;
    } else {
        ReturnEntry = CcVacbLevelFreeList;
        CcVacbLevelFreeList = (PVACB *)*ReturnEntry;
        CcVacbLevelEntries -= 1;
    }

    //
    //  The first slot held the free-list link; clear it and assert that
    //  every slot in the block now holds the same (NULL) value.
    //

    *ReturnEntry = NULL;
    ASSERT(RtlCompareMemory(ReturnEntry, ReturnEntry + 1, VACB_LEVEL_BLOCK_SIZE - sizeof(PVACB)) ==
           (VACB_LEVEL_BLOCK_SIZE - sizeof(PVACB)));
    return ReturnEntry;
}

//
//  Push a Vacb-level block back onto the appropriate free list; the first
//  slot of the block becomes the free-list link.  CcVacbSpinLock must be
//  held.
//

_inline VOID CcDeallocateVacbLevel (
    IN PVACB *Entry,
    IN BOOLEAN DeallocatingBcbListHeads
    )

{
    if (DeallocatingBcbListHeads) {
        *Entry = (PVACB)CcVacbLevelWithBcbsFreeList;
        CcVacbLevelWithBcbsFreeList = Entry;
        CcVacbLevelWithBcbsEntries += 1;
    } else {
        *Entry = (PVACB)CcVacbLevelFreeList;
        CcVacbLevelFreeList = Entry;
        CcVacbLevelEntries += 1;
    }
}

//
//  Export the macros for inspecting the reference counts for
//  the multilevel Vacb array.
//

//
//  Return the address of the VACB_LEVEL_REFERENCE stored just past the
//  Vacb array block.  For level 0 of a MODIFIED_WRITE_DISABLED file an
//  extra block (presumably the Bcb listheads - compare the
//  NeedBcbListHeads parameter of CcPrefillVacbLevelZone) precedes the
//  reference structure.
//

_inline
PVACB_LEVEL_REFERENCE
VacbLevelReference (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN PVACB *VacbArray,
    IN ULONG Level
    )
{
    return (PVACB_LEVEL_REFERENCE)
           ((PCHAR)VacbArray +
            VACB_LEVEL_BLOCK_SIZE +
            (Level != 0 ?
             0 : (FlagOn( SharedCacheMap->Flags, MODIFIED_WRITE_DISABLED ) ?
                  VACB_LEVEL_BLOCK_SIZE : 0)));
}

//
//  Return nonzero if the given Vacb level has any normal or special
//  references outstanding.
//

_inline
ULONG
IsVacbLevelReferenced (
    IN PSHARED_CACHE_MAP SharedCacheMap,
    IN PVACB *VacbArray,
    IN ULONG Level
    )
{
    PVACB_LEVEL_REFERENCE VacbReference = VacbLevelReference( SharedCacheMap, VacbArray, Level );

    return VacbReference->Reference | VacbReference->SpecialReference;
}

//
//  Here is a page of macros stolen directly from Pinball...
//

//
//  The following macros are used to establish the semantics needed
//  to do a return from within a try-finally clause.  As a rule every
//  try clause must end with a label call try_exit.  For example,
//
//  try {
//              :
//              :
//
//  try_exit: NOTHING;
//  } finally {
//
//              :
//              :
//  }
//
//  Every return statement executed inside of a try clause should use the
//  try_return macro.  If the compiler fully supports the try-finally construct
//  then the macro should be
//
//      #define try_return(S)  { return(S); }
//
//  If the compiler does not support the try-finally construct then the macro
//  should be
//
//      #define try_return(S)  { S; goto try_exit; }
//

#define try_return(S) { S; goto try_exit; }

#ifdef CCDBG

extern LONG CcDebugTraceLevel;
extern LONG CcDebugTraceIndent;

#ifndef CCDBG_LOCK

//
//  Debug trace macros (unsynchronized variants).  NOTE(review): the thread
//  pointer is truncated to ULONG for the "%08lx" tag - harmless as a trace
//  tag but not a full pointer on 64-bit platforms.
//

#define DebugTrace(INDENT,LEVEL,X,Y) {                     \
    LONG _i;                                               \
    if (((LEVEL) == 0) || (CcDebugTraceLevel & (LEVEL))) { \
        _i = (ULONG)PsGetCurrentThread();                  \
        DbgPrint("%08lx:",_i);                             \
        if ((INDENT) < 0) {                                \
            CcDebugTraceIndent += (INDENT);                \
        }                                                  \
        if (CcDebugTraceIndent < 0) {                      \
            CcDebugTraceIndent = 0;                        \
        }                                                  \
        for (_i=0; _i<CcDebugTraceIndent; _i+=1) {         \
            DbgPrint(" ");                                 \
        }                                                  \
        DbgPrint(X,Y);                                     \
        if ((INDENT) > 0) {                                \
            CcDebugTraceIndent += (INDENT);                \
        }                                                  \
    }                                                      \
}

#define DebugTrace2(INDENT,LEVEL,X,Y,Z) {                  \
    LONG _i;                                               \
    if (((LEVEL) == 0) || (CcDebugTraceLevel & (LEVEL))) { \
        _i = (ULONG)PsGetCurrentThread();                  \
        DbgPrint("%08lx:",_i);                             \
        if ((INDENT) < 0) {                                \
            CcDebugTraceIndent += (INDENT);                \
        }                                                  \
        if (CcDebugTraceIndent < 0) {                      \
            CcDebugTraceIndent = 0;                        \
        }                                                  \
        for (_i=0; _i<CcDebugTraceIndent; _i+=1) {         \
            DbgPrint(" ");                                 \
        }                                                  \
        DbgPrint(X,Y,Z);                                   \
        if ((INDENT) > 0) {                                \
            CcDebugTraceIndent += (INDENT);                \
        }                                                  \
    }                                                      \
}

#define DebugDump(STR,LEVEL,PTR) {                         \
    LONG _i;                                               \
    VOID CcDump();                                         \
    if (((LEVEL) == 0) || (CcDebugTraceLevel & (LEVEL))) { \
        _i = (ULONG)PsGetCurrentThread();                  \
        DbgPrint("%08lx:",_i);                             \
        DbgPrint(STR);                                     \
        if (PTR != NULL) {CcDump(PTR);}                    \
        DbgBreakPoint();                                   \
    }                                                      \
}

#else // ndef CCDBG_LOCK

extern KSPIN_LOCK CcDebugTraceLock;

//
//  Debug trace macros, serialized under CcDebugTraceLock so output from
//  concurrent threads does not interleave.
//

#define DebugTrace(INDENT,LEVEL,X,Y) {                       \
    LONG _i;                                                 \
    KIRQL _oldIrql;                                          \
    if (((LEVEL) == 0) || (CcDebugTraceLevel & (LEVEL))) {   \
        _i = (ULONG)PsGetCurrentThread();                    \
        ExAcquireSpinLock( &CcDebugTraceLock, &_oldIrql );   \
        DbgPrint("%08lx:",_i);                               \
        if ((INDENT) < 0) {                                  \
            CcDebugTraceIndent += (INDENT);                  \
        }                                                    \
        if (CcDebugTraceIndent < 0) {                        \
            CcDebugTraceIndent = 0;                          \
        }                                                    \
        for (_i=0; _i<CcDebugTraceIndent; _i+=1) {           \
            DbgPrint(" ");                                   \
        }                                                    \
        DbgPrint(X,Y);                                       \
        if ((INDENT) > 0) {                                  \
            CcDebugTraceIndent += (INDENT);                  \
        }                                                    \
        ExReleaseSpinLock( &CcDebugTraceLock, _oldIrql );    \
    }                                                        \
}

#define DebugTrace2(INDENT,LEVEL,X,Y,Z) {                    \
    LONG _i;                                                 \
    KIRQL _oldIrql;                                          \
    if (((LEVEL) == 0) || (CcDebugTraceLevel & (LEVEL))) {   \
        _i = (ULONG)PsGetCurrentThread();                    \
        ExAcquireSpinLock( &CcDebugTraceLock, &_oldIrql );   \
        DbgPrint("%08lx:",_i);                               \
        if ((INDENT) < 0) {                                  \
            CcDebugTraceIndent += (INDENT);                  \
        }                                                    \
        if (CcDebugTraceIndent < 0) {                        \
            CcDebugTraceIndent = 0;                          \
        }                                                    \
        for (_i=0; _i<CcDebugTraceIndent; _i+=1) {           \
            DbgPrint(" ");                                   \
        }                                                    \
        DbgPrint(X,Y,Z);                                     \
        if ((INDENT) > 0) {                                  \
            CcDebugTraceIndent += (INDENT);                  \
        }                                                    \
        ExReleaseSpinLock( &CcDebugTraceLock, _oldIrql );    \
    }                                                        \
}

#define DebugDump(STR,LEVEL,PTR) {                           \
    LONG _i;                                                 \
    KIRQL _oldIrql;                                          \
    VOID CcDump();                                           \
    if (((LEVEL) == 0) || (CcDebugTraceLevel & (LEVEL))) {   \
        _i = (ULONG)PsGetCurrentThread();                    \
        ExAcquireSpinLock( &CcDebugTraceLock, &_oldIrql );   \
        DbgPrint("%08lx:",_i);                               \
        DbgPrint(STR);                                       \
        if (PTR != NULL) {CcDump(PTR);}                      \
        DbgBreakPoint();                                     \
        ExReleaseSpinLock( &CcDebugTraceLock, _oldIrql );    \
    }                                                        \
}

#endif // else ndef CCDBG_LOCK

#else

#undef CCDBG_LOCK

#define DebugTrace(INDENT,LEVEL,X,Y) {NOTHING;}

#define DebugTrace2(INDENT,LEVEL,X,Y,Z) {NOTHING;}

#define DebugDump(STR,LEVEL,PTR) {NOTHING;}

#endif // CCDBG

//
//  Global list of pinned Bcbs which may be examined for debug purposes
//

#if DBG

extern ULONG CcBcbCount;
extern LIST_ENTRY CcBcbList;

#endif

#endif  // _CCh_

Generated on Sat May 15 19:39:21 2004 for test by doxygen 1.3.7