
stktrace.c

/*++

Copyright (c) 1991  Microsoft Corporation

Module Name:

    stktrace.c

Abstract:

    This module implements routines to snapshot a set of stack back traces
    in a data base. Useful for heap allocators to track allocation requests
    cheaply.

Author:

    Steve Wood (stevewo) 29-Jan-1992

Revision History:

    17-May-1999 (silviuc) : added RtlWalkFrameChain that replaces the
    unsafe RtlCaptureStackBackTrace.

--*/

#include <ntos.h>
#include <ntrtl.h>
#include <nturtl.h>
#include <zwapi.h>
#include <stktrace.h>
#include <heap.h>
#include <heappriv.h>

BOOLEAN
NtdllOkayToLockRoutine(
    IN PVOID Lock
    );

#if !defined(RtlGetCallersAddress) && defined(_X86_) && (!NTOS_KERNEL_RUNTIME)

VOID
RtlGetCallersAddress(
    OUT PVOID *CallersAddress,
    OUT PVOID *CallersCaller
    )
/*++

Routine Description:

    This routine returns the first two callers on the current stack. It should
    be noted that the function can miss some of the callers in the presence of
    FPO optimization.

Arguments:

    CallersAddress - address to save the first caller.

    CallersCaller - address to save the second caller.

Return Value:

    None. If the function does not succeed in finding the two callers
    it will zero the addresses where it was supposed to write them.

Environment:

    X86, user mode, and only when a macro with the same name is not defined.

--*/

{
    PVOID BackTrace[ 2 ];
    ULONG Hash;
    USHORT Count;

    Count = RtlCaptureStackBackTrace(
                2,
                2,
                BackTrace,
                &Hash
                );

    if (ARGUMENT_PRESENT( CallersAddress )) {
        if (Count >= 1) {
            *CallersAddress = BackTrace[ 0 ];
        }
        else {
            *CallersAddress = NULL;
        }
    }

    if (ARGUMENT_PRESENT( CallersCaller )) {
        if (Count >= 2) {
            *CallersCaller = BackTrace[ 1 ];
        }
        else {
            *CallersCaller = NULL;
        }
    }

    return;
}

#endif // !defined(RtlGetCallersAddress) && defined(_X86_) && (!NTOS_KERNEL_RUNTIME)
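//
// A minimal usage sketch (hypothetical caller, not a routine of this module),
// assuming only the declarations above: an allocator could record who asked
// for a block by capturing its two immediate callers. Either pointer may come
// back NULL if the walk was cut short (e.g. by FPO).
//
//      {
//          PVOID Caller;
//          PVOID CallersCaller;
//
//          RtlGetCallersAddress( &Caller, &CallersCaller );
//
//          if (Caller != NULL) {
//              KdPrint(( "RTL: request from %p (called from %p)\n",
//                        Caller, CallersCaller ));
//          }
//      }
//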
// bugbug (silviuc): I do not think !FPO is correct below
// The reason is ExtendDb function uses VirtualAlloc.
#if defined(_X86_) && (!NTOS_KERNEL_RUNTIME || !FPO)

//
// Global per process stack trace database.
//

PSTACK_TRACE_DATABASE RtlpStackTraceDataBase;


PRTL_STACK_TRACE_ENTRY
RtlpExtendStackTraceDataBase(
    IN PRTL_STACK_TRACE_ENTRY InitialValue,
    IN ULONG Size
    );


NTSTATUS
RtlInitStackTraceDataBaseEx(
    IN PVOID CommitBase,
    IN ULONG CommitSize,
    IN ULONG ReserveSize,
    IN PRTL_INITIALIZE_LOCK_ROUTINE InitializeLockRoutine,
    IN PRTL_ACQUIRE_LOCK_ROUTINE AcquireLockRoutine,
    IN PRTL_RELEASE_LOCK_ROUTINE ReleaseLockRoutine,
    IN PRTL_OKAY_TO_LOCK_ROUTINE OkayToLockRoutine
    );

NTSTATUS
RtlInitStackTraceDataBaseEx(
    IN PVOID CommitBase,
    IN ULONG CommitSize,
    IN ULONG ReserveSize,
    IN PRTL_INITIALIZE_LOCK_ROUTINE InitializeLockRoutine,
    IN PRTL_ACQUIRE_LOCK_ROUTINE AcquireLockRoutine,
    IN PRTL_RELEASE_LOCK_ROUTINE ReleaseLockRoutine,
    IN PRTL_OKAY_TO_LOCK_ROUTINE OkayToLockRoutine
    )
{
    NTSTATUS Status;
    PSTACK_TRACE_DATABASE DataBase;

    DataBase = (PSTACK_TRACE_DATABASE)CommitBase;
    if (CommitSize == 0) {
        CommitSize = PAGE_SIZE;
        Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
                                          (PVOID *)&CommitBase,
                                          0,
                                          &CommitSize,
                                          MEM_COMMIT,
                                          PAGE_READWRITE
                                        );
        if (!NT_SUCCESS( Status )) {
            KdPrint(( "RTL: Unable to commit space to extend stack trace data base - Status = %lx\n",
                      Status
                   ));
            return Status;
        }

        DataBase->PreCommitted = FALSE;
    }
    else
    if (CommitSize == ReserveSize) {
        RtlZeroMemory( DataBase, sizeof( *DataBase ) );
        DataBase->PreCommitted = TRUE;
    }
    else {
        return STATUS_INVALID_PARAMETER;
    }

    DataBase->CommitBase = CommitBase;
    DataBase->NumberOfBuckets = 37;
    DataBase->NextFreeLowerMemory = (PCHAR)
        (&DataBase->Buckets[ DataBase->NumberOfBuckets ]);
    DataBase->NextFreeUpperMemory = (PCHAR)CommitBase + ReserveSize;

    if (!DataBase->PreCommitted) {
        DataBase->CurrentLowerCommitLimit = (PCHAR)CommitBase + CommitSize;
        DataBase->CurrentUpperCommitLimit = (PCHAR)CommitBase + ReserveSize;
    }
    else {
        RtlZeroMemory( &DataBase->Buckets[ 0 ],
                       DataBase->NumberOfBuckets * sizeof( DataBase->Buckets[ 0 ] )
                     );
    }

    DataBase->EntryIndexArray = (PRTL_STACK_TRACE_ENTRY *)DataBase->NextFreeUpperMemory;

    DataBase->AcquireLockRoutine = AcquireLockRoutine;
    DataBase->ReleaseLockRoutine = ReleaseLockRoutine;
    DataBase->OkayToLockRoutine = OkayToLockRoutine;

    Status = (InitializeLockRoutine)( &DataBase->Lock.CriticalSection );
    if (!NT_SUCCESS( Status )) {
        KdPrint(( "RTL: Unable to initialize stack trace data base CriticalSection, Status = %lx\n",
                  Status
               ));
        return( Status );
    }

    RtlpStackTraceDataBase = DataBase;
    return( STATUS_SUCCESS );
}
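//
// A sketch of the reserved region as laid out by the initialization above
// (lower addresses first):
//
//      CommitBase                   -> STACK_TRACE_DATABASE header and
//                                      Buckets[ NumberOfBuckets ]
//      NextFreeLowerMemory          -> RTL_STACK_TRACE_ENTRY records grow
//                                      upward from here
//
//              ... uncommitted gap, committed page by page on demand ...
//
//      NextFreeUpperMemory          -> PRTL_STACK_TRACE_ENTRY index slots
//                                      grow downward from here
//      CommitBase + ReserveSize     == EntryIndexArray (end of the reserve)
//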
NTSTATUS
RtlInitializeStackTraceDataBase(
    IN PVOID CommitBase,
    IN ULONG CommitSize,
    IN ULONG ReserveSize
    )
{
#ifdef NTOS_KERNEL_RUNTIME

    BOOLEAN
    ExOkayToLockRoutine(
        IN PVOID Lock
        );

    return RtlInitStackTraceDataBaseEx(
               CommitBase,
               CommitSize,
               ReserveSize,
               ExInitializeResource,
               (PRTL_ACQUIRE_LOCK_ROUTINE)ExAcquireResourceExclusive,
               (PRTL_RELEASE_LOCK_ROUTINE)ExReleaseResourceLite,
               ExOkayToLockRoutine
               );
#else // #ifdef NTOS_KERNEL_RUNTIME

    return RtlInitStackTraceDataBaseEx(
               CommitBase,
               CommitSize,
               ReserveSize,
               RtlInitializeCriticalSection,
               RtlEnterCriticalSection,
               RtlLeaveCriticalSection,
               NtdllOkayToLockRoutine
               );
#endif // #ifdef NTOS_KERNEL_RUNTIME
}


PSTACK_TRACE_DATABASE
RtlpAcquireStackTraceDataBase( VOID )
{
    if (RtlpStackTraceDataBase != NULL) {
        if (RtlpStackTraceDataBase->DumpInProgress ||
            !(RtlpStackTraceDataBase->OkayToLockRoutine)( &RtlpStackTraceDataBase->Lock.CriticalSection )
           ) {
            return( NULL );
        }

        (RtlpStackTraceDataBase->AcquireLockRoutine)( &RtlpStackTraceDataBase->Lock.CriticalSection );
    }

    return( RtlpStackTraceDataBase );
}

VOID
RtlpReleaseStackTraceDataBase( VOID )
{
    (RtlpStackTraceDataBase->ReleaseLockRoutine)( &RtlpStackTraceDataBase->Lock.CriticalSection );
    return;
}
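//
// A minimal initialization sketch (hypothetical caller and sizes, not part
// of this module), following the logic of RtlInitializeStackTraceDataBase
// above: reserve an address range first, then pass a zero CommitSize so the
// database commits pages on demand.
//
//      {
//          NTSTATUS Status;
//          PVOID BaseAddress = NULL;
//          ULONG ReserveSize = 0x100000;       // 1 MB, illustrative only
//
//          Status = ZwAllocateVirtualMemory( NtCurrentProcess(),
//                                            &BaseAddress,
//                                            0,
//                                            &ReserveSize,
//                                            MEM_RESERVE,
//                                            PAGE_READWRITE );
//
//          if (NT_SUCCESS( Status )) {
//              Status = RtlInitializeStackTraceDataBase( BaseAddress,
//                                                        0,
//                                                        ReserveSize );
//          }
//      }
//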
PRTL_STACK_TRACE_ENTRY
RtlpExtendStackTraceDataBase(
    IN PRTL_STACK_TRACE_ENTRY InitialValue,
    IN ULONG Size
    )
/*++

Routine Description:

    This routine extends the stack trace database in order to accommodate
    the new stack trace that has to be saved.

Arguments:

    InitialValue - stack trace to be saved.

    Size - size of the stack trace in bytes. Note that this is not the
        depth of the trace but rather `Depth * sizeof(PVOID)'.

Return Value:

    The address of the just saved stack trace or null in case we have hit
    the maximum size of the database or we get commit errors.

Environment:

    User mode.

    Note. In order to make all this code work in kernel mode we would have
    to rewrite this function, which relies on VirtualAlloc.

--*/

{
    NTSTATUS Status;
    PRTL_STACK_TRACE_ENTRY p, *pp;
    ULONG CommitSize;
    PSTACK_TRACE_DATABASE DataBase;

    DataBase = RtlpStackTraceDataBase;

    //
    // We will try to find space for one stack trace entry in the
    // upper part of the database.
    //

    pp = (PRTL_STACK_TRACE_ENTRY *)DataBase->NextFreeUpperMemory;

    if ((! DataBase->PreCommitted) &&
        ((PCHAR)(pp - 1) < (PCHAR)DataBase->CurrentUpperCommitLimit)) {

        //
        // No more committed space in the upper part of the database.
        // We need to extend it downwards.
        //

        DataBase->CurrentUpperCommitLimit =
            (PVOID)((PCHAR)DataBase->CurrentUpperCommitLimit - PAGE_SIZE);

        if (DataBase->CurrentUpperCommitLimit < DataBase->CurrentLowerCommitLimit) {

            //
            // No more space at all. We have run into the lower part of the
            // database, so we fail and move the UpperCommitLimit pointer
            // back up.
            //

            DataBase->CurrentUpperCommitLimit =
                (PVOID)((PCHAR)DataBase->CurrentUpperCommitLimit + PAGE_SIZE);

            return( NULL );
        }

        CommitSize = PAGE_SIZE;
        Status = ZwAllocateVirtualMemory(
                     NtCurrentProcess(),
                     (PVOID *)&DataBase->CurrentUpperCommitLimit,
                     0,
                     &CommitSize,
                     MEM_COMMIT,
                     PAGE_READWRITE
                     );

        if (!NT_SUCCESS( Status )) {

            //
            // We tried to extend the upper part of the database by one page
            // and failed, so we move the UpperCommitLimit pointer back up.
            //

            DataBase->CurrentUpperCommitLimit =
                (PVOID)((PCHAR)DataBase->CurrentUpperCommitLimit + PAGE_SIZE);

            KdPrint(( "RTL: Unable to commit space to extend stack trace data base - Status = %lx\n",
                      Status
                   ));
            return( NULL );
        }
    }

    //
    // We managed to make sure we have usable space in the upper part
    // therefore we take out one stack trace entry address.
    //

    DataBase->NextFreeUpperMemory -= sizeof( *pp );

    //
    // Now we will try to find space in the lower part of the database
    // for the actual stack trace.
    //

    p = (PRTL_STACK_TRACE_ENTRY)DataBase->NextFreeLowerMemory;

    if ((! DataBase->PreCommitted) &&
        (((PCHAR)p + Size) > (PCHAR)DataBase->CurrentLowerCommitLimit)) {

        //
        // We need to extend the lower part.
        //

        if (DataBase->CurrentLowerCommitLimit >= DataBase->CurrentUpperCommitLimit) {

            //
            // We have hit the maximum size of the database.
            //

            return( NULL );
        }

        //
        // Extend the lower part of the database by one page.
        //

        CommitSize = Size;
        Status = ZwAllocateVirtualMemory(
                     NtCurrentProcess(),
                     (PVOID *)&DataBase->CurrentLowerCommitLimit,
                     0,
                     &CommitSize,
                     MEM_COMMIT,
                     PAGE_READWRITE
                     );

        if (! NT_SUCCESS( Status )) {
            KdPrint(( "RTL: Unable to commit space to extend stack trace data base - Status = %lx\n",
                      Status
                   ));
            return( NULL );
        }

        DataBase->CurrentLowerCommitLimit =
            (PCHAR)DataBase->CurrentLowerCommitLimit + CommitSize;
    }

    //
    // Take out the space for the stack trace.
    //

    DataBase->NextFreeLowerMemory += Size;

    //
    // Deal with the precommitted database case. If the lower and upper
    // pointers have crossed each other then roll back and return failure.
    //

    if (DataBase->PreCommitted &&
        DataBase->NextFreeLowerMemory >= DataBase->NextFreeUpperMemory) {

        DataBase->NextFreeUpperMemory += sizeof( *pp );
        DataBase->NextFreeLowerMemory -= Size;
        return( NULL );
    }

    //
    // Save the stack trace in the database.
    //

    RtlMoveMemory( p, InitialValue, Size );
    p->HashChain = NULL;
    p->TraceCount = 0;
    p->Index = (USHORT)(++DataBase->NumberOfEntriesAdded);

    //
    // Save the address of the new stack trace entry in the
    // upper part of the database.
    //

    *--pp = p;

    //
    // Return the address of the saved stack trace entry.
    //

    return( p );
}
USHORT
RtlLogStackBackTrace(
    VOID
    )
/*++

Routine Description:

    This routine will capture the current stack trace (skipping the
    present function) and will save it in the global (per process)
    stack trace database. It should be noted that we do not save
    duplicate traces.

Arguments:

    None.

Return Value:

    Index of the stack trace saved. The index can be used by tools
    to access the trace data quickly. This is the reason why, at the end
    of the database, we save (growing downwards) a list of pointers to the
    trace entries; the index locates such a pointer in constant time.

    A zero index will be returned for error conditions (e.g. stack
    trace database not initialized).

Environment:

    User mode.

--*/

{
    PSTACK_TRACE_DATABASE DataBase;
    RTL_STACK_TRACE_ENTRY StackTrace;
    PRTL_STACK_TRACE_ENTRY p, *pp;
    ULONG Hash, RequestedSize, DepthSize;

    if (RtlpStackTraceDataBase == NULL) {
        return 0;
    }

    Hash = 0;

    //
    // Capture the stack trace. The try/except was useful
    // in the old days when the function did not validate
    // the stack frame chain. We keep it just to be defensive.
    //

    try {
        StackTrace.Depth = RtlCaptureStackBackTrace(
                               1,
                               MAX_STACK_DEPTH,
                               StackTrace.BackTrace,
                               &Hash
                               );
    }
    except(EXCEPTION_EXECUTE_HANDLER) {
        StackTrace.Depth = 0;
    }

    if (StackTrace.Depth == 0) {
        return 0;
    }

    //
    // Lock the global per-process stack trace database.
    //

    DataBase = RtlpAcquireStackTraceDataBase();

    if (DataBase == NULL) {
        return( 0 );
    }

    DataBase->NumberOfEntriesLookedUp++;

    try {

        //
        // We will try to find out if the trace has been saved in the past.
        // We find the right hash chain and then traverse it.
        //

        DepthSize = StackTrace.Depth * sizeof( StackTrace.BackTrace[ 0 ] );
        pp = &DataBase->Buckets[ Hash % DataBase->NumberOfBuckets ];

        while (p = *pp) {
            if (p->Depth == StackTrace.Depth &&
                RtlCompareMemory( &p->BackTrace[ 0 ],
                                  &StackTrace.BackTrace[ 0 ],
                                  DepthSize
                                ) == DepthSize
               ) {
                break;
            }
            else {
                pp = &p->HashChain;
            }
        }

        if (p == NULL) {

            //
            // We did not find the stack trace. We will extend the database
            // and save the new trace.
            //

            RequestedSize = FIELD_OFFSET( RTL_STACK_TRACE_ENTRY, BackTrace ) +
                            DepthSize;

            p = RtlpExtendStackTraceDataBase( &StackTrace, RequestedSize );

            //
            // If we managed to save the trace we need to link it as the last
            // element in the proper hash chain.
            //

            if (p != NULL) {
                *pp = p;
            }
        }
    }
    except(EXCEPTION_EXECUTE_HANDLER) {

        //
        // bugbug (silviuc): We should not be here. Right?
        //

        p = NULL;
    }

    //
    // Release the global trace database.
    //

    RtlpReleaseStackTraceDataBase();

    if (p != NULL) {
        p->TraceCount++;
        return( p->Index );
    }
    else {
        return( 0 );
    }

    return 0;
}
#endif // #if defined(_X86_) && (!NTOS_KERNEL_RUNTIME || !FPO)
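//
// A sketch (based on the layout described above, not a routine exported by
// this module) of how an index returned by RtlLogStackBackTrace maps back to
// its trace in constant time: the index array at the end of the reserved
// region is filled downward, so entry I lives at EntryIndexArray[ -I ].
// A real consumer would hold the database lock while reading the entry.
//
//      {
//          USHORT Index;
//          PRTL_STACK_TRACE_ENTRY Entry;
//
//          Index = RtlLogStackBackTrace();
//
//          if (Index != 0) {
//              Entry = RtlpStackTraceDataBase->EntryIndexArray[ -Index ];
//
//              //
//              // Entry->BackTrace[ 0 .. Entry->Depth - 1 ] holds the callers
//              // and Entry->TraceCount the number of times it was logged.
//              //
//          }
//      }
//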
#if defined(_X86_)

USHORT
RtlCaptureStackBackTrace(
    IN ULONG FramesToSkip,
    IN ULONG FramesToCapture,
    OUT PVOID *BackTrace,
    OUT PULONG BackTraceHash
    )
/*++

Routine Description:

    This routine walks up the stack frames, capturing the return address from
    each frame requested. This used to be implemented in assembly language and
    used to be unsafe in special contexts (DPC level). Right now it uses
    RtlWalkFrameChain, which validates the chain of pointers and guarantees
    not to take exceptions.

Arguments:

    FramesToSkip - frames detected but not included in the stack trace.

    FramesToCapture - frames to be captured in the stack trace buffer.
        One of the frames will be for RtlCaptureStackBackTrace.

    BackTrace - stack trace buffer.

    BackTraceHash - very simple hash value that can be used to organize
        hash tables. It is just an arithmetic sum of the pointers in the
        stack trace buffer.

Return Value:

    Number of return addresses returned in the stack trace buffer.

--*/
{
    PVOID Trace[ 2 * MAX_STACK_DEPTH ];
    ULONG FramesFound;
    ULONG HashValue;
    ULONG Index;

    //
    // One more frame to skip for the "capture" function (WalkFrameChain).
    //

    FramesToSkip++;

    //
    // Sanity checks.
    //

    if (FramesToCapture + FramesToSkip >= 2 * MAX_STACK_DEPTH) {
        return 0;
    }

    FramesFound = RtlWalkFrameChain (
                      Trace,
                      FramesToCapture + FramesToSkip,
                      0);

    if (FramesFound <= FramesToSkip) {
        return 0;
    }

    for (HashValue = 0, Index = 0; Index < FramesToCapture; Index++) {

        if (FramesToSkip + Index >= FramesFound) {
            break;
        }

        BackTrace[Index] = Trace[FramesToSkip + Index];
        HashValue += PtrToUlong(BackTrace[Index]);
    }

    *BackTraceHash = HashValue;
    return (USHORT)Index;
}
#endif
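//
// A minimal capture sketch (hypothetical caller, assuming only the routine
// above): grab up to eight callers, skipping the current frame, and use the
// hash to bucket the trace. Depth can be zero if the walk failed.
//
//      {
//          PVOID BackTrace[ 8 ];
//          ULONG Hash;
//          USHORT Depth;
//
//          Depth = RtlCaptureStackBackTrace( 1, 8, BackTrace, &Hash );
//
//          //
//          // BackTrace[ 0 .. Depth - 1 ] now holds return addresses,
//          // innermost caller first.
//          //
//      }
//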
//
// This section contains an algorithm for getting stack traces.
// It works only on x86. It is an improvement over
// RtlCaptureStackBackTrace which, for reasons that escape me, was written
// in assembly and was unsafe (could raise exceptions). The new function
// RtlWalkFrameChain is guaranteed not to take exceptions whatever the
// call context.
//
// Note. It might be a good idea to not BBT this code. In particular I am
// concerned about the only assembly instruction used in the whole code,
// the one that saves the value of the EBP register.
//

#ifdef NTOS_KERNEL_RUNTIME
#define _KERNEL_MODE_STACK_TRACES_ 1
#define _COLLECT_FRAME_WALK_STATISTICS_ 0
#else
#define _KERNEL_MODE_STACK_TRACES_ 0
#define _COLLECT_FRAME_WALK_STATISTICS_ 0
#endif

#define SIZE_1_KB ((ULONG_PTR) 0x400)
#define SIZE_1_GB ((ULONG_PTR) 0x40000000)

#define PAGE_START(address) (((ULONG_PTR)address) & ~((ULONG_PTR)PAGE_SIZE - 1))

VOID CollectFrameWalkStatistics (ULONG Index);

#if (( i386 ) && ( FPO ))
#pragma optimize( "y", off ) // disable FPO for consistent stack traces
#endif

ULONG
RtlWalkFrameChain (

    OUT PVOID *Callers,
    IN ULONG Count,
    IN ULONG Flags)

/*++

Routine Description:

    RtlWalkFrameChain

Description:

    This function tries to walk the EBP chain and fill out a vector of
    return addresses. The function works only on x86. It is possible that
    the function cannot fill the requested number of callers because somewhere
    on the stack we have a function compiled with FPO (the frame register (EBP)
    is used as a general purpose register). In this case the function will just
    return with a smaller count than requested. In kernel mode the function
    should not take any exceptions (page faults) because it can be called at
    all sorts of irql levels.

    The `Flags' parameter is used for future extensions. A zero value will be
    compatible with new stack walking algorithms.

    Note. The algorithm can be somewhat improved by disassembling the return
    addresses identified. However this is impractical in kernel mode because
    the function might get called at high irql levels where page faults are
    not allowed.

Return value:

    The number of identified return addresses on the stack. This can be less
    than the Count requested if the stack ends or we encounter an FPO compiled
    function.

--*/

{
#if defined(_X86_)

    ULONG_PTR Fp, NewFp, ReturnAddress;
    ULONG Index;
    ULONG_PTR StackEnd, StackStart;
    BOOLEAN Result;

    //
    // Get the current EBP pointer which is supposed to
    // be the start of the EBP chain.
    //

    _asm mov Fp, EBP;

    StackStart = Fp;

#if _KERNEL_MODE_STACK_TRACES_

    StackEnd = (ULONG_PTR)(KeGetCurrentThread()->StackBase);

    //
    // bugbug: find a reliable way to get the stack limit in kernel mode.
    // `StackBase' is not a reliable way to get the stack end in kernel
    // mode because we might execute a DPC routine on the thread's behalf.
    // There are a few other reasons why we cannot trust this completely.
    //
    // Note. The condition `PAGE_START(StackEnd) - PAGE_START(StackStart) > PAGE_SIZE'
    // is not totally safe. We can encounter a situation where in this case we
    // do not have the same stack. Can we?
    //
    // The DPC stack is actually the stack of the idle thread corresponding to
    // the current processor. Based on that we probably can figure out in almost
    // all contexts what the real limits of the stack are.
    //

    if ((StackStart > StackEnd)
        || (PAGE_START(StackEnd) - PAGE_START(StackStart) > PAGE_SIZE)) {

        StackEnd = (StackStart + PAGE_SIZE) & ~((ULONG_PTR)PAGE_SIZE - 1);

        //
        // Try to get one more page if possible. Note that this is not
        // 100% reliable because a non faulting address can fault if
        // appropriate locks are not held.
        //

        if (MmIsAddressValid ((PVOID)StackEnd)) {
            StackEnd += PAGE_SIZE;
        }
    }

#else

    StackEnd = (ULONG_PTR)(NtCurrentTeb()->NtTib.StackBase);

#endif // #if _KERNEL_MODE_STACK_TRACES_

    try {

        for (Index = 0; Index < Count; Index++) {

            if (Fp + sizeof(ULONG_PTR) >= StackEnd) {
                break;
            }

            NewFp = *((PULONG_PTR)(Fp + 0));
            ReturnAddress = *((PULONG_PTR)(Fp + sizeof(ULONG_PTR)));

            //
            // Figure out if the new frame pointer is ok. This validation
            // should avoid all exceptions in kernel mode because we always
            // read within the current thread's stack and the stack is
            // guaranteed to be in memory (no page faults). It is also guaranteed
            // that we do not take random exceptions in user mode because we always
            // keep the frame pointer within stack limits.
            //

            if (! (Fp < NewFp && NewFp < StackEnd)) {
                break;
            }

            //
            // Figure out if the return address is ok. If the return address
            // is a stack address or < 64K then something is wrong. There is
            // no reason to return garbage to the caller therefore we stop.
            //

            if (StackStart < ReturnAddress && ReturnAddress < StackEnd) {
                break;
            }

            if (ReturnAddress < 64 * SIZE_1_KB) {
                break;
            }

            //
            // Store the new fp and return address and move on.
            //

            Fp = NewFp;
            Callers[Index] = (PVOID)ReturnAddress;
        }
    }
    except (EXCEPTION_EXECUTE_HANDLER) {

        //
        // The frame traversal algorithm is written so that we should
        // not get any exception. Therefore if we get some exception
        // we better debug it.
        //
        // bugbug: enable bkpt only on checked builds
        // After we get some coverage on this we should leave it active
        // only on checked builds.
        //

        DbgPrint ("Unexpected exception in RtlWalkFrameChain ...\n");
        DbgBreakPoint ();
    }

    //
    // Return the number of return addresses identified on the stack.
    //

#if _COLLECT_FRAME_WALK_STATISTICS_
    CollectFrameWalkStatistics (Index);
#endif // #if _COLLECT_FRAME_WALK_STATISTICS_

    return Index;

#else

    return 0;

#endif // #if defined(_X86_)
}
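//
// A minimal sketch of calling the frame walker directly (hypothetical
// caller, assuming only the routine above): the Flags argument is reserved
// and should be zero, and the function returns however many frames it could
// validate, which may be fewer than requested.
//
//      {
//          PVOID Callers[ 16 ];
//          ULONG Found;
//          ULONG I;
//
//          Found = RtlWalkFrameChain (Callers, 16, 0);
//
//          for (I = 0; I < Found; I++) {
//              DbgPrint ("caller %u: %p\n", I, Callers[ I ]);
//          }
//      }
//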
#if _COLLECT_FRAME_WALK_STATISTICS_

KSPIN_LOCK FrameWalkStatisticsLock;
ULONG FrameWalkStatisticsCounters [32];
ULONG FrameWalkCollectStatisticsCalls;
BOOLEAN FrameWalkStatisticsInitialized;

VOID
CollectFrameWalkStatistics (

    ULONG Index)

/*++

Routine description:

    CollectFrameWalkStatistics

Description:

    This function computes the distribution of detectable chain
    lengths. This is used only for debugging the frame traversal
    algorithm. It proves that it is worth trying to get stack
    traces on optimized images because only about 8% of the calls
    cannot be resolved to more than two callers. A sample distribution
    computed by calling RtlWalkFrameChain for every call to
    ExAllocatePoolWithTag gave the results below:

        Length    Percentage
         0-2          5%
         3-5         20%
         6-10        40%
        10-16        35%

    With a failure rate of 5% it is worth using it.

--*/

{
    KIRQL PreviousIrql;
    ULONG I;
    ULONG Percentage;
    ULONG TotalPercentage;

    //
    // Spin lock initialization is not safe in the code below
    // but this code is used only during frame walk algorithm
    // development so there is no reason to make it bulletproof.
    //
    if (! FrameWalkStatisticsInitialized) {
        KeInitializeSpinLock (&FrameWalkStatisticsLock);
        FrameWalkStatisticsInitialized = TRUE;
    }

    KeAcquireSpinLock (
        &FrameWalkStatisticsLock,
        &PreviousIrql);

    FrameWalkCollectStatisticsCalls++;

    if (Index < 32) {
        FrameWalkStatisticsCounters[Index]++;
    }

    if (FrameWalkCollectStatisticsCalls != 0
        && (FrameWalkCollectStatisticsCalls % 60000 == 0)) {

        DbgPrint ("FrameWalk: %u calls \n", FrameWalkCollectStatisticsCalls);

        TotalPercentage = 0;

        for (I = 0; I < 32; I++) {

            Percentage = FrameWalkStatisticsCounters[I] * 100
                / FrameWalkCollectStatisticsCalls;

            DbgPrint ("FrameWalk: [%02u] %02u \n", I, Percentage);

            TotalPercentage += Percentage;
        }

        DbgPrint ("FrameWalk: total %u \n", TotalPercentage);
        DbgBreakPoint ();
    }

    KeReleaseSpinLock (
        &FrameWalkStatisticsLock,
        PreviousIrql);
}

#endif // #if _COLLECT_FRAME_WALK_STATISTICS_
