Main Page | Class Hierarchy | Class List | File List | Class Members | File Members

mtrr.c

Go to the documentation of this file.
00001 /*++ 00002 00003 Copyright (c) 1991 Microsoft Corporation 00004 00005 Module Name: 00006 00007 mtrr.c 00008 00009 Abstract: 00010 00011 This module implements interfaces that support manipulation of 00012 memory type range registers. 00013 00014 These entry points only exist on i386 machines. 00015 00016 Author: 00017 00018 Ken Reneris (kenr) 11-Oct-95 00019 00020 Environment: 00021 00022 Kernel mode only. 00023 00024 Revision History: 00025 00026 --*/ 00027 00028 #include "ki.h" 00029 #include "mtrr.h" 00030 00031 #define STATIC 00032 00033 #define IDBG 0 00034 00035 #if DBG 00036 #define DBGMSG(a) DbgPrint(a) 00037 #else 00038 #define DBGMSG(a) 00039 #endif 00040 00041 // 00042 // Internal declarations 00043 // 00044 00045 // 00046 // Range in generic terms 00047 // 00048 00049 typedef struct _ONE_RANGE { 00050 ULONGLONG Base; 00051 ULONGLONG Limit; 00052 UCHAR Type; 00053 } ONE_RANGE, *PONE_RANGE; 00054 00055 #define GROW_RANGE_TABLE 4 00056 00057 // 00058 // Range in specific mtrr terms 00059 // 00060 00061 typedef struct _MTRR_RANGE { 00062 MTRR_VARIABLE_BASE Base; 00063 MTRR_VARIABLE_MASK Mask; 00064 } MTRR_RANGE, *PMTRR_RANGE; 00065 00066 // 00067 // System static information concerning cached range types 00068 // 00069 00070 typedef struct _RANGE_INFO { 00071 00072 // 00073 // Global MTRR info 00074 // 00075 00076 MTRR_DEFAULT Default; // h/w mtrr default 00077 MTRR_CAPABILITIES Capabilities; // h/w mtrr Capabilities 00078 UCHAR DefaultCachedType; // default type for MmCached 00079 00080 // 00081 // Variable MTRR information 00082 // 00083 00084 BOOLEAN RangesValid; // Ranges initialized and valid. 00085 BOOLEAN MtrrWorkaround; // Work Around needed/not. 
00086 UCHAR NoRange; // No ranges currently in Ranges 00087 UCHAR MaxRange; // Max size of Ranges 00088 PONE_RANGE Ranges; // Current ranges as set into h/w 00089 00090 } RANGE_INFO, *PRANGE_INFO; 00091 00092 00093 // 00094 // Structure used while processing range database 00095 // 00096 00097 typedef struct _NEW_RANGE { 00098 // 00099 // Current Status 00100 // 00101 00102 NTSTATUS Status; 00103 00104 // 00105 // Generic info on new range 00106 // 00107 00108 ULONGLONG Base; 00109 ULONGLONG Limit; 00110 UCHAR Type; 00111 00112 // 00113 // MTRR image to be set into h/w 00114 // 00115 00116 PMTRR_RANGE MTRR; 00117 00118 // 00119 // RangeDatabase before edits were started 00120 // 00121 00122 UCHAR NoRange; 00123 PONE_RANGE Ranges; 00124 00125 // 00126 // IPI context to coordinate concurrent processor update 00127 // 00128 00129 ULONG NoMTRR; 00130 ULONG Processor; 00131 volatile ULONG TargetCount; 00132 volatile ULONG *TargetPhase; 00133 00134 } NEW_RANGE, *PNEW_RANGE; 00135 00136 // 00137 // Prototypes 00138 // 00139 00140 VOID 00141 KiInitializeMTRR ( 00142 IN BOOLEAN LastProcessor 00143 ); 00144 00145 BOOLEAN 00146 KiRemoveRange ( 00147 IN PNEW_RANGE NewRange, 00148 IN ULONGLONG Base, 00149 IN ULONGLONG Limit, 00150 IN PBOOLEAN RemoveThisType 00151 ); 00152 00153 VOID 00154 KiAddRange ( 00155 IN PNEW_RANGE NewRange, 00156 IN ULONGLONG Base, 00157 IN ULONGLONG Limit, 00158 IN UCHAR Type 00159 ); 00160 00161 VOID 00162 KiStartEffectiveRangeChange ( 00163 IN PNEW_RANGE NewRange 00164 ); 00165 00166 VOID 00167 KiCompleteEffectiveRangeChange ( 00168 IN PNEW_RANGE NewRange 00169 ); 00170 00171 STATIC ULONG 00172 KiRangeWeight ( 00173 IN PONE_RANGE Range 00174 ); 00175 00176 STATIC ULONG 00177 KiFindFirstSetLeftBit ( 00178 IN ULONGLONG Set 00179 ); 00180 00181 STATIC ULONG 00182 KiFindFirstSetRightBit ( 00183 IN ULONGLONG Set 00184 ); 00185 00186 VOID 00187 KiLoadMTRRTarget ( 00188 IN PKIPI_CONTEXT SignalDone, 00189 IN PVOID Context, 00190 IN PVOID Parameter2, 00191 IN 
PVOID Parameter3 00192 ); 00193 00194 NTSTATUS 00195 KiLoadMTRR ( 00196 IN PNEW_RANGE Context 00197 ); 00198 00199 VOID 00200 KiSynchronizeMTRRLoad ( 00201 IN PNEW_RANGE Context 00202 ); 00203 00204 ULONGLONG 00205 KiMaskToLength ( 00206 IN ULONGLONG Mask 00207 ); 00208 00209 ULONGLONG 00210 KiLengthToMask ( 00211 IN ULONGLONG Length 00212 ); 00213 00214 #if IDBG 00215 VOID 00216 KiDumpMTRR ( 00217 PUCHAR DebugString, 00218 PMTRR_RANGE MTRR 00219 ); 00220 #endif 00221 00222 // 00223 // --- AMD - Prototypes for AMD K6 MTRR Support functions. --- 00224 // 00225 00226 NTSTATUS 00227 KiAmdK6MtrrSetMemoryType ( 00228 IN ULONG BaseAddress, 00229 IN ULONG NumberOfBytes, 00230 IN MEMORY_CACHING_TYPE CacheType 00231 ); 00232 00233 VOID 00234 KiAmdK6MtrrWRMSR ( 00235 VOID 00236 ); 00237 00238 // --- AMD - End --- 00239 00240 #ifdef ALLOC_PRAGMA 00241 #pragma alloc_text(INIT,KiInitializeMTRR) 00242 #pragma alloc_text(PAGELK,KiRemoveRange) 00243 #pragma alloc_text(PAGELK,KiAddRange) 00244 #pragma alloc_text(PAGELK,KiStartEffectiveRangeChange) 00245 #pragma alloc_text(PAGELK,KiCompleteEffectiveRangeChange) 00246 #pragma alloc_text(PAGELK,KiRangeWeight) 00247 #pragma alloc_text(PAGELK,KiFindFirstSetLeftBit) 00248 #pragma alloc_text(PAGELK,KiFindFirstSetRightBit) 00249 #pragma alloc_text(PAGELK,KiLoadMTRR) 00250 #pragma alloc_text(PAGELK,KiLoadMTRRTarget) 00251 #pragma alloc_text(PAGELK,KiSynchronizeMTRRLoad) 00252 #pragma alloc_text(PAGELK,KiLengthToMask) 00253 #pragma alloc_text(PAGELK,KiMaskToLength) 00254 00255 #if IDBG 00256 #pragma alloc_text(PAGELK,KiDumpMTRR) 00257 #endif 00258 00259 #endif 00260 00261 // 00262 // KiRangeLock - Used to synchronize accesses to KiRangeInfo 00263 // 00264 00265 KSPIN_LOCK KiRangeLock; 00266 00267 // 00268 // KiRangeInfo - Range type mapping information. 
Details specific h/w support 00269 // and contains the current range database of how physical 00270 // addresses have been set 00271 00272 RANGE_INFO KiRangeInfo; 00273 00274 VOID 00275 KiInitializeMTRR ( 00276 IN BOOLEAN LastProcessor 00277 ) 00278 /*++ 00279 00280 Routine Description: 00281 00282 Called to incrementally initialize the physical range 00283 database feature. First processor's MTRR set is read into the 00284 physical range database. 00285 00286 Arguments: 00287 00288 LastProcessor - If set this is the last processor to execute this routine 00289 such that when this processor finishes, the initialization is complete. 00290 00291 Return Value: 00292 00293 None - if there was a problem the function 00294 KeSetPhysicalCacheTypeRange type is disabled. 00295 00296 --*/ 00297 { 00298 BOOLEAN Status; 00299 ULONG Index, Size; 00300 MTRR_DEFAULT Default; 00301 MTRR_CAPABILITIES Capabilities; 00302 NEW_RANGE NewRange; 00303 MTRR_VARIABLE_BASE MtrrBase; 00304 MTRR_VARIABLE_MASK MtrrMask; 00305 ULONGLONG Base, Mask, Length; 00306 BOOLEAN RemoveThisType[MTRR_TYPE_MAX]; 00307 NTSTATUS NtStatus; 00308 PKPRCB Prcb; 00309 00310 Status = TRUE; 00311 RtlZeroMemory (&NewRange, sizeof (NewRange)); 00312 NewRange.Status = STATUS_UNSUCCESSFUL; 00313 00314 // 00315 // If this is the first processor, initialize some fields 00316 // 00317 00318 if (KeGetPcr()->Number == 0) { 00319 KeInitializeSpinLock (&KiRangeLock); 00320 00321 KiRangeInfo.Capabilities.u.QuadPart = RDMSR(MTRR_MSR_CAPABILITIES); 00322 KiRangeInfo.Default.u.QuadPart = RDMSR(MTRR_MSR_DEFAULT); 00323 KiRangeInfo.DefaultCachedType = MTRR_TYPE_MAX; 00324 00325 // 00326 // If h/w mtrr support is not enabled, disable OS support 00327 // 00328 00329 if (!KiRangeInfo.Default.u.hw.MtrrEnabled || 00330 KiRangeInfo.Capabilities.u.hw.VarCnt == 0 || 00331 KiRangeInfo.Default.u.hw.Type != MTRR_TYPE_UC) { 00332 00333 DBGMSG("MTRR feature disabled.\n"); 00334 Status = FALSE; 00335 00336 } else { 00337 00338 // 00339 // If 
USWC type is supported by hardware, but the MTRR 00340 // feature is not set in KeFeatureBits, it is because 00341 // the HAL indicated USWC should not be used on this 00342 // machine. (Possibly due to shared memory clusters). 00343 // 00344 00345 if (KiRangeInfo.Capabilities.u.hw.UswcSupported && 00346 ((KeFeatureBits & KF_MTRR) == 0)) { 00347 00348 DBGMSG("KiInitializeMTRR: MTRR use globally disabled on this machine.\n"); 00349 KiRangeInfo.Capabilities.u.hw.UswcSupported = 0; 00350 } 00351 00352 // 00353 // Allocate initial range type database 00354 // 00355 00356 KiRangeInfo.NoRange = 0; 00357 KiRangeInfo.MaxRange = (UCHAR) KiRangeInfo.Capabilities.u.hw.VarCnt + GROW_RANGE_TABLE; 00358 KiRangeInfo.Ranges = ExAllocatePoolWithTag (NonPagedPool, 00359 sizeof(ONE_RANGE) * KiRangeInfo.MaxRange, 00360 ' eK'); 00361 RtlZeroMemory (KiRangeInfo.Ranges, sizeof(ONE_RANGE) * KiRangeInfo.MaxRange); 00362 } 00363 } 00364 00365 // 00366 // Workaround for cpu signatures 611, 612, 616 and 617 00367 // - if the request for setting a variable MTRR specifies 00368 // an address which is not 4M aligned or length is not 00369 // a multiple of 4M then possible problem for INVLPG inst. 00370 // Detect if workaround is required 00371 // 00372 00373 Prcb = KeGetCurrentPrcb(); 00374 if (Prcb->CpuType == 6 && 00375 (Prcb->CpuStep == 0x0101 || Prcb->CpuStep == 0x0102 || 00376 Prcb->CpuStep == 0x0106 || Prcb->CpuStep == 0x0107 )) { 00377 00378 if (strcmp(Prcb->VendorString, "GenuineIntel") == 0) { 00379 00380 // 00381 // Only do this if it's an Intel part, other 00382 // manufacturers may have the same stepping 00383 // numbers but no bug. 
00384 // 00385 00386 KiRangeInfo.MtrrWorkaround = TRUE; 00387 } 00388 } 00389 00390 // 00391 // If MTRR support disabled on first processor or if 00392 // buffer not allocated then fall through 00393 // 00394 00395 if (!KiRangeInfo.Ranges){ 00396 Status = FALSE; 00397 } else { 00398 00399 // 00400 // Verify MTRR support is symmetric 00401 // 00402 00403 Capabilities.u.QuadPart = RDMSR(MTRR_MSR_CAPABILITIES); 00404 00405 if ((Capabilities.u.hw.UswcSupported) && 00406 ((KeFeatureBits & KF_MTRR) == 0)) { 00407 DBGMSG ("KiInitializeMTRR: setting UswcSupported FALSE\n"); 00408 Capabilities.u.hw.UswcSupported = 0; 00409 } 00410 00411 Default.u.QuadPart = RDMSR(MTRR_MSR_DEFAULT); 00412 00413 if (Default.u.QuadPart != KiRangeInfo.Default.u.QuadPart || 00414 Capabilities.u.QuadPart != KiRangeInfo.Capabilities.u.QuadPart) { 00415 DBGMSG ("KiInitializeMTRR: asymmetric mtrr support\n"); 00416 Status = FALSE; 00417 } 00418 } 00419 00420 NewRange.Status = STATUS_SUCCESS; 00421 00422 // 00423 // MTRR registers should be identically set on each processor. 00424 // Ranges should be added to the range database only for one 00425 // processor. 00426 // 00427 00428 if (Status && (KeGetPcr()->Number == 0)) { 00429 #if IDBG 00430 KiDumpMTRR ("Processor MTRR:", NULL); 00431 #endif 00432 00433 // 00434 // Read current MTRR settings for various cached range types 00435 // and add them to the range database 00436 // 00437 00438 for (Index=0; Index < Capabilities.u.hw.VarCnt; Index++) { 00439 00440 MtrrBase.u.QuadPart = RDMSR(MTRR_MSR_VARIABLE_BASE+Index*2); 00441 MtrrMask.u.QuadPart = RDMSR(MTRR_MSR_VARIABLE_MASK+Index*2); 00442 00443 Mask = MtrrMask.u.QuadPart & MTRR_MASK_MASK; 00444 Base = MtrrBase.u.QuadPart & MTRR_MASK_BASE; 00445 00446 // 00447 // Note - the variable MTRR Mask does NOT contain the length 00448 // spanned by the variable MTRR. Thus just checking the Valid 00449 // Bit should be sufficient for identifying a valid MTRR. 
00450 // 00451 00452 if (MtrrMask.u.hw.Valid) { 00453 00454 Length = KiMaskToLength(Mask); 00455 00456 // 00457 // Check for non-contiguous MTRR mask. 00458 // 00459 00460 if ((Mask + Length) & MASK_OVERFLOW_MASK) { 00461 DBGMSG ("KiInitializeMTRR: Found non-contiguous MTRR mask!\n"); 00462 Status = FALSE; 00463 } 00464 00465 // 00466 // Add this MTRR to the range database 00467 // 00468 00469 Base &= Mask; 00470 KiAddRange ( 00471 &NewRange, 00472 Base, 00473 Base + Length - 1, 00474 (UCHAR) MtrrBase.u.hw.Type 00475 ); 00476 00477 // 00478 // Check for default cache type 00479 // 00480 00481 if (MtrrBase.u.hw.Type == MTRR_TYPE_WB) { 00482 KiRangeInfo.DefaultCachedType = MTRR_TYPE_WB; 00483 } 00484 00485 if (KiRangeInfo.DefaultCachedType == MTRR_TYPE_MAX && 00486 MtrrBase.u.hw.Type == MTRR_TYPE_WT) { 00487 KiRangeInfo.DefaultCachedType = MTRR_TYPE_WT; 00488 } 00489 } 00490 } 00491 00492 // 00493 // If a default type for "cached" was not found, assume write-back 00494 // 00495 00496 if (KiRangeInfo.DefaultCachedType == MTRR_TYPE_MAX) { 00497 DBGMSG ("KiInitializeMTRR: assume write-back\n"); 00498 KiRangeInfo.DefaultCachedType = MTRR_TYPE_WB; 00499 } 00500 } 00501 00502 // 00503 // Done 00504 // 00505 00506 if (!NT_SUCCESS(NewRange.Status)) { 00507 Status = FALSE; 00508 } 00509 00510 if (!Status) { 00511 DBGMSG ("KiInitializeMTRR: OS support for MTRRs disabled\n"); 00512 if (KiRangeInfo.Ranges != NULL) { 00513 ExFreePool (KiRangeInfo.Ranges); 00514 KiRangeInfo.Ranges = NULL; 00515 } 00516 } else { 00517 00518 // if last processor indicate initialization complete 00519 if (LastProcessor) { 00520 KiRangeInfo.RangesValid = TRUE; 00521 } 00522 } 00523 } 00524 00525 VOID 00526 KeRestoreMtrr ( 00527 VOID 00528 ) 00529 /*++ 00530 00531 Routine Description: 00532 00533 This function reloads the MTRR registers to be the current 00534 known values. This is used on a system wakeup to ensure the 00535 registers are sane. 00536 00537 N.B. 
The caller must have the PAGELK code locked 00538 00539 Arguments: 00540 00541 none 00542 00543 Return Value: 00544 00545 none 00546 00547 --*/ 00548 { 00549 NEW_RANGE NewRange; 00550 KIRQL OldIrql; 00551 00552 if (KiRangeInfo.RangesValid) { 00553 RtlZeroMemory (&NewRange, sizeof (NewRange)); 00554 KeAcquireSpinLock (&KiRangeLock, &OldIrql); 00555 KiStartEffectiveRangeChange (&NewRange); 00556 ASSERT (NT_SUCCESS(NewRange.Status)); 00557 KiCompleteEffectiveRangeChange (&NewRange); 00558 KeReleaseSpinLock (&KiRangeLock, OldIrql); 00559 return; 00560 } 00561 00562 // 00563 // If the processor is an AMD K6 with MTRR support then perform 00564 // the processor-specific implementation. 00565 // 00566 00567 if (KeFeatureBits & KF_AMDK6MTRR) { 00568 KeAcquireSpinLock (&KiRangeLock, &OldIrql); 00569 KiLoadMTRR(NULL); 00570 KeReleaseSpinLock (&KiRangeLock, OldIrql); 00571 } 00572 } 00573 00574 00575 NTSTATUS 00576 KeSetPhysicalCacheTypeRange ( 00577 IN PHYSICAL_ADDRESS PhysicalAddress, 00578 IN ULONG NumberOfBytes, 00579 IN MEMORY_CACHING_TYPE CacheType 00580 ) 00581 /*++ 00582 00583 Routine Description: 00584 00585 This function sets a physical range to a particular cache type. 00586 If the system does not support setting cache policies based on 00587 physical ranges, no action is taken. 00588 00589 Arguments: 00590 00591 PhysicalAddress - The starting address of the range being set 00592 00593 NumberOfBytes - The length, in bytes, of the range being set 00594 00595 CacheType - The caching type to which the physical range is 00596 to be set. 00597 00598 NonCached: 00599 Setting ranges to be NonCached is done for 00600 book keeping reasons. A return of SUCCESS when 00601 setting a range NonCached does not mean it has 00602 been physically set as NonCached. The caller 00603 must use a cache-disabled virtual pointer for 00604 any NonCached range. 00605 00606 Cached: 00607 A successful return indicates that the physical 00608 range has been set to cached. 
This mode requires 00609 the caller to be at irql < dispatch_level. 00610 00611 FrameBuffer: 00612 A successful return indicates that the physical 00613 range has been set to be framebuffer cached. 00614 This mode requires the caller to be at irql < 00615 dispatch_level. 00616 00617 USWCCached: 00618 This type is to be satisfied only via PAT and 00619 fails for the MTRR interface. 00620 00621 Return Value: 00622 00623 STATUS_SUCCESS - if success, the cache attributes of the physical range 00624 have been set. 00625 00626 STATUS_NOT_SUPPORTED - either feature not supported or not yet initialized, 00627 or MmWriteCombined type not supported and is 00628 requested, or input range does not match restrictions 00629 imposed by workarounds for current processor stepping 00630 or is below 1M (in the fixed MTRR range), or not yet 00631 initialized. 00632 00633 STATUS_UNSUCCESSFUL - Unable to satisfy request due to 00634 - Unable to map software image into limited # of 00635 hardware MTRRs. 00636 - irql was not < DISPATCH_LEVEL. 00637 - Failure due to other internal error (out of memory). 00638 00639 STATUS_INVALID_PARAMETER - Incorrect input memory type. 00640 00641 --*/ 00642 { 00643 KIRQL OldIrql; 00644 NEW_RANGE NewRange; 00645 BOOLEAN RemoveThisType[MTRR_TYPE_MAX]; 00646 BOOLEAN EffectRangeChange, AddToRangeDatabase; 00647 00648 // 00649 // If caller has requested the MmUSWCCached memory type then fail 00650 // - MmUSWCCached is supported via PAT and not otherwise 00651 // 00652 00653 if (CacheType == MmUSWCCached) { 00654 return STATUS_NOT_SUPPORTED; 00655 } 00656 00657 // 00658 // Addresses above 4GB, below 1MB or not page aligned and 00659 // page length are not supported. 
00660 // 00661 00662 if ((PhysicalAddress.HighPart != 0) || 00663 (PhysicalAddress.LowPart < (1 * 1024 * 1024)) || 00664 (PhysicalAddress.LowPart & 0xfff) || 00665 (NumberOfBytes & 0xfff) ) { 00666 return STATUS_NOT_SUPPORTED; 00667 } 00668 00669 ASSERT (NumberOfBytes != 0); 00670 00671 // 00672 // If the processor is an AMD K6 with MTRR support then perform 00673 // the processor-specific implementation. 00674 // 00675 00676 if (KeFeatureBits & KF_AMDK6MTRR) { 00677 00678 if ((CacheType != MmWriteCombined) && (CacheType != MmNonCached)) { 00679 return STATUS_NOT_SUPPORTED; 00680 } 00681 00682 return KiAmdK6MtrrSetMemoryType(PhysicalAddress.LowPart, 00683 NumberOfBytes, 00684 CacheType); 00685 } 00686 00687 // 00688 // If processor doesn't have the memory type range feature 00689 // return not supported. 00690 // 00691 00692 if (!KiRangeInfo.RangesValid) { 00693 return STATUS_NOT_SUPPORTED; 00694 } 00695 00696 // 00697 // Workaround for cpu signatures 611, 612, 616 and 617 00698 // - if the request for setting a variable MTRR specifies 00699 // an address which is not 4M aligned or length is not 00700 // a multiple of 4M then return status not supported 00701 // 00702 00703 if ((KiRangeInfo.MtrrWorkaround) && 00704 ((PhysicalAddress.LowPart & 0x3fffff) || 00705 (NumberOfBytes & 0x3fffff))) { 00706 00707 return STATUS_NOT_SUPPORTED; 00708 } 00709 00710 RtlZeroMemory (&NewRange, sizeof (NewRange)); 00711 NewRange.Base = PhysicalAddress.QuadPart; 00712 NewRange.Limit = NewRange.Base + NumberOfBytes - 1; 00713 00714 // 00715 // Determine what the new mtrr range type is. If setting NonCached then 00716 // the database need not be updated to reflect the virtual change. This 00717 // is because non-cached virtual pointers are mapped as cache disabled. 
00718 // 00719 00720 EffectRangeChange = TRUE; 00721 AddToRangeDatabase = TRUE; 00722 switch (CacheType) { 00723 case MmNonCached: 00724 NewRange.Type = MTRR_TYPE_UC; 00725 00726 // 00727 // NonCached ranges do not need to be reflected into the h/w state 00728 // as all non-cached ranges are mapped with cache-disabled pointers. 00729 // This also means that cache-disabled ranges do not need to 00730 // be put into mtrrs, or held in the range, regardless of the default 00731 // range type. 00732 // 00733 00734 EffectRangeChange = FALSE; 00735 AddToRangeDatabase = FALSE; 00736 break; 00737 00738 case MmCached: 00739 NewRange.Type = KiRangeInfo.DefaultCachedType; 00740 break; 00741 00742 case MmWriteCombined: 00743 NewRange.Type = MTRR_TYPE_USWC; 00744 00745 // 00746 // If USWC type isn't supported, then request can not be honored 00747 // 00748 00749 if (!KiRangeInfo.Capabilities.u.hw.UswcSupported) { 00750 DBGMSG ("KeSetPhysicalCacheTypeRange: USWC not supported\n"); 00751 return STATUS_NOT_SUPPORTED; 00752 } 00753 break; 00754 00755 default: 00756 DBGMSG ("KeSetPhysicalCacheTypeRange: no such cache type\n"); 00757 return STATUS_INVALID_PARAMETER; 00758 break; 00759 } 00760 00761 NewRange.Status = STATUS_SUCCESS; 00762 00763 // 00764 // The default type is UC thus the range is still mapped using 00765 // a Cache Disabled VirtualPointer and hence it need not be added. 00766 // 00767 00768 // 00769 // If h/w needs to be updated, lock down the code required to effect the change 00770 // 00771 00772 if (EffectRangeChange) { 00773 if (KeGetCurrentIrql() >= DISPATCH_LEVEL) { 00774 00775 // 00776 // Code can not be locked down. Supplying a new range type requires 00777 // that the caller calls at irql < dispatch_level. 
00778 // 00779 00780 DBGMSG ("KeSetPhysicalCacheTypeRange failed due to calling IRQL == DISPATCH_LEVEL\n"); 00781 return STATUS_UNSUCCESSFUL; 00782 } 00783 00784 MmLockPagableSectionByHandle(ExPageLockHandle); 00785 } 00786 00787 // 00788 // Serialize the range type database 00789 // 00790 00791 KeAcquireSpinLock (&KiRangeLock, &OldIrql); 00792 00793 // 00794 // If h/w is going to need to be updated, then start an effective range change 00795 // 00796 00797 if (EffectRangeChange) { 00798 KiStartEffectiveRangeChange (&NewRange); 00799 } 00800 00801 if (NT_SUCCESS (NewRange.Status)) { 00802 00803 // 00804 // If the new range is NonCached, then don't remove standard memory 00805 // caching types 00806 // 00807 00808 memset (RemoveThisType, TRUE, MTRR_TYPE_MAX); 00809 if (NewRange.Type != MTRR_TYPE_UC) { 00810 // 00811 // If the requested type is uncached then the physical 00812 // memory region is mapped using a cache disabled virtual pointer. 00813 // The effective memory type for that region will be the lowest 00814 // common denominator of the MTRR type and the cache type in the 00815 // PTE. Therefore for a request of type UC, the effective type 00816 // will be UC irrespective of the MTRR settings in that range. 00817 // Hence it is not necessary to remove the existing MTRR settings 00818 // (if any) for that range. 
00819 // 00820 00821 // 00822 // Clip/remove any ranges in the target area 00823 // 00824 00825 KiRemoveRange (&NewRange, NewRange.Base, NewRange.Limit, RemoveThisType); 00826 } 00827 00828 // 00829 // If needed, add new range type 00830 // 00831 00832 if (AddToRangeDatabase) { 00833 ASSERT (EffectRangeChange == TRUE); 00834 KiAddRange (&NewRange, NewRange.Base, NewRange.Limit, NewRange.Type); 00835 } 00836 00837 // 00838 // If this is an effect range change, then complete it 00839 // 00840 00841 if (EffectRangeChange) { 00842 KiCompleteEffectiveRangeChange (&NewRange); 00843 } 00844 } 00845 00846 KeReleaseSpinLock (&KiRangeLock, OldIrql); 00847 if (EffectRangeChange) { 00848 MmUnlockPagableImageSection(ExPageLockHandle); 00849 } 00850 00851 return NewRange.Status; 00852 } 00853 00854 BOOLEAN 00855 KiRemoveRange ( 00856 IN PNEW_RANGE NewRange, 00857 IN ULONGLONG Base, 00858 IN ULONGLONG Limit, 00859 IN PBOOLEAN RemoveThisType 00860 ) 00861 /*++ 00862 00863 Routine Description: 00864 00865 This function removes any range overlapping with the passed range, of 00866 type supplied in RemoveThisType from the global range database. 00867 00868 Arguments: 00869 00870 NewRange - Context information 00871 00872 Base - Base & Limit signify the first & last address of a range 00873 Limit - which is to be removed from the range database 00874 00875 RemoveThisType - A TRUE flag for each type which can not overlap the 00876 target range 00877 00878 00879 Return Value: 00880 00881 TRUE - if the range database was altered such that it may no longer 00882 be sorted. 
00883 00884 --*/ 00885 { 00886 ULONG i; 00887 PONE_RANGE Range; 00888 BOOLEAN DatabaseNeedsSorted; 00889 00890 00891 DatabaseNeedsSorted = FALSE; 00892 00893 // 00894 // Check each range 00895 // 00896 00897 for (i=0, Range=KiRangeInfo.Ranges; i < KiRangeInfo.NoRange; i++, Range++) { 00898 00899 // 00900 // If this range type doesn't need to be altered, skip it 00901 // 00902 00903 if (!RemoveThisType[Range->Type]) { 00904 continue; 00905 } 00906 00907 // 00908 // Check range to see if it overlaps with range being removed 00909 // 00910 00911 if (Range->Base < Base) { 00912 00913 if (Range->Limit >= Base && Range->Limit <= Limit) { 00914 00915 // 00916 // Truncate range to not overlap with area being removed 00917 // 00918 00919 Range->Limit = Base - 1; 00920 } 00921 00922 if (Range->Limit > Limit) { 00923 00924 // 00925 // Target area is contained totally within this area. 00926 // Split into two ranges 00927 // 00928 00929 // 00930 // Add range at end 00931 // 00932 00933 DatabaseNeedsSorted = TRUE; 00934 KiAddRange ( 00935 NewRange, 00936 Limit+1, 00937 Range->Limit, 00938 Range->Type 00939 ); 00940 00941 // 00942 // Turn current range into range at beginning 00943 // 00944 00945 Range->Limit = Base - 1; 00946 } 00947 00948 } else { 00949 00950 // Range->Base >= Base 00951 00952 if (Range->Base <= Limit) { 00953 if (Range->Limit <= Limit) { 00954 // 00955 // This range is totally within the target area. Remove it. 
00956 // 00957 00958 DatabaseNeedsSorted = TRUE; 00959 KiRangeInfo.NoRange -= 1; 00960 Range->Base = KiRangeInfo.Ranges[KiRangeInfo.NoRange].Base; 00961 Range->Limit = KiRangeInfo.Ranges[KiRangeInfo.NoRange].Limit; 00962 Range->Type = KiRangeInfo.Ranges[KiRangeInfo.NoRange].Type; 00963 00964 // 00965 // recheck at current location 00966 // 00967 00968 i -= 1; 00969 Range -= 1; 00970 00971 } else { 00972 00973 // 00974 // Bump beginning past area being removed 00975 // 00976 00977 Range->Base = Limit + 1; 00978 } 00979 } 00980 } 00981 } 00982 00983 if (!NT_SUCCESS (NewRange->Status)) { 00984 DBGMSG ("KiRemoveRange: failure\n"); 00985 } 00986 00987 return DatabaseNeedsSorted; 00988 } 00989 00990 00991 VOID 00992 KiAddRange ( 00993 IN PNEW_RANGE NewRange, 00994 IN ULONGLONG Base, 00995 IN ULONGLONG Limit, 00996 IN UCHAR Type 00997 ) 00998 /*++ 00999 01000 Routine Description: 01001 01002 This function adds the passed range to the global range database. 01003 01004 Arguments: 01005 01006 NewRange - Context information 01007 01008 Base - Base & Limit signify the first & last address of a range 01009 Limit - which is to be added to the range database 01010 01011 Type - Type of caching required for this range 01012 01013 Return Value: 01014 01015 None - Context is updated with an error if the table has overflowed 01016 01017 --*/ 01018 { 01019 PONE_RANGE Range, OldRange; 01020 ULONG size; 01021 01022 if (KiRangeInfo.NoRange >= KiRangeInfo.MaxRange) { 01023 01024 // 01025 // Table is out of space, get a bigger one 01026 // 01027 01028 OldRange = KiRangeInfo.Ranges; 01029 size = sizeof(ONE_RANGE) * (KiRangeInfo.MaxRange + GROW_RANGE_TABLE); 01030 Range = ExAllocatePoolWithTag (NonPagedPool, size, ' eK'); 01031 01032 if (!Range) { 01033 NewRange->Status = STATUS_UNSUCCESSFUL; 01034 return ; 01035 } 01036 01037 // 01038 // Grow table 01039 // 01040 01041 RtlZeroMemory (Range, size); 01042 RtlCopyMemory (Range, OldRange, sizeof(ONE_RANGE) * KiRangeInfo.MaxRange); 01043 
KiRangeInfo.Ranges = Range; 01044 KiRangeInfo.MaxRange += GROW_RANGE_TABLE; 01045 ExFreePool (OldRange); 01046 } 01047 01048 // 01049 // Add new entry to table 01050 // 01051 01052 KiRangeInfo.Ranges[KiRangeInfo.NoRange].Base = Base; 01053 KiRangeInfo.Ranges[KiRangeInfo.NoRange].Limit = Limit; 01054 KiRangeInfo.Ranges[KiRangeInfo.NoRange].Type = Type; 01055 KiRangeInfo.NoRange += 1; 01056 } 01057 01058 01059 VOID 01060 KiStartEffectiveRangeChange ( 01061 IN PNEW_RANGE NewRange 01062 ) 01063 /*++ 01064 01065 Routine Description: 01066 01067 This function sets up the context information required to 01068 track & later effect a range change in hardware 01069 01070 Arguments: 01071 01072 NewRange - Context information 01073 01074 Return Value: 01075 01076 None 01077 01078 --*/ 01079 { 01080 ULONG size; 01081 01082 // 01083 // Allocate working space for MTRR image 01084 // 01085 01086 size = sizeof(MTRR_RANGE) * ((ULONG) KiRangeInfo.Capabilities.u.hw.VarCnt + 1); 01087 NewRange->MTRR = ExAllocatePoolWithTag (NonPagedPool, size, ' eK'); 01088 if (!NewRange->MTRR) { 01089 NewRange->Status = STATUS_UNSUCCESSFUL; 01090 return ; 01091 } 01092 01093 RtlZeroMemory (NewRange->MTRR, size); 01094 01095 // 01096 // Save current range information in case of an error 01097 // 01098 01099 size = sizeof(ONE_RANGE) * KiRangeInfo.NoRange; 01100 NewRange->NoRange = KiRangeInfo.NoRange; 01101 NewRange->Ranges = ExAllocatePoolWithTag (NonPagedPool, size, ' eK'); 01102 if (!NewRange->Ranges) { 01103 NewRange->Status = STATUS_UNSUCCESSFUL; 01104 return ; 01105 } 01106 01107 RtlCopyMemory (NewRange->Ranges, KiRangeInfo.Ranges, size); 01108 } 01109 01110 01111 VOID 01112 KiCompleteEffectiveRangeChange ( 01113 IN PNEW_RANGE NewRange 01114 ) 01115 /*++ 01116 01117 Routine Description: 01118 01119 This function commits the range database to hardware, or backs 
01121 01122 Arguments: 01123 01124 NewRange - Context information 01125 01126 Return Value: 01127 01128 None 01129 01130 --*/ 01131 { 01132 BOOLEAN Restart; 01133 ULONG Index, Index2, RemIndex2, NoMTRR; 01134 ULONGLONG BestLength, WhichMtrr; 01135 ULONGLONG CurrLength; 01136 ULONGLONG l, Base, Length, MLength; 01137 PONE_RANGE Range; 01138 ONE_RANGE OneRange; 01139 PMTRR_RANGE MTRR; 01140 BOOLEAN RoundDown; 01141 BOOLEAN RemoveThisType[MTRR_TYPE_MAX]; 01142 PKPRCB Prcb; 01143 KIRQL OldIrql, OldIrql2; 01144 KAFFINITY TargetProcessors; 01145 01146 01147 ASSERT (KeGetCurrentIrql() == DISPATCH_LEVEL); 01148 Prcb = KeGetCurrentPrcb(); 01149 01150 // 01151 // Round all ranges, according to type, to match what h/w can support 01152 // 01153 01154 for (Index=0; Index < KiRangeInfo.NoRange; Index++) { 01155 Range = &KiRangeInfo.Ranges[Index]; 01156 01157 // 01158 // Determine rounding for this range type 01159 // 01160 01161 RoundDown = TRUE; 01162 if (Range->Type == MTRR_TYPE_UC) { 01163 RoundDown = FALSE; 01164 } 01165 01166 // 01167 // Apply rounding 01168 // 01169 01170 if (RoundDown) { 01171 Range->Base = (Range->Base + MTRR_PAGE_SIZE - 1) & MTRR_PAGE_MASK; 01172 Range->Limit = ((Range->Limit+1) & MTRR_PAGE_MASK)-1; 01173 } else { 01174 Range->Base = (Range->Base & MTRR_PAGE_MASK); 01175 Range->Limit = ((Range->Limit + MTRR_PAGE_SIZE) & MTRR_PAGE_MASK)-1; 01176 } 01177 } 01178 01179 do { 01180 Restart = FALSE; 01181 01182 // 01183 // Sort the ranges by base address 01184 // 01185 01186 for (Index=0; Index < KiRangeInfo.NoRange; Index++) { 01187 Range = &KiRangeInfo.Ranges[Index]; 01188 01189 for (Index2=Index+1; Index2 < KiRangeInfo.NoRange; Index2++) { 01190 01191 if (KiRangeInfo.Ranges[Index2].Base < Range->Base) { 01192 01193 // 01194 // Swap KiRangeInfo.Ranges[Index] with KiRangeInfo.Ranges[Index2] 01195 // 01196 01197 OneRange = *Range; 01198 *Range = KiRangeInfo.Ranges[Index2]; 01199 KiRangeInfo.Ranges[Index2] = OneRange; 01200 } 01201 } 01202 } 01203 01204 // 
01205 // At this point the range database is sorted on 01206 // base address. Scan range database combining adjacent and 01207 // overlapping ranges of the same type 01208 // 01209 01210 for (Index=0; Index < (ULONG) KiRangeInfo.NoRange-1; Index++) { 01211 Range = &KiRangeInfo.Ranges[Index]; 01212 01213 // 01214 // Scan the range database. If ranges are adjacent/overlap and are of 01215 // the same type, combine them. 01216 // 01217 01218 for (Index2 = Index+1; Index2 < (ULONG) KiRangeInfo.NoRange; Index2++) { 01219 01220 l = Range[0].Limit + 1; 01221 if (l < Range[0].Limit) { 01222 l = Range[0].Limit; 01223 } 01224 01225 if (l >= KiRangeInfo.Ranges[Index2].Base && 01226 Range[0].Type == KiRangeInfo.Ranges[Index2].Type) { 01227 01228 // 01229 // Increase Range[0] limit to cover Range[Index2] 01230 // 01231 01232 if (KiRangeInfo.Ranges[Index2].Limit > Range[0].Limit) { 01233 Range[0].Limit = KiRangeInfo.Ranges[Index2].Limit; 01234 } 01235 01236 // 01237 // Remove KiRangeInfo.Ranges[Index2] 01238 // 01239 01240 if (Index2 < (ULONG) KiRangeInfo.NoRange - 1 ) { 01241 01242 // 01243 // Copy everything from Index2 till end 01244 // of range list. # Entries to copy is 01245 // (KiRangeInfo.NoRange -1) - (Index2+1) + 1 01246 // 01247 01248 RtlCopyMemory( 01249 &(KiRangeInfo.Ranges[Index2]), 01250 &(KiRangeInfo.Ranges[Index2+1]), 01251 sizeof(ONE_RANGE) * (KiRangeInfo.NoRange-Index2-1) 01252 ); 01253 } 01254 01255 KiRangeInfo.NoRange -= 1; 01256 01257 // 01258 // Recheck current location 01259 // 01260 01261 Index2 -= 1; 01262 } 01263 } 01264 } 01265 01266 // 01267 // At this point the range database is sorted on base 01268 // address and adjacent/overlapping ranges of the same 01269 // type are combined. 
Check for overlapping ranges - 01270 // If legal then allow else truncate the less "weighty" range 01271 // 01272 01273 for (Index = 0; Index < (ULONG) KiRangeInfo.NoRange-1 && !Restart; Index++) { 01274 01275 Range = &KiRangeInfo.Ranges[Index]; 01276 01277 l = Range[0].Limit + 1; 01278 if (l < Range[0].Limit) { 01279 l = Range[0].Limit; 01280 } 01281 01282 // 01283 // If ranges overlap and are not of same type, and if the 01284 // overlap is not legal then carve them to the best cache type 01285 // available. 01286 // 01287 01288 for (Index2 = Index+1; Index2 < (ULONG) KiRangeInfo.NoRange && !Restart; Index2++) { 01289 01290 if (l > KiRangeInfo.Ranges[Index2].Base) { 01291 01292 if (Range[0].Type == MTRR_TYPE_UC || 01293 KiRangeInfo.Ranges[Index2].Type == MTRR_TYPE_UC) { 01294 01295 // 01296 // Overlap of a UC type with a range of any other type is 01297 // legal 01298 // 01299 01300 } else if ((Range[0].Type == MTRR_TYPE_WT && 01301 KiRangeInfo.Ranges[Index2].Type == MTRR_TYPE_WB) || 01302 (Range[0].Type == MTRR_TYPE_WB && 01303 KiRangeInfo.Ranges[Index2].Type == MTRR_TYPE_WT) ) { 01304 // 01305 // Overlap of WT and WB range is legal. The overlap range will 01306 // be WT. 01307 // 01308 01309 } else { 01310 01311 // 01312 // This is an illegal overlap and we need to carve the ranges 01313 // to remove the overlap. 01314 // 01315 // Pick range which has the cache type which should be used for 01316 // the overlapped area 01317 // 01318 01319 if (KiRangeWeight(&Range[0]) > KiRangeWeight(&(KiRangeInfo.Ranges[Index2]))){ 01320 RemIndex2 = Index2; 01321 } else { 01322 RemIndex2 = Index; 01323 } 01324 01325 // 01326 // Remove ranges of type which do not belong in the overlapped area 01327 // 01328 01329 RtlZeroMemory (RemoveThisType, MTRR_TYPE_MAX); 01330 RemoveThisType[KiRangeInfo.Ranges[RemIndex2].Type] = TRUE; 01331 01332 // 01333 // Remove just the overlapped portion of the range. 
01334 // 01335 01336 Restart = KiRemoveRange ( 01337 NewRange, 01338 KiRangeInfo.Ranges[Index2].Base, 01339 (Range[0].Limit < KiRangeInfo.Ranges[Index2].Limit ? 01340 Range[0].Limit : KiRangeInfo.Ranges[Index2].Limit), 01341 RemoveThisType 01342 ); 01343 } 01344 } 01345 } 01346 } 01347 01348 } while (Restart); 01349 01350 // 01351 // The range database is now rounded to fit in the h/w and sorted. 01352 // Attempt to build MTRR settings which exactly describe the ranges 01353 // 01354 01355 MTRR = NewRange->MTRR; 01356 NoMTRR = 0; 01357 for (Index=0;NT_SUCCESS(NewRange->Status)&& Index<KiRangeInfo.NoRange;Index++) { 01358 Range = &KiRangeInfo.Ranges[Index]; 01359 01360 // 01361 // Build MTRRs to fit this range 01362 // 01363 01364 Base = Range->Base; 01365 Length = Range->Limit - Base + 1; 01366 01367 while (Length) { 01368 01369 // 01370 // Compute MTRR length for current range base & length 01371 // 01372 01373 if (Base == 0) { 01374 MLength = Length; 01375 } else { 01376 MLength = (ULONGLONG) 1 << KiFindFirstSetRightBit(Base); 01377 } 01378 if (MLength > Length) { 01379 MLength = Length; 01380 } 01381 01382 l = (ULONGLONG) 1 << KiFindFirstSetLeftBit (MLength); 01383 if (MLength > l) { 01384 MLength = l; 01385 } 01386 01387 // 01388 // Store it in the next MTRR 01389 // 01390 01391 MTRR[NoMTRR].Base.u.QuadPart = Base; 01392 MTRR[NoMTRR].Base.u.hw.Type = Range->Type; 01393 MTRR[NoMTRR].Mask.u.QuadPart = KiLengthToMask(MLength); 01394 MTRR[NoMTRR].Mask.u.hw.Valid = 1; 01395 NoMTRR += 1; 01396 01397 // 01398 // Adjust off amount of data covered by that last MTRR 01399 // 01400 01401 Base += MLength; 01402 Length -= MLength; 01403 01404 // 01405 // If there are too many MTRRs, and currently setting a 01406 // Non-USWC range try to remove a USWC MTRR. 01407 // (ie, convert some MmWriteCombined to MmNonCached). 
01408 // 01409 01410 if (NoMTRR > (ULONG) KiRangeInfo.Capabilities.u.hw.VarCnt) { 01411 01412 if (Range->Type != MTRR_TYPE_USWC) { 01413 01414 // 01415 // Find smallest USWC type and drop it 01416 // 01417 // This is okay only if the default type is UC. 01418 // Default type should always be UC unless BIOS changes 01419 // it. Still ASSERT! 01420 // 01421 01422 ASSERT(KiRangeInfo.Default.u.hw.Type == MTRR_TYPE_UC); 01423 01424 BestLength = (ULONGLONG) 1 << (MTRR_MAX_RANGE_SHIFT + 1); 01425 01426 for (Index2=0; Index2 < KiRangeInfo.Capabilities.u.hw.VarCnt; Index2++) { 01427 01428 if (MTRR[Index2].Base.u.hw.Type == MTRR_TYPE_USWC) { 01429 01430 CurrLength = KiMaskToLength(MTRR[Index2].Mask.u.QuadPart & 01431 MTRR_MASK_MASK); 01432 01433 if (CurrLength < BestLength) { 01434 WhichMtrr = Index2; 01435 BestLength = CurrLength; 01436 } 01437 } 01438 } 01439 01440 if (BestLength == ((ULONGLONG) 1 << (MTRR_MAX_RANGE_SHIFT + 1))) { 01441 // 01442 // Range was not found which could be dropped. Abort process 01443 // 01444 01445 NewRange->Status = STATUS_UNSUCCESSFUL; 01446 Length = 0; 01447 01448 } else { 01449 // 01450 // Remove WhichMtrr 01451 // 01452 01453 NoMTRR -= 1; 01454 MTRR[WhichMtrr] = MTRR[NoMTRR]; 01455 } 01456 01457 } else { 01458 01459 NewRange->Status = STATUS_UNSUCCESSFUL; 01460 Length =0; 01461 } 01462 } 01463 } 01464 } 01465 01466 // 01467 // Done building new MTRRs 01468 // 01469 01470 if (NT_SUCCESS(NewRange->Status)) { 01471 01472 // 01473 // Update the MTRRs on all processors 01474 // 01475 01476 #if IDBG 01477 KiDumpMTRR ("Loading the following MTRR:", NewRange->MTRR); 01478 #endif 01479 01480 NewRange->TargetCount = 0; 01481 NewRange->TargetPhase = &Prcb->ReverseStall; 01482 NewRange->Processor = Prcb->Number; 01483 01484 // 01485 // Previously enabled MTRRs with index > NoMTRR 01486 // which could conflict with existing setting should be disabled 01487 // This is taken care of by setting NewRange->NoMTRR to total 01488 // number of variable MTRRs. 
01489 // 01490 01491 NewRange->NoMTRR = (ULONG) KiRangeInfo.Capabilities.u.hw.VarCnt; 01492 01493 // 01494 // Synchronize with other IPI functions which may stall 01495 // 01496 01497 KiLockContextSwap(&OldIrql); 01498 01499 #if !defined(NT_UP) 01500 // 01501 // Collect all the (other) processors 01502 // 01503 01504 TargetProcessors = KeActiveProcessors & ~Prcb->SetMember; 01505 if (TargetProcessors != 0) { 01506 01507 KiIpiSendSynchronousPacket ( 01508 Prcb, 01509 TargetProcessors, 01510 KiLoadMTRRTarget, 01511 (PVOID) NewRange, 01512 NULL, 01513 NULL 01514 ); 01515 01516 // 01517 // Wait for all processors to be collected 01518 // 01519 01520 KiIpiStallOnPacketTargets(TargetProcessors); 01521 01522 // 01523 // All processors are now waiting. Raise to high level to 01524 // ensure this processor doesn't enter the debugger due to 01525 // some interrupt service routine. 01526 // 01527 01528 KeRaiseIrql (HIGH_LEVEL, &OldIrql2); 01529 01530 // 01531 // There's no reason for any debug events now, so signal 01532 // the other processors that they can all disable interrupts 01533 // and being the MTRR update 01534 // 01535 01536 Prcb->ReverseStall += 1; 01537 } 01538 #endif 01539 01540 // 01541 // Update MTRRs 01542 // 01543 01544 KiLoadMTRR (NewRange); 01545 01546 // 01547 // Release ContextSwap lock 01548 // 01549 01550 KiUnlockContextSwap(OldIrql); 01551 01552 01553 #if IDBG 01554 KiDumpMTRR ("Processor MTRR:", NewRange->MTRR); 01555 #endif 01556 01557 } else { 01558 01559 // 01560 // There was an error, put original range database back 01561 // 01562 01563 DBGMSG ("KiCompleteEffectiveRangeChange: mtrr update did not occur\n"); 01564 01565 if (NewRange->Ranges) { 01566 KiRangeInfo.NoRange = NewRange->NoRange; 01567 01568 RtlCopyMemory ( 01569 KiRangeInfo.Ranges, 01570 NewRange->Ranges, 01571 sizeof (ONE_RANGE) * KiRangeInfo.NoRange 01572 ); 01573 } 01574 } 01575 01576 // 01577 // Cleanup 01578 // 01579 01580 ExFreePool (NewRange->Ranges); 01581 ExFreePool 
(NewRange->MTRR); 01582 } 01583 01584 01585 STATIC ULONG 01586 KiRangeWeight ( 01587 IN PONE_RANGE Range 01588 ) 01589 /*++ 01590 01591 Routine Description: 01592 01593 This functions returns a weighting of the passed in range's cache 01594 type. When two or more regions collide within the same h/w region 01595 the types are weighted and that cache type of the higher weight 01596 is used for the collision area. 01597 01598 Arguments: 01599 01600 Range - Range to obtain weighting for 01601 01602 Return Value: 01603 01604 The weight of the particular cache type 01605 01606 --*/ 01607 { 01608 ULONG Weight; 01609 01610 switch (Range->Type) { 01611 case MTRR_TYPE_UC: Weight = 5; break; 01612 case MTRR_TYPE_USWC: Weight = 4; break; 01613 case MTRR_TYPE_WP: Weight = 3; break; 01614 case MTRR_TYPE_WT: Weight = 2; break; 01615 case MTRR_TYPE_WB: Weight = 1; break; 01616 default: Weight = 0; break; 01617 } 01618 01619 return Weight; 01620 } 01621 01622 01623 STATIC ULONGLONG 01624 KiMaskToLength ( 01625 IN ULONGLONG Mask 01626 ) 01627 /*++ 01628 01629 Routine Description: 01630 01631 This function returns the length specified by a particular 01632 mtrr variable register mask. 01633 01634 --*/ 01635 { 01636 if (Mask == 0) { 01637 // Zero Mask signifies a length of 2**36 01638 return(((ULONGLONG) 1 << MTRR_MAX_RANGE_SHIFT)); 01639 } else { 01640 return(((ULONGLONG) 1 << KiFindFirstSetRightBit(Mask))); 01641 } 01642 } 01643 01644 STATIC ULONGLONG 01645 KiLengthToMask ( 01646 IN ULONGLONG Length 01647 ) 01648 /*++ 01649 01650 Routine Description: 01651 01652 This function constructs the mask corresponding to the input length 01653 to be set in a variable MTRR register. The length is assumed to be 01654 a multiple of 4K. 
01655 01656 --*/ 01657 { 01658 ULONGLONG FullMask = 0xffffff; 01659 01660 if (Length == ((ULONGLONG) 1 << MTRR_MAX_RANGE_SHIFT)) { 01661 return(0); 01662 } else { 01663 return(((FullMask << KiFindFirstSetRightBit(Length)) & 01664 MTRR_RESVBIT_MASK)); 01665 } 01666 } 01667 01668 STATIC ULONG 01669 KiFindFirstSetRightBit ( 01670 IN ULONGLONG Set 01671 ) 01672 /*++ 01673 01674 Routine Description: 01675 01676 This function returns a bit position of the least significant 01677 bit set in the passed ULONGLONG parameter. Passed parameter 01678 must be non-zero. 01679 01680 --*/ 01681 { 01682 ULONG bitno; 01683 01684 ASSERT(Set != 0); 01685 for (bitno=0; !(Set & 0xFF); bitno += 8, Set >>= 8) ; 01686 return KiFindFirstSetRight[Set & 0xFF] + bitno; 01687 } 01688 01689 STATIC ULONG 01690 KiFindFirstSetLeftBit ( 01691 IN ULONGLONG Set 01692 ) 01693 /*++ 01694 01695 Routine Description: 01696 01697 This function returns a bit position of the most significant 01698 bit set in the passed ULONGLONG parameter. Passed parameter 01699 must be non-zero. 
--*/
{
    ULONG bitno;

    ASSERT(Set != 0);

    //
    // Skip over high-order all-zero bytes, then finish the scan with a
    // byte-wide lookup table indexed by the top byte.
    //

    for (bitno=56;!(Set & 0xFF00000000000000); bitno -= 8, Set <<= 8) ;
    return KiFindFirstSetLeft[Set >> 56] + bitno;
}

#if IDBG
VOID
KiDumpMTRR (
    PUCHAR DebugString,
    PMTRR_RANGE MTRR
    )
/*++

Routine Description:

    This function dumps the MTRR information to the debugger.  If a
    MTRR image is supplied it is dumped; otherwise the current h/w
    variable MTRR registers are read and dumped.

Arguments:

    DebugString - Caption printed before the register dump

    MTRR        - Optional in-memory MTRR image to dump; if NULL the
                  values are read from the MSRs

--*/
{
    static PUCHAR Type[] = {
    //    0       1       2       3       4       5       6
        "UC ", "USWC", "????", "????", "WT ", "WP ", "WB " };
    MTRR_VARIABLE_BASE  Base;
    MTRR_VARIABLE_MASK  Mask;
    ULONG               Index;
    ULONG               i;
    PUCHAR              p;

    DbgPrint ("%s\n", DebugString);
    for (Index=0; Index < (ULONG) KiRangeInfo.Capabilities.u.hw.VarCnt; Index++) {
        if (MTRR) {
            Base = MTRR[Index].Base;
            Mask = MTRR[Index].Mask;
        } else {
            Base.u.QuadPart = RDMSR(MTRR_MSR_VARIABLE_BASE+2*Index);
            Mask.u.QuadPart = RDMSR(MTRR_MSR_VARIABLE_MASK+2*Index);
        }

        DbgPrint (" %d. ", Index);
        if (Mask.u.hw.Valid) {
            p = "????";
            if (Base.u.hw.Type < 7) {
                p = Type[Base.u.hw.Type];
            }

            DbgPrint ("%s %08x:%08x %08x:%08x",
                p,
                (ULONG) (Base.u.QuadPart >> 32),
                ((ULONG) (Base.u.QuadPart & MTRR_MASK_BASE)),
                (ULONG) (Mask.u.QuadPart >> 32),
                ((ULONG) (Mask.u.QuadPart & MTRR_MASK_MASK))
                );

        }
        DbgPrint ("\n");
    }
}
#endif


VOID
KiLoadMTRRTarget (
    IN PKIPI_CONTEXT SignalDone,
    IN PVOID NewRange,
    IN PVOID Parameter2,
    IN PVOID Parameter3
    )
/*++

Routine Description:

    IPI target routine run on every other processor during a MTRR
    update.  Signals the requesting processor that this processor has
    been captured, stalls until released, then performs the same MTRR
    load sequence as the initiator.

Arguments:

    SignalDone  - IPI packet to acknowledge

    NewRange    - Update context (PNEW_RANGE) shared with the initiator

    Parameter2  - Not used

    Parameter3  - Not used

--*/
{
    PNEW_RANGE Context;

    Context = (PNEW_RANGE) NewRange;

    //
    // Wait for all processors to be ready
    //

    KiIpiSignalPacketDoneAndStall (SignalDone, Context->TargetPhase);

    //
    // Update MTRRs
    //

    KiLoadMTRR (Context);
}


// NOTE(review): these macros appear unused; the code below emits the
// CR4 opcodes directly via _emit.

#define MOV_EAX_CR4 _emit { 0Fh, 20h, E0h }
#define MOV_CR4_EAX _emit { 0Fh, 22h, E0h }

NTSTATUS
KiLoadMTRR (
    IN PNEW_RANGE Context
    )
/*++

Routine Description:

    This function loads the memory type range registers on the current
    processor, synchronizing with all other processors at each step so
    that every processor transitions through the update sequence in
    lock step (caches disabled and flushed, MTRRs disabled, new values
    written, MTRRs re-enabled, caches restored).

Arguments:

    Context - Context which includes the MTRRs to load

Return Value:

    STATUS_SUCCESS.  All processors are set into the new state.

--*/
{
    MTRR_DEFAULT    Default;
    BOOLEAN         Enable;
    ULONG           HldCr0, HldCr4;
    ULONG           Index;

    //
    // Disable interrupts
    //

    Enable = KiDisableInterrupts();

    //
    // Synchronize all processors
    //

    if (!(KeFeatureBits & KF_AMDK6MTRR)) {
        KiSynchronizeMTRRLoad (Context);
    }

    _asm {
        ;
        ; Get current CR0
        ;

        mov     eax, cr0
        mov     HldCr0, eax

        ;
        ; Disable caching & line fill (CD=1, NW=0)
        ;

        and     eax, not CR0_NW
        or      eax, CR0_CD
        mov     cr0, eax

        ;
        ; Flush caches
        ;

        ;
        ; wbinvd
        ;

        _emit   0Fh
        _emit   09h

        ;
        ; Get current cr4
        ;

        _emit   0Fh
        _emit   20h
        _emit   0E0h            ; mov eax, cr4
        mov     HldCr4, eax

        ;
        ; Disable global page
        ;

        and     eax, not CR4_PGE
        _emit   0Fh
        _emit   22h
        _emit   0E0h            ; mov cr4, eax

        ;
        ; Flush TLB
        ;

        mov     eax, cr3
        mov     cr3, eax
    }

    if (KeFeatureBits & KF_AMDK6MTRR) {

        //
        // Write the MTRRs (AMD K6 model-specific mechanism)
        //

        KiAmdK6MtrrWRMSR();

    } else {

        //
        // Disable MTRRs
        //

        Default.u.QuadPart = RDMSR(MTRR_MSR_DEFAULT);
        Default.u.hw.MtrrEnabled = 0;
        WRMSR (MTRR_MSR_DEFAULT, Default.u.QuadPart);

        //
        // Synchronize all processors
        //

        KiSynchronizeMTRRLoad (Context);

        //
        // Load new MTRRs.  NoMTRR is the full variable register count,
        // so previously-valid registers beyond the new settings are
        // overwritten (and thereby invalidated) as well.
        //

        for (Index=0; Index < Context->NoMTRR; Index++) {
            WRMSR (MTRR_MSR_VARIABLE_BASE+2*Index, Context->MTRR[Index].Base.u.QuadPart);
            WRMSR (MTRR_MSR_VARIABLE_MASK+2*Index, Context->MTRR[Index].Mask.u.QuadPart);
        }

        //
        // Synchronize all processors
        //

        KiSynchronizeMTRRLoad (Context);
    }

    _asm {

        ;
        ; Flush caches (this should be a "nop", but it was in the Intel
        ; reference algorithm).
        ; This is required because of aggressive prefetch of both
        ; instr + data
        ;

        ;
        ; wbinvd
        ;

        _emit   0Fh
        _emit   09h

        ;
        ; Flush TLBs (same comment as above)
        ; Same explanation as above
        ;

        mov     eax, cr3
        mov     cr3, eax
    }

    if (!(KeFeatureBits & KF_AMDK6MTRR)) {

        //
        // Enable MTRRs
        //

        Default.u.hw.MtrrEnabled = 1;
        WRMSR (MTRR_MSR_DEFAULT, Default.u.QuadPart);

        //
        // Synchronize all processors
        //

        KiSynchronizeMTRRLoad (Context);
    }

    _asm {
        ;
        ; Restore CR4 (global page enable)
        ;

        mov     eax, HldCr4
        _emit   0Fh
        _emit   22h
        _emit   0E0h            ; mov cr4, eax

        ;
        ; Restore CR0 (cache enable)
        ;

        mov     eax, HldCr0
        mov     cr0, eax
    }

    //
    // Restore interrupts and return
    //

    KiRestoreInterrupts (Enable);
    return STATUS_SUCCESS;
}


VOID
KiSynchronizeMTRRLoad (
    IN PNEW_RANGE Context
    )
/*++

Routine Description:

    Barrier used by KiLoadMTRR to keep all processors in lock step.
    The initiating processor (Context->Processor) waits for every
    other processor to check in via TargetCount, then releases them
    all by advancing TargetPhase.  On UP builds this is a no-op.

Arguments:

    Context - Update context holding the shared TargetCount and
              TargetPhase synchronization cells

--*/
{

#if !defined(NT_UP)

    ULONG           CurrentPhase;
    volatile ULONG  *TargetPhase;
    PKPRCB          Prcb;

    TargetPhase = Context->TargetPhase;
    Prcb = KeGetCurrentPrcb();

    if (Prcb->Number == (CCHAR) Context->Processor) {

        //
        // Wait for all processors to signal
        //

        while (Context->TargetCount != (ULONG) KeNumberProcessors - 1) {
            KeYieldProcessor ();
        }

        //
        // Reset count for next time
        //

        Context->TargetCount = 0;

        //
        // Let waiting processors go to next synchronization point
        //

        InterlockedIncrement ((PULONG) TargetPhase);


    } else {

        //
        // Get current phase
        //

        CurrentPhase = *TargetPhase;

        //
        // Signal that we have completed the current phase
        //

        InterlockedIncrement ((PULONG) &Context->TargetCount);

        //
        // Wait for new phase to begin
        //

        while (*TargetPhase == CurrentPhase) {
            KeYieldProcessor ();
        }
    }

#endif

}

Generated on Sat May 15 19:40:53 2004 for test by doxygen 1.3.7