Main Page | Class Hierarchy | Class List | File List | Class Members | File Members

mtrr.c File Reference

#include "ki.h"
#include "mtrr.h"

Go to the source code of this file.

Classes

struct  _ONE_RANGE
struct  _MTRR_RANGE
struct  _RANGE_INFO
struct  _NEW_RANGE

Defines

#define STATIC
#define IDBG   0
#define DBGMSG(a)
#define GROW_RANGE_TABLE   4
#define MOV_EAX_CR4   _emit { 0Fh, 20h, E0h }
#define MOV_CR4_EAX   _emit { 0Fh, 22h, E0h }

Typedefs

typedef _ONE_RANGE ONE_RANGE
typedef _ONE_RANGE * PONE_RANGE
typedef _MTRR_RANGE MTRR_RANGE
typedef _MTRR_RANGE * PMTRR_RANGE
typedef _RANGE_INFO RANGE_INFO
typedef _RANGE_INFO * PRANGE_INFO
typedef _NEW_RANGE NEW_RANGE
typedef _NEW_RANGE * PNEW_RANGE

Functions

VOID KiInitializeMTRR (IN BOOLEAN LastProcessor)
BOOLEAN KiRemoveRange (IN PNEW_RANGE NewRange, IN ULONGLONG Base, IN ULONGLONG Limit, IN PBOOLEAN RemoveThisType)
VOID KiAddRange (IN PNEW_RANGE NewRange, IN ULONGLONG Base, IN ULONGLONG Limit, IN UCHAR Type)
VOID KiStartEffectiveRangeChange (IN PNEW_RANGE NewRange)
VOID KiCompleteEffectiveRangeChange (IN PNEW_RANGE NewRange)
STATIC ULONG KiRangeWeight (IN PONE_RANGE Range)
STATIC ULONG KiFindFirstSetLeftBit (IN ULONGLONG Set)
STATIC ULONG KiFindFirstSetRightBit (IN ULONGLONG Set)
VOID KiLoadMTRRTarget (IN PKIPI_CONTEXT SignalDone, IN PVOID Context, IN PVOID Parameter2, IN PVOID Parameter3)
NTSTATUS KiLoadMTRR (IN PNEW_RANGE Context)
VOID KiSynchronizeMTRRLoad (IN PNEW_RANGE Context)
ULONGLONG KiMaskToLength (IN ULONGLONG Mask)
ULONGLONG KiLengthToMask (IN ULONGLONG Length)
NTSTATUS KiAmdK6MtrrSetMemoryType (IN ULONG BaseAddress, IN ULONG NumberOfBytes, IN MEMORY_CACHING_TYPE CacheType)
VOID KiAmdK6MtrrWRMSR (VOID)
VOID KeRestoreMtrr (VOID)
NTSTATUS KeSetPhysicalCacheTypeRange (IN PHYSICAL_ADDRESS PhysicalAddress, IN ULONG NumberOfBytes, IN MEMORY_CACHING_TYPE CacheType)

Variables

KSPIN_LOCK KiRangeLock
RANGE_INFO KiRangeInfo


Define Documentation

#define DBGMSG ( a  ) 
 

Definition at line 38 of file mtrr.c.

#define GROW_RANGE_TABLE   4
 

Definition at line 55 of file mtrr.c.

Referenced by KiAddRange(), and KiInitializeMTRR().

#define IDBG   0
 

Definition at line 33 of file mtrr.c.

#define MOV_CR4_EAX   _emit { 0Fh, 22h, E0h }
 

Definition at line 1793 of file mtrr.c.

#define MOV_EAX_CR4   _emit { 0Fh, 20h, E0h }
 

Definition at line 1792 of file mtrr.c.

#define STATIC
 

Definition at line 31 of file mtrr.c.


Typedef Documentation

typedef struct _MTRR_RANGE MTRR_RANGE
 

typedef struct _NEW_RANGE NEW_RANGE
 

typedef struct _ONE_RANGE ONE_RANGE
 

Referenced by KiInitializeMTRR().

typedef struct _MTRR_RANGE * PMTRR_RANGE
 

Referenced by KiCompleteEffectiveRangeChange().

typedef struct _NEW_RANGE * PNEW_RANGE
 

Referenced by KiLoadMTRRTarget().

typedef struct _ONE_RANGE * PONE_RANGE
 

Referenced by KiAddRange(), KiCompleteEffectiveRangeChange(), and KiRemoveRange().

typedef struct _RANGE_INFO * PRANGE_INFO
 

typedef struct _RANGE_INFO RANGE_INFO
 


Function Documentation

VOID KeRestoreMtrr ( VOID   ) 
 

Definition at line 526 of file mtrr.c.

References ASSERT, KeAcquireSpinLock, KeFeatureBits, KeReleaseSpinLock(), KF_AMDK6MTRR, KiCompleteEffectiveRangeChange(), KiLoadMTRR(), KiRangeInfo, KiRangeLock, KiStartEffectiveRangeChange(), NT_SUCCESS, NULL, and _RANGE_INFO::RangesValid.

00531 : 00532 00533 This function reloads the MTRR registers to be the current 00534 known values. This is used on a system wakeup to ensure the 00535 registers are sane. 00536 00537 N.B. The caller must have the PAGELK code locked 00538 00539 Arguments: 00540 00541 none 00542 00543 Return Value: 00544 00545 none 00546 00547 --*/ 00548 { 00549 NEW_RANGE NewRange; 00550 KIRQL OldIrql; 00551 00552 if (KiRangeInfo.RangesValid) { 00553 RtlZeroMemory (&NewRange, sizeof (NewRange)); 00554 KeAcquireSpinLock (&KiRangeLock, &OldIrql); 00555 KiStartEffectiveRangeChange (&NewRange); 00556 ASSERT (NT_SUCCESS(NewRange.Status)); 00557 KiCompleteEffectiveRangeChange (&NewRange); 00558 KeReleaseSpinLock (&KiRangeLock, OldIrql); 00559 return; 00560 } 00561 00562 // 00563 // If the processor is a AMD K6 with MTRR support then perform 00564 // processor specific implentaiton. 00565 // 00566 00567 if (KeFeatureBits & KF_AMDK6MTRR) { 00568 KeAcquireSpinLock (&KiRangeLock, &OldIrql); 00569 KiLoadMTRR(NULL); 00570 KeReleaseSpinLock (&KiRangeLock, OldIrql); 00571 } 00572 }

NTSTATUS KeSetPhysicalCacheTypeRange ( IN PHYSICAL_ADDRESS  PhysicalAddress,
IN ULONG  NumberOfBytes,
IN MEMORY_CACHING_TYPE  CacheType )
 

Definition at line 576 of file mtrr.c.

References ASSERT, _RANGE_INFO::Capabilities, DBGMSG, _RANGE_INFO::DefaultCachedType, DISPATCH_LEVEL, ExPageLockHandle, FALSE, KeAcquireSpinLock, KeFeatureBits, KeReleaseSpinLock(), KF_AMDK6MTRR, KiAddRange(), KiAmdK6MtrrSetMemoryType(), KiCompleteEffectiveRangeChange(), KiRangeInfo, KiRangeLock, KiRemoveRange(), KiStartEffectiveRangeChange(), MmCached, MmLockPagableSectionByHandle(), MmNonCached, MmUnlockPagableImageSection(), MmUSWCCached, MmWriteCombined, MTRR_TYPE_MAX, MTRR_TYPE_UC, MTRR_TYPE_USWC, _RANGE_INFO::MtrrWorkaround, NT_SUCCESS, _RANGE_INFO::RangesValid, TRUE, and _MTRR_CAPABILITIES::u.

Referenced by MmMapIoSpace().

00583 : 00584 00585 This function sets a physical range to a particular cache type. 00586 If the system does not support setting cache policies based on 00587 physical ranges, no action is taken. 00588 00589 Arguments: 00590 00591 PhysicalAddress - The starting address of the range being set 00592 00593 NumberOfBytes - The length, in bytes, of the range being set 00594 00595 CacheType - The caching type for which the physical range is 00596 to be set to. 00597 00598 NonCached: 00599 Setting ranges to be NonCached is done for 00600 book keeping reasons. A return of SUCCESS when 00601 setting a range NonCached does not mean it has 00602 been physically set to as NonCached. The caller 00603 must use a cache-disabled virtual pointer for 00604 any NonCached range. 00605 00606 Cached: 00607 A successful return indicates that the physical 00608 range has been set to cached. This mode requires 00609 the caller to be at irql < dispatch_level. 00610 00611 FrameBuffer: 00612 A successful return indicates that the physical 00613 range has been set to be framebuffer cached. 00614 This mode requires the caller to be at irql < 00615 dispatch_level. 00616 00617 USWCCached: 00618 This type is to be satisfied only via PAT and 00619 fails for the MTRR interface. 00620 00621 Return Value: 00622 00623 STATUS_SUCCESS - if success, the cache attributes of the physical range 00624 have been set. 00625 00626 STATUS_NOT_SUPPORTED - either feature not supported or not yet initialized, 00627 or MmWriteCombined type not supported and is 00628 requested, or input range does not match restrictions 00629 imposed by workarounds for current processor stepping 00630 or is below 1M (in the fixed MTRR range), or not yet 00631 initialized. 00632 00633 STATUS_UNSUCCESSFUL - Unable to satisfy request due to 00634 - Unable to map software image into limited # of 00635 hardware MTRRs. 00636 - irql was not < DISPATCH_LEVEL. 00637 - Failure due to other internal error (out of memory). 
00638 00639 STATUS_INVALID_PARAMETER - Incorrect input memory type. 00640 00641 --*/ 00642 { 00643 KIRQL OldIrql; 00644 NEW_RANGE NewRange; 00645 BOOLEAN RemoveThisType[MTRR_TYPE_MAX]; 00646 BOOLEAN EffectRangeChange, AddToRangeDatabase; 00647 00648 // 00649 // If caller has requested the MmUSWCCached memory type then fail 00650 // - MmUSWCCached is supported via PAT and not otherwise 00651 // 00652 00653 if (CacheType == MmUSWCCached) { 00654 return STATUS_NOT_SUPPORTED; 00655 } 00656 00657 // 00658 // Addresses above 4GB, below 1MB or not page aligned and 00659 // page length are not supported. 00660 // 00661 00662 if ((PhysicalAddress.HighPart != 0) || 00663 (PhysicalAddress.LowPart < (1 * 1024 * 1024)) || 00664 (PhysicalAddress.LowPart & 0xfff) || 00665 (NumberOfBytes & 0xfff) ) { 00666 return STATUS_NOT_SUPPORTED; 00667 } 00668 00669 ASSERT (NumberOfBytes != 0); 00670 00671 // 00672 // If the processor is a AMD K6 with MTRR support then perform 00673 // processor specific implentaiton. 00674 // 00675 00676 if (KeFeatureBits & KF_AMDK6MTRR) { 00677 00678 if ((CacheType != MmWriteCombined) && (CacheType != MmNonCached)) { 00679 return STATUS_NOT_SUPPORTED; 00680 } 00681 00682 return KiAmdK6MtrrSetMemoryType(PhysicalAddress.LowPart, 00683 NumberOfBytes, 00684 CacheType); 00685 } 00686 00687 // 00688 // If processor doesn't have the memory type range feature 00689 // return not supported. 
00690 // 00691 00692 if (!KiRangeInfo.RangesValid) { 00693 return STATUS_NOT_SUPPORTED; 00694 } 00695 00696 // 00697 // Workaround for cpu signatures 611, 612, 616 and 617 00698 // - if the request for setting a variable MTRR specifies 00699 // an address which is not 4M aligned or length is not 00700 // a multiple of 4M then return status not supported 00701 // 00702 00703 if ((KiRangeInfo.MtrrWorkaround) && 00704 ((PhysicalAddress.LowPart & 0x3fffff) || 00705 (NumberOfBytes & 0x3fffff))) { 00706 00707 return STATUS_NOT_SUPPORTED; 00708 } 00709 00710 RtlZeroMemory (&NewRange, sizeof (NewRange)); 00711 NewRange.Base = PhysicalAddress.QuadPart; 00712 NewRange.Limit = NewRange.Base + NumberOfBytes - 1; 00713 00714 // 00715 // Determine what the new mtrr range type is. If setting NonCached then 00716 // the database need not be updated to reflect the virtual change. This 00717 // is because non-cached virtual pointers are mapped as cache disabled. 00718 // 00719 00720 EffectRangeChange = TRUE; 00721 AddToRangeDatabase = TRUE; 00722 switch (CacheType) { 00723 case MmNonCached: 00724 NewRange.Type = MTRR_TYPE_UC; 00725 00726 // 00727 // NonCached ranges do not need to be reflected into the h/w state 00728 // as all non-cached ranges are mapped with cache-disabled pointers. 00729 // This also means that cache-disabled ranges do not need to 00730 // be put into mtrrs, or held in the range, regardless of the default 00731 // range type. 
00732 // 00733 00734 EffectRangeChange = FALSE; 00735 AddToRangeDatabase = FALSE; 00736 break; 00737 00738 case MmCached: 00739 NewRange.Type = KiRangeInfo.DefaultCachedType; 00740 break; 00741 00742 case MmWriteCombined: 00743 NewRange.Type = MTRR_TYPE_USWC; 00744 00745 // 00746 // If USWC type isn't supported, then request can not be honored 00747 // 00748 00749 if (!KiRangeInfo.Capabilities.u.hw.UswcSupported) { 00750 DBGMSG ("KeSetPhysicalCacheTypeRange: USWC not supported\n"); 00751 return STATUS_NOT_SUPPORTED; 00752 } 00753 break; 00754 00755 default: 00756 DBGMSG ("KeSetPhysicalCacheTypeRange: no such cache type\n"); 00757 return STATUS_INVALID_PARAMETER; 00758 break; 00759 } 00760 00761 NewRange.Status = STATUS_SUCCESS; 00762 00763 // 00764 // The default type is UC thus the range is still mapped using 00765 // a Cache Disabled VirtualPointer and hence it need not be added. 00766 // 00767 00768 // 00769 // If h/w needs updated, lock down the code required to effect the change 00770 // 00771 00772 if (EffectRangeChange) { 00773 if (KeGetCurrentIrql() >= DISPATCH_LEVEL) { 00774 00775 // 00776 // Code can not be locked down. Supplying a new range type requires 00777 // that the caller calls at irql < dispatch_level. 
00778 // 00779 00780 DBGMSG ("KeSetPhysicalCacheTypeRange failed due to calling IRQL == DISPATCH_LEVEL\n"); 00781 return STATUS_UNSUCCESSFUL; 00782 } 00783 00784 MmLockPagableSectionByHandle(ExPageLockHandle); 00785 } 00786 00787 // 00788 // Serialize the range type database 00789 // 00790 00791 KeAcquireSpinLock (&KiRangeLock, &OldIrql); 00792 00793 // 00794 // If h/w is going to need updated, then start an effective range change 00795 // 00796 00797 if (EffectRangeChange) { 00798 KiStartEffectiveRangeChange (&NewRange); 00799 } 00800 00801 if (NT_SUCCESS (NewRange.Status)) { 00802 00803 // 00804 // If the new range is NonCached, then don't remove standard memory 00805 // caching types 00806 // 00807 00808 memset (RemoveThisType, TRUE, MTRR_TYPE_MAX); 00809 if (NewRange.Type != MTRR_TYPE_UC) { 00810 // 00811 // If the requested type is uncached then the physical 00812 // memory region is mapped using a cache disabled virtual pointer. 00813 // The effective memory type for that region will be the lowest 00814 // common denominator of the MTRR type and the cache type in the 00815 // PTE. Therefore for a request of type UC, the effective type 00816 // will be UC irrespective of the MTRR settings in that range. 00817 // Hence it is not necessary to remove the existing MTRR settings 00818 // (if any) for that range. 
00819 // 00820 00821 // 00822 // Clip/remove any ranges in the target area 00823 // 00824 00825 KiRemoveRange (&NewRange, NewRange.Base, NewRange.Limit, RemoveThisType); 00826 } 00827 00828 // 00829 // If needed, add new range type 00830 // 00831 00832 if (AddToRangeDatabase) { 00833 ASSERT (EffectRangeChange == TRUE); 00834 KiAddRange (&NewRange, NewRange.Base, NewRange.Limit, NewRange.Type); 00835 } 00836 00837 // 00838 // If this is an effect range change, then complete it 00839 // 00840 00841 if (EffectRangeChange) { 00842 KiCompleteEffectiveRangeChange (&NewRange); 00843 } 00844 } 00845 00846 KeReleaseSpinLock (&KiRangeLock, OldIrql); 00847 if (EffectRangeChange) { 00848 MmUnlockPagableImageSection(ExPageLockHandle); 00849 } 00850 00851 return NewRange.Status; 00852 }

VOID KiAddRange ( IN PNEW_RANGE  NewRange,
IN ULONGLONG  Base,
IN ULONGLONG  Limit,
IN UCHAR  Type )
 

Definition at line 992 of file mtrr.c.

References _ONE_RANGE::Base, ExAllocatePoolWithTag, ExFreePool(), GROW_RANGE_TABLE, KiRangeInfo, _ONE_RANGE::Limit, _RANGE_INFO::MaxRange, NonPagedPool, _RANGE_INFO::NoRange, PONE_RANGE, _RANGE_INFO::Ranges, and _ONE_RANGE::Type.

Referenced by KeSetPhysicalCacheTypeRange(), KiInitializeMTRR(), and KiRemoveRange().

01000 : 01001 01002 This function adds the passed range to the global range database. 01003 01004 Arguments: 01005 01006 NewRange - Context information 01007 01008 Base - Base & Limit signify the first & last address of a range 01009 Limit - which is to be added to the range database 01010 01011 Type - Type of caching required for this range 01012 01013 Return Value: 01014 01015 None - Context is updated with an error if the table has overflowed 01016 01017 --*/ 01018 { 01019 PONE_RANGE Range, OldRange; 01020 ULONG size; 01021 01022 if (KiRangeInfo.NoRange >= KiRangeInfo.MaxRange) { 01023 01024 // 01025 // Table is out of space, get a bigger one 01026 // 01027 01028 OldRange = KiRangeInfo.Ranges; 01029 size = sizeof(ONE_RANGE) * (KiRangeInfo.MaxRange + GROW_RANGE_TABLE); 01030 Range = ExAllocatePoolWithTag (NonPagedPool, size, ' eK'); 01031 01032 if (!Range) { 01033 NewRange->Status = STATUS_UNSUCCESSFUL; 01034 return ; 01035 } 01036 01037 // 01038 // Grow table 01039 // 01040 01041 RtlZeroMemory (Range, size); 01042 RtlCopyMemory (Range, OldRange, sizeof(ONE_RANGE) * KiRangeInfo.MaxRange); 01043 KiRangeInfo.Ranges = Range; 01044 KiRangeInfo.MaxRange += GROW_RANGE_TABLE; 01045 ExFreePool (OldRange); 01046 } 01047 01048 // 01049 // Add new entry to table 01050 // 01051 01052 KiRangeInfo.Ranges[KiRangeInfo.NoRange].Base = Base; 01053 KiRangeInfo.Ranges[KiRangeInfo.NoRange].Limit = Limit; 01054 KiRangeInfo.Ranges[KiRangeInfo.NoRange].Type = Type; 01055 KiRangeInfo.NoRange += 1; 01056 }

NTSTATUS KiAmdK6MtrrSetMemoryType ( IN ULONG  BaseAddress,
IN ULONG  NumberOfBytes,
IN MEMORY_CACHING_TYPE  CacheType )
 

VOID KiAmdK6MtrrWRMSR ( VOID   ) 
 

Definition at line 754 of file mtrramd.c.

References AMDK6_MTRR_MSR, KiAmdK6Mtrr, _AMDK6_MTRR_MSR_IMAGE::u, and WRMSR().

Referenced by KiLoadMTRR().

00760 : 00761 00762 Write the AMD K6 MTRRs. 00763 00764 Note: Access to KiAmdK6Mtrr has been synchronized around this 00765 call. 00766 00767 Arguments: 00768 00769 None. 00770 00771 Return Value: 00772 00773 None. 00774 00775 --*/ 00776 00777 { 00778 // 00779 // Write the MTRRs 00780 // 00781 00782 WRMSR (AMDK6_MTRR_MSR, KiAmdK6Mtrr.u.QuadPart); 00783 }

VOID KiCompleteEffectiveRangeChange ( IN PNEW_RANGE  NewRange  ) 
 

Definition at line 1112 of file mtrr.c.

References ASSERT, _MTRR_RANGE::Base, _ONE_RANGE::Base, _RANGE_INFO::Capabilities, DBGMSG, _RANGE_INFO::Default, DISPATCH_LEVEL, ExFreePool(), FALSE, HIGH_LEVEL, Index, KeActiveProcessors, KeGetCurrentPrcb, KeRaiseIrql(), KiFindFirstSetLeftBit(), KiFindFirstSetRightBit(), KiIpiStallOnPacketTargets(), KiLengthToMask(), KiLoadMTRR(), KiLoadMTRRTarget(), KiLockContextSwap, KiMaskToLength(), KiRangeInfo, KiRangeWeight(), KiRemoveRange(), KiUnlockContextSwap, _ONE_RANGE::Limit, _MTRR_RANGE::Mask, MTRR_MASK_MASK, MTRR_MAX_RANGE_SHIFT, MTRR_PAGE_MASK, MTRR_PAGE_SIZE, MTRR_TYPE_MAX, MTRR_TYPE_UC, MTRR_TYPE_USWC, MTRR_TYPE_WB, MTRR_TYPE_WT, _RANGE_INFO::NoRange, NT_SUCCESS, NULL, PMTRR_RANGE, PONE_RANGE, _RANGE_INFO::Ranges, TRUE, _ONE_RANGE::Type, _MTRR_DEFAULT::u, _MTRR_CAPABILITIES::u, _MTRR_VARIABLE_MASK::u, and _MTRR_VARIABLE_BASE::u.

Referenced by KeRestoreMtrr(), and KeSetPhysicalCacheTypeRange().

01117 : 01118 01119 This functions commits the range database to hardware, or backs 01120 out the current changes to it. 01121 01122 Arguments: 01123 01124 NewRange - Context information 01125 01126 Return Value: 01127 01128 None 01129 01130 --*/ 01131 { 01132 BOOLEAN Restart; 01133 ULONG Index, Index2, RemIndex2, NoMTRR; 01134 ULONGLONG BestLength, WhichMtrr; 01135 ULONGLONG CurrLength; 01136 ULONGLONG l, Base, Length, MLength; 01137 PONE_RANGE Range; 01138 ONE_RANGE OneRange; 01139 PMTRR_RANGE MTRR; 01140 BOOLEAN RoundDown; 01141 BOOLEAN RemoveThisType[MTRR_TYPE_MAX]; 01142 PKPRCB Prcb; 01143 KIRQL OldIrql, OldIrql2; 01144 KAFFINITY TargetProcessors; 01145 01146 01147 ASSERT (KeGetCurrentIrql() == DISPATCH_LEVEL); 01148 Prcb = KeGetCurrentPrcb(); 01149 01150 // 01151 // Round all ranges, according to type, to match what h/w can support 01152 // 01153 01154 for (Index=0; Index < KiRangeInfo.NoRange; Index++) { 01155 Range = &KiRangeInfo.Ranges[Index]; 01156 01157 // 01158 // Determine rounding for this range type 01159 // 01160 01161 RoundDown = TRUE; 01162 if (Range->Type == MTRR_TYPE_UC) { 01163 RoundDown = FALSE; 01164 } 01165 01166 // 01167 // Apply rounding 01168 // 01169 01170 if (RoundDown) { 01171 Range->Base = (Range->Base + MTRR_PAGE_SIZE - 1) & MTRR_PAGE_MASK; 01172 Range->Limit = ((Range->Limit+1) & MTRR_PAGE_MASK)-1; 01173 } else { 01174 Range->Base = (Range->Base & MTRR_PAGE_MASK); 01175 Range->Limit = ((Range->Limit + MTRR_PAGE_SIZE) & MTRR_PAGE_MASK)-1; 01176 } 01177 } 01178 01179 do { 01180 Restart = FALSE; 01181 01182 // 01183 // Sort the ranges by base address 01184 // 01185 01186 for (Index=0; Index < KiRangeInfo.NoRange; Index++) { 01187 Range = &KiRangeInfo.Ranges[Index]; 01188 01189 for (Index2=Index+1; Index2 < KiRangeInfo.NoRange; Index2++) { 01190 01191 if (KiRangeInfo.Ranges[Index2].Base < Range->Base) { 01192 01193 // 01194 // Swap KiRangeInfo.Ranges[Index] with KiRangeInfo.Ranges[Index2] 01195 // 01196 01197 OneRange = *Range; 01198 
*Range = KiRangeInfo.Ranges[Index2]; 01199 KiRangeInfo.Ranges[Index2] = OneRange; 01200 } 01201 } 01202 } 01203 01204 // 01205 // At this point the range database is sorted on 01206 // base address. Scan range database combining adjacent and 01207 // overlapping ranges of the same type 01208 // 01209 01210 for (Index=0; Index < (ULONG) KiRangeInfo.NoRange-1; Index++) { 01211 Range = &KiRangeInfo.Ranges[Index]; 01212 01213 // 01214 // Scan the range database. If ranges are adjacent/overlap and are of 01215 // the same type, combine them. 01216 // 01217 01218 for (Index2 = Index+1; Index2 < (ULONG) KiRangeInfo.NoRange; Index2++) { 01219 01220 l = Range[0].Limit + 1; 01221 if (l < Range[0].Limit) { 01222 l = Range[0].Limit; 01223 } 01224 01225 if (l >= KiRangeInfo.Ranges[Index2].Base && 01226 Range[0].Type == KiRangeInfo.Ranges[Index2].Type) { 01227 01228 // 01229 // Increase Range[0] limit to cover Range[Index2] 01230 // 01231 01232 if (KiRangeInfo.Ranges[Index2].Limit > Range[0].Limit) { 01233 Range[0].Limit = KiRangeInfo.Ranges[Index2].Limit; 01234 } 01235 01236 // 01237 // Remove KiRangeInfo.Ranges[Index2] 01238 // 01239 01240 if (Index2 < (ULONG) KiRangeInfo.NoRange - 1 ) { 01241 01242 // 01243 // Copy everything from Index2 till end 01244 // of range list. # Entries to copy is 01245 // (KiRangeInfo.NoRange -1) - (Index2+1) + 1 01246 // 01247 01248 RtlCopyMemory( 01249 &(KiRangeInfo.Ranges[Index2]), 01250 &(KiRangeInfo.Ranges[Index2+1]), 01251 sizeof(ONE_RANGE) * (KiRangeInfo.NoRange-Index2-1) 01252 ); 01253 } 01254 01255 KiRangeInfo.NoRange -= 1; 01256 01257 // 01258 // Recheck current location 01259 // 01260 01261 Index2 -= 1; 01262 } 01263 } 01264 } 01265 01266 // 01267 // At this point the range database is sorted on base 01268 // address and adjacent/overlapping ranges of the same 01269 // type are combined. 
Check for overlapping ranges - 01270 // If legal then allow else truncate the less "weighty" range 01271 // 01272 01273 for (Index = 0; Index < (ULONG) KiRangeInfo.NoRange-1 && !Restart; Index++) { 01274 01275 Range = &KiRangeInfo.Ranges[Index]; 01276 01277 l = Range[0].Limit + 1; 01278 if (l < Range[0].Limit) { 01279 l = Range[0].Limit; 01280 } 01281 01282 // 01283 // If ranges overlap and are not of same type, and if the 01284 // overlap is not legal then carve them to the best cache type 01285 // available. 01286 // 01287 01288 for (Index2 = Index+1; Index2 < (ULONG) KiRangeInfo.NoRange && !Restart; Index2++) { 01289 01290 if (l > KiRangeInfo.Ranges[Index2].Base) { 01291 01292 if (Range[0].Type == MTRR_TYPE_UC || 01293 KiRangeInfo.Ranges[Index2].Type == MTRR_TYPE_UC) { 01294 01295 // 01296 // Overlap of a UC type with a range of any other type is 01297 // legal 01298 // 01299 01300 } else if ((Range[0].Type == MTRR_TYPE_WT && 01301 KiRangeInfo.Ranges[Index2].Type == MTRR_TYPE_WB) || 01302 (Range[0].Type == MTRR_TYPE_WB && 01303 KiRangeInfo.Ranges[Index2].Type == MTRR_TYPE_WT) ) { 01304 // 01305 // Overlap of WT and WB range is legal. The overlap range will 01306 // be WT. 01307 // 01308 01309 } else { 01310 01311 // 01312 // This is an illegal overlap and we need to carve the ranges 01313 // to remove the overlap. 01314 // 01315 // Pick range which has the cache type which should be used for 01316 // the overlapped area 01317 // 01318 01319 if (KiRangeWeight(&Range[0]) > KiRangeWeight(&(KiRangeInfo.Ranges[Index2]))){ 01320 RemIndex2 = Index2; 01321 } else { 01322 RemIndex2 = Index; 01323 } 01324 01325 // 01326 // Remove ranges of type which do not belong in the overlapped area 01327 // 01328 01329 RtlZeroMemory (RemoveThisType, MTRR_TYPE_MAX); 01330 RemoveThisType[KiRangeInfo.Ranges[RemIndex2].Type] = TRUE; 01331 01332 // 01333 // Remove just the overlapped portion of the range. 
01334 // 01335 01336 Restart = KiRemoveRange ( 01337 NewRange, 01338 KiRangeInfo.Ranges[Index2].Base, 01339 (Range[0].Limit < KiRangeInfo.Ranges[Index2].Limit ? 01340 Range[0].Limit : KiRangeInfo.Ranges[Index2].Limit), 01341 RemoveThisType 01342 ); 01343 } 01344 } 01345 } 01346 } 01347 01348 } while (Restart); 01349 01350 // 01351 // The range database is now rounded to fit in the h/w and sorted. 01352 // Attempt to build MTRR settings which exactly describe the ranges 01353 // 01354 01355 MTRR = NewRange->MTRR; 01356 NoMTRR = 0; 01357 for (Index=0;NT_SUCCESS(NewRange->Status)&& Index<KiRangeInfo.NoRange;Index++) { 01358 Range = &KiRangeInfo.Ranges[Index]; 01359 01360 // 01361 // Build MTRRs to fit this range 01362 // 01363 01364 Base = Range->Base; 01365 Length = Range->Limit - Base + 1; 01366 01367 while (Length) { 01368 01369 // 01370 // Compute MTRR length for current range base & length 01371 // 01372 01373 if (Base == 0) { 01374 MLength = Length; 01375 } else { 01376 MLength = (ULONGLONG) 1 << KiFindFirstSetRightBit(Base); 01377 } 01378 if (MLength > Length) { 01379 MLength = Length; 01380 } 01381 01382 l = (ULONGLONG) 1 << KiFindFirstSetLeftBit (MLength); 01383 if (MLength > l) { 01384 MLength = l; 01385 } 01386 01387 // 01388 // Store it in the next MTRR 01389 // 01390 01391 MTRR[NoMTRR].Base.u.QuadPart = Base; 01392 MTRR[NoMTRR].Base.u.hw.Type = Range->Type; 01393 MTRR[NoMTRR].Mask.u.QuadPart = KiLengthToMask(MLength); 01394 MTRR[NoMTRR].Mask.u.hw.Valid = 1; 01395 NoMTRR += 1; 01396 01397 // 01398 // Adjust off amount of data covered by that last MTRR 01399 // 01400 01401 Base += MLength; 01402 Length -= MLength; 01403 01404 // 01405 // If there are too many MTRRs, and currently setting a 01406 // Non-USWC range try to remove a USWC MTRR. 01407 // (ie, convert some MmWriteCombined to MmNonCached). 
01408 // 01409 01410 if (NoMTRR > (ULONG) KiRangeInfo.Capabilities.u.hw.VarCnt) { 01411 01412 if (Range->Type != MTRR_TYPE_USWC) { 01413 01414 // 01415 // Find smallest USWC type and drop it 01416 // 01417 // This is okay only if the default type is UC. 01418 // Default type should always be UC unless BIOS changes 01419 // it. Still ASSERT! 01420 // 01421 01422 ASSERT(KiRangeInfo.Default.u.hw.Type == MTRR_TYPE_UC); 01423 01424 BestLength = (ULONGLONG) 1 << (MTRR_MAX_RANGE_SHIFT + 1); 01425 01426 for (Index2=0; Index2 < KiRangeInfo.Capabilities.u.hw.VarCnt; Index2++) { 01427 01428 if (MTRR[Index2].Base.u.hw.Type == MTRR_TYPE_USWC) { 01429 01430 CurrLength = KiMaskToLength(MTRR[Index2].Mask.u.QuadPart & 01431 MTRR_MASK_MASK); 01432 01433 if (CurrLength < BestLength) { 01434 WhichMtrr = Index2; 01435 BestLength = CurrLength; 01436 } 01437 } 01438 } 01439 01440 if (BestLength == ((ULONGLONG) 1 << (MTRR_MAX_RANGE_SHIFT + 1))) { 01441 // 01442 // Range was not found which could be dropped. Abort process 01443 // 01444 01445 NewRange->Status = STATUS_UNSUCCESSFUL; 01446 Length = 0; 01447 01448 } else { 01449 // 01450 // Remove WhichMtrr 01451 // 01452 01453 NoMTRR -= 1; 01454 MTRR[WhichMtrr] = MTRR[NoMTRR]; 01455 } 01456 01457 } else { 01458 01459 NewRange->Status = STATUS_UNSUCCESSFUL; 01460 Length =0; 01461 } 01462 } 01463 } 01464 } 01465 01466 // 01467 // Done building new MTRRs 01468 // 01469 01470 if (NT_SUCCESS(NewRange->Status)) { 01471 01472 // 01473 // Update the MTRRs on all processors 01474 // 01475 01476 #if IDBG 01477 KiDumpMTRR ("Loading the following MTRR:", NewRange->MTRR); 01478 #endif 01479 01480 NewRange->TargetCount = 0; 01481 NewRange->TargetPhase = &Prcb->ReverseStall; 01482 NewRange->Processor = Prcb->Number; 01483 01484 // 01485 // Previously enabled MTRRs with index > NoMTRR 01486 // which could conflict with existing setting should be disabled 01487 // This is taken care of by setting NewRange->NoMTRR to total 01488 // number of variable MTRRs. 
01489 // 01490 01491 NewRange->NoMTRR = (ULONG) KiRangeInfo.Capabilities.u.hw.VarCnt; 01492 01493 // 01494 // Synchronize with other IPI functions which may stall 01495 // 01496 01497 KiLockContextSwap(&OldIrql); 01498 01499 #if !defined(NT_UP) 01500 // 01501 // Collect all the (other) processors 01502 // 01503 01504 TargetProcessors = KeActiveProcessors & ~Prcb->SetMember; 01505 if (TargetProcessors != 0) { 01506 01507 KiIpiSendSynchronousPacket ( 01508 Prcb, 01509 TargetProcessors, 01510 KiLoadMTRRTarget, 01511 (PVOID) NewRange, 01512 NULL, 01513 NULL 01514 ); 01515 01516 // 01517 // Wait for all processors to be collected 01518 // 01519 01520 KiIpiStallOnPacketTargets(TargetProcessors); 01521 01522 // 01523 // All processors are now waiting. Raise to high level to 01524 // ensure this processor doesn't enter the debugger due to 01525 // some interrupt service routine. 01526 // 01527 01528 KeRaiseIrql (HIGH_LEVEL, &OldIrql2); 01529 01530 // 01531 // There's no reason for any debug events now, so signal 01532 // the other processors that they can all disable interrupts 01533 // and being the MTRR update 01534 // 01535 01536 Prcb->ReverseStall += 1; 01537 } 01538 #endif 01539 01540 // 01541 // Update MTRRs 01542 // 01543 01544 KiLoadMTRR (NewRange); 01545 01546 // 01547 // Release ContextSwap lock 01548 // 01549 01550 KiUnlockContextSwap(OldIrql); 01551 01552 01553 #if IDBG 01554 KiDumpMTRR ("Processor MTRR:", NewRange->MTRR); 01555 #endif 01556 01557 } else { 01558 01559 // 01560 // There was an error, put original range database back 01561 // 01562 01563 DBGMSG ("KiCompleteEffectiveRangeChange: mtrr update did not occur\n"); 01564 01565 if (NewRange->Ranges) { 01566 KiRangeInfo.NoRange = NewRange->NoRange; 01567 01568 RtlCopyMemory ( 01569 KiRangeInfo.Ranges, 01570 NewRange->Ranges, 01571 sizeof (ONE_RANGE) * KiRangeInfo.NoRange 01572 ); 01573 } 01574 } 01575 01576 // 01577 // Cleanup 01578 // 01579 01580 ExFreePool (NewRange->Ranges); 01581 ExFreePool 
(NewRange->MTRR); 01582 }

STATIC ULONG KiFindFirstSetLeftBit ( IN ULONGLONG  Set  ) 
 

Definition at line 1690 of file mtrr.c.

References ASSERT, and KiFindFirstSetLeft.

Referenced by KiCompleteEffectiveRangeChange().

01695 : 01696 01697 This function returns a bit position of the most significant 01698 bit set in the passed ULONGLONG parameter. Passed parameter 01699 must be non-zero. 01700 01701 --*/ 01702 { 01703 ULONG bitno; 01704 01705 ASSERT(Set != 0); 01706 for (bitno=56;!(Set & 0xFF00000000000000); bitno -= 8, Set <<= 8) ; 01707 return KiFindFirstSetLeft[Set >> 56] + bitno; 01708 }

STATIC ULONG KiFindFirstSetRightBit ( IN ULONGLONG  Set  ) 
 

Definition at line 1669 of file mtrr.c.

References ASSERT, and KiFindFirstSetRight.

Referenced by KiCompleteEffectiveRangeChange(), KiLengthToMask(), and KiMaskToLength().

01674 : 01675 01676 This function returns a bit position of the least significant 01677 bit set in the passed ULONGLONG parameter. Passed parameter 01678 must be non-zero. 01679 01680 --*/ 01681 { 01682 ULONG bitno; 01683 01684 ASSERT(Set != 0); 01685 for (bitno=0; !(Set & 0xFF); bitno += 8, Set >>= 8) ; 01686 return KiFindFirstSetRight[Set & 0xFF] + bitno; 01687 }

VOID KiInitializeMTRR ( IN BOOLEAN  LastProcessor  ) 
 

Referenced by KiInitMachineDependent().

STATIC ULONGLONG KiLengthToMask ( IN ULONGLONG  Length  ) 
 

Definition at line 1645 of file mtrr.c.

References KiFindFirstSetRightBit(), MTRR_MAX_RANGE_SHIFT, and MTRR_RESVBIT_MASK.

Referenced by KiCompleteEffectiveRangeChange().

01650 : 01651 01652 This function constructs the mask corresponding to the input length 01653 to be set in a variable MTRR register. The length is assumed to be 01654 a multiple of 4K. 01655 01656 --*/ 01657 { 01658 ULONGLONG FullMask = 0xffffff; 01659 01660 if (Length == ((ULONGLONG) 1 << MTRR_MAX_RANGE_SHIFT)) { 01661 return(0); 01662 } else { 01663 return(((FullMask << KiFindFirstSetRightBit(Length)) & 01664 MTRR_RESVBIT_MASK)); 01665 } 01666 }

NTSTATUS KiLoadMTRR ( IN PNEW_RANGE  Context  ) 
 

Definition at line 1796 of file mtrr.c.

References Index, is, it, KeFeatureBits, KF_AMDK6MTRR, KiAmdK6MtrrWRMSR(), KiDisableInterrupts(), KiRestoreInterrupts(), KiSynchronizeMTRRLoad(), MTRR_MSR_DEFAULT, MTRR_MSR_VARIABLE_BASE, MTRR_MSR_VARIABLE_MASK, RDMSR(), the, _MTRR_DEFAULT::u, and WRMSR().

01801 : 01802 01803 This function loads the memory type range registers into all processors 01804 01805 Arguments: 01806 01807 Context - Context which include the MTRRs to load 01808 01809 Return Value: 01810 01811 All processors are set into the new state 01812 01813 --*/ 01814 { 01815 MTRR_DEFAULT Default; 01816 BOOLEAN Enable; 01817 ULONG HldCr0, HldCr4; 01818 ULONG Index; 01819 01820 // 01821 // Disable interrupts 01822 // 01823 01824 Enable = KiDisableInterrupts(); 01825 01826 // 01827 // Synchronize all processors 01828 // 01829 01830 if (!(KeFeatureBits & KF_AMDK6MTRR)) { 01831 KiSynchronizeMTRRLoad (Context); 01832 } 01833 01834 _asm { 01835 ; 01836 ; Get current CR0 01837 ; 01838 01839 mov eax, cr0 01840 mov HldCr0, eax 01841 01842 ; 01843 ; Disable caching & line fill 01844 ; 01845 01846 and eax, not CR0_NW 01847 or eax, CR0_CD 01848 mov cr0, eax 01849 01850 ; 01851 ; Flush caches 01852 ; 01853 01854 ; 01855 ; wbinvd 01856 ; 01857 01858 _emit 0Fh 01859 _emit 09h 01860 01861 ; 01862 ; Get current cr4 01863 ; 01864 01865 _emit 0Fh 01866 _emit 20h 01867 _emit 0E0h ; mov eax, cr4 01868 mov HldCr4, eax 01869 01870 ; 01871 ; Disable global page 01872 ; 01873 01874 and eax, not CR4_PGE 01875 _emit 0Fh 01876 _emit 22h 01877 _emit 0E0h ; mov cr4, eax 01878 01879 ; 01880 ; Flush TLB 01881 ; 01882 01883 mov eax, cr3 01884 mov cr3, eax 01885 } 01886 01887 if (KeFeatureBits & KF_AMDK6MTRR) { 01888 01889 // 01890 // Write the MTRRs 01891 // 01892 01893 KiAmdK6MtrrWRMSR(); 01894 01895 } else { 01896 01897 // 01898 // Disable MTRRs 01899 // 01900 01901 Default.u.QuadPart = RDMSR(MTRR_MSR_DEFAULT); 01902 Default.u.hw.MtrrEnabled = 0; 01903 WRMSR (MTRR_MSR_DEFAULT, Default.u.QuadPart); 01904 01905 // 01906 // Synchronize all processors 01907 // 01908 01909 KiSynchronizeMTRRLoad (Context); 01910 01911 // 01912 // Load new MTRRs 01913 // 01914 01915 for (Index=0; Index < Context->NoMTRR; Index++) { 01916 WRMSR (MTRR_MSR_VARIABLE_BASE+2*Index, 
Context->MTRR[Index].Base.u.QuadPart); 01917 WRMSR (MTRR_MSR_VARIABLE_MASK+2*Index, Context->MTRR[Index].Mask.u.QuadPart); 01918 } 01919 01920 // 01921 // Synchronize all processors 01922 // 01923 01924 KiSynchronizeMTRRLoad (Context); 01925 } 01926 _asm { 01927 01928 ; 01929 ; Flush caches (this should be a "nop", but it was in the Intel reference algorithm) 01930 ; This is required because of aggressive prefetch of both instr + data 01931 ; 01932 01933 ; 01934 ; wbinvd 01935 ; 01936 01937 _emit 0Fh 01938 _emit 09h 01939 01940 ; 01941 ; Flush TLBs (same comment as above) 01942 ; Same explanation as above 01943 ; 01944 01945 mov eax, cr3 01946 mov cr3, eax 01947 } 01948 01949 if (!(KeFeatureBits & KF_AMDK6MTRR)) { 01950 01951 // 01952 // Enable MTRRs 01953 // 01954 01955 Default.u.hw.MtrrEnabled = 1; 01956 WRMSR (MTRR_MSR_DEFAULT, Default.u.QuadPart); 01957 01958 // 01959 // Synchronize all processors 01960 // 01961 01962 KiSynchronizeMTRRLoad (Context); 01963 } 01964 01965 _asm { 01966 ; 01967 ; Restore CR4 (global page enable) 01968 ; 01969 01970 mov eax, HldCr4 01971 _emit 0Fh 01972 _emit 22h 01973 _emit 0E0h ; mov cr4, eax 01974 01975 ; 01976 ; Restore CR0 (cache enable) 01977 ; 01978 01979 mov eax, HldCr0 01980 mov cr0, eax 01981 } 01982 01983 // 01984 // Restore interrupts and return 01985 // 01986 01987 KiRestoreInterrupts (Enable); 01988 return STATUS_SUCCESS; 01989 }

VOID KiLoadMTRRTarget IN PKIPI_CONTEXT  SignalDone,
IN PVOID  NewRange,
IN PVOID  Parameter2,
IN PVOID  Parameter3
 

Definition at line 1766 of file mtrr.c.

References KiLoadMTRR(), PNEW_RANGE, and _NEW_RANGE::TargetPhase.

Referenced by KiCompleteEffectiveRangeChange().

01772 { 01773 PNEW_RANGE Context; 01774 01775 Context = (PNEW_RANGE) NewRange; 01776 01777 // 01778 // Wait for all processors to be ready 01779 // 01780 01781 KiIpiSignalPacketDoneAndStall (SignalDone, Context->TargetPhase); 01782 01783 // 01784 // Update MTRRs 01785 // 01786 01787 KiLoadMTRR (Context); 01788 }

STATIC ULONGLONG KiMaskToLength IN ULONGLONG  Mask  ) 
 

Definition at line 1624 of file mtrr.c.

References KiFindFirstSetRightBit(), and MTRR_MAX_RANGE_SHIFT.

Referenced by KiCompleteEffectiveRangeChange(), and KiInitializeMTRR().

01629 : 01630 01631 This function returns the length specified by a particular 01632 mtrr variable register mask. 01633 01634 --*/ 01635 { 01636 if (Mask == 0) { 01637 // Zero Mask signifies a length of 2**36 01638 return(((ULONGLONG) 1 << MTRR_MAX_RANGE_SHIFT)); 01639 } else { 01640 return(((ULONGLONG) 1 << KiFindFirstSetRightBit(Mask))); 01641 } 01642 }

STATIC ULONG KiRangeWeight IN PONE_RANGE  Range  ) 
 

Definition at line 1586 of file mtrr.c.

References MTRR_TYPE_UC, MTRR_TYPE_USWC, MTRR_TYPE_WB, MTRR_TYPE_WP, and MTRR_TYPE_WT.

Referenced by KiCompleteEffectiveRangeChange().

01591 : 01592 01593 This functions returns a weighting of the passed in range's cache 01594 type. When two or more regions collide within the same h/w region 01595 the types are weighted and that cache type of the higher weight 01596 is used for the collision area. 01597 01598 Arguments: 01599 01600 Range - Range to obtain weighting for 01601 01602 Return Value: 01603 01604 The weight of the particular cache type 01605 01606 --*/ 01607 { 01608 ULONG Weight; 01609 01610 switch (Range->Type) { 01611 case MTRR_TYPE_UC: Weight = 5; break; 01612 case MTRR_TYPE_USWC: Weight = 4; break; 01613 case MTRR_TYPE_WP: Weight = 3; break; 01614 case MTRR_TYPE_WT: Weight = 2; break; 01615 case MTRR_TYPE_WB: Weight = 1; break; 01616 default: Weight = 0; break; 01617 } 01618 01619 return Weight; 01620 }

BOOLEAN KiRemoveRange IN PNEW_RANGE  NewRange,
IN ULONGLONG  Base,
IN ULONGLONG  Limit,
IN PBOOLEAN  RemoveThisType
 

Definition at line 855 of file mtrr.c.

References _ONE_RANGE::Base, DBGMSG, FALSE, KiAddRange(), KiRangeInfo, _ONE_RANGE::Limit, _RANGE_INFO::NoRange, NT_SUCCESS, PONE_RANGE, _RANGE_INFO::Ranges, TRUE, and _ONE_RANGE::Type.

Referenced by KeSetPhysicalCacheTypeRange(), and KiCompleteEffectiveRangeChange().

00863 : 00864 00865 This function removes any range overlapping with the passed range, of 00866 type supplied in RemoveThisType from the global range database. 00867 00868 Arguments: 00869 00870 NewRange - Context information 00871 00872 Base - Base & Limit signify the first & last address of a range 00873 Limit - which is to be removed from the range database 00874 00875 RemoveThisType - A TRUE flag for each type which can not overlap the 00876 target range 00877 00878 00879 Return Value: 00880 00881 TRUE - if the range database was altered such that it may no longer 00882 be sorted. 00883 00884 --*/ 00885 { 00886 ULONG i; 00887 PONE_RANGE Range; 00888 BOOLEAN DatabaseNeedsSorted; 00889 00890 00891 DatabaseNeedsSorted = FALSE; 00892 00893 // 00894 // Check each range 00895 // 00896 00897 for (i=0, Range=KiRangeInfo.Ranges; i < KiRangeInfo.NoRange; i++, Range++) { 00898 00899 // 00900 // If this range type doesn't need to be altered, skip it 00901 // 00902 00903 if (!RemoveThisType[Range->Type]) { 00904 continue; 00905 } 00906 00907 // 00908 // Check range to see if it overlaps with range being removed 00909 // 00910 00911 if (Range->Base < Base) { 00912 00913 if (Range->Limit >= Base && Range->Limit <= Limit) { 00914 00915 // 00916 // Truncate range to not overlap with area being removed 00917 // 00918 00919 Range->Limit = Base - 1; 00920 } 00921 00922 if (Range->Limit > Limit) { 00923 00924 // 00925 // Target area is contained totally within this area. 
00926 // Split into two ranges 00927 // 00928 00929 // 00930 // Add range at end 00931 // 00932 00933 DatabaseNeedsSorted = TRUE; 00934 KiAddRange ( 00935 NewRange, 00936 Limit+1, 00937 Range->Limit, 00938 Range->Type 00939 ); 00940 00941 // 00942 // Turn current range into range at beginning 00943 // 00944 00945 Range->Limit = Base - 1; 00946 } 00947 00948 } else { 00949 00950 // Range->Base >= Base 00951 00952 if (Range->Base <= Limit) { 00953 if (Range->Limit <= Limit) { 00954 // 00955 // This range is totally within the target area. Remove it. 00956 // 00957 00958 DatabaseNeedsSorted = TRUE; 00959 KiRangeInfo.NoRange -= 1; 00960 Range->Base = KiRangeInfo.Ranges[KiRangeInfo.NoRange].Base; 00961 Range->Limit = KiRangeInfo.Ranges[KiRangeInfo.NoRange].Limit; 00962 Range->Type = KiRangeInfo.Ranges[KiRangeInfo.NoRange].Type; 00963 00964 // 00965 // recheck at current location 00966 // 00967 00968 i -= 1; 00969 Range -= 1; 00970 00971 } else { 00972 00973 // 00974 // Bump beginning past area being removed 00975 // 00976 00977 Range->Base = Limit + 1; 00978 } 00979 } 00980 } 00981 } 00982 00983 if (!NT_SUCCESS (NewRange->Status)) { 00984 DBGMSG ("KiRemoveRange: failure\n"); 00985 } 00986 00987 return DatabaseNeedsSorted; 00988 }

VOID KiStartEffectiveRangeChange IN PNEW_RANGE  NewRange  ) 
 

Definition at line 1060 of file mtrr.c.

References _RANGE_INFO::Capabilities, ExAllocatePoolWithTag, KiRangeInfo, NonPagedPool, _RANGE_INFO::NoRange, _RANGE_INFO::Ranges, and _MTRR_CAPABILITIES::u.

Referenced by KeRestoreMtrr(), and KeSetPhysicalCacheTypeRange().

01065 : 01066 01067 This functions sets up the context information required to 01068 track & later effect a range change in hardware 01069 01070 Arguments: 01071 01072 NewRange - Context information 01073 01074 Return Value: 01075 01076 None 01077 01078 --*/ 01079 { 01080 ULONG size; 01081 01082 // 01083 // Allocate working space for MTRR image 01084 // 01085 01086 size = sizeof(MTRR_RANGE) * ((ULONG) KiRangeInfo.Capabilities.u.hw.VarCnt + 1); 01087 NewRange->MTRR = ExAllocatePoolWithTag (NonPagedPool, size, ' eK'); 01088 if (!NewRange->MTRR) { 01089 NewRange->Status = STATUS_UNSUCCESSFUL; 01090 return ; 01091 } 01092 01093 RtlZeroMemory (NewRange->MTRR, size); 01094 01095 // 01096 // Save current range information in case of an error 01097 // 01098 01099 size = sizeof(ONE_RANGE) * KiRangeInfo.NoRange; 01100 NewRange->NoRange = KiRangeInfo.NoRange; 01101 NewRange->Ranges = ExAllocatePoolWithTag (NonPagedPool, size, ' eK'); 01102 if (!NewRange->Ranges) { 01103 NewRange->Status = STATUS_UNSUCCESSFUL; 01104 return ; 01105 } 01106 01107 RtlCopyMemory (NewRange->Ranges, KiRangeInfo.Ranges, size); 01108 }

VOID KiSynchronizeMTRRLoad IN PNEW_RANGE  Context  ) 
 

Definition at line 1993 of file mtrr.c.

References KeGetCurrentPrcb, and KeNumberProcessors.

Referenced by KiLoadMTRR().

01996 { 01997 01998 #if !defined(NT_UP) 01999 02000 ULONG CurrentPhase; 02001 volatile ULONG *TargetPhase; 02002 PKPRCB Prcb; 02003 02004 TargetPhase = Context->TargetPhase; 02005 Prcb = KeGetCurrentPrcb(); 02006 02007 if (Prcb->Number == (CCHAR) Context->Processor) { 02008 02009 // 02010 // Wait for all processors to signal 02011 // 02012 02013 while (Context->TargetCount != (ULONG) KeNumberProcessors - 1) { 02014 KeYieldProcessor (); 02015 } 02016 02017 // 02018 // Reset count for next time 02019 // 02020 02021 Context->TargetCount = 0; 02022 02023 // 02024 // Let waiting processor go to next synchronization point 02025 // 02026 02027 InterlockedIncrement ((PULONG) TargetPhase); 02028 02029 02030 } else { 02031 02032 // 02033 // Get current phase 02034 // 02035 02036 CurrentPhase = *TargetPhase; 02037 02038 // 02039 // Signal that we have completed the current phase 02040 // 02041 02042 InterlockedIncrement ((PULONG) &Context->TargetCount); 02043 02044 // 02045 // Wait for new phase to begin 02046 // 02047 02048 while (*TargetPhase == CurrentPhase) { 02049 KeYieldProcessor (); 02050 } 02051 } 02052 02053 #endif 02054 02055 }


Variable Documentation

RANGE_INFO KiRangeInfo
 

Definition at line 272 of file mtrr.c.

Referenced by KeRestoreMtrr(), KeSetPhysicalCacheTypeRange(), KiAddRange(), KiCompleteEffectiveRangeChange(), KiInitializeMTRR(), KiRemoveRange(), and KiStartEffectiveRangeChange().

KSPIN_LOCK KiRangeLock
 

Definition at line 265 of file mtrr.c.

Referenced by KeRestoreMtrr(), KeSetPhysicalCacheTypeRange(), KiAmdK6InitializeMTRR(), KiAmdK6MtrrSetMemoryType(), and KiInitializeMTRR().


Generated on Sat May 15 19:44:48 2004 for test by doxygen 1.3.7