x86: shrink pat_x_mtrr_type to its essentials
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Suresh B Siddha <suresh.b.siddha@intel.com>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
6cf514fce1
commit
dd0c7c4903
|
@@ -161,29 +161,21 @@ static DEFINE_SPINLOCK(memtype_lock); /* protects memtype list */
|
|||
*/
|
||||
/*
 * pat_x_mtrr_type - resolve the effective cache type for a PAT request
 * @start:    physical start address of the range
 * @end:      physical end address of the range
 * @req_type: requested PAT memory type (_PAGE_CACHE_* value)
 *
 * Returns the _PAGE_CACHE_* type that should actually be used for the
 * range, after consulting the MTRRs where PAT alone cannot decide.
 *
 * NOTE(review): the scraped source contained two concatenated versions
 * of this function; everything after the unconditional
 * "return _PAGE_CACHE_WB;" was unreachable dead code and has been
 * removed. Reachable behavior is unchanged.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
	u8 mtrr_type;

	/*
	 * We return the PAT request directly for types where PAT takes
	 * precedence with respect to MTRR and for UC_MINUS.
	 * Consistency checks with other PAT requests is done later
	 * while going through memtype list.
	 */
	if (req_type == _PAGE_CACHE_WC ||
	    req_type == _PAGE_CACHE_UC_MINUS ||
	    req_type == _PAGE_CACHE_UC)
		return req_type;

	/*
	 * Look for MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	mtrr_type = mtrr_type_lookup(start, end);
	if (mtrr_type == MTRR_TYPE_UNCACHABLE)
		return _PAGE_CACHE_UC;
	if (mtrr_type == MTRR_TYPE_WRCOMB)
		return _PAGE_CACHE_WC;

	/* MTRR says neither UC nor WC; WB is the effective type. */
	return _PAGE_CACHE_WB;
}
|
||||
|
||||
/*
|
||||
|
|
Loading…
Reference in New Issue