
[v3,03/10] x86/mtrr: replace use_intel() with a local flag

Message ID 20220908084914.21703-4-jgross@suse.com (mailing list archive)
State New, archived
Series x86: make pat and mtrr independent from each other

Commit Message

Jürgen Groß Sept. 8, 2022, 8:49 a.m. UTC
In the MTRR code, use_intel() is used in only one source file, and the
relevant use_intel_if member of struct mtrr_ops is set only in
generic_mtrr_ops.

Replace use_intel() with a single flag in cacheinfo.c, which can be set
when assigning generic_mtrr_ops to mtrr_if. This allows dropping
use_intel_if from mtrr_ops while preparing to support PAT without MTRR.
As another preparation for the PAT/MTRR decoupling, use one bit for
MTRR control and one for PAT control. For now both bits are set
together; this can be changed later.
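
Condensed, the new approach looks like this (names as introduced by the
patch below):

	/* cacheinfo.c: which caching MSRs the kernel controls */
	unsigned int cache_generic;
	#define CACHE_GENERIC_MTRR 0x01
	#define CACHE_GENERIC_PAT  0x02

	/* mtrr_bp_init(): generic interface in use, MTRRs enabled */
	cache_generic |= CACHE_GENERIC_MTRR | CACHE_GENERIC_PAT;

	/* call sites test a bit instead of calling use_intel() */
	if (cache_generic & CACHE_GENERIC_MTRR)
		mtrr_state_warn();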

As the new flag will be set only if mtrr_enabled is set, the test for
mtrr_enabled can be dropped in some places.

At the same time drop the local mtrr_enabled() function and rename
the __mtrr_enabled flag to mtrr_enabled.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- new patch
---
 arch/x86/include/asm/cacheinfo.h   |  5 +++
 arch/x86/kernel/cpu/cacheinfo.c    |  3 ++
 arch/x86/kernel/cpu/mtrr/generic.c |  1 -
 arch/x86/kernel/cpu/mtrr/mtrr.c    | 58 ++++++++++++++----------------
 arch/x86/kernel/cpu/mtrr/mtrr.h    |  2 --
 5 files changed, 35 insertions(+), 34 deletions(-)

Comments

Borislav Petkov Sept. 11, 2022, 10:16 a.m. UTC | #1
On Thu, Sep 08, 2022 at 10:49:07AM +0200, Juergen Gross wrote:
> diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h
> index 86b2e0dcc4bf..1aeafa9888f7 100644
> --- a/arch/x86/include/asm/cacheinfo.h
> +++ b/arch/x86/include/asm/cacheinfo.h
> @@ -2,6 +2,11 @@
>  #ifndef _ASM_X86_CACHEINFO_H
>  #define _ASM_X86_CACHEINFO_H
>  
> +/* Kernel controls MTRR and/or PAT MSRs. */
> +extern unsigned int cache_generic;

So this should be called something more descriptive like

	memory_caching_types

or so to denote that this is a bitfield of supported memory caching
technologies. The code then would read as

	if (memory_caching_types & CACHE_MTRR)

The name's still not optimal tho - needs more brooding over.

> +#define CACHE_GENERIC_MTRR 0x01
> +#define CACHE_GENERIC_PAT  0x02

And those should be CACHE_{MTRR,PAT}.

>  void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu);
>  void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);
>  
> diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
> index 66556833d7af..3b05d3ade7a6 100644
> --- a/arch/x86/kernel/cpu/cacheinfo.c
> +++ b/arch/x86/kernel/cpu/cacheinfo.c
> @@ -35,6 +35,9 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
>  /* Shared L2 cache maps */
>  DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map);
>  
> +/* Kernel controls MTRR and/or PAT MSRs. */
> +unsigned int cache_generic;

This should either be __ro_after_init and initialized to 0 or you need
accessors...
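
E.g. (untested, just to illustrate - a global is zero-initialized
anyway, the annotation write-protects it after init):

	unsigned int cache_generic __ro_after_init;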

>  u32 num_var_ranges;
> -static bool __mtrr_enabled;
> -
> -static bool mtrr_enabled(void)
> -{
> -	return __mtrr_enabled;
> -}
> +static bool mtrr_enabled;

Hmm, I don't like this. There's way too many boolean flags in the mtrr
code. There's mtrr_state.enabled too. ;-\

Can we set (or clear) X86_FEATURE_MTRR to denote MTRR enablement status
and get rid of one more boolean flag?
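
Roughly, and untested:

	/* instead of a separate bool, clear the feature bit
	 * wherever MTRRs turn out to be unusable: */
	setup_clear_cpu_cap(X86_FEATURE_MTRR);

	/* and test it at the former mtrr_enabled() sites: */
	if (!boot_cpu_has(X86_FEATURE_MTRR))
		return -ENODEV;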

...

>  void __init mtrr_bp_init(void)
>  {
> +	bool use_generic = false;
>  	u32 phys_addr;
>  
>  	init_ifs();
> @@ -694,6 +691,7 @@ void __init mtrr_bp_init(void)
>  
>  	if (boot_cpu_has(X86_FEATURE_MTRR)) {
>  		mtrr_if = &generic_mtrr_ops;
> +		use_generic = true;
>  		size_or_mask = SIZE_OR_MASK_BITS(36);
>  		size_and_mask = 0x00f00000;
>  		phys_addr = 36;
> @@ -755,15 +753,18 @@ void __init mtrr_bp_init(void)
>  	}
>  
>  	if (mtrr_if) {
> -		__mtrr_enabled = true;
> -		set_num_var_ranges();
> +		mtrr_enabled = true;
> +		set_num_var_ranges(use_generic);

You don't need use_generic either:

		set_num_var_ranges(mtrr_if == &generic_mtrr_ops);

(The reason being I wanna get rid of that nasty minefield of boolean
vars all round that code).
Jürgen Groß Sept. 12, 2022, 9:10 a.m. UTC | #2
On 11.09.22 12:16, Borislav Petkov wrote:
> On Thu, Sep 08, 2022 at 10:49:07AM +0200, Juergen Gross wrote:
>> diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h
>> index 86b2e0dcc4bf..1aeafa9888f7 100644
>> --- a/arch/x86/include/asm/cacheinfo.h
>> +++ b/arch/x86/include/asm/cacheinfo.h
>> @@ -2,6 +2,11 @@
>>   #ifndef _ASM_X86_CACHEINFO_H
>>   #define _ASM_X86_CACHEINFO_H
>>   
>> +/* Kernel controls MTRR and/or PAT MSRs. */
>> +extern unsigned int cache_generic;
> 
> So this should be called something more descriptive like
> 
> 	memory_caching_types

In the end this variable doesn't specify which caching types are available,
but the ways to select/control the caching types.

So what about "memory_caching_select" or "memory_caching_control" instead?

> or so to denote that this is a bitfield of supported memory caching
> technologies. The code then would read as
> 
> 	if (memory_caching_types & CACHE_MTRR)
> 
> The name's still not optimal tho - needs more brooding over.
> 
>> +#define CACHE_GENERIC_MTRR 0x01
>> +#define CACHE_GENERIC_PAT  0x02
> 
> And those should be CACHE_{MTRR,PAT}.

Fine with me.

>>   void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu);
>>   void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);
>>   
>> diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
>> index 66556833d7af..3b05d3ade7a6 100644
>> --- a/arch/x86/kernel/cpu/cacheinfo.c
>> +++ b/arch/x86/kernel/cpu/cacheinfo.c
>> @@ -35,6 +35,9 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
>>   /* Shared L2 cache maps */
>>   DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map);
>>   
>> +/* Kernel controls MTRR and/or PAT MSRs. */
>> +unsigned int cache_generic;
> 
> This should either be __ro_after_init and initialized to 0 or you need
> accessors...

Okay.

> 
>>   u32 num_var_ranges;
>> -static bool __mtrr_enabled;
>> -
>> -static bool mtrr_enabled(void)
>> -{
>> -	return __mtrr_enabled;
>> -}
>> +static bool mtrr_enabled;
> 
> Hmm, I don't like this. There's way too many boolean flags in the mtrr
> code. There's mtrr_state.enabled too. ;-\
> 
> Can we set (or clear) X86_FEATURE_MTRR to denote MTRR enablement status
> and get rid of one more boolean flag?

I'll have a look.

> 
> ...
> 
>>   void __init mtrr_bp_init(void)
>>   {
>> +	bool use_generic = false;
>>   	u32 phys_addr;
>>   
>>   	init_ifs();
>> @@ -694,6 +691,7 @@ void __init mtrr_bp_init(void)
>>   
>>   	if (boot_cpu_has(X86_FEATURE_MTRR)) {
>>   		mtrr_if = &generic_mtrr_ops;
>> +		use_generic = true;
>>   		size_or_mask = SIZE_OR_MASK_BITS(36);
>>   		size_and_mask = 0x00f00000;
>>   		phys_addr = 36;
>> @@ -755,15 +753,18 @@ void __init mtrr_bp_init(void)
>>   	}
>>   
>>   	if (mtrr_if) {
>> -		__mtrr_enabled = true;
>> -		set_num_var_ranges();
>> +		mtrr_enabled = true;
>> +		set_num_var_ranges(use_generic);
> 
> You don't need use_generic either:
> 
> 		set_num_var_ranges(mtrr_if == &generic_mtrr_ops);
> 
> (The reason being I wanna get rid of that nasty minefield of boolean
> vars all round that code).

Fine with me.


Juergen
Borislav Petkov Sept. 19, 2022, 7:10 p.m. UTC | #3
On Mon, Sep 12, 2022 at 11:10:29AM +0200, Juergen Gross wrote:
> In the end this variable doesn't specify which caching types are available,
> but the ways to select/control the caching types.
> 
> So what about "memory_caching_select" or "memory_caching_control" instead?

_control sounds like the right thing. As in, which of the memory caching
things the kernel controls. Along with a comment above it explaining
what exactly this is. Yap.
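
I.e., something along these lines (just a sketch):

	/*
	 * Which memory caching technologies (MTRR, PAT) the kernel
	 * controls, i.e., for which it programs the MSRs itself.
	 */
	unsigned int memory_caching_control __ro_after_init;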

Thx.
Jürgen Groß Sept. 28, 2022, 10:17 a.m. UTC | #4
On 12.09.22 11:10, Juergen Gross wrote:
> On 11.09.22 12:16, Borislav Petkov wrote:
>> On Thu, Sep 08, 2022 at 10:49:07AM +0200, Juergen Gross wrote:
>>> diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h
>>> index 86b2e0dcc4bf..1aeafa9888f7 100644
>>> --- a/arch/x86/include/asm/cacheinfo.h
>>> +++ b/arch/x86/include/asm/cacheinfo.h
>>> @@ -2,6 +2,11 @@
>>>   #ifndef _ASM_X86_CACHEINFO_H
>>>   #define _ASM_X86_CACHEINFO_H
>>> +/* Kernel controls MTRR and/or PAT MSRs. */
>>> +extern unsigned int cache_generic;
>>
>> So this should be called something more descriptive like
>>
>>     memory_caching_types
> 
> In the end this variable doesn't specify which caching types are available,
> but the ways to select/control the caching types.
> 
> So what about "memory_caching_select" or "memory_caching_control" instead?
> 
>> or so to denote that this is a bitfield of supported memory caching
>> technologies. The code then would read as
>>
>>     if (memory_caching_types & CACHE_MTRR)
>>
>> The name's still not optimal tho - needs more brooding over.
>>
>>> +#define CACHE_GENERIC_MTRR 0x01
>>> +#define CACHE_GENERIC_PAT  0x02
>>
>> And those should be CACHE_{MTRR,PAT}.
> 
> Fine with me.
> 
>>>   void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu);
>>>   void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);
>>> diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
>>> index 66556833d7af..3b05d3ade7a6 100644
>>> --- a/arch/x86/kernel/cpu/cacheinfo.c
>>> +++ b/arch/x86/kernel/cpu/cacheinfo.c
>>> @@ -35,6 +35,9 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
>>>   /* Shared L2 cache maps */
>>>   DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map);
>>> +/* Kernel controls MTRR and/or PAT MSRs. */
>>> +unsigned int cache_generic;
>>
>> This should either be __ro_after_init and initialized to 0 or you need
>> accessors...
> 
> Okay.
> 
>>
>>>   u32 num_var_ranges;
>>> -static bool __mtrr_enabled;
>>> -
>>> -static bool mtrr_enabled(void)
>>> -{
>>> -    return __mtrr_enabled;
>>> -}
>>> +static bool mtrr_enabled;
>>
>> Hmm, I don't like this. There's way too many boolean flags in the mtrr
>> code. There's mtrr_state.enabled too. ;-\
>>
>> Can we set (or clear) X86_FEATURE_MTRR to denote MTRR enablement status
>> and get rid of one more boolean flag?
> 
> I'll have a look.

Hmm, this might be a little bit risky.

It can be done, but then X86_FEATURE_MTRR could be set even for cpus
NOT supporting it (the 32-bit special cases AMD, CENTAUR, CYRIX).

So we have the following alternatives:

- do the switch to X86_FEATURE_MTRR, risking breakage in later code
   changes that query X86_FEATURE_MTRR and assume the MTRR MSRs are
   available

- keep the current bool

- replace the bool with mtrr_if != NULL

- add a new synthetic feature, e.g. X86_FEATURE_MTRR_ENABLED (which in
   fact would be just a replacement of the current bool)

My preference would be the replacement with mtrr_if != NULL.
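
I.e., something like this (sketch only):

	/* mtrr_if doubles as the MTRR enablement state */
	static bool mtrr_enabled(void)
	{
		return !!mtrr_if;
	}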


Juergen

Patch

diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h
index 86b2e0dcc4bf..1aeafa9888f7 100644
--- a/arch/x86/include/asm/cacheinfo.h
+++ b/arch/x86/include/asm/cacheinfo.h
@@ -2,6 +2,11 @@ 
 #ifndef _ASM_X86_CACHEINFO_H
 #define _ASM_X86_CACHEINFO_H
 
+/* Kernel controls MTRR and/or PAT MSRs. */
+extern unsigned int cache_generic;
+#define CACHE_GENERIC_MTRR 0x01
+#define CACHE_GENERIC_PAT  0x02
+
 void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu);
 void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);
 
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index 66556833d7af..3b05d3ade7a6 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -35,6 +35,9 @@  DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
 /* Shared L2 cache maps */
 DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map);
 
+/* Kernel controls MTRR and/or PAT MSRs. */
+unsigned int cache_generic;
+
 struct _cache_table {
 	unsigned char descriptor;
 	char cache_type;
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index cd64eab02393..81742870ecc5 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -917,7 +917,6 @@  int positive_have_wrcomb(void)
  * Generic structure...
  */
 const struct mtrr_ops generic_mtrr_ops = {
-	.use_intel_if		= 1,
 	.set_all		= generic_set_all,
 	.get			= generic_get_mtrr,
 	.get_free_region	= generic_get_free_region,
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 2746cac9d8a9..7d7d5bd30219 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -46,6 +46,7 @@ 
 #include <linux/syscore_ops.h>
 #include <linux/rcupdate.h>
 
+#include <asm/cacheinfo.h>
 #include <asm/cpufeature.h>
 #include <asm/e820/api.h>
 #include <asm/mtrr.h>
@@ -58,12 +59,7 @@ 
 #define MTRR_TO_PHYS_WC_OFFSET 1000
 
 u32 num_var_ranges;
-static bool __mtrr_enabled;
-
-static bool mtrr_enabled(void)
-{
-	return __mtrr_enabled;
-}
+static bool mtrr_enabled;
 
 unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
 static DEFINE_MUTEX(mtrr_mutex);
@@ -119,11 +115,11 @@  static int have_wrcomb(void)
 }
 
 /*  This function returns the number of variable MTRRs  */
-static void __init set_num_var_ranges(void)
+static void __init set_num_var_ranges(bool use_generic)
 {
 	unsigned long config = 0, dummy;
 
-	if (use_intel())
+	if (use_generic)
 		rdmsr(MSR_MTRRcap, config, dummy);
 	else if (is_cpu(AMD) || is_cpu(HYGON))
 		config = 2;
@@ -303,7 +299,7 @@  int mtrr_add_page(unsigned long base, unsigned long size,
 	int i, replace, error;
 	mtrr_type ltype;
 
-	if (!mtrr_enabled())
+	if (!mtrr_enabled)
 		return -ENXIO;
 
 	error = mtrr_if->validate_add_page(base, size, type);
@@ -451,7 +447,7 @@  static int mtrr_check(unsigned long base, unsigned long size)
 int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
 	     bool increment)
 {
-	if (!mtrr_enabled())
+	if (!mtrr_enabled)
 		return -ENODEV;
 	if (mtrr_check(base, size))
 		return -EINVAL;
@@ -480,7 +476,7 @@  int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 	unsigned long lbase, lsize;
 	int error = -EINVAL;
 
-	if (!mtrr_enabled())
+	if (!mtrr_enabled)
 		return -ENODEV;
 
 	max = num_var_ranges;
@@ -540,7 +536,7 @@  int mtrr_del_page(int reg, unsigned long base, unsigned long size)
  */
 int mtrr_del(int reg, unsigned long base, unsigned long size)
 {
-	if (!mtrr_enabled())
+	if (!mtrr_enabled)
 		return -ENODEV;
 	if (mtrr_check(base, size))
 		return -EINVAL;
@@ -566,7 +562,7 @@  int arch_phys_wc_add(unsigned long base, unsigned long size)
 {
 	int ret;
 
-	if (pat_enabled() || !mtrr_enabled())
+	if (pat_enabled() || !mtrr_enabled)
 		return 0;  /* Success!  (We don't need to do anything.) */
 
 	ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true);
@@ -686,6 +682,7 @@  int __initdata changed_by_mtrr_cleanup;
  */
 void __init mtrr_bp_init(void)
 {
+	bool use_generic = false;
 	u32 phys_addr;
 
 	init_ifs();
@@ -694,6 +691,7 @@  void __init mtrr_bp_init(void)
 
 	if (boot_cpu_has(X86_FEATURE_MTRR)) {
 		mtrr_if = &generic_mtrr_ops;
+		use_generic = true;
 		size_or_mask = SIZE_OR_MASK_BITS(36);
 		size_and_mask = 0x00f00000;
 		phys_addr = 36;
@@ -755,15 +753,18 @@  void __init mtrr_bp_init(void)
 	}
 
 	if (mtrr_if) {
-		__mtrr_enabled = true;
-		set_num_var_ranges();
+		mtrr_enabled = true;
+		set_num_var_ranges(use_generic);
 		init_table();
-		if (use_intel()) {
+		if (use_generic) {
 			/* BIOS may override */
-			__mtrr_enabled = get_mtrr_state();
+			mtrr_enabled = get_mtrr_state();
 
-			if (mtrr_enabled())
+			if (mtrr_enabled) {
 				mtrr_bp_pat_init();
+				cache_generic |= CACHE_GENERIC_MTRR |
+						 CACHE_GENERIC_PAT;
+			}
 
 			if (mtrr_cleanup(phys_addr)) {
 				changed_by_mtrr_cleanup = 1;
@@ -772,7 +773,7 @@  void __init mtrr_bp_init(void)
 		}
 	}
 
-	if (!mtrr_enabled()) {
+	if (!mtrr_enabled) {
 		pr_info("Disabled\n");
 
 		/*
@@ -786,10 +787,7 @@  void __init mtrr_bp_init(void)
 
 void mtrr_ap_init(void)
 {
-	if (!mtrr_enabled())
-		return;
-
-	if (!use_intel() || mtrr_aps_delayed_init)
+	if (!cache_generic || mtrr_aps_delayed_init)
 		return;
 
 	/*
@@ -816,7 +814,7 @@  void mtrr_save_state(void)
 {
 	int first_cpu;
 
-	if (!mtrr_enabled())
+	if (!mtrr_enabled)
 		return;
 
 	first_cpu = cpumask_first(cpu_online_mask);
@@ -825,9 +823,7 @@  void mtrr_save_state(void)
 
 void set_mtrr_aps_delayed_init(void)
 {
-	if (!mtrr_enabled())
-		return;
-	if (!use_intel())
+	if (!cache_generic)
 		return;
 
 	mtrr_aps_delayed_init = true;
@@ -838,7 +834,7 @@  void set_mtrr_aps_delayed_init(void)
  */
 void mtrr_aps_init(void)
 {
-	if (!use_intel() || !mtrr_enabled())
+	if (!cache_generic)
 		return;
 
 	/*
@@ -855,7 +851,7 @@  void mtrr_aps_init(void)
 
 void mtrr_bp_restore(void)
 {
-	if (!use_intel() || !mtrr_enabled())
+	if (!cache_generic)
 		return;
 
 	mtrr_if->set_all();
@@ -863,10 +859,10 @@  void mtrr_bp_restore(void)
 
 static int __init mtrr_init_finialize(void)
 {
-	if (!mtrr_enabled())
+	if (!mtrr_enabled)
 		return 0;
 
-	if (use_intel()) {
+	if (cache_generic & CACHE_GENERIC_MTRR) {
 		if (!changed_by_mtrr_cleanup)
 			mtrr_state_warn();
 		return 0;
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index 2ac99e561181..88b1c4b6174a 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -14,7 +14,6 @@  extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
 
 struct mtrr_ops {
 	u32	vendor;
-	u32	use_intel_if;
 	void	(*set)(unsigned int reg, unsigned long base,
 		       unsigned long size, mtrr_type type);
 	void	(*set_all)(void);
@@ -61,7 +60,6 @@  extern u64 size_or_mask, size_and_mask;
 extern const struct mtrr_ops *mtrr_if;
 
 #define is_cpu(vnd)	(mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
-#define use_intel()	(mtrr_if && mtrr_if->use_intel_if == 1)
 
 extern unsigned int num_var_ranges;
 extern u64 mtrr_tom2;