@@ -35,6 +35,9 @@
#define PSR_CAT (1<<1)
#define PSR_CDP (1<<2)
+#define CAT_CBM_LEN_MASK 0x1f
+#define CAT_COS_MAX_MASK 0xffff
+
/*
* Per SDM chapter 'Cache Allocation Technology: Cache Mask Configuration',
* the MSRs range from 0C90H through 0D0FH (inclusive), enables support for
@@ -127,6 +130,13 @@ struct feat_node {
struct list_head list;
};
+struct cpuid_leaf_regs {
+ unsigned int eax;
+ unsigned int ebx;
+ unsigned int ecx;
+ unsigned int edx;
+};
+
struct psr_assoc {
uint64_t val;
uint64_t cos_mask;
@@ -134,11 +144,76 @@ struct psr_assoc {
struct psr_cmt *__read_mostly psr_cmt;
+static struct psr_socket_info *__read_mostly socket_info;
+
static unsigned int opt_psr;
static unsigned int __initdata opt_rmid_max = 255;
+static unsigned int __read_mostly opt_cos_max = MAX_COS_REG_CNT;
static uint64_t rmid_mask;
static DEFINE_PER_CPU(struct psr_assoc, psr_assoc);
+/*
+ * Declare global feature list entry for every feature to facilitate the
+ * feature list creation. It will be allocated in psr_cpu_prepare() and
+ * inserted into feature list in cpu_init_work().
+ */
+static struct feat_node *feat_l3_cat;
+
+/* Common functions. */
+static void free_feature(struct psr_socket_info *info)
+{
+ struct feat_node *feat, *next;
+
+ if ( !info )
+ return;
+
+ list_for_each_entry_safe(feat, next, &info->feat_list, list)
+ {
+ clear_bit(feat->feature, &info->feat_mask);
+ list_del(&feat->list);
+ xfree(feat);
+ }
+}
+
+/* L3 CAT functions implementation. */
+static void l3_cat_init_feature(struct cpuid_leaf_regs regs,
+ struct feat_node *feat,
+ struct psr_socket_info *info)
+{
+ struct psr_cat_hw_info l3_cat;
+ unsigned int socket;
+
+ /* No valid value so do not enable feature. */
+ if ( !regs.eax || !regs.edx )
+ return;
+
+ l3_cat.cbm_len = (regs.eax & CAT_CBM_LEN_MASK) + 1;
+ l3_cat.cos_max = min(opt_cos_max, regs.edx & CAT_COS_MAX_MASK);
+
+ /* cos=0 is reserved as default cbm(all bits within cbm_len are 1). */
+ feat->cos_reg_val[0] = (1ull << l3_cat.cbm_len) - 1;
+
+ feat->feature = PSR_SOCKET_L3_CAT;
+ __set_bit(PSR_SOCKET_L3_CAT, &info->feat_mask);
+
+ feat->info.l3_cat_info = l3_cat;
+
+ info->nr_feat++;
+
+ /* Add this feature into list. */
+ list_add_tail(&feat->list, &info->feat_list);
+
+ socket = cpu_to_socket(smp_processor_id());
+ if ( opt_cpu_info )
+ printk(XENLOG_INFO
+ "L3 CAT: enabled on socket %u, cos_max:%u, cbm_len:%u\n",
+ socket, feat->info.l3_cat_info.cos_max,
+ feat->info.l3_cat_info.cbm_len);
+}
+
+static const struct feat_ops l3_cat_ops = {
+};
+
static void __init parse_psr_bool(char *s, char *value, char *feature,
unsigned int mask)
{
@@ -178,6 +253,9 @@ static void __init parse_psr_param(char *s)
if ( val_str && !strcmp(s, "rmid_max") )
opt_rmid_max = simple_strtoul(val_str, NULL, 0);
+ if ( val_str && !strcmp(s, "cos_max") )
+ opt_cos_max = simple_strtoul(val_str, NULL, 0);
+
s = ss + 1;
} while ( ss );
}
@@ -333,18 +411,108 @@ void psr_domain_free(struct domain *d)
psr_free_rmid(d);
}
+static void cpu_init_work(void)
+{
+ struct psr_socket_info *info;
+ unsigned int socket;
+ unsigned int cpu = smp_processor_id();
+ struct feat_node *feat;
+ struct cpuid_leaf_regs regs;
+
+ if ( !cpu_has(&current_cpu_data, X86_FEATURE_PQE) )
+ return;
+ else if ( current_cpu_data.cpuid_level < PSR_CPUID_LEVEL_CAT )
+ {
+ clear_bit(X86_FEATURE_PQE, current_cpu_data.x86_capability);
+ return;
+ }
+
+ socket = cpu_to_socket(cpu);
+ info = socket_info + socket;
+ if ( info->feat_mask )
+ return;
+
+ INIT_LIST_HEAD(&info->feat_list);
+ spin_lock_init(&info->ref_lock);
+
+ cpuid_count(PSR_CPUID_LEVEL_CAT, 0,
+ &regs.eax, &regs.ebx, &regs.ecx, &regs.edx);
+ if ( regs.ebx & PSR_RESOURCE_TYPE_L3 )
+ {
+ cpuid_count(PSR_CPUID_LEVEL_CAT, 1,
+ &regs.eax, &regs.ebx, &regs.ecx, &regs.edx);
+
+ feat = feat_l3_cat;
+ feat_l3_cat = NULL;
+ feat->ops = l3_cat_ops;
+
+ l3_cat_init_feature(regs, feat, info);
+ }
+}
+
+static void cpu_fini_work(unsigned int cpu)
+{
+ unsigned int socket = cpu_to_socket(cpu);
+
+ if ( !socket_cpumask[socket] || cpumask_empty(socket_cpumask[socket]) )
+ {
+ free_feature(socket_info + socket);
+ }
+}
+
+static void __init init_psr(void)
+{
+ if ( opt_cos_max < 1 )
+ {
+ printk(XENLOG_INFO "CAT: disabled, cos_max is too small\n");
+ return;
+ }
+
+ socket_info = xzalloc_array(struct psr_socket_info, nr_sockets);
+
+ if ( !socket_info )
+ {
+ printk(XENLOG_INFO "Fail to alloc socket_info!\n");
+ return;
+ }
+}
+
+static void __init psr_free(void)
+{
+ unsigned int i;
+
+ for ( i = 0; i < nr_sockets; i++ )
+ free_feature(&socket_info[i]);
+
+ xfree(socket_info);
+ socket_info = NULL;
+}
+
static int psr_cpu_prepare(unsigned int cpu)
{
+ if ( !socket_info )
+ return 0;
+
+ /* Malloc memory for the global feature head here. */
+ if ( feat_l3_cat == NULL &&
+ (feat_l3_cat = xzalloc(struct feat_node)) == NULL )
+ return -ENOMEM;
+
return 0;
}
static void psr_cpu_init(void)
{
+ if ( socket_info )
+ cpu_init_work();
+
psr_assoc_init();
}
static void psr_cpu_fini(unsigned int cpu)
{
+ if ( socket_info )
+ cpu_fini_work(cpu);
return;
}
@@ -386,10 +554,14 @@ static int __init psr_presmp_init(void)
if ( (opt_psr & PSR_CMT) && opt_rmid_max )
init_psr_cmt(opt_rmid_max);
- psr_cpu_prepare(0);
+ if ( opt_psr & PSR_CAT )
+ init_psr();
+
+ if ( psr_cpu_prepare(0) )
+ psr_free();
psr_cpu_init();
- if ( psr_cmt_enabled() )
+ if ( psr_cmt_enabled() || socket_info )
register_cpu_notifier(&cpu_nfb);
return 0;
This patch implements the CPU init and free flow including L3 CAT initialization and feature list free. Signed-off-by: Yi Sun <yi.y.sun@linux.intel.com> --- xen/arch/x86/psr.c | 176 ++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 174 insertions(+), 2 deletions(-)