
[v2,30/31] EDAC/amd64: Add support for address translation on DF3 systems

Message ID 20210623192002.3671647-31-yazen.ghannam@amd.com (mailing list archive)
State New, archived
Headers show
Series AMD MCA Address Translation Updates

Commit Message

Yazen Ghannam June 23, 2021, 7:20 p.m. UTC
DF3-based systems (Rome and later) support new interleaving modes, and a
number of bit fields have changed or moved entirely. Add support for
these new modes and fields.

Refactoring should be minimal due to earlier changes, and most updates
will be additions.

Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
---
Link:
https://lkml.kernel.org/r/20210507190140.18854-26-Yazen.Ghannam@amd.com

v1->v2:
* Moved from arch/x86 to EDAC.
* Use function pointers as needed.
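
The new make_space_for_cs_id_cod_hash() relies on the expand_bits()
helper introduced earlier in this series (not visible in this patch) to
open holes in the reconstructed address for the CS ID bits. A minimal
sketch of the helper, assuming it opens num_bits zeroed bits at position
start_bit by shifting the upper part of the value left:

/*
 * Sketch only -- behavior assumed for illustration; see the earlier
 * patch in this series for the real helper.
 */
static void expand_bits(u8 start_bit, u8 num_bits, u64 *value)
{
	u64 lo, hi;

	if (!start_bit) {
		*value <<= num_bits;
		return;
	}

	/* Bits below the hole stay in place. */
	lo = *value & GENMASK_ULL(start_bit - 1, 0);

	/* Bits at and above the hole shift up by num_bits. */
	hi = (*value & GENMASK_ULL(63, start_bit)) << num_bits;

	*value = lo | hi;
}

For example, expand_bits(8, 1, &addr) on a HASH_COD4_2CH region opens a
hole at bit 8 for CS ID bit 0 before insert_cs_id_cod_hash() fills it in.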

 drivers/edac/amd64_edac.c | 189 +++++++++++++++++++++++++++++++++++++-
 1 file changed, 187 insertions(+), 2 deletions(-)

Patch

diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index aa8b8d0b319d..882ac3fbc832 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1059,9 +1059,11 @@  enum df_reg_names {
 	DRAM_BASE_ADDR,
 	DRAM_LIMIT_ADDR,
 	DRAM_OFFSET,
+	DF_GLOBAL_CTL,
 
 	/* Function 1 */
 	SYS_FAB_ID_MASK,
+	SYS_FAB_ID_MASK_1,
 };
 
 static struct df_reg df_regs[] = {
@@ -1075,14 +1077,23 @@  static struct df_reg df_regs[] = {
 	[DRAM_LIMIT_ADDR]	=	{0, 0x114},
 	/* D18F0x1B4 (DramOffset) */
 	[DRAM_OFFSET]		=	{0, 0x1B4},
+	/* D18F0x3F8 (DfGlobalCtrl) */
+	[DF_GLOBAL_CTL]		=	{0, 0x3F8},
 	/* D18F1x208 (SystemFabricIdMask) */
 	[SYS_FAB_ID_MASK]	=	{1, 0x208},
+	/* D18F1x20C (SystemFabricIdMask1) */
+	[SYS_FAB_ID_MASK_1]	=	{1, 0x20C},
 };
 
 /* These are mapped 1:1 to the hardware values. Special cases are set at > 0x20. */
 enum intlv_modes {
 	NONE		= 0x00,
 	NOHASH_2CH	= 0x01,
+	NOHASH_4CH	= 0x03,
+	NOHASH_8CH	= 0x05,
+	HASH_COD4_2CH	= 0x0C,
+	HASH_COD2_4CH	= 0x0D,
+	HASH_COD1_8CH	= 0x0E,
 	DF2_HASH_2CH	= 0x21,
 };
 
@@ -1094,6 +1105,7 @@  struct addr_ctx {
 	u32 reg_base_addr;
 	u32 reg_limit_addr;
 	u32 reg_fab_id_mask0;
+	u32 reg_fab_id_mask1;
 	u16 cs_fabric_id;
 	u16 die_id_mask;
 	u16 socket_id_mask;
@@ -1105,6 +1117,7 @@  struct addr_ctx {
 	u8 intlv_num_dies;
 	u8 intlv_num_sockets;
 	u8 cs_id;
+	u8 node_id_shift;
 	int (*dehash_addr)(struct addr_ctx *ctx);
 	void (*make_space_for_cs_id)(struct addr_ctx *ctx);
 	void (*insert_cs_id)(struct addr_ctx *ctx);
@@ -1262,6 +1275,164 @@  struct data_fabric_ops df2_ops = {
 	.get_component_id_mask		=	&get_component_id_mask_df2,
 };
 
+static u64 get_hi_addr_offset_df3(struct addr_ctx *ctx)
+{
+	return (ctx->reg_dram_offset & GENMASK_ULL(31, 12)) << 16;
+}
+
+static void make_space_for_cs_id_cod_hash(struct addr_ctx *ctx)
+{
+	u8 num_intlv_bits = ctx->intlv_num_chan;
+
+	num_intlv_bits += ctx->intlv_num_sockets;
+	expand_bits(ctx->intlv_addr_bit, 1, &ctx->ret_addr);
+	if (num_intlv_bits > 1)
+		expand_bits(12, num_intlv_bits - 1, &ctx->ret_addr);
+}
+
+static void insert_cs_id_cod_hash(struct addr_ctx *ctx)
+{
+	ctx->ret_addr |= ((ctx->cs_id & 0x1) << ctx->intlv_addr_bit);
+	ctx->ret_addr |= ((ctx->cs_id & 0xE) << 11);
+}
+
+static int dehash_addr_df3(struct addr_ctx *ctx)
+{
+	u8 hashed_bit, intlv_ctl_64k, intlv_ctl_2M, intlv_ctl_1G;
+	u32 tmp;
+
+	if (amd_df_indirect_read(0, df_regs[DF_GLOBAL_CTL], DF_BROADCAST, &tmp))
+		return -EINVAL;
+
+	intlv_ctl_64k = !!((tmp >> 20) & 0x1);
+	intlv_ctl_2M  = !!((tmp >> 21) & 0x1);
+	intlv_ctl_1G  = !!((tmp >> 22) & 0x1);
+
+	hashed_bit =	(ctx->ret_addr >> 14) ^
+			((ctx->ret_addr >> 18) & intlv_ctl_64k) ^
+			((ctx->ret_addr >> 23) & intlv_ctl_2M) ^
+			((ctx->ret_addr >> 32) & intlv_ctl_1G) ^
+			(ctx->ret_addr >> ctx->intlv_addr_bit);
+
+	hashed_bit &= BIT(0);
+
+	if (hashed_bit != ((ctx->ret_addr >> ctx->intlv_addr_bit) & BIT(0)))
+		ctx->ret_addr ^= BIT(ctx->intlv_addr_bit);
+
+	if (ctx->intlv_mode != HASH_COD2_4CH &&
+	    ctx->intlv_mode != HASH_COD1_8CH)
+		return 0;
+
+	hashed_bit =	(ctx->ret_addr >> 12) ^
+			((ctx->ret_addr >> 16) & intlv_ctl_64k) ^
+			((ctx->ret_addr >> 21) & intlv_ctl_2M) ^
+			((ctx->ret_addr >> 30) & intlv_ctl_1G);
+
+	hashed_bit &= BIT(0);
+
+	if (hashed_bit != ((ctx->ret_addr >> 12) & BIT(0)))
+		ctx->ret_addr ^= BIT(12);
+
+	if (ctx->intlv_mode != HASH_COD1_8CH)
+		return 0;
+
+	hashed_bit =	(ctx->ret_addr >> 13) ^
+			((ctx->ret_addr >> 17) & intlv_ctl_64k) ^
+			((ctx->ret_addr >> 22) & intlv_ctl_2M) ^
+			((ctx->ret_addr >> 31) & intlv_ctl_1G);
+
+	hashed_bit &= BIT(0);
+
+	if (hashed_bit != ((ctx->ret_addr >> 13) & BIT(0)))
+		ctx->ret_addr ^= BIT(13);
+
+	return 0;
+}
+
+static int get_intlv_mode_df3(struct addr_ctx *ctx)
+{
+	ctx->intlv_mode = (ctx->reg_base_addr >> 2) & 0xF;
+
+	if (ctx->intlv_mode == HASH_COD4_2CH ||
+	    ctx->intlv_mode == HASH_COD2_4CH ||
+	    ctx->intlv_mode == HASH_COD1_8CH) {
+		ctx->make_space_for_cs_id = &make_space_for_cs_id_cod_hash;
+		ctx->insert_cs_id = &insert_cs_id_cod_hash;
+		ctx->dehash_addr = &dehash_addr_df3;
+	} else {
+		ctx->make_space_for_cs_id = &make_space_for_cs_id_simple;
+		ctx->insert_cs_id = &insert_cs_id_simple;
+	}
+
+	return 0;
+}
+
+static u8 get_intlv_addr_sel_df3(struct addr_ctx *ctx)
+{
+	return (ctx->reg_base_addr >> 9) & 0x7;
+}
+
+static void get_intlv_num_dies_df3(struct addr_ctx *ctx)
+{
+	ctx->intlv_num_dies  = (ctx->reg_base_addr >> 6) & 0x3;
+}
+
+static void get_intlv_num_sockets_df3(struct addr_ctx *ctx)
+{
+	ctx->intlv_num_sockets = (ctx->reg_base_addr >> 8) & 0x1;
+}
+
+static u8 get_die_id_shift_df3(struct addr_ctx *ctx)
+{
+	return ctx->node_id_shift;
+}
+
+static u8 get_socket_id_shift_df3(struct addr_ctx *ctx)
+{
+	return ((ctx->reg_fab_id_mask1 >> 8) & 0x3) + ctx->node_id_shift;
+}
+
+static int get_masks_df3(struct addr_ctx *ctx)
+{
+	if (amd_df_indirect_read(0, df_regs[SYS_FAB_ID_MASK_1],
+				 DF_BROADCAST, &ctx->reg_fab_id_mask1))
+		return -EINVAL;
+
+	ctx->node_id_shift = ctx->reg_fab_id_mask1 & 0xF;
+
+	ctx->die_id_mask = (ctx->reg_fab_id_mask1 >> 16) & 0x7;
+	ctx->die_id_mask <<= ctx->node_id_shift;
+
+	ctx->socket_id_mask = (ctx->reg_fab_id_mask1 >> 24) & 0x7;
+	ctx->socket_id_mask <<= ctx->node_id_shift;
+
+	return 0;
+}
+
+static u16 get_dst_fabric_id_df3(struct addr_ctx *ctx)
+{
+	return ctx->reg_limit_addr & 0x3FF;
+}
+
+static u16 get_component_id_mask_df3(struct addr_ctx *ctx)
+{
+	return ctx->reg_fab_id_mask0 & 0x3FF;
+}
+
+struct data_fabric_ops df3_ops = {
+	.get_hi_addr_offset		=	&get_hi_addr_offset_df3,
+	.get_intlv_mode			=	&get_intlv_mode_df3,
+	.get_intlv_addr_sel		=	&get_intlv_addr_sel_df3,
+	.get_intlv_num_dies		=	&get_intlv_num_dies_df3,
+	.get_intlv_num_sockets		=	&get_intlv_num_sockets_df3,
+	.get_cs_fabric_id		=	&get_cs_fabric_id_df2,
+	.get_masks			=	&get_masks_df3,
+	.get_die_id_shift		=	&get_die_id_shift_df3,
+	.get_socket_id_shift		=	&get_socket_id_shift_df3,
+	.get_dst_fabric_id		=	&get_dst_fabric_id_df3,
+	.get_component_id_mask		=	&get_component_id_mask_df3,
+};
+
 struct data_fabric_ops *df_ops;
 
 static int set_df_ops(struct addr_ctx *ctx)
@@ -1270,6 +1441,11 @@  static int set_df_ops(struct addr_ctx *ctx)
 				 DF_BROADCAST, &ctx->reg_fab_id_mask0))
 		return -EINVAL;
 
+	if ((ctx->reg_fab_id_mask0 & 0xFF) != 0) {
+		df_ops = &df3_ops;
+		return 0;
+	}
+
 	df_ops = &df2_ops;
 
 	return 0;
@@ -1332,8 +1508,8 @@  static int get_intlv_addr_bit(struct addr_ctx *ctx)
 {
 	u8 intlv_addr_sel = df_ops->get_intlv_addr_sel(ctx);
 
-	/* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
-	if (intlv_addr_sel > 3) {
+	/* {0, 1, 2, 3, 4} map to address bits {8, 9, 10, 11, 12} respectively */
+	if (intlv_addr_sel > 4) {
 		pr_debug("Invalid interleave address select %d.\n", intlv_addr_sel);
 		return -EINVAL;
 	}
@@ -1351,9 +1527,18 @@  static void get_intlv_num_chan(struct addr_ctx *ctx)
 		ctx->intlv_num_chan = 0;
 		break;
 	case NOHASH_2CH:
+	case HASH_COD4_2CH:
 	case DF2_HASH_2CH:
 		ctx->intlv_num_chan = 1;
 		break;
+	case NOHASH_4CH:
+	case HASH_COD2_4CH:
+		ctx->intlv_num_chan = 2;
+		break;
+	case NOHASH_8CH:
+	case HASH_COD1_8CH:
+		ctx->intlv_num_chan = 3;
+		break;
 	default:
 		/* Valid interleaving modes were checked earlier. */
 		break;
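
For reference, the dehashing in dehash_addr_df3() works because the COD
channel hash is its own inverse: hardware derives the channel-select bit
by XOR-ing the original address bit with higher-order address bits gated
by the DfGlobalCtrl interleave enables, so XOR-ing the same terms back
out of the inserted CS ID bit recovers the original bit. A small
standalone demonstration of that property for the 2-channel case, using
hypothetical address and control values (not taken from this patch):

#include <stdint.h>
#include <stdio.h>

/* Gated higher-order address bits folded into the COD hash (2-channel case). */
static uint64_t cod_terms(uint64_t addr, int ctl_64k, int ctl_2m, int ctl_1g)
{
	return ((addr >> 14) ^
		((addr >> 18) & ctl_64k) ^
		((addr >> 23) & ctl_2m) ^
		((addr >> 32) & ctl_1g)) & 1;
}

int main(void)
{
	uint64_t sys_addr = 0x123456789aULL;	 /* hypothetical system address */
	int bit = 8;				 /* InterleaveAddrSel 0 -> address bit 8 */
	int ctl_64k = 1, ctl_2m = 0, ctl_1g = 0; /* hypothetical DfGlobalCtrl enables */

	/* Forward: hardware hashes the original bit to pick the channel (CS ID bit 0). */
	uint64_t chan = ((sys_addr >> bit) & 1) ^ cod_terms(sys_addr, ctl_64k, ctl_2m, ctl_1g);

	/* Translation first inserts the CS ID at the interleave bit... */
	uint64_t addr = (sys_addr & ~(1ULL << bit)) | (chan << bit);

	/* ...then XORs the same gated terms back out, as dehash_addr_df3() does. */
	if (cod_terms(addr, ctl_64k, ctl_2m, ctl_1g))
		addr ^= 1ULL << bit;

	printf("recovered address %s\n", addr == sys_addr ? "matches" : "differs");
	return 0;
}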