
[v4,1/3] perf/x86/amd/uncore: Add flex array to struct amd_uncore_ctx

Message ID AS8PR02MB723778DEAB8E5C9F976953B18BFD2@AS8PR02MB7237.eurprd02.prod.outlook.com (mailing list archive)
State Handled Elsewhere
Series Hardening perf subsystem

Commit Message

Erick Archer June 1, 2024, 4:56 p.m. UTC
This is an effort to get rid of all multiplications from allocation
functions in order to prevent integer overflows [1][2].
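
As an aside, the hazard with open-coded multiplication is that the
product can wrap around in size_t before it ever reaches the
allocator. A minimal, hypothetical sketch (the "item" structure and
"alloc_items" helper below are illustrative, not from this patch):

#include <linux/slab.h>
#include <linux/types.h>

struct item {
	u64 payload;
};

static struct item *alloc_items(size_t n)
{
	/*
	 * Open-coded arithmetic such as
	 *
	 *	kzalloc(n * sizeof(struct item), GFP_KERNEL);
	 *
	 * can overflow for large "n" and silently return a buffer
	 * smaller than intended. kcalloc() performs the same
	 * multiplication with an overflow check and returns NULL
	 * on overflow instead.
	 */
	return kcalloc(n, sizeof(struct item), GFP_KERNEL);
}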

The "struct amd_uncore_ctx" can be refactored to use a flex array for
the "events" member. This way, the allocation/freeing of the memory can
be simplified.

Specifically, since the "curr" variable is a pointer to the
"amd_uncore_ctx" structure and its "events" member is now a flexible
array:

struct amd_uncore_ctx {
        [...]
        struct perf_event *events[];
};

the two-step allocation can be simplified into a single kzalloc_node()
call, using the struct_size() helper to do the size calculation for
the memory to be allocated.
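
For reference, struct_size() comes from <linux/overflow.h> and performs
the size arithmetic with overflow checking, saturating to SIZE_MAX on
overflow so the allocation fails cleanly instead of succeeding with a
short buffer. A rough sketch of what it evaluates to in this case:

#include <linux/overflow.h>

/*
 * struct_size(curr, events, pmu->num_counters) is roughly
 *
 *	sizeof(*curr) + pmu->num_counters * sizeof(curr->events[0])
 *
 * i.e. the header of "struct amd_uncore_ctx" plus space for
 * "num_counters" perf_event pointers in the trailing flex array.
 */
curr = kzalloc_node(struct_size(curr, events, pmu->num_counters),
		    GFP_KERNEL, node);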

This way, the code is more readable and safer.

Link: https://www.kernel.org/doc/html/latest/process/deprecated.html#open-coded-arithmetic-in-allocator-arguments [1]
Link: https://github.com/KSPP/linux/issues/160 [2]
Suggested-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Erick Archer <erick.archer@outlook.com>
---
 arch/x86/events/amd/uncore.c | 18 +++++-------------
 1 file changed, 5 insertions(+), 13 deletions(-)

Patch

diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 0fafe233bba4..9d43048bfdab 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -37,8 +37,8 @@  static int pmu_version;
 struct amd_uncore_ctx {
 	int refcnt;
 	int cpu;
-	struct perf_event **events;
 	struct hlist_node node;
+	struct perf_event *events[];
 };
 
 struct amd_uncore_pmu {
@@ -430,10 +430,8 @@  static void amd_uncore_ctx_free(struct amd_uncore *uncore, unsigned int cpu)
 		if (cpu == ctx->cpu)
 			cpumask_clear_cpu(cpu, &pmu->active_mask);
 
-		if (!--ctx->refcnt) {
-			kfree(ctx->events);
+		if (!--ctx->refcnt)
 			kfree(ctx);
-		}
 
 		*per_cpu_ptr(pmu->ctx, cpu) = NULL;
 	}
@@ -478,19 +476,13 @@  static int amd_uncore_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
 		/* Allocate context if sibling does not exist */
 		if (!curr) {
 			node = cpu_to_node(cpu);
-			curr = kzalloc_node(sizeof(*curr), GFP_KERNEL, node);
+			curr = kzalloc_node(struct_size(curr, events,
+							pmu->num_counters),
+					    GFP_KERNEL, node);
 			if (!curr)
 				goto fail;
 
 			curr->cpu = cpu;
-			curr->events = kzalloc_node(sizeof(*curr->events) *
-						    pmu->num_counters,
-						    GFP_KERNEL, node);
-			if (!curr->events) {
-				kfree(curr);
-				goto fail;
-			}
-
 			cpumask_set_cpu(cpu, &pmu->active_mask);
 		}