@@ -1109,6 +1109,26 @@ static void pipapo_map(struct nft_pipapo_match *m,
f->mt[map[i].to + j].e = e;
}
+/**
+ * pipapo_free_scratch() - Free per-CPU map at original (not aligned) address
+ * @m: Matching data
+ * @cpu: CPU number
+ */
+static void pipapo_free_scratch(const struct nft_pipapo_match *m, unsigned int cpu)
+{
+ struct nft_pipapo_scratch *s;
+ void *mem;
+
+ s = *per_cpu_ptr(m->scratch, cpu);
+ if (!s)
+ return;
+
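+ /* The scratch area may have been shifted forward to satisfy NFT_PIPAPO_ALIGN;
+ * pass the pointer originally returned by the allocator to kfree().
+ */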
+ mem = s;
+ mem -= s->align_off;
+ kfree(mem);
+}
+
/**
* pipapo_realloc_scratch() - Reallocate scratch maps for partial match results
* @clone: Copy of matching data with pending insertions and deletions
@@ -1141,7 +1159,7 @@ static int pipapo_realloc_scratch(struct nft_pipapo_match *clone,
return -ENOMEM;
}
- kfree(*per_cpu_ptr(clone->scratch, i));
+ pipapo_free_scratch(clone, i);
*per_cpu_ptr(clone->scratch, i) = scratch;
@@ -1368,7 +1386,7 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
}
out_scratch_realloc:
for_each_possible_cpu(i)
- kfree(*per_cpu_ptr(new->scratch, i));
+ pipapo_free_scratch(new, i);
#ifdef NFT_PIPAPO_ALIGN
free_percpu(new->scratch_aligned);
#endif
@@ -1652,7 +1670,7 @@ static void pipapo_free_match(struct nft_pipapo_match *m)
int i;
for_each_possible_cpu(i)
- kfree(*per_cpu_ptr(m->scratch, i));
+ pipapo_free_scratch(m, i);
#ifdef NFT_PIPAPO_ALIGN
free_percpu(m->scratch_aligned);
@@ -2252,7 +2270,7 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx,
free_percpu(m->scratch_aligned);
#endif
for_each_possible_cpu(cpu)
- kfree(*per_cpu_ptr(m->scratch, cpu));
+ pipapo_free_scratch(m, cpu);
free_percpu(m->scratch);
pipapo_free_fields(m);
kfree(m);
@@ -2269,7 +2287,7 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx,
free_percpu(priv->clone->scratch_aligned);
#endif
for_each_possible_cpu(cpu)
- kfree(*per_cpu_ptr(priv->clone->scratch, cpu));
+ pipapo_free_scratch(priv->clone, cpu);
free_percpu(priv->clone->scratch);
pipapo_free_fields(priv->clone);