@@ -1012,6 +1012,9 @@ typedef struct pglist_data {
/* Per-node vmstats */
struct per_cpu_nodestat __percpu *per_cpu_nodestats;
atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS];
+#ifdef CONFIG_NUMA
+ struct memory_tier __rcu *memtier;
+#endif
} pg_data_t;
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
@@ -4,6 +4,7 @@
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/memory.h>
+#include <linux/mmzone.h>
#include <linux/memory-tiers.h>
#include "internal.h"
@@ -136,12 +137,18 @@ static struct memory_tier *find_create_memory_tier(struct memory_dev_type *memty
static struct memory_tier *__node_get_memory_tier(int node)
{
- struct memory_dev_type *memtype;
+ pg_data_t *pgdat;
- memtype = node_memory_types[node];
- if (memtype && node_isset(node, memtype->nodes))
- return memtype->memtier;
- return NULL;
+ pgdat = NODE_DATA(node);
+ if (!pgdat)
+ return NULL;
+ /*
+ * Since we hold memory_tier_lock, we can avoid
+ * RCU read locks when accessing the details. No
+ * parallel updates are possible here.
+ */
+ return rcu_dereference_check(pgdat->memtier,
+ lockdep_is_held(&memory_tier_lock));
}
#ifdef CONFIG_MIGRATION
@@ -294,6 +301,8 @@ static struct memory_tier *set_node_memory_tier(int node)
{
struct memory_tier *memtier;
struct memory_dev_type *memtype;
+ pg_data_t *pgdat = NODE_DATA(node);
+
lockdep_assert_held_once(&memory_tier_lock);
@@ -305,24 +314,45 @@ static struct memory_tier *set_node_memory_tier(int node)
memtype = node_memory_types[node];
node_set(node, memtype->nodes);
memtier = find_create_memory_tier(memtype);
+ if (!IS_ERR(memtier))
+ rcu_assign_pointer(pgdat->memtier, memtier);
return memtier;
}
static void destroy_memory_tier(struct memory_tier *memtier)
{
list_del(&memtier->list);
+ /*
+ * synchronize_rcu in clear_node_memory_tier makes sure
+ * we don't have rcu access to this memory tier.
+ */
kfree(memtier);
}
static bool clear_node_memory_tier(int node)
{
bool cleared = false;
+ pg_data_t *pgdat;
struct memory_tier *memtier;
+ pgdat = NODE_DATA(node);
+ if (!pgdat)
+ return false;
+
+ /*
+ * Make sure that anybody looking at NODE_DATA who finds
+ * a valid memtier finds memory_dev_types with nodes still
+ * linked to the memtier. We achieve this by waiting for
+ * RCU read sections to finish using synchronize_rcu.
+ * This also enables us to free the destroyed memory tier
+ * with kfree instead of kfree_rcu.
+ */
memtier = __node_get_memory_tier(node);
if (memtier) {
struct memory_dev_type *memtype;
+ rcu_assign_pointer(pgdat->memtier, NULL);
+ synchronize_rcu();
memtype = node_memory_types[node];
node_clear(node, memtype->nodes);
if (nodes_empty(memtype->nodes)) {
Also update the different helpers to use NODE_DATA()->memtier. Since the
node-specific memtier can change when a NUMA node is reassigned to a
different memory tier, accessing NODE_DATA()->memtier needs to happen
under an RCU read lock or memory_tier_lock.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 include/linux/mmzone.h |  3 +++
 mm/memory-tiers.c      | 40 +++++++++++++++++++++++++++++++++++-----
 2 files changed, 38 insertions(+), 5 deletions(-)
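
For reference, a minimal reader-side sketch of the locking rule described
above; example_node_has_memory_tier() is a hypothetical helper, not part of
this patch, showing how a path that does not hold memory_tier_lock would
dereference NODE_DATA(node)->memtier inside an RCU read-side critical
section, which is what allows clear_node_memory_tier() to rely on
synchronize_rcu() before the tier is freed with kfree:

static bool example_node_has_memory_tier(int node)
{
	pg_data_t *pgdat = NODE_DATA(node);
	bool assigned;

	if (!pgdat)
		return false;

	/*
	 * Readers that do not hold memory_tier_lock must access
	 * pgdat->memtier inside an RCU read-side critical section.
	 */
	rcu_read_lock();
	assigned = rcu_dereference(pgdat->memtier) != NULL;
	rcu_read_unlock();

	return assigned;
}

Writers, by contrast, keep updating pgdat->memtier only under
memory_tier_lock via rcu_assign_pointer(), as set_node_memory_tier() and
clear_node_memory_tier() do in the hunks above.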