@@ -230,14 +230,15 @@ static ssize_t region_size_show(struct device *dev,
static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
region_size_show, NULL);
-static ssize_t align_show(struct device *dev,
+static ssize_t region_align_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dax_region *dax_region = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", dax_region->align);
}
-static DEVICE_ATTR_RO(align);
+static struct device_attribute dev_attr_region_align =
+ __ATTR(align, 0400, region_align_show, NULL);
#define for_each_dax_region_resource(dax_region, res) \
for (res = (dax_region)->res.child; res; res = res->sibling)
@@ -488,7 +489,7 @@ static umode_t dax_region_visible(struct kobject *kobj, struct attribute *a,
static struct attribute *dax_region_attributes[] = {
&dev_attr_available_size.attr,
&dev_attr_region_size.attr,
- &dev_attr_align.attr,
+ &dev_attr_region_align.attr,
&dev_attr_create.attr,
&dev_attr_seed.attr,
&dev_attr_delete.attr,
@@ -855,14 +856,13 @@ static ssize_t size_show(struct device *dev,
return sprintf(buf, "%llu\n", size);
}
-static bool alloc_is_aligned(struct dax_region *dax_region,
- resource_size_t size)
+static bool alloc_is_aligned(resource_size_t size, unsigned long align)
{
/*
* The minimum mapping granularity for a device instance is a
* single subsection, unless the arch says otherwise.
*/
- return IS_ALIGNED(size, max_t(unsigned long, dax_region->align,
+ return IS_ALIGNED(size, max_t(unsigned long, align,
memremap_compat_align()));
}
@@ -958,7 +958,7 @@ static ssize_t dev_dax_resize(struct dax_region *dax_region,
return dev_dax_shrink(dev_dax, size);
to_alloc = size - dev_size;
- if (dev_WARN_ONCE(dev, !alloc_is_aligned(dax_region, to_alloc),
+ if (dev_WARN_ONCE(dev, !alloc_is_aligned(to_alloc, dev_dax->align),
"resize of %pa misaligned\n", &to_alloc))
return -ENXIO;
@@ -1022,7 +1022,7 @@ static ssize_t size_store(struct device *dev, struct device_attribute *attr,
if (rc)
return rc;
- if (!alloc_is_aligned(dax_region, val)) {
+ if (!alloc_is_aligned(val, dev_dax->align)) {
dev_dbg(dev, "%s: size: %lld misaligned\n", __func__, val);
return -EINVAL;
}
@@ -1041,6 +1041,87 @@ static ssize_t size_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RW(size);
+static ssize_t align_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+
+ return sprintf(buf, "%d\n", dev_dax->align);
+}
+
+static ssize_t dev_dax_set_align(struct dev_dax *dev_dax,
+ unsigned long long align)
+{
+ resource_size_t dev_size = dev_dax_size(dev_dax);
+ struct device *dev = &dev_dax->dev;
+ ssize_t rc, i;
+
+ if (dev->driver)
+ return -EBUSY;
+
+ rc = -EINVAL;
+ if (dev_size > 0 && !alloc_is_aligned(dev_size, align)) {
+ dev_dbg(dev, "%s: align %llx invalid for size %llu\n",
+ __func__, align, dev_size);
+ return rc;
+ }
+
+ for (i = 0; i < dev_dax->nr_range; i++) {
+ size_t len = range_len(&dev_dax->ranges[i].range);
+
+ if (!alloc_is_aligned(len, align)) {
+ dev_dbg(dev, "%s: align %llx invalid for range %ld\n",
+ __func__, align, i);
+ return rc;
+ }
+ }
+
+ switch (align) {
+ case PUD_SIZE:
+ if (!IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
+ break;
+ fallthrough;
+ case PMD_SIZE:
+ if (!has_transparent_hugepage())
+ break;
+ fallthrough;
+ case PAGE_SIZE:
+ rc = 0;
+ break;
+ }
+
+ if (!rc)
+ dev_dax->align = align;
+
+ return rc;
+}
+
+static ssize_t align_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct dev_dax *dev_dax = to_dev_dax(dev);
+ struct dax_region *dax_region = dev_dax->region;
+ unsigned long long val;
+ ssize_t rc;
+
+ rc = kstrtoull(buf, 0, &val);
+ if (rc)
+ return -ENXIO;
+
+ device_lock(dax_region->dev);
+ if (!dax_region->dev->driver) {
+ device_unlock(dax_region->dev);
+ return -ENXIO;
+ }
+
+ device_lock(dev);
+ rc = dev_dax_set_align(dev_dax, val);
+ device_unlock(dev);
+ device_unlock(dax_region->dev);
+ return rc == 0 ? len : rc;
+}
+static DEVICE_ATTR_RW(align);
+
static int dev_dax_target_node(struct dev_dax *dev_dax)
{
struct dax_region *dax_region = dev_dax->region;
@@ -1101,7 +1182,8 @@ static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
return 0;
if (a == &dev_attr_numa_node.attr && !IS_ENABLED(CONFIG_NUMA))
return 0;
- if (a == &dev_attr_size.attr && is_static(dax_region))
+ if ((a == &dev_attr_align.attr ||
+ a == &dev_attr_size.attr) && is_static(dax_region))
return 0444;
return a->mode;
}
@@ -1110,6 +1192,7 @@ static struct attribute *dev_dax_attributes[] = {
&dev_attr_modalias.attr,
&dev_attr_size.attr,
&dev_attr_target_node.attr,
+ &dev_attr_align.attr,
&dev_attr_resource.attr,
&dev_attr_numa_node.attr,
NULL,
Introduce a device align attribute. While doing so, rename the region align
attribute to name it more explicitly as region-scoped, but keep the sysfs
entry named @align to retain the API for tools like daxctl.

Changing the alignment is not always valid: for example, certain mappings
may have been created with 2M and then we switch to 1G. So we validate all
existing ranges against the new value being attempted, post resizing.

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
---
 drivers/dax/bus.c | 101 +++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 92 insertions(+), 9 deletions(-)
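
For reference, a minimal userspace sketch of exercising the new per-device
attribute (not part of the patch; the dax0.0 path and the 1G value are
assumptions for illustration). A write is expected to fail with EBUSY while
the device is bound to a driver, and with EINVAL when an existing allocation
does not satisfy the requested alignment, mirroring dev_dax_set_align()
above:

/*
 * Hypothetical example only: request a 1G alignment on a dev-dax instance
 * via the sysfs attribute added by this patch. Adjust the device path for
 * the target system.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/bus/dax/devices/dax0.0/align"; /* assumed device */
	const char *val = "1073741824";	/* 1G; kstrtoull(, 0, ) also accepts 0x40000000 */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, val, strlen(val)) < 0)
		fprintf(stderr, "align update failed: %s\n", strerror(errno));
	close(fd);
	return 0;
}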