@@ -296,6 +296,8 @@ EXPORT_SYMBOL_GPL(dax_attribute_group);
* @pgoff: offset in pages from the start of the device to translate
* @nr_pages: number of consecutive pages caller can handle relative to @pfn
* @kaddr: output parameter that returns a virtual address mapping of pfn
+ * Direct access through this pointer must be guarded by calls to
+ * dax_mk_{readwrite,noaccess}().
* @pfn: output parameter that returns an absolute pfn translation of @pgoff
*
* Return: negative errno if an error occurs, otherwise the number of
@@ -389,6 +391,58 @@ void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
#endif
EXPORT_SYMBOL_GPL(dax_flush);
+bool dax_map_protected(struct dax_device *dax_dev)
+{
+ if (!dax_alive(dax_dev))
+ return false;
+
+ if (dax_dev->ops->map_protected)
+ return dax_dev->ops->map_protected(dax_dev);
+ return false;
+}
+EXPORT_SYMBOL_GPL(dax_map_protected);
+
+/**
+ * dax_mk_readwrite() - make protected dax devices read/write
+ * @dax_dev: the dax device representing the memory to access
+ *
+ * Any access to the kaddr memory returned by dax_direct_access() must be
+ * guarded by dax_mk_readwrite() and dax_mk_noaccess().  This allows dax
+ * devices which place additional protections on their mappings to relax
+ * those protections for the thread accessing the memory.
+ *
+ * NOTE: both calls must be made from the same thread of execution, and both
+ * must be guarded by dax_read_lock(), which is also a requirement for
+ * dax_direct_access().
+ */
+void dax_mk_readwrite(struct dax_device *dax_dev)
+{
+ if (!dax_alive(dax_dev))
+ return;
+
+ if (dax_dev->ops->mk_readwrite)
+ dax_dev->ops->mk_readwrite(dax_dev);
+}
+EXPORT_SYMBOL_GPL(dax_mk_readwrite);
+
+/**
+ * dax_mk_noaccess() - restore protection to dax devices if needed
+ * @dax_dev: the dax device representing the memory to access
+ *
+ * See dax_direct_access() and dax_mk_readwrite().
+ *
+ * NOTE: must be called prior to dax_read_unlock().
+ */
+void dax_mk_noaccess(struct dax_device *dax_dev)
+{
+ if (!dax_alive(dax_dev))
+ return;
+
+ if (dax_dev->ops->mk_noaccess)
+ dax_dev->ops->mk_noaccess(dax_dev);
+}
+EXPORT_SYMBOL_GPL(dax_mk_noaccess);
+
void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
if (wc)
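/*
 * A minimal usage sketch of the guard pattern described above, not taken
 * from the patch: example_read_from_dax() and its parameters are
 * hypothetical.  The only requirements reflected here are the ones the
 * kernel-doc states: dax_read_lock() is held around dax_direct_access(),
 * and dax_mk_readwrite()/dax_mk_noaccess() bracket the direct access from
 * a single thread.
 */
static int example_read_from_dax(struct dax_device *dax_dev, pgoff_t pgoff,
				 void *buf, size_t len)
{
	void *kaddr;
	long avail;
	int id;

	id = dax_read_lock();
	avail = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
	if (avail < 0) {
		dax_read_unlock(id);
		return avail;
	}

	dax_mk_readwrite(dax_dev);	/* relax any extra mapping protection */
	memcpy(buf, kaddr, len);	/* direct access; len assumed <= PAGE_SIZE */
	dax_mk_noaccess(dax_dev);	/* restore the protection */

	dax_read_unlock(id);
	return 0;
}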
@@ -284,7 +284,13 @@ static int persistent_memory_claim(struct dm_writecache *wc)
r = -EOPNOTSUPP;
goto err2;
}
- if (da != p) {
+
+ /*
+ * Force the write cache to map the pages itself (rather than use the
+ * kaddr from dax_direct_access()) if the dax device mapping is protected
+ * or if fewer pages were returned than were requested.
+ */
+ if (dax_map_protected(wc->ssd_dev->dax_dev) || da != p) {
long i;
wc->memory_map = NULL;
pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
@@ -728,7 +728,9 @@ static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_d
return rc;
}
vto = kmap_atomic(to);
+ dax_mk_readwrite(dax_dev);
copy_user_page(vto, (void __force *)kaddr, vaddr, to);
+ dax_mk_noaccess(dax_dev);
kunmap_atomic(vto);
dax_read_unlock(id);
return 0;
@@ -1096,8 +1098,10 @@ s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
}
if (!page_aligned) {
+ dax_mk_readwrite(iomap->dax_dev);
memset(kaddr + offset, 0, size);
dax_flush(iomap->dax_dev, kaddr + offset, size);
+ dax_mk_noaccess(iomap->dax_dev);
}
dax_read_unlock(id);
return size;
@@ -1169,6 +1173,8 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
if (map_len > end - pos)
map_len = end - pos;
+ dax_mk_readwrite(dax_dev);
+
/*
* The userspace address for the memory copy has already been
* validated via access_ok() in either vfs_read() or
@@ -1181,6 +1187,8 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
map_len, iter);
+ dax_mk_noaccess(dax_dev);
+
pos += xfer;
length -= xfer;
done += xfer;
@@ -776,8 +776,10 @@ static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL);
if (rc < 0)
return rc;
+ dax_mk_readwrite(dax_dev);
memset(kaddr, 0, nr_pages << PAGE_SHIFT);
dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
+ dax_mk_noaccess(dax_dev);
return 0;
}
@@ -36,6 +36,10 @@ struct dax_operations {
struct iov_iter *);
/* zero_page_range: required operation. Zero page range */
int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
+
+ /* map_protected: optional operation. Report whether the mapping is protected */
+ bool (*map_protected)(struct dax_device *dax_dev);
+ /* mk_readwrite: optional operation. Relax mapping protection for the caller */
+ void (*mk_readwrite)(struct dax_device *dax_dev);
+ /* mk_noaccess: optional operation. Restore mapping protection after access */
+ void (*mk_noaccess)(struct dax_device *dax_dev);
};
extern struct attribute_group dax_attribute_group;
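/*
 * A minimal sketch of how a provider might wire up the new optional
 * callbacks.  The example_* names and the protection mechanism are
 * assumptions for illustration only; the patch defines the three operation
 * slots above and leaves the mechanism to the dax device driver.
 */
static bool example_map_protected(struct dax_device *dax_dev)
{
	/* report that this device's direct mapping carries extra protection */
	return true;
}

static void example_mk_readwrite(struct dax_device *dax_dev)
{
	/* relax the protection for the calling thread only */
}

static void example_mk_noaccess(struct dax_device *dax_dev)
{
	/* undo whatever example_mk_readwrite() relaxed */
}

static const struct dax_operations example_dax_ops = {
	/* .direct_access, .copy_from_iter, etc. omitted for brevity */
	.map_protected	= example_map_protected,
	.mk_readwrite	= example_mk_readwrite,
	.mk_noaccess	= example_mk_noaccess,
};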
@@ -228,6 +232,10 @@ int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
size_t nr_pages);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
+bool dax_map_protected(struct dax_device *dax_dev);
+void dax_mk_readwrite(struct dax_device *dax_dev);
+void dax_mk_noaccess(struct dax_device *dax_dev);
+
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,