[v4,16/17] iommufd: Add some fault injection points

Message ID 16-v4-0de2f6c78ed0+9d1-iommufd_jgg@nvidia.com
State Accepted
Commit e26eed4f623da70913b535631a29764d108efe98
Series IOMMUFD Generic interface

Commit Message

Jason Gunthorpe Nov. 8, 2022, 12:49 a.m. UTC
This increases the coverage the fail_nth test gets, as well as the
coverage obtained via syzkaller.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/iommu/iommufd/main.c  |  3 +++
 drivers/iommu/iommufd/pages.c | 26 ++++++++++++++++++++++++++
 2 files changed, 29 insertions(+)
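
For context on the helper used at each of the new sites below:
iommufd_should_fail() is the series' fault-injection hook. As a minimal
sketch (not necessarily the series' exact code; the fail_iommufd and
iommufd_fault_init names here are illustrative), such a hook is
typically backed by the kernel's generic fault-injection framework from
<linux/fault-inject.h>:

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/fault-inject.h>

static DECLARE_FAULT_ATTR(fail_iommufd);
static struct dentry *fail_iommufd_dir;

/* Each injection site asks this once; should_fail() applies the
 * probability/interval/times knobs configured via debugfs as well as
 * the per-task fail-nth override used by the fail_nth selftest. */
bool iommufd_should_fail(void)
{
	return should_fail(&fail_iommufd, 1);
}

static int __init iommufd_fault_init(void)
{
	/* Exposes the /sys/kernel/debug/fail_iommufd/ control files */
	fail_iommufd_dir =
		fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd);
	return PTR_ERR_OR_ZERO(fail_iommufd_dir);
}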

Comments

Nicolin Chen Nov. 8, 2022, 7:25 a.m. UTC | #1
On Mon, Nov 07, 2022 at 08:49:09PM -0400, Jason Gunthorpe wrote:
 
> diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c

> @@ -489,6 +494,15 @@ static int pages_to_xarray(struct xarray *xa, unsigned long start_index,
>  
>  		xas_lock(&xas);
>  		while (pages != end_pages) {
> +			/* xarray does not participate in fault injection */
> +			if (pages == half_pages && iommufd_should_fail()) {
> +				xas_set_err(&xas, -EINVAL);
> +				xas_unlock(&xas);
> +				/* aka xas_destroy() */
> +				xas_nomem(&xas, GFP_KERNEL);
> +				goto err_clear;

Coverity reports an "unchecked return value" at xas_nomem()...

> +err_clear:
>  	if (xas_error(&xas)) {

...yet, I think we should be fine since we do xas_error here?
Jason Gunthorpe Nov. 8, 2022, 12:37 p.m. UTC | #2
On Mon, Nov 07, 2022 at 11:25:26PM -0800, Nicolin Chen wrote:
> On Mon, Nov 07, 2022 at 08:49:09PM -0400, Jason Gunthorpe wrote:
>  
> > diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
> 
> > @@ -489,6 +494,15 @@ static int pages_to_xarray(struct xarray *xa, unsigned long start_index,
> >  
> >  		xas_lock(&xas);
> >  		while (pages != end_pages) {
> > +			/* xarray does not participate in fault injection */
> > +			if (pages == half_pages && iommufd_should_fail()) {
> > +				xas_set_err(&xas, -EINVAL);
> > +				xas_unlock(&xas);
> > +				/* aka xas_destroy() */
> > +				xas_nomem(&xas, GFP_KERNEL);
> > +				goto err_clear;
> 
> Coverity reports an "unchecked return value" at xas_nomem()...
> 
> > +err_clear:
> >  	if (xas_error(&xas)) {
> 
> ...yet, I think we should be fine since we do xas_error here?

Yes, in this flow xas_nomem() always returns false and there is no
reason to check it. Just Coverity noise.

Jason
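
For reference, the return-value behavior Jason describes is determined
by xas_nomem() in lib/xarray.c: when the xa_state holds any error other
than -ENOMEM, it only performs cleanup and returns false. A paraphrased
sketch (elided, not the verbatim kernel source):

bool xas_nomem(struct xa_state *xas, gfp_t gfp)
{
	/* After xas_set_err(&xas, -EINVAL) in the patch, the state holds
	 * an error that is not -ENOMEM, so this branch is always taken:
	 * free any preallocated node and report "do not retry". */
	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	/* ... only on -ENOMEM: preallocate and return true to retry ... */
}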

Patch

diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index 7de0f95f2ee68a..ab3fa05f38505d 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -102,6 +102,9 @@  struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
 {
 	struct iommufd_object *obj;
 
+	if (iommufd_should_fail())
+		return ERR_PTR(-ENOENT);
+
 	xa_lock(&ictx->objects);
 	obj = xa_load(&ictx->objects, id);
 	if (!obj || (type != IOMMUFD_OBJ_ANY && obj->type != type) ||
diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
index c3783ea01d7996..2ddcb0d4f71e04 100644
--- a/drivers/iommu/iommufd/pages.c
+++ b/drivers/iommu/iommufd/pages.c
@@ -80,6 +80,10 @@  static void *temp_kmalloc(size_t *size, void *backup, size_t backup_len)
 
 	if (*size < backup_len)
 		return backup;
+
+	if (!backup && iommufd_should_fail())
+		return NULL;
+
 	*size = min_t(size_t, *size, TEMP_MEMORY_LIMIT);
 	res = kmalloc(*size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
 	if (res)
@@ -482,6 +486,7 @@  static int pages_to_xarray(struct xarray *xa, unsigned long start_index,
 			   unsigned long last_index, struct page **pages)
 {
 	struct page **end_pages = pages + (last_index - start_index) + 1;
+	struct page **half_pages = pages + (end_pages - pages) / 2;
 	XA_STATE(xas, xa, start_index);
 
 	do {
@@ -489,6 +494,15 @@  static int pages_to_xarray(struct xarray *xa, unsigned long start_index,
 
 		xas_lock(&xas);
 		while (pages != end_pages) {
+			/* xarray does not participate in fault injection */
+			if (pages == half_pages && iommufd_should_fail()) {
+				xas_set_err(&xas, -EINVAL);
+				xas_unlock(&xas);
+				/* aka xas_destroy() */
+				xas_nomem(&xas, GFP_KERNEL);
+				goto err_clear;
+			}
+
 			old = xas_store(&xas, xa_mk_value(page_to_pfn(*pages)));
 			if (xas_error(&xas))
 				break;
@@ -499,6 +513,7 @@  static int pages_to_xarray(struct xarray *xa, unsigned long start_index,
 		xas_unlock(&xas);
 	} while (xas_nomem(&xas, GFP_KERNEL));
 
+err_clear:
 	if (xas_error(&xas)) {
 		if (xas.xa_index != start_index)
 			clear_xarray(xa, start_index, xas.xa_index - 1);
@@ -662,6 +677,10 @@  static int pfn_reader_user_pin(struct pfn_reader_user *user,
 	npages = min_t(unsigned long, last_index - start_index + 1,
 		       user->upages_len / sizeof(*user->upages));
 
+
+	if (iommufd_should_fail())
+		return -EFAULT;
+
 	uptr = (uintptr_t)(pages->uptr + start_index * PAGE_SIZE);
 	if (!remote_mm)
 		rc = pin_user_pages_fast(uptr, npages, user->gup_flags,
@@ -806,6 +825,8 @@  static int pfn_reader_user_update_pinned(struct pfn_reader_user *user,
 		npages = pages->last_npinned - pages->npinned;
 		inc = false;
 	} else {
+		if (iommufd_should_fail())
+			return -ENOMEM;
 		npages = pages->npinned - pages->last_npinned;
 		inc = true;
 	}
@@ -1636,6 +1657,11 @@  static int iopt_pages_rw_page(struct iopt_pages *pages, unsigned long index,
 		return iopt_pages_rw_slow(pages, index, index, offset, data,
 					  length, flags);
 
+	if (iommufd_should_fail()) {
+		rc = -EINVAL;
+		goto out_mmput;
+	}
+
 	mmap_read_lock(pages->source_mm);
 	rc = pin_user_pages_remote(
 		pages->source_mm, (uintptr_t)(pages->uptr + index * PAGE_SIZE),
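
As a usage note, the new sites above are what the series' fail_nth
selftest drives through the standard per-task fail-nth interface
(Documentation/fault-injection/fault-injection.rst). A minimal
userspace sketch of the arming pattern (helper name illustrative, error
handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Arm the kernel so the Nth fault-injection site hit by this thread
 * fails. The caller keeps the fd open and reads the file back after
 * the syscall under test to learn whether the armed failure was
 * consumed (see the fault-injection docs for the read semantics). */
static int arm_fail_nth(int nth)
{
	int fd = open("/proc/thread-self/fail-nth", O_RDWR);
	char buf[16];

	if (fd < 0)
		return -1;
	snprintf(buf, sizeof(buf), "%d", nth);
	if (write(fd, buf, strlen(buf)) != (ssize_t)strlen(buf)) {
		close(fd);
		return -1;
	}
	return fd;
}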