@@ -397,9 +397,8 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
bool lock_cap = ns_capable(task_active_pid_ns(dma->task)->user_ns,
CAP_IPC_LOCK);
struct mm_struct *mm;
- long ret, i, lock_acct = 0;
+ long ret, i = 0, lock_acct = 0;
bool rsvd;
- struct vfio_pfn *vpfn;
dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
mm = get_task_mm(dma->task);
@@ -411,61 +410,58 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
goto pin_pg_remote_exit;
rsvd = is_invalid_reserved_pfn(*pfn_base);
- if (!rsvd) {
- vpfn = vfio_find_vpfn(dma, iova);
- if (!vpfn)
- lock_acct = 1;
- }
-
limit = task_rlimit(dma->task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- if (!rsvd && !lock_cap && mm->locked_vm + lock_acct > limit) {
- put_pfn(*pfn_base, dma->prot);
- pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
- limit << PAGE_SHIFT);
- ret = -ENOMEM;
- goto pin_pg_remote_exit;
- }
+ /*
+ * Reserved pages aren't counted against the user; externally pinned
+ * pages are already counted against the user.
+ */
+ if (!rsvd && !vfio_find_vpfn(dma, iova)) {
+ if (!lock_cap && mm->locked_vm + 1 > limit) {
+ put_pfn(*pfn_base, dma->prot);
+ pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
+ limit << PAGE_SHIFT);
+ ret = -ENOMEM;
+ goto pin_pg_remote_exit;
+ }
- if (unlikely(disable_hugepages)) {
- if (!rsvd)
- vfio_lock_acct(dma->task, lock_acct);
- ret = 1;
- goto pin_pg_remote_exit;
+ lock_acct++;
}
- /* Lock all the consecutive pages from pfn_base */
- for (i = 1, vaddr += PAGE_SIZE, iova += PAGE_SIZE; i < npage;
- i++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
- unsigned long pfn = 0;
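+ /* The base page is already pinned (and accounted above if needed); count it. */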
+ i++;
- ret = vaddr_get_pfn(mm, vaddr, dma->prot, &pfn);
- if (ret)
- break;
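+ /* With hugepages disabled, skip batching and return the single base page. */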
+ if (likely(!disable_hugepages)) {
+ /* Lock all the consecutive pages from pfn_base */
+ for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; i < npage;
+ i++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
+ unsigned long pfn = 0;
- if (pfn != *pfn_base + i ||
- rsvd != is_invalid_reserved_pfn(pfn)) {
- put_pfn(pfn, dma->prot);
- break;
- }
+ ret = vaddr_get_pfn(mm, vaddr, dma->prot, &pfn);
+ if (ret)
+ break;
- if (!rsvd && !lock_cap &&
- mm->locked_vm + lock_acct + 1 > limit) {
- put_pfn(pfn, dma->prot);
- pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
- __func__, limit << PAGE_SHIFT);
- break;
- }
+ if (pfn != *pfn_base + i ||
+ rsvd != is_invalid_reserved_pfn(pfn)) {
+ put_pfn(pfn, dma->prot);
+ break;
+ }
+
+ if (!rsvd && !vfio_find_vpfn(dma, iova)) {
+ if (!lock_cap &&
+ mm->locked_vm + lock_acct + 1 > limit) {
+ put_pfn(pfn, dma->prot);
+ pr_warn("%s: RLIMIT_MEMLOCK (%ld) "
+ "exceeded\n", __func__,
+ limit << PAGE_SHIFT);
+ break;
+ }
- if (!rsvd) {
- vpfn = vfio_find_vpfn(dma, iova);
- if (!vpfn)
lock_acct++;
+ }
}
}
- if (!rsvd)
- vfio_lock_acct(dma->task, lock_acct);
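+ /* One accounting call now covers the whole batch. */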
+ vfio_lock_acct(dma->task, lock_acct);
ret = i;
pin_pg_remote_exit:
@@ -481,12 +477,9 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
long i;
for (i = 0; i < npage; i++) {
- struct vfio_pfn *vpfn;
-
if (put_pfn(pfn++, dma->prot)) {
unlocked++;
- vpfn = vfio_find_vpfn(dma, iova + (i << PAGE_SHIFT));
- if (vpfn)
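+ /* Pages still held by an external pinning stay accounted. */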
+ if (vfio_find_vpfn(dma, iova + (i << PAGE_SHIFT)))
locked++;
}
}
@@ -1342,12 +1335,10 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
unlocked += vfio_unmap_unpin(iommu, dma, false);
p = rb_first(&dma->pfn_list);
for (; p; p = rb_next(p)) {
- bool rsvd;
struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn,
node);
- rsvd = is_invalid_reserved_pfn(vpfn->pfn);
- if (!rsvd)
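+ /* Only non-reserved external pins contribute to the accounting. */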
+ if (!is_invalid_reserved_pfn(vpfn->pfn))
locked++;
}
vfio_lock_acct(dma->task, locked - unlocked);
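
A minimal user-space sketch of the accounting pattern this patch settles
on: check the RLIMIT_MEMLOCK ceiling page by page while pinning, but
charge the task's locked_vm once for the whole batch. This is an
illustration, not kernel code; pin_one_page(), is_reserved() and
is_externally_pinned() are hypothetical stand-ins for vaddr_get_pfn(),
is_invalid_reserved_pfn() and vfio_find_vpfn(), and locked_vm/limit are
plain counters rather than mm and rlimit state.

#include <stdbool.h>
#include <stdio.h>

static long locked_vm;		/* stand-in for mm->locked_vm */
static const long limit = 4;	/* stand-in for RLIMIT_MEMLOCK, in pages */

/* Hypothetical stand-ins for vaddr_get_pfn(), is_invalid_reserved_pfn()
 * and vfio_find_vpfn(). Page 1 plays an externally pinned page. */
static bool pin_one_page(long idx)	{ (void)idx; return true; }
static bool is_reserved(long idx)	{ (void)idx; return false; }
static bool is_externally_pinned(long idx) { return idx == 1; }

/* Pin up to npage pages: limit-check incrementally, account once. */
static long pin_batch(long npage)
{
	long i, lock_acct = 0;

	for (i = 0; i < npage; i++) {
		if (!pin_one_page(i))
			break;
		/* Reserved pages aren't charged; externally pinned pages
		 * were charged when they were first pinned. */
		if (is_reserved(i) || is_externally_pinned(i))
			continue;
		if (locked_vm + lock_acct + 1 > limit) {
			/* The kernel also releases the just-pinned page here. */
			if (i == 0)
				return -1;	/* -ENOMEM in the kernel */
			break;			/* keep what fit under the limit */
		}
		lock_acct++;
	}
	locked_vm += lock_acct;		/* the single vfio_lock_acct() call */
	return i;
}

int main(void)
{
	long pinned = pin_batch(4);

	/* Prints "pinned 4 page(s), locked_vm now 3": page 1 isn't re-charged. */
	printf("pinned %ld page(s), locked_vm now %ld\n", pinned, locked_vm);
	return 0;
}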