@@ -76,10 +76,12 @@ int rxe_copy_mr_data(struct sk_buff *skb, struct rxe_mr *mr, u64 iova,
enum rxe_mr_copy_op op);
int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
enum rxe_mr_copy_op op);
+int rxe_num_dma_frags(const struct rxe_pd *pd, const struct rxe_dma_info *dma,
+ int length);
int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
void *addr, int length, enum rxe_mr_copy_op op);
void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
-struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
+struct rxe_mr *lookup_mr(const struct rxe_pd *pd, int access, u32 key,
enum rxe_mr_lookup_type type);
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
@@ -522,6 +522,71 @@ int rxe_copy_mr_data(struct sk_buff *skb, struct rxe_mr *mr, u64 iova,
return 0;
}
+/**
+ * rxe_num_dma_frags() - Count the number of skb frags needed to copy
+ *			 length bytes from a dma info struct to an skb
+ * @pd: protection domain used by dma entries
+ * @dma: dma info
+ * @length: number of bytes to copy
+ *
+ * Walks the sge list starting at the current dma position without
+ * modifying @dma, summing the frag counts reported by
+ * rxe_num_mr_frags() for each sge segment touched by the copy.
+ *
+ * Returns: number of frags needed or negative error
+ */
+int rxe_num_dma_frags(const struct rxe_pd *pd, const struct rxe_dma_info *dma,
+		      int length)
+{
+	int cur_sge = dma->cur_sge;
+	const struct rxe_sge *sge = &dma->sge[cur_sge];
+	int buf_offset = dma->sge_offset;
+	int resid = dma->resid;
+	struct rxe_mr *mr = NULL;
+	int bytes;
+	u64 iova;
+	int ret;
+	int num_frags = 0;
+
+	if (length == 0)
+		return 0;
+
+	if (length > resid)
+		return -EINVAL;
+
+	while (length > 0) {
+		if (buf_offset >= sge->length) {
+			/* done with this sge; drop its mr reference exactly
+			 * once and clear the pointer so a consecutive
+			 * zero-length sge cannot trigger a double put
+			 */
+			if (mr) {
+				rxe_put(mr);
+				mr = NULL;
+			}
+
+			sge++;
+			cur_sge++;
+			buf_offset = 0;
+
+			if (cur_sge >= dma->num_sge)
+				return -ENOSPC;
+			if (!sge->length)
+				continue;
+		}
+
+		mr = lookup_mr(pd, 0, sge->lkey, RXE_LOOKUP_LOCAL);
+		if (!mr)
+			return -EINVAL;
+
+		/* only count the portion of the copy covered by this sge */
+		bytes = min_t(int, length, sge->length - buf_offset);
+		if (bytes > 0) {
+			iova = sge->addr + buf_offset;
+			ret = rxe_num_mr_frags(mr, iova, bytes);
+			if (ret < 0) {
+				rxe_put(mr);
+				return ret;
+			}
+			/* accumulate; without this the function always
+			 * returned 0 regardless of the frags required
+			 */
+			num_frags += ret;
+
+			buf_offset += bytes;
+			resid -= bytes;
+			length -= bytes;
+		}
+	}
+
+	/* drop the reference taken for the final sge */
+	if (mr)
+		rxe_put(mr);
+
+	return num_frags;
+}
+
/* copy data in or out of a wqe, i.e. sg list
* under the control of a dma descriptor
*/
@@ -658,7 +723,7 @@ int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
* (3) verify that the mr can support the requested access
* (4) verify that mr state is valid
*/
-struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
+struct rxe_mr *lookup_mr(const struct rxe_pd *pd, int access, u32 key,
enum rxe_mr_lookup_type type)
{
struct rxe_mr *mr;
Add routine named rxe_num_dma_frags() to compute the number of skb frags needed to copy length bytes from a dma info struct. Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com> --- drivers/infiniband/sw/rxe/rxe_loc.h | 4 +- drivers/infiniband/sw/rxe/rxe_mr.c | 67 ++++++++++++++++++++++++++++- 2 files changed, 69 insertions(+), 2 deletions(-)