@@ -1511,20 +1511,20 @@ static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
}
/*
- * Map up to ib_sg->dma_nents elements of state->sg where ib_sg->offset
- * is the offset where to start in the first element. If ib_sg->offset != 0 then
- * ib_sg->offset is updated to the offset in state->sg[retval] of the first
- * byte that has not yet been mapped.
+ * Map up to state->ib_sg.dma_nents elements of state->ib_sg.sg, where
+ * state->ib_sg.offset is the offset at which to start in the first element.
+ * If state->ib_sg.offset != 0 then state->ib_sg.offset is updated to the offset
+ * in state->ib_sg.sg[retval] of the first byte that has not yet been mapped.
*/
static int srp_map_finish_fr(struct srp_map_state *state,
struct srp_request *req,
- struct srp_rdma_ch *ch,
- struct ib_scatterlist *ib_sg)
+ struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
struct ib_reg_wr wr;
struct srp_fr_desc *desc;
+ struct ib_scatterlist *ib_sg = &state->ib_sg;
u32 rkey;
int n, err;
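The hunks above use three fields of struct ib_scatterlist; the structure itself is introduced earlier in this series and is not shown here. A minimal sketch of the layout those accesses imply, with field types inferred from usage rather than quoted from the series:

	struct ib_scatterlist {
		struct scatterlist *sg;	/* first scatterlist element to map */
		int dma_nents;		/* number of DMA-mapped elements */
		unsigned int offset;	/* byte offset into the first element */
	};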
@@ -1538,8 +1538,8 @@ static int srp_map_finish_fr(struct srp_map_state *state,
WARN_ON_ONCE(!dev->use_fast_reg);
if (ib_sg->dma_nents == 1 && target->global_rkey) {
- srp_map_desc(state, sg_dma_address(state->sg) + ib_sg->offset,
- sg_dma_len(state->sg) - ib_sg->offset,
+ srp_map_desc(state, sg_dma_address(ib_sg->sg) + ib_sg->offset,
+ sg_dma_len(ib_sg->sg) - ib_sg->offset,
target->global_rkey);
ib_sg->offset = 0;
return 1;
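The only functional change in this fast path is that the scatterlist element is now read through ib_sg->sg instead of state->sg; the offset arithmetic is untouched. A worked example with made-up values:

	/* Hypothetical entry: DMA-mapped at 0x10000, 0x2000 bytes long,
	 * resuming at byte offset 0x100 into the element. */
	dma_addr_t addr = 0x10000 + 0x100;	/* sg_dma_address(ib_sg->sg) + ib_sg->offset */
	u32 len = 0x2000 - 0x100;		/* sg_dma_len(ib_sg->sg) - ib_sg->offset */
	/* srp_map_desc() emits one direct descriptor covering
	 * [0x10100, 0x12000) with the global rkey; no MR is consumed. */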
@@ -1664,12 +1664,10 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
struct srp_request *req, struct scatterlist *scat,
int count)
{
{
- struct ib_scatterlist ib_sg;
-
- ib_sg.offset = 0;
state->fr.next = req->fr_list;
state->fr.end = req->fr_list + ch->target->mr_per_cmd;
- state->sg = scat;
+ state->ib_sg.sg = scat;
+ state->ib_sg.offset = 0;
if (count == 0)
return 0;
@@ -1677,15 +1675,14 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
while (count) {
int i, n;
- ib_sg.sg = state->sg;
- ib_sg.dma_nents = count;
- n = srp_map_finish_fr(state, req, ch, &ib_sg);
+ state->ib_sg.dma_nents = count;
+ n = srp_map_finish_fr(state, req, ch);
if (unlikely(n < 0))
return n;
count -= n;
for (i = 0; i < n; i++)
- state->sg = sg_next(state->sg);
+ state->ib_sg.sg = sg_next(state->ib_sg.sg);
}
return 0;
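Because the cursor now lives in state->ib_sg, a mapping that stops partway through an element resumes transparently on the next iteration. A hypothetical walk-through of the loop above, assuming three DMA entries and an MR that covers two entries plus part of the third:

	state->ib_sg.dma_nents = 3;
	n = srp_map_finish_fr(state, req, ch);	/* n == 2; ib_sg.offset now marks
						 * the first unmapped byte of the
						 * third entry */
	count -= n;				/* one entry left */
	/* the for loop advances ib_sg.sg past the two fully mapped entries */
	state->ib_sg.dma_nents = 1;
	n = srp_map_finish_fr(state, req, ch);	/* maps the remainder, starting
						 * at ib_sg.offset */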
@@ -1726,7 +1723,6 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
struct srp_direct_buf idb_desc;
u64 idb_pages[1];
struct scatterlist idb_sg[1];
- struct ib_scatterlist ib_sg;
int ret;
memset(&state, 0, sizeof(state));
@@ -1738,16 +1734,15 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
state.dma_len = idb_len;
if (dev->use_fast_reg) {
- state.sg = idb_sg;
+ state.ib_sg.sg = idb_sg;
sg_init_one(idb_sg, req->indirect_desc, idb_len);
idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
#ifdef CONFIG_NEED_SG_DMA_LENGTH
idb_sg->dma_length = idb_sg->length; /* hack^2 */
#endif
- ib_sg.sg = state.sg;
- ib_sg.dma_nents = 1;
- ib_sg.offset = 0;
- ret = srp_map_finish_fr(&state, req, ch, &ib_sg);
+ state.ib_sg.dma_nents = 1;
+ state.ib_sg.offset = 0;
+ ret = srp_map_finish_fr(&state, req, ch);
if (ret < 0)
return ret;
WARN_ON_ONCE(ret < 1);
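The indirect-descriptor buffer is a hand-built, single-entry scatterlist that never went through the usual DMA mapping API, hence the "hack" assignments of dma_address (and dma_length under CONFIG_NEED_SG_DMA_LENGTH) before the call. Condensed, the state seeding after this change is:

	sg_init_one(idb_sg, req->indirect_desc, idb_len);
	idb_sg->dma_address = req->indirect_dma_addr;	/* already DMA-mapped */
	state.ib_sg.sg = idb_sg;
	state.ib_sg.dma_nents = 1;	/* exactly one "mapped" entry */
	state.ib_sg.offset = 0;		/* map from the first byte */
	ret = srp_map_finish_fr(&state, req, ch);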
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -340,7 +340,7 @@ struct srp_map_state {
struct srp_direct_buf *desc;
union {
u64 *pages;
- struct scatterlist *sg;
+ struct ib_scatterlist ib_sg;
};
dma_addr_t base_dma_addr;
u32 dma_len;
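Net effect on srp_map_state: the union member changes from a bare scatterlist pointer to the embedded three-field ib_scatterlist. Overlaying it with pages remains safe for the same reason as before, assuming the driver's usual split: pages is only touched on the FMR path and ib_sg only on the fast-registration path, and a device uses one registration scheme at a time.

	/* Sketch of the resulting fragment: */
	union {
		u64 *pages;			/* FMR path */
		struct ib_scatterlist ib_sg;	/* fast registration path */
	};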