 	struct ib_pool_fmr *fmr;
 	u64 io_addr = 0;
 
-	if (!state->npages)
-		return 0;
-
-	if (state->npages == 1) {
-		srp_map_desc(state, state->base_dma_addr, state->fmr_len,
-			     target->rkey);
-		state->npages = state->fmr_len = 0;
-		return 0;
-	}
-
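+	/* Register the accumulated page list as one mapping via the FMR pool. */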
 	fmr = ib_fmr_pool_map_phys(dev->fmr_pool, state->pages,
 				   state->npages, io_addr);
 	if (IS_ERR(fmr))
 		return PTR_ERR(fmr);
 
 	*state->next_fmr++ = fmr;
 	state->nfmr++;
 
 	srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
-	state->npages = state->fmr_len = 0;
+
 	return 0;
 }
 
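+/*
+ * Finish the current mapping segment. A single accumulated page is mapped
+ * as a plain memory descriptor using the target's rkey; multiple pages are
+ * registered through the FMR pool via srp_map_finish_fmr(). On success the
+ * per-segment page count and length are reset.
+ */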
+static int srp_finish_mapping(struct srp_map_state *state,
+			      struct srp_target_port *target)
+{
+	int ret = 0;
+
+	if (state->npages == 0)
+		return 0;
+
+	if (state->npages == 1)
+		srp_map_desc(state, state->base_dma_addr, state->fmr_len,
+			     target->rkey);
+	else
+		ret = srp_map_finish_fmr(state, target);
+
+	if (ret == 0) {
+		state->npages = 0;
+		state->fmr_len = 0;
+	}
+
+	return ret;
+}
+
 static void srp_map_update_start(struct srp_map_state *state,
 				 struct scatterlist *sg, int sg_index,
 				 dma_addr_t dma_addr)
 	 * avoided using FMR on such page fragments.
 	 */
 	if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
-		ret = srp_map_finish_fmr(state, target);
+		ret = srp_finish_mapping(state, target);
 		if (ret)
 			return ret;
 	while (dma_len) {
 		if (state->npages == SRP_FMR_SIZE) {
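+			/* The page array is full: flush it as one segment before adding more. */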
-			ret = srp_map_finish_fmr(state, target);
+			ret = srp_finish_mapping(state, target);
 			if (ret)
 				return ret;
 	 */
 	ret = 0;
 	if (len != dev->fmr_page_size) {
-		ret = srp_map_finish_fmr(state, target);
+		ret = srp_finish_mapping(state, target);
 		if (!ret)
 			srp_map_update_start(state, NULL, 0, 0);
 	}
 		}
 	}
 
-	if (use_fmr == SRP_MAP_ALLOW_FMR && srp_map_finish_fmr(state, target))
+	if (use_fmr == SRP_MAP_ALLOW_FMR && srp_finish_mapping(state, target))
 		goto backtrack;
 
 	req->nfmr = state->nfmr;