struct scatterlist sg_list[0];
};
-#define BUFFER_NR_PAGES(buf, len) \
- (DIV_ROUND_UP((unsigned long)(buf) + (len), PAGE_SIZE) \
- - (unsigned long)(buf) / PAGE_SIZE)
+/* Number of PAGE_SIZE pages spanned by the byte range [buf, buf + size). */
+static inline int get_buf_page_count(void *buf, int size)
+{
+ return DIV_ROUND_UP((uintptr_t)buf + size, PAGE_SIZE) -
+ (uintptr_t)buf / PAGE_SIZE;
+}
static void smb_direct_destroy_pools(struct smb_direct_transport *transport);
static void smb_direct_post_recv_credits(struct work_struct *work);
int offset, len;
int i = 0;
- if (nentries < BUFFER_NR_PAGES(buf, size))
+ if (nentries < get_buf_page_count(buf, size))
return -EINVAL;
offset = offset_in_page(buf);
msg->sgt.sgl = &msg->sg_list[0];
ret = sg_alloc_table_chained(&msg->sgt,
- BUFFER_NR_PAGES(buf, buf_len),
+ get_buf_page_count(buf, buf_len),
msg->sg_list, SG_CHUNK_SIZE);
if (ret) {
atomic_inc(&t->rw_avail_ops);
}
ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port,
- msg->sg_list, BUFFER_NR_PAGES(buf, buf_len),
+ msg->sg_list, get_buf_page_count(buf, buf_len),
0, remote_offset, remote_key,
is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
if (ret < 0) {