 	unsigned long flags;
 	int ret, err = 0;
 	struct page *page;
+	unsigned int order;
 
 	if (send_ringbuffer_size % PAGE_SIZE ||
 	    recv_ringbuffer_size % PAGE_SIZE)
 		return -EINVAL;
 
+	order = get_order(send_ringbuffer_size + recv_ringbuffer_size);
+
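A note on the new computation above: get_order() (from asm/page.h, usually pulled in indirectly) converts a byte count into a buddy-allocator order, rounding up to the next power-of-two number of pages, so it is cheap to compute once and reuse. A minimal sketch, assuming 4 KiB pages (the helper name is illustrative):

    /* get_order() rounds a byte count up to a power-of-two page order. */
    static unsigned int ring_order(unsigned long send_bytes,
                                   unsigned long recv_bytes)
    {
            /* With PAGE_SIZE == 4096:
             *   get_order(4096)  == 0   -> 1 page
             *   get_order(8192)  == 1   -> 2 pages
             *   get_order(12288) == 2   -> 3 pages round up to 4
             */
            return get_order(send_bytes + recv_bytes);
    }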
 	spin_lock_irqsave(&newchannel->lock, flags);
 	if (newchannel->state == CHANNEL_OPEN_STATE) {
 		newchannel->state = CHANNEL_OPENING_STATE;
 
 	/* Allocate the ring buffer */
 	page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
-				GFP_KERNEL|__GFP_ZERO,
-				get_order(send_ringbuffer_size +
-					  recv_ringbuffer_size));
+				GFP_KERNEL|__GFP_ZERO, order);
 	if (!page)
-		page = alloc_pages(GFP_KERNEL|__GFP_ZERO,
-				   get_order(send_ringbuffer_size +
-					     recv_ringbuffer_size));
+		page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);
 	if (!page) {
 		err = -ENOMEM;
 		goto error_set_chnstate;
 	}
 
-	newchannel->ringbuffer_pages = page_address(page);
+	newchannel->ringbuffer_page = page;
 	newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
 					    recv_ringbuffer_size) >> PAGE_SHIFT;
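Two things are worth calling out in the hunk above. First, the allocation prefers the NUMA node of the CPU that services the channel and falls back to any node rather than failing the open. Second, ringbuffer_pagecount records the pages actually requested, while the buddy allocator may return more because get_order() rounds up; the free paths below rederive the order from the same byte size, so the pair stays consistent. The fallback idiom in isolation (the helper name is illustrative, not part of the patch):

    #include <linux/gfp.h>
    #include <linux/topology.h>             /* cpu_to_node() */

    /* Allocate a zeroed, physically contiguous ring, ideally on the
     * NUMA node backing target_cpu.
     */
    static struct page *ring_alloc(int target_cpu, unsigned int order)
    {
            struct page *page;

            /* Prefer node-local memory for the hot data path... */
            page = alloc_pages_node(cpu_to_node(target_cpu),
                                    GFP_KERNEL | __GFP_ZERO, order);
            /* ...but remote memory beats failing the open. */
            if (!page)
                    page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
            return page;
    }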
 error_free_pages:
 	hv_ringbuffer_cleanup(&newchannel->outbound);
 	hv_ringbuffer_cleanup(&newchannel->inbound);
-	__free_pages(page,
-		     get_order(send_ringbuffer_size + recv_ringbuffer_size));
+	__free_pages(page, order);
 error_set_chnstate:
 	newchannel->state = CHANNEL_OPEN_STATE;
 	return err;
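The labels above form the usual kernel unwind ladder: each failure jumps to the label that undoes exactly what had succeeded so far, and the cleanups run in reverse order of setup. A hypothetical miniature of the same shape (do_setup() is invented for illustration):

    static int example_open(void)
    {
            struct page *page;
            int err;

            page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
            if (!page) {
                    err = -ENOMEM;
                    goto error_set_state;   /* nothing to free yet */
            }

            err = do_setup();               /* hypothetical later step */
            if (err)
                    goto error_free_pages;  /* undo only what succeeded */

            return 0;

    error_free_pages:
            __free_pages(page, 0);
    error_set_state:
            /* restore whatever state was flipped before the failure */
            return err;
    }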
 	hv_ringbuffer_cleanup(&channel->outbound);
 	hv_ringbuffer_cleanup(&channel->inbound);
 
-	free_pages((unsigned long)channel->ringbuffer_pages,
-		   get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
+	__free_pages(channel->ringbuffer_page,
+		     get_order(channel->ringbuffer_pagecount << PAGE_SHIFT));
 
 out:
 	return ret;
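This hunk shows why storing the struct page pays off: free_pages() takes a kernel virtual address, whereas __free_pages() takes the struct page * the channel now keeps, so no address conversion is needed (and the multiply by PAGE_SIZE becomes the idiomatic shift by PAGE_SHIFT). The order passed to the free must match the one used at allocation; both sides compute get_order() over the same byte count, so they agree even when the rings do not total a power of two. Reduced to essentials (illustrative, assuming nr_bytes is page-aligned):

    static void ring_lifecycle(unsigned long nr_bytes)
    {
            unsigned int order = get_order(nr_bytes);
            unsigned long nr_pages = nr_bytes >> PAGE_SHIFT;  /* exact count */
            struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);

            if (!page)
                    return;

            /* ... use the ring ... */

            /* get_order(nr_pages << PAGE_SHIFT) == order: both round up
             * from the same byte count.
             */
            __free_pages(page, get_order(nr_pages << PAGE_SHIFT));
    }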
 	struct vmbus_channel *channel
 		= container_of(kobj, struct vmbus_channel, kobj);
 	struct hv_device *dev = channel->primary_channel->device_obj;
 	u16 q_idx = channel->offermsg.offer.sub_channel_index;
+	void *ring_buffer = page_address(channel->ringbuffer_page);
 
 	dev_dbg(&dev->device, "mmap channel %u pages %#lx at %#lx\n",
 		q_idx, vma_pages(vma), vma->vm_pgoff);
 
-	return vm_iomap_memory(vma, virt_to_phys(channel->ringbuffer_pages),
+	return vm_iomap_memory(vma, virt_to_phys(ring_buffer),
 			       channel->ringbuffer_pagecount << PAGE_SHIFT);
 }
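With only the struct page cached, the mmap path first recovers the kernel virtual address with page_address(), then hands vm_iomap_memory() the physical address. For a lowmem alloc_pages(GFP_KERNEL) buffer this two-step conversion is equivalent to reading the physical address straight off the page; a minimal sketch:

    static phys_addr_t ring_phys(struct page *page)
    {
            void *va = page_address(page);      /* kernel virtual address */
            phys_addr_t pa = virt_to_phys(va);  /* what vm_iomap_memory() takes */

            WARN_ON(pa != page_to_phys(page));  /* same answer, one step */
            return pa;
    }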
 	/* mem resources */
 	pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings";
 	pdata->info.mem[TXRX_RING_MAP].addr
-		= (uintptr_t)dev->channel->ringbuffer_pages;
+		= (uintptr_t)page_address(dev->channel->ringbuffer_page);
 	pdata->info.mem[TXRX_RING_MAP].size
 		= dev->channel->ringbuffer_pagecount << PAGE_SHIFT;
 	pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_LOGICAL;
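UIO_MEM_LOGICAL declares that addr holds a kernel logical address, which is exactly what page_address() yields for an alloc_pages(GFP_KERNEL) buffer, so the driver's userspace contract is unchanged. Userspace reaches the region by mmap()ing the UIO device node; by UIO convention the offset selects the map index in whole page sizes rather than a byte offset. A hypothetical userspace sketch (the device path, and TXRX_RING_MAP being map 0, are assumptions):

    #include <fcntl.h>
    #include <stddef.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Map UIO region map_idx of ring_bytes bytes from dev, e.g.
     * map_uio_region("/dev/uio0", 0, ring_bytes) for the txrx rings.
     */
    static void *map_uio_region(const char *dev, int map_idx, size_t ring_bytes)
    {
            int fd = open(dev, O_RDWR);
            void *p;

            if (fd < 0)
                    return NULL;
            /* The offset encodes which map to expose, not a byte offset. */
            p = mmap(NULL, ring_bytes, PROT_READ | PROT_WRITE, MAP_SHARED,
                     fd, (off_t)map_idx * getpagesize());
            close(fd);
            return p == MAP_FAILED ? NULL : p;
    }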
 	u32 ringbuffer_gpadlhandle;
 
 	/* Allocated memory for ring buffer */
-	void *ringbuffer_pages;
+	struct page *ringbuffer_page;
 	u32 ringbuffer_pagecount;
 	struct hv_ring_buffer_info outbound;	/* send to parent */
 	struct hv_ring_buffer_info inbound;	/* receive from parent */
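Finally, the struct change itself: struct vmbus_channel now caches the allocation's struct page instead of a mapped address, and every consumer rederives the view it needs. A sketch of the derivations, illustrative only (chan stands in for a real channel pointer):

    static void ring_views(struct vmbus_channel *chan)
    {
            void *va = page_address(chan->ringbuffer_page);          /* kernel virtual */
            unsigned long pfn = page_to_pfn(chan->ringbuffer_page);  /* frame number */

            (void)va;
            (void)pfn;
    }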