 		for_each_sg(table->sgl, sg, table->nents, i) {
 			struct page *page = sg_page(sg);
-			for (j = 0; j < sg_dma_len(sg) / PAGE_SIZE; j++)
+			for (j = 0; j < sg->length / PAGE_SIZE; j++)
 				buffer->pages[k++] = page++;
 		}
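Every hunk in this patch makes the same substitution, so the rationale is worth stating once: sg->length is set when the scatterlist is built (via sg_set_page()) and stays valid for CPU-side walks like the one above, while sg_dma_len() reads the dma_length that only dma_map_sg() fills in; before a mapping exists it is meaningless, and with an IOMMU the mapped segments may be coalesced so their lengths no longer describe the original pages. A minimal sketch of that ordering, with an assumed function name and transfer direction, not taken from the patch:

#include <linux/dma-mapping.h>
#include <linux/printk.h>
#include <linux/scatterlist.h>

static void sketch_length_vs_dma_len(struct device *dev, struct sg_table *table)
{
	struct scatterlist *sg;
	int i, count;

	/* Valid from table-build time onward: sg->length. */
	for_each_sg(table->sgl, sg, table->nents, i)
		pr_debug("entry %d covers %u bytes of pages\n", i, sg->length);

	/* sg_dma_len() only becomes meaningful after this succeeds... */
	count = dma_map_sg(dev, table->sgl, table->nents, DMA_TO_DEVICE);
	if (!count)
		return;

	/* ...and only across the 'count' segments it returned. */
	for_each_sg(table->sgl, sg, count, i)
		pr_debug("dma segment %d is %u bytes\n", i, sg_dma_len(sg));

	dma_unmap_sg(dev, table->sgl, table->nents, DMA_TO_DEVICE);
}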
 	sg = table->sgl;
 	for (i -= 1; i >= 0; i--) {
 		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
-			      sg_dma_len(sg));
+			      sg->length);
 		sg = sg_next(sg);
 	}
 	sg_free_table(table);
 	for_each_sg(table->sgl, sg, table->nents, i) {
 		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
-			      sg_dma_len(sg));
+			      sg->length);
 	}
 	chunk_heap->allocated -= allocated_size;
 	sg_free_table(table);
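Both chunk-heap hunks (the allocation unwind and the free path) hand a size back to gen_pool_free(), and that size must match what gen_pool_alloc() originally carved out. That size was recorded in sg->length when the entry was built; nothing on these paths ever set dma_length. A sketch of the symmetry, with hypothetical helper names:

#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

static int sketch_chunk_alloc(struct gen_pool *pool, struct scatterlist *sg,
			      unsigned long chunk_size)
{
	unsigned long paddr = gen_pool_alloc(pool, chunk_size);

	if (!paddr)
		return -ENOMEM;
	/* Records chunk_size in sg->length; dma_length stays unset. */
	sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)), chunk_size, 0);
	return 0;
}

static void sketch_chunk_free(struct gen_pool *pool, struct scatterlist *sg)
{
	/* sg->length still holds the size gen_pool_alloc() was asked for. */
	gen_pool_free(pool, page_to_phys(sg_page(sg)), sg->length);
}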
 	pgprot = pgprot_writecombine(PAGE_KERNEL);
 	for_each_sg(table->sgl, sg, table->nents, i) {
-		int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
+		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
 		struct page *page = sg_page(sg);
 		BUG_ON(i >= npages);
 		for (j = 0; j < npages_this_entry; j++) {
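The kernel-mapping hunk is the one spot where the length is also rounded: PAGE_ALIGN(sg->length) / PAGE_SIZE counts a partial trailing page as a whole page, since the page array being filled here deals in whole pages. With assumed numbers, an entry of 3 * PAGE_SIZE + 100 bytes contributes 4 page pointers:

#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Illustration only: pages contributed by one entry, tail rounded up. */
static int sketch_npages_this_entry(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->length) / PAGE_SIZE;
}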
 	for_each_sg(table->sgl, sg, table->nents, i) {
 		struct page *page = sg_page(sg);
 		unsigned long remainder = vma->vm_end - addr;
-		unsigned long len = sg_dma_len(sg);
+		unsigned long len = sg->length;
 
-		if (offset >= sg_dma_len(sg)) {
-			offset -= sg_dma_len(sg);
+		if (offset >= sg->length) {
+			offset -= sg->length;
 			continue;
 		} else if (offset) {
 			page += offset / PAGE_SIZE;
-			len = sg_dma_len(sg) - offset;
+			len = sg->length - offset;
 			offset = 0;
 		}
 		len = min(len, remainder);
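The userspace-mapping hunk is the only place the length also feeds offset arithmetic, so a worked example helps. With assumed numbers, a mapping that starts 3 pages into an 8-page entry takes the else-if branch: page advances by offset / PAGE_SIZE = 3 pages, len becomes sg->length - offset = 5 pages, and offset is zeroed so every later entry maps from its start. The hypothetical helper below just isolates that arithmetic from the remap loop:

#include <linux/mm.h>
#include <linux/scatterlist.h>

/* How much of this entry to map; advances *page past a leading offset. */
static unsigned long sketch_entry_span(struct scatterlist *sg,
				       struct page **page,
				       unsigned long *offset)
{
	unsigned long len = sg->length;

	if (*offset >= sg->length) {
		*offset -= sg->length;	/* entry lies wholly before the mapping */
		return 0;
	}
	if (*offset) {
		*page += *offset / PAGE_SIZE;	/* e.g. 3 pages in */
		len = sg->length - *offset;	/* e.g. 5 of 8 pages left */
		*offset = 0;
	}
	return len;
}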
 	for_each_sg(table->sgl, sg, table->nents, i) {
 		struct page *page = sg_page(sg);
-		unsigned long len = sg_dma_len(sg);
+		unsigned long len = sg->length;
 
 		for (j = 0; j < len / PAGE_SIZE; j++) {
 			struct page *sub_page = page + j;
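The zeroing hunk walks the same whole-page entries the allocator recorded. A self-contained version of that walk, using clear_highpage() as a stand-in for the driver's per-page mapping loop:

#include <linux/highmem.h>
#include <linux/scatterlist.h>

static void sketch_zero_table(struct sg_table *table)
{
	struct scatterlist *sg;
	int i, j;

	for_each_sg(table->sgl, sg, table->nents, i) {
		/* These heaps build page-multiple entries, so whole pages. */
		for (j = 0; j < sg->length / PAGE_SIZE; j++)
			clear_highpage(sg_page(sg) + j);
	}
}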
 	for_each_sg(table->sgl, sg, table->nents, i)
 		free_buffer_page(sys_heap, buffer, sg_page(sg),
-				 get_order(sg_dma_len(sg)));
+				 get_order(sg->length));
 	sg_free_table(table);
 	kfree(table);
 }
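The system-heap free path depends on the same guarantee in one more way: it recovers the allocation order from the entry length. With 4 KiB pages, a 64 KiB entry gives get_order(65536) == 4, matching the order the allocation side used, so the block returns to the right page pool; a zero or stale dma_length here would compute the wrong order. Illustration only:

#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Order of the higher-order block backing one entry. */
static unsigned int sketch_entry_order(struct scatterlist *sg)
{
	return get_order(sg->length);	/* e.g. 64 KiB -> order 4 */
}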