@@ ... @@ static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
 return NULL;
}
-/* Disgusting wrapper functions */
-static inline unsigned long sg_kmap_atomic(struct scatterlist *sgl, int idx)
-{
- void *addr = kmap_atomic(sg_page(sgl + idx));
- return (unsigned long)addr;
-}
-
-static inline void sg_kunmap_atomic(unsigned long addr)
-{
- kunmap_atomic((void *)addr);
-}
-
-
/* Assume the original sgl has enough room */
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
struct scatterlist *bounce_sgl,
@@ ... @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
 unsigned long bounce_addr = 0;
unsigned long dest_addr = 0;
unsigned long flags;
+ struct scatterlist *cur_dest_sgl;
+ struct scatterlist *cur_src_sgl;
local_irq_save(flags);
-
+ cur_dest_sgl = orig_sgl;
+ cur_src_sgl = bounce_sgl;
for (i = 0; i < orig_sgl_count; i++) {
- dest_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
+ dest_addr = (unsigned long)
+ kmap_atomic(sg_page(cur_dest_sgl)) +
+ cur_dest_sgl->offset;
dest = dest_addr;
- destlen = orig_sgl[i].length;
+ destlen = cur_dest_sgl->length;
if (bounce_addr == 0)
- bounce_addr = sg_kmap_atomic(bounce_sgl,j);
+ bounce_addr = (unsigned long)kmap_atomic(
+ sg_page(cur_src_sgl));
while (destlen) {
- src = bounce_addr + bounce_sgl[j].offset;
- srclen = bounce_sgl[j].length - bounce_sgl[j].offset;
+ src = bounce_addr + cur_src_sgl->offset;
+ srclen = cur_src_sgl->length - cur_src_sgl->offset;
copylen = min(srclen, destlen);
memcpy((void *)dest, (void *)src, copylen);
total_copied += copylen;
- bounce_sgl[j].offset += copylen;
+ cur_src_sgl->offset += copylen;
destlen -= copylen;
dest += copylen;
- if (bounce_sgl[j].offset == bounce_sgl[j].length) {
+ if (cur_src_sgl->offset == cur_src_sgl->length) {
/* full */
- sg_kunmap_atomic(bounce_addr);
+ kunmap_atomic((void *)bounce_addr);
j++;
/*
@@ ... @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
 /*
* We are done; cleanup and return.
*/
- sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
+ kunmap_atomic((void *)(dest_addr -
+ cur_dest_sgl->offset));
local_irq_restore(flags);
return total_copied;
}
/* if we need to use another bounce buffer */
- if (destlen || i != orig_sgl_count - 1)
- bounce_addr = sg_kmap_atomic(bounce_sgl,j);
+ if (destlen || i != orig_sgl_count - 1) {
+ cur_src_sgl = sg_next(cur_src_sgl);
+ bounce_addr = (unsigned long)
+ kmap_atomic(
+ sg_page(cur_src_sgl));
+ }
} else if (destlen == 0 && i == orig_sgl_count - 1) {
/* unmap the last bounce that is < PAGE_SIZE */
- sg_kunmap_atomic(bounce_addr);
+ kunmap_atomic((void *)bounce_addr);
}
}
- sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
+ kunmap_atomic((void *)(dest_addr - cur_dest_sgl->offset));
+ cur_dest_sgl = sg_next(cur_dest_sgl);
}
local_irq_restore(flags);
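
Both copy helpers replace the same broken pattern: the old code indexed orig_sgl[i] and bounce_sgl[j] as flat arrays, which is wrong once a scatterlist is chained, because the entries then live in linked array segments rather than one contiguous block. A reduced sketch of the per-entry pattern the patch adopts, a cursor advanced with sg_next() plus the usual kmap_atomic() pairing (illustrative only, not code from the driver; copy_all_sg_entries is a made-up name):

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/*
 * Sketch: copy every entry of a possibly-chained scatterlist into dst.
 * The cursor advances with sg_next(), never with sgl + i, and
 * kunmap_atomic() is handed the page base that kmap_atomic() returned,
 * which is why the driver code subtracts ->offset before unmapping.
 */
static void copy_all_sg_entries(struct scatterlist *sgl, int nents, char *dst)
{
	struct scatterlist *cur = sgl;
	int i;

	for (i = 0; i < nents && cur; i++) {
		char *vaddr = kmap_atomic(sg_page(cur));

		memcpy(dst, vaddr + cur->offset, cur->length);
		dst += cur->length;
		kunmap_atomic(vaddr);	/* unmap the base, not vaddr + offset */
		cur = sg_next(cur);	/* follows chain links, NULL at end */
	}
}
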
@@ ... @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
 unsigned long bounce_addr = 0;
unsigned long src_addr = 0;
unsigned long flags;
+ struct scatterlist *cur_src_sgl;
+ struct scatterlist *cur_dest_sgl;
local_irq_save(flags);
+ cur_src_sgl = orig_sgl;
+ cur_dest_sgl = bounce_sgl;
+
for (i = 0; i < orig_sgl_count; i++) {
- src_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
+ src_addr = (unsigned long)
+ kmap_atomic(sg_page(cur_src_sgl)) +
+ cur_src_sgl->offset;
src = src_addr;
- srclen = orig_sgl[i].length;
+ srclen = cur_src_sgl->length;
if (bounce_addr == 0)
- bounce_addr = sg_kmap_atomic(bounce_sgl,j);
+ bounce_addr = (unsigned long)
+ kmap_atomic(sg_page(cur_dest_sgl));
while (srclen) {
/* assume bounce offset always == 0 */
- dest = bounce_addr + bounce_sgl[j].length;
- destlen = PAGE_SIZE - bounce_sgl[j].length;
+ dest = bounce_addr + cur_dest_sgl->length;
+ destlen = PAGE_SIZE - cur_dest_sgl->length;
copylen = min(srclen, destlen);
memcpy((void *)dest, (void *)src, copylen);
total_copied += copylen;
- bounce_sgl[j].length += copylen;
+ cur_dest_sgl->length += copylen;
srclen -= copylen;
src += copylen;
- if (bounce_sgl[j].length == PAGE_SIZE) {
+ if (cur_dest_sgl->length == PAGE_SIZE) {
/* full..move to next entry */
- sg_kunmap_atomic(bounce_addr);
+ kunmap_atomic((void *)bounce_addr);
bounce_addr = 0;
j++;
}
/* if we need to use another bounce buffer */
- if (srclen && bounce_addr == 0)
- bounce_addr = sg_kmap_atomic(bounce_sgl, j);
+ if (srclen && bounce_addr == 0) {
+ cur_dest_sgl = sg_next(cur_dest_sgl);
+ bounce_addr = (unsigned long)
+ kmap_atomic(
+ sg_page(cur_dest_sgl));
+ }
}
- sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
+ kunmap_atomic((void *)(src_addr - cur_src_sgl->offset));
+ cur_src_sgl = sg_next(cur_src_sgl);
}
if (bounce_addr)
- sg_kunmap_atomic(bounce_addr);
+ kunmap_atomic((void *)bounce_addr);
local_irq_restore(flags);
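
The queuecommand hunk below applies the same fix to the loop that builds the PFN array: the indexed sg_page(&sgl[i]) becomes a cursor walked with sg_next(). For reference, the generic shape of that loop (a sketch under the same assumptions; fill_pfn_array and its parameters are hypothetical, not driver code):

#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Sketch: collect the page-frame number of each entry of a chained list. */
static void fill_pfn_array(struct scatterlist *sgl, int nents,
			   unsigned long *pfn_array)
{
	struct scatterlist *cur = sgl;
	int i;

	for (i = 0; i < nents && cur; i++) {
		pfn_array[i] = page_to_pfn(sg_page(cur));
		cur = sg_next(cur);	/* safe even when entries are chained */
	}
}
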
@@ ... @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 struct scatterlist *sgl;
unsigned int sg_count = 0;
struct vmscsi_request *vm_srb;
+ struct scatterlist *cur_sgl;
if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
/*
@@ ... @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 }
cmd_request->data_buffer.offset = sgl[0].offset;
-
- for (i = 0; i < sg_count; i++)
+ cur_sgl = sgl;
+ for (i = 0; i < sg_count; i++) {
cmd_request->data_buffer.pfn_array[i] =
- page_to_pfn(sg_page((&sgl[i])));
+ page_to_pfn(sg_page((cur_sgl)));
+ cur_sgl = sg_next(cur_sgl);
+ }
} else if (scsi_sglist(scmnd)) {
cmd_request->data_buffer.offset =