/*
* This also sets hw/phys segments, boundary and size
*/
- blk_queue_make_request(q, __make_request);
+ blk_queue_make_request(q, blk_queue_bio);
q->sg_reserved_size = INT_MAX;
blk_rq_bio_prep(req->q, req, bio);
}
-int __make_request(struct request_queue *q, struct bio *bio)
+int blk_queue_bio(struct request_queue *q, struct bio *bio)
{
const bool sync = !!(bio->bi_rw & REQ_SYNC);
struct blk_plug *plug;
out:
return 0;
}
-EXPORT_SYMBOL_GPL(__make_request); /* for device mapper only */
+EXPORT_SYMBOL_GPL(blk_queue_bio); /* for device mapper only */
/*
* If bio->bi_bdev is a partition, remap the location
struct mapped_device *md = q->queuedata;
if (dm_request_based(md))
- return __make_request(q, bio);
+ return blk_queue_bio(q, bio);
return _dm_request(q, bio);
}
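/*
 * A minimal sketch (not part of the hunks above) of how a stacking driver
 * could use the newly exported blk_queue_bio(): registering it as the
 * queue's make_request handler sends every incoming bio through the block
 * layer's default submission path, which is what request-based device
 * mapper does in the dm_request() change shown above.  The helper name
 * example_setup_queue() is hypothetical; blk_queue_make_request() and
 * blk_queue_bio() are the interfaces used in this patch.
 */
#include <linux/blkdev.h>

static void example_setup_queue(struct request_queue *q)
{
	/* Route all bios to the block layer's default bio handler. */
	blk_queue_make_request(q, blk_queue_bio);
}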
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
struct scsi_ioctl_command __user *);
-extern int __make_request(struct request_queue *q, struct bio *bio);
+extern int blk_queue_bio(struct request_queue *q, struct bio *bio);
/*
* A queue has just exited congestion. Note this in the global counter of