* For an empty barrier, there's no actual BAR request, which
* in turn makes POSTFLUSH unnecessary. Mask them off.
*/
- if (!rq->hard_nr_sectors) {
+ if (!blk_rq_sectors(rq)) {
q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
QUEUE_ORDERED_DO_POSTFLUSH);
/*
unsigned int blk_rq_bytes(struct request *rq)
{
if (blk_fs_request(rq))
- return rq->hard_nr_sectors << 9;
+ return blk_rq_sectors(rq) << 9;
return rq->data_len;
}
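
Note the << 9 shift: one 512-byte sector is 1 << 9 bytes, so an fs request covering 8 sectors amounts to 8 << 9 = 4096 bytes, while a pc request carries an explicit byte count in data_len. A hedged sketch of the invariant this helper maintains (the WARN_ON is illustrative, not part of the patch):

	/* Illustrative only: for fs requests the byte and sector views agree. */
	if (blk_fs_request(rq))
		WARN_ON(blk_rq_bytes(rq) != blk_rq_sectors(rq) << 9);
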
cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
cfqd->rq_in_driver);
- cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
+ cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
}
static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
dev_dbg(&dev->sbd.core,
"%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n",
__func__, __LINE__, op, n, req->nr_sectors,
- req->hard_nr_sectors);
+ blk_rq_sectors(req));
#endif
start_sector = req->sector * priv->blocking_factor;
blkdev_dequeue_request(req);
/* check that request contains a valid command */
if (!blk_fs_request(req)) {
- viodasd_end_request(req, -EIO, req->hard_nr_sectors);
+ viodasd_end_request(req, -EIO, blk_rq_sectors(req));
continue;
}
/* Try sending the request */
if (send_request(req) != 0)
- viodasd_end_request(req, -EIO, req->hard_nr_sectors);
+ viodasd_end_request(req, -EIO, blk_rq_sectors(req));
}
}
err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n",
event->xRc, bevent->sub_result, err->msg);
- num_sect = req->hard_nr_sectors;
+ num_sect = blk_rq_sectors(req);
}
qlock = req->q->queue_lock;
spin_lock_irqsave(qlock, irq_flags);
/* Okay, it's a data request, set it up for transfer */
dev_dbg(ace->dev,
- "request: sec=%llx hcnt=%lx, ccnt=%x, dir=%i\n",
- (unsigned long long) req->sector, req->hard_nr_sectors,
+ "request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
+ (unsigned long long) req->sector, blk_rq_sectors(req),
req->current_nr_sectors, rq_data_dir(req));
ace->req = req;
ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR;
ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF);
- count = req->hard_nr_sectors;
+ count = blk_rq_sectors(req);
if (rq_data_dir(req)) {
/* Kick off write request */
dev_dbg(ace->dev, "write data\n");
/* bio finished; is there another one? */
if (__blk_end_request(ace->req, 0,
blk_rq_cur_bytes(ace->req))) {
- /* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
- * ace->req->hard_nr_sectors,
+ /* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
+ * blk_rq_sectors(ace->req),
* ace->req->current_nr_sectors);
*/
ace->data_ptr = ace->req->buffer;
if (blk_pc_request(rq))
nsectors = (rq->data_len + 511) >> 9;
else
- nsectors = rq->hard_nr_sectors;
+ nsectors = blk_rq_sectors(rq);
if (nsectors == 0)
nsectors = 1;
return ide_issue_pc(drive, &cmd);
out_end:
- nsectors = rq->hard_nr_sectors;
+ nsectors = blk_rq_sectors(rq);
if (nsectors == 0)
nsectors = 1;
static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
{
int hard_sect = queue_hardsect_size(q);
- long block = (long)rq->hard_sector / (hard_sect >> 9);
- unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9);
+ long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
+ unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
memset(rq->cmd, 0, BLK_MAX_CDB);
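
The conversion above divides by hard_sect >> 9, the number of 512-byte sectors per device block, since queue_hardsect_size() reports the device block size in bytes. A worked sketch with illustrative values for a 2048-byte CD-ROM sector:

	int hard_sect = 2048;			/* bytes per device block */
	int factor = hard_sect >> 9;		/* 2048 / 512 = 4 */
	long block = 100 / factor;		/* 512-byte sector 100 -> block 25 */
	unsigned long blocks = 8 / factor;	/* 8 sectors -> 2 device blocks */
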
if (blk_pc_request(rq))
return rq->data_len;
else
- return rq->hard_cur_sectors << 9;
+ return blk_rq_cur_sectors(rq) << 9;
}
EXPORT_SYMBOL_GPL(ide_rq_bytes);
* and complete the whole request right now
*/
if (blk_noretry_request(rq) && error <= 0)
- nr_bytes = rq->hard_nr_sectors << 9;
+ nr_bytes = blk_rq_sectors(rq) << 9;
rc = ide_end_rq(drive, rq, error, nr_bytes);
if (rc == 0)
unsigned long flags;
if (blk_end_request(req, error, nr_bytes)) {
- int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);
+ int leftover = (blk_rq_sectors(req) << KERNEL_SECTOR_SHIFT);
if (blk_pc_request(req))
leftover = req->data_len;
* to queue the remainder of them.
*/
if (blk_end_request(req, error, bytes)) {
- int leftover = (req->hard_nr_sectors << 9);
+ int leftover = blk_rq_sectors(req) << 9;
if (blk_pc_request(req))
leftover = req->resid_len;
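
Both leftover computations feed the same partial-completion pattern: a nonzero return from blk_end_request() means bytes remain outstanding, and the driver either fails or requeues the remainder. A hedged composite sketch of that shape; the error/requeue split mirrors common driver practice rather than this patch, and locking is elided:

	if (blk_end_request(req, error, bytes)) {
		/* partially completed: leftover sizes what remains */
		int leftover = blk_rq_sectors(req) << 9;	/* fs request */

		if (blk_pc_request(req))
			leftover = req->resid_len;		/* pc request */
		if (error)
			blk_end_request(req, error, leftover);	/* fail the rest */
		else
			blk_requeue_request(q, req);		/* queue lock held */
	}
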
extern void blkdev_dequeue_request(struct request *req);
/*
- * blk_end_request() takes bytes instead of sectors as a complete size.
- * blk_rq_bytes() returns bytes left to complete in the entire request.
- * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
+ * blk_rq_pos() : the current sector
+ * blk_rq_bytes() : bytes left in the entire request
+ * blk_rq_cur_bytes() : bytes left in the current segment
+ * blk_rq_sectors() : sectors left in the entire request
+ * blk_rq_cur_sectors() : sectors left in the current segment
*/
+static inline sector_t blk_rq_pos(const struct request *rq)
+{
+ return rq->hard_sector;
+}
+
extern unsigned int blk_rq_bytes(struct request *rq);
extern unsigned int blk_rq_cur_bytes(struct request *rq);
+static inline unsigned int blk_rq_sectors(const struct request *rq)
+{
+ return rq->hard_nr_sectors;
+}
+
+static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+{
+ return rq->hard_cur_sectors;
+}
+
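
With the accessors in place, a request handler can describe a request without touching the hard_* fields directly. A minimal sketch under assumed names (my_handle_request and my_transfer are hypothetical; rq->buffer, blk_rq_cur_bytes() and __blk_end_request() are the existing interfaces of this era):

	/* Hypothetical driver fragment built on the new accessors. */
	static void my_handle_request(struct request *rq)
	{
		sector_t pos = blk_rq_pos(rq);		   /* current sector */
		unsigned int left = blk_rq_sectors(rq);	   /* sectors left, whole request */
		unsigned int cur = blk_rq_cur_sectors(rq); /* sectors left, this segment */

		pr_debug("req: pos=%llu left=%u cur=%u\n",
			 (unsigned long long)pos, left, cur);
		my_transfer(pos, cur, rq->buffer);	   /* hypothetical hardware I/O */
		__blk_end_request(rq, 0, blk_rq_cur_bytes(rq));
	}
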
/*
* Request completion related functions.
*
rq->cmd_len, rq->cmd);
} else {
what |= BLK_TC_ACT(BLK_TC_FS);
- __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
+ __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_sectors(rq) << 9,
rw, what, rq->errors, 0, NULL);
}
}
__blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
rq->errors, len, data);
else
- __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
+ __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_sectors(rq) << 9,
0, BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);