EXPORT_SYMBOL(blk_queue_ordered);
-/**
- * blk_queue_issue_flush_fn - set function for issuing a flush
- * @q: the request queue
- * @iff: the function to be called issuing the flush
- *
- * Description:
- * If a driver supports issuing a flush command, the support is notified
- * to the block layer by defining it through this call.
- *
- **/
-void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff)
-{
- q->issue_flush_fn = iff;
-}
-
-EXPORT_SYMBOL(blk_queue_issue_flush_fn);
-
/*
* Cache flushing for ordered writes handling
*/
EXPORT_SYMBOL(blk_execute_rq);
+static void bio_end_empty_barrier(struct bio *bio, int err)
+{
+ if (err)
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+
+ complete(bio->bi_private);
+}
+
/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 * @error_sector: error sector
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
{
+ DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q;
+ struct bio *bio;
+ int ret;
if (bdev->bd_disk == NULL)
return -ENXIO;
q = bdev_get_queue(bdev);
if (!q)
return -ENXIO;
- if (!q->issue_flush_fn)
- return -EOPNOTSUPP;
- return q->issue_flush_fn(q, bdev->bd_disk, error_sector);
+ bio = bio_alloc(GFP_KERNEL, 0);
+ if (!bio)
+ return -ENOMEM;
+
+ bio->bi_end_io = bio_end_empty_barrier;
+ bio->bi_private = &wait;
+ bio->bi_bdev = bdev;
+ submit_bio(1 << BIO_RW_BARRIER, bio);
+
+ wait_for_completion(&wait);
+
+ /*
+ * The driver must store the error location in ->bi_sector, if
+ * it supports it. For non-stacked drivers, this should be copied
+ * from rq->sector.
+ */
+ if (error_sector)
+ *error_sector = bio->bi_sector;
+
+ ret = 0;
+ if (!bio_flagged(bio, BIO_UPTODATE))
+ ret = -EIO;
+
+ bio_put(bio);
+ return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
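For reference, the new primitive is synchronous and self-contained: a caller needs only a block_device and, optionally, somewhere to store the error offset. A minimal usage sketch follows (the function name and error reporting are illustrative, not part of the patch):

	static int example_sync_device(struct block_device *bdev)
	{
		sector_t error_sector;
		int err;

		/* Blocks until the empty barrier bio completes. */
		err = blkdev_issue_flush(bdev, &error_sector);
		if (err)
			printk(KERN_ERR "%s: cache flush failed near sector %llu\n",
			       bdev->bd_disk->disk_name,
			       (unsigned long long)error_sector);
		return err;
	}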
req->cmd_type = REQ_TYPE_FLUSH;
}
-static int ps3disk_issue_flush(struct request_queue *q, struct gendisk *gendisk,
- sector_t *sector)
-{
- struct ps3_storage_device *dev = q->queuedata;
- struct request *req;
- int res;
-
- dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
-
- req = blk_get_request(q, WRITE, __GFP_WAIT);
- ps3disk_prepare_flush(q, req);
- res = blk_execute_rq(q, gendisk, req, 0);
- if (res)
- dev_err(&dev->sbd.core, "%s:%u: flush request failed %d\n",
- __func__, __LINE__, res);
- blk_put_request(req);
- return res;
-}
-
-
static unsigned long ps3disk_mask;
static DEFINE_MUTEX(ps3disk_mask_mutex);
blk_queue_dma_alignment(queue, dev->blk_size-1);
blk_queue_hardsect_size(queue, dev->blk_size);
- blk_queue_issue_flush_fn(queue, ps3disk_issue_flush);
blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
ps3disk_prepare_flush);
rq->buffer = rq->cmd;
}
-static int idedisk_issue_flush(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- ide_drive_t *drive = q->queuedata;
- struct request *rq;
- int ret;
-
- if (!drive->wcache)
- return 0;
-
- rq = blk_get_request(q, WRITE, __GFP_WAIT);
-
- idedisk_prepare_flush(q, rq);
-
- ret = blk_execute_rq(q, disk, rq, 0);
-
- /*
- * if we failed and caller wants error offset, get it
- */
- if (ret && error_sector)
- *error_sector = ide_get_error_location(drive, rq->cmd);
-
- blk_put_request(rq);
- return ret;
-}
-
/*
* This is tightly woven into the driver->do_special can not touch.
* DON'T do it again until a total personality rewrite is committed.
struct hd_driveid *id = drive->id;
unsigned ordered = QUEUE_ORDERED_NONE;
prepare_flush_fn *prep_fn = NULL;
- issue_flush_fn *issue_fn = NULL;
if (drive->wcache) {
unsigned long long capacity;
if (barrier) {
ordered = QUEUE_ORDERED_DRAIN_FLUSH;
prep_fn = idedisk_prepare_flush;
- issue_fn = idedisk_issue_flush;
}
} else
ordered = QUEUE_ORDERED_DRAIN;
blk_queue_ordered(drive->queue, ordered, prep_fn);
- blk_queue_issue_flush_fn(drive->queue, issue_fn);
}
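As the ps3disk and ide-disk hunks show, the driver-side pattern is now uniform: flush capability is advertised solely through blk_queue_ordered() with a prepare_flush_fn, and the block layer generates the flush requests itself, including the one behind blkdev_issue_flush()'s empty barrier. A minimal sketch of the pattern (the mydrv_* names are hypothetical):

	/* Turn rq into the device's cache-flush command. */
	static void mydrv_prepare_flush(struct request_queue *q, struct request *rq)
	{
		rq->cmd_type = REQ_TYPE_FLUSH;
	}

	static void mydrv_init_queue(struct request_queue *q)
	{
		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, mydrv_prepare_flush);
	}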
static int write_cache(ide_drive_t *drive, int arg)
}
}
-int dm_table_flush_all(struct dm_table *t)
-{
- struct list_head *d, *devices = dm_table_get_devices(t);
- int ret = 0;
- unsigned i;
-
- for (i = 0; i < t->num_targets; i++)
- if (t->targets[i].type->flush)
- t->targets[i].type->flush(&t->targets[i]);
-
- for (d = devices->next; d != devices; d = d->next) {
- struct dm_dev *dd = list_entry(d, struct dm_dev, list);
- struct request_queue *q = bdev_get_queue(dd->bdev);
- int err;
-
- if (!q->issue_flush_fn)
- err = -EOPNOTSUPP;
- else
- err = q->issue_flush_fn(q, dd->bdev->bd_disk, NULL);
-
- if (!ret)
- ret = err;
- }
-
- return ret;
-}
-
struct mapped_device *dm_table_get_md(struct dm_table *t)
{
dm_get(t->md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
EXPORT_SYMBOL(dm_table_unplug_all);
-EXPORT_SYMBOL(dm_table_flush_all);
return 0;
}
-static int dm_flush_all(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- struct mapped_device *md = q->queuedata;
- struct dm_table *map = dm_get_table(md);
- int ret = -ENXIO;
-
- if (map) {
- ret = dm_table_flush_all(map);
- dm_table_put(map);
- }
-
- return ret;
-}
-
static void dm_unplug_all(struct request_queue *q)
{
struct mapped_device *md = q->queuedata;
blk_queue_make_request(md->queue, dm_request);
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
md->queue->unplug_fn = dm_unplug_all;
- md->queue->issue_flush_fn = dm_flush_all;
md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
if (!md->io_pool)
int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
void dm_table_unplug_all(struct dm_table *t);
-int dm_table_flush_all(struct dm_table *t);
/*-----------------------------------------------------------------
* A registry of target types.
}
}
-static int linear_issue_flush(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- mddev_t *mddev = q->queuedata;
- linear_conf_t *conf = mddev_to_conf(mddev);
- int i, ret = 0;
-
- for (i=0; i < mddev->raid_disks && ret == 0; i++) {
- struct block_device *bdev = conf->disks[i].rdev->bdev;
- struct request_queue *r_queue = bdev_get_queue(bdev);
-
- if (!r_queue->issue_flush_fn)
- ret = -EOPNOTSUPP;
- else
- ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
- }
- return ret;
-}
-
static int linear_congested(void *data, int bits)
{
mddev_t *mddev = data;
blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
mddev->queue->unplug_fn = linear_unplug;
- mddev->queue->issue_flush_fn = linear_issue_flush;
mddev->queue->backing_dev_info.congested_fn = linear_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
return 0;
mddev->pers->stop(mddev);
mddev->queue->merge_bvec_fn = NULL;
mddev->queue->unplug_fn = NULL;
- mddev->queue->issue_flush_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
if (mddev->pers->sync_request)
sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
seq_printf (seq, "]");
}
-static int multipath_issue_flush(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- mddev_t *mddev = q->queuedata;
- multipath_conf_t *conf = mddev_to_conf(mddev);
- int i, ret = 0;
-
- rcu_read_lock();
- for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct block_device *bdev = rdev->bdev;
- struct request_queue *r_queue = bdev_get_queue(bdev);
-
- if (!r_queue->issue_flush_fn)
- ret = -EOPNOTSUPP;
- else {
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
- ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
- error_sector);
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
- }
- }
- }
- rcu_read_unlock();
- return ret;
-}
static int multipath_congested(void *data, int bits)
{
mddev_t *mddev = data;
mddev->array_size = mddev->size;
mddev->queue->unplug_fn = multipath_unplug;
- mddev->queue->issue_flush_fn = multipath_issue_flush;
mddev->queue->backing_dev_info.congested_fn = multipath_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
}
}
-static int raid0_issue_flush(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- mddev_t *mddev = q->queuedata;
- raid0_conf_t *conf = mddev_to_conf(mddev);
- mdk_rdev_t **devlist = conf->strip_zone[0].dev;
- int i, ret = 0;
-
- for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- struct block_device *bdev = devlist[i]->bdev;
- struct request_queue *r_queue = bdev_get_queue(bdev);
-
- if (!r_queue->issue_flush_fn)
- ret = -EOPNOTSUPP;
- else
- ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, error_sector);
- }
- return ret;
-}
-
static int raid0_congested(void *data, int bits)
{
mddev_t *mddev = data;
mddev->queue->unplug_fn = raid0_unplug;
- mddev->queue->issue_flush_fn = raid0_issue_flush;
mddev->queue->backing_dev_info.congested_fn = raid0_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
md_wakeup_thread(mddev->thread);
}
-static int raid1_issue_flush(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- mddev_t *mddev = q->queuedata;
- conf_t *conf = mddev_to_conf(mddev);
- int i, ret = 0;
-
- rcu_read_lock();
- for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct block_device *bdev = rdev->bdev;
- struct request_queue *r_queue = bdev_get_queue(bdev);
-
- if (!r_queue->issue_flush_fn)
- ret = -EOPNOTSUPP;
- else {
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
- ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
- error_sector);
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
- }
- }
- }
- rcu_read_unlock();
- return ret;
-}
-
static int raid1_congested(void *data, int bits)
{
mddev_t *mddev = data;
mddev->array_size = mddev->size;
mddev->queue->unplug_fn = raid1_unplug;
- mddev->queue->issue_flush_fn = raid1_issue_flush;
mddev->queue->backing_dev_info.congested_fn = raid1_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
md_wakeup_thread(mddev->thread);
}
-static int raid10_issue_flush(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- mddev_t *mddev = q->queuedata;
- conf_t *conf = mddev_to_conf(mddev);
- int i, ret = 0;
-
- rcu_read_lock();
- for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct block_device *bdev = rdev->bdev;
- struct request_queue *r_queue = bdev_get_queue(bdev);
-
- if (!r_queue->issue_flush_fn)
- ret = -EOPNOTSUPP;
- else {
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
- ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
- error_sector);
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
- }
- }
- }
- rcu_read_unlock();
- return ret;
-}
-
static int raid10_congested(void *data, int bits)
{
mddev_t *mddev = data;
mddev->resync_max_sectors = size << conf->chunk_shift;
mddev->queue->unplug_fn = raid10_unplug;
- mddev->queue->issue_flush_fn = raid10_issue_flush;
mddev->queue->backing_dev_info.congested_fn = raid10_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
unplug_slaves(mddev);
}
-static int raid5_issue_flush(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- mddev_t *mddev = q->queuedata;
- raid5_conf_t *conf = mddev_to_conf(mddev);
- int i, ret = 0;
-
- rcu_read_lock();
- for (i=0; i<mddev->raid_disks && ret == 0; i++) {
- mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)) {
- struct block_device *bdev = rdev->bdev;
- struct request_queue *r_queue = bdev_get_queue(bdev);
-
- if (!r_queue->issue_flush_fn)
- ret = -EOPNOTSUPP;
- else {
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
- ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk,
- error_sector);
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
- }
- }
- }
- rcu_read_unlock();
- return ret;
-}
-
static int raid5_congested(void *data, int bits)
{
mddev_t *mddev = data;
mdname(mddev));
mddev->queue->unplug_fn = raid5_unplug_device;
- mddev->queue->issue_flush_fn = raid5_issue_flush;
mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = raid5_congested;
};
/**
- * i2o_block_issue_flush - device-flush interface for block-layer
- * @queue: the request queue of the device which should be flushed
- * @disk: gendisk
- * @error_sector: error offset
- *
- * Helper function to provide flush functionality to block-layer.
- *
- * Returns 0 on success or negative error code on failure.
- */
-
-static int i2o_block_issue_flush(struct request_queue * queue, struct gendisk *disk,
- sector_t * error_sector)
-{
- struct i2o_block_device *i2o_blk_dev = queue->queuedata;
- int rc = -ENODEV;
-
- if (likely(i2o_blk_dev))
- rc = i2o_block_device_flush(i2o_blk_dev->i2o_dev);
-
- return rc;
-}
-
-/**
* i2o_block_device_mount - Mount (load) the media of device dev
* @dev: I2O device which should receive the mount request
* @media_id: Media Identifier
}
blk_queue_prep_rq(queue, i2o_block_prep_req_fn);
- blk_queue_issue_flush_fn(queue, i2o_block_issue_flush);
gd->major = I2O_MAJOR;
gd->queue = queue;
return 0;
}
-static int sd_issue_flush(struct request_queue *q, struct gendisk *disk,
- sector_t *error_sector)
-{
- int ret = 0;
- struct scsi_device *sdp = q->queuedata;
- struct scsi_disk *sdkp;
-
- if (sdp->sdev_state != SDEV_RUNNING)
- return -ENXIO;
-
- sdkp = scsi_disk_get_from_dev(&sdp->sdev_gendev);
-
- if (!sdkp)
- return -ENODEV;
-
- if (sdkp->WCE)
- ret = sd_sync_cache(sdkp);
- scsi_disk_put(sdkp);
- return ret;
-}
-
static void sd_prepare_flush(struct request_queue *q, struct request *rq)
{
memset(rq->cmd, 0, sizeof(rq->cmd));
sd_revalidate_disk(gd);
blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
- blk_queue_issue_flush_fn(sdp->request_queue, sd_issue_flush);
gd->driverfs_dev = &sdp->sdev_gendev;
gd->flags = GENHD_FL_DRIVERFS;
struct bio_vec;
typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
-typedef int (issue_flush_fn) (struct request_queue *, struct gendisk *, sector_t *);
typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
prep_rq_fn *prep_rq_fn;
unplug_fn *unplug_fn;
merge_bvec_fn *merge_bvec_fn;
- issue_flush_fn *issue_flush_fn;
prepare_flush_fn *prepare_flush_fn;
softirq_done_fn *softirq_done_fn;
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
-extern void blk_queue_issue_flush_fn(struct request_queue *, issue_flush_fn *);
extern int blk_do_ordered(struct request_queue *, struct request **);
extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);