}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
-void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
-{
- if (queueable)
- blk_queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q);
- else
- blk_queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q);
-}
-EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
-
/**
* blk_set_queue_depth - tell the block layer about the device queue depth
* @q: the request queue for the device
scsi_change_queue_depth(sdev, depth);
}
- blk_queue_flush_queueable(q, false);
-
if (dev->flags & ATA_DFLAG_TRUSTED)
sdev->security_supported = 1;
if (dev->cache_size > 0) {
set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
blk_queue_write_cache(nullb->q, true, true);
- blk_queue_flush_queueable(nullb->q, true);
}
if (dev->zoned) {
#define QUEUE_FLAG_POLL 19 /* IO polling enabled if set */
#define QUEUE_FLAG_WC 20 /* Write back caching */
#define QUEUE_FLAG_FUA 21 /* device supports FUA writes */
-#define QUEUE_FLAG_FLUSH_NQ 22 /* flush not queueuable */
#define QUEUE_FLAG_DAX 23 /* device supports DAX */
#define QUEUE_FLAG_STATS 24 /* track IO start and completion times */
#define QUEUE_FLAG_POLL_STATS 25 /* collecting stats for hybrid polling */
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
-extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
/*
return bdev->bd_block_size;
}
-static inline bool queue_flush_queueable(struct request_queue *q)
-{
- return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
-}
-
typedef struct {struct page *v;} Sector;
unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
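
With blk_queue_flush_queueable() and QUEUE_FLAG_FLUSH_NQ removed, drivers describe their volatile-cache behaviour solely through blk_queue_write_cache(), as the null_blk hunk above already does. Below is a minimal sketch, not part of this patch, of what a driver's queue setup looks like after the change; my_drv_setup_queue() and struct my_dev are made-up names used only for illustration:

	/*
	 * Illustrative sketch only (not from this patch): after this change
	 * there is no separate "flush not queueable" knob; a driver with a
	 * volatile write cache just advertises it via blk_queue_write_cache().
	 * my_drv_setup_queue() and struct my_dev are hypothetical.
	 */
	static void my_drv_setup_queue(struct my_dev *dev, struct request_queue *q)
	{
		/* write-back cache present, device also honours FUA writes */
		blk_queue_write_cache(q, true, true);
	}

A driver that has no volatile cache would instead pass false for both arguments, which leaves flush/FUA requests to be handled by the block layer's emulation as before.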