used to indicate the whole sequence of performing barrier requests
including draining and flushing.
-typedef void (prepare_flush_fn)(request_queue_t *q, struct request *rq);
+typedef void (prepare_flush_fn)(struct request_queue *q, struct request *rq);
-int blk_queue_ordered(request_queue_t *q, unsigned ordered,
+int blk_queue_ordered(struct request_queue *q, unsigned ordered,
prepare_flush_fn *prepare_flush_fn);
@q : the queue in question
For example, the SCSI disk driver's prepare_flush_fn looks like the
following.
-static void sd_prepare_flush(request_queue_t *q, struct request *rq)
+static void sd_prepare_flush(struct request_queue *q, struct request *rq)
{
memset(rq->cmd, 0, sizeof(rq->cmd));
rq->cmd_type = REQ_TYPE_BLOCK_PC;
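A driver wires this up by passing its prepare_flush_fn to blk_queue_ordered()
at init time. A minimal sketch under stated assumptions: the mydrv_* names and
MYDRV_FLUSH_CACHE opcode are hypothetical, and the QUEUE_ORDERED_DRAIN /
QUEUE_ORDERED_DRAIN_FLUSH modes and REQ_TYPE_BLOCK_PC are the ones used
elsewhere in this series.

static void mydrv_prepare_flush(struct request_queue *q, struct request *rq)
{
	memset(rq->cmd, 0, sizeof(rq->cmd));
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->timeout = 60 * HZ;
	rq->cmd[0] = MYDRV_FLUSH_CACHE;	/* device specific flush opcode */
	rq->cmd_len = 10;
}

static void mydrv_setup_ordered(struct request_queue *q, int write_cache)
{
	/* drain only for write-through devices, drain + flush otherwise */
	unsigned ordered = write_cache ? QUEUE_ORDERED_DRAIN_FLUSH
				       : QUEUE_ORDERED_DRAIN;

	blk_queue_ordered(q, ordered, mydrv_prepare_flush);
}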
queueing (typically known as tagged command queueing), ie manage more than
one outstanding command on a queue at any given time.
- blk_queue_init_tags(request_queue_t *q, int depth)
+ blk_queue_init_tags(struct request_queue *q, int depth)
Initialize internal command tagging structures for a maximum
depth of 'depth'.
- blk_queue_free_tags((request_queue_t *q)
+ blk_queue_free_tags(struct request_queue *q)
Teardown tag info associated with the queue. This will be done
automatically by block if blk_queue_cleanup() is called on a queue
The above are initialization and exit management, the main helpers during
normal operations are:
- blk_queue_start_tag(request_queue_t *q, struct request *rq)
+ blk_queue_start_tag(struct request_queue *q, struct request *rq)
Start tagged operation for this request. A free tag number between
0 and 'depth' is assigned to the request (rq->tag holds this number),
for this queue is already achieved (or if the tag wasn't started for
some other reason), 1 is returned. Otherwise 0 is returned.
- blk_queue_end_tag(request_queue_t *q, struct request *rq)
+ blk_queue_end_tag(struct request_queue *q, struct request *rq)
End tagged operation on this request. 'rq' is removed from the internal
book keeping structures.
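Put together, a tagged driver's request_fn might look roughly like the sketch
below. This is a minimal sketch, not part of this patch: the mydrv_* names are
hypothetical, error handling is omitted, and it assumes the three-argument
blk_queue_init_tags() prototype from this series (passing NULL so the block
layer allocates the tag map). The request_fn runs with the queue lock held,
which both tag helpers require.

static int mydrv_init_queue(struct request_queue *q)
{
	/* allow up to 64 outstanding tagged commands */
	return blk_queue_init_tags(q, 64, NULL);
}

static void mydrv_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (blk_queue_start_tag(q, rq))
			break;	/* depth exhausted, retry after a completion */
		/* issue rq to the hardware using rq->tag ... */
	}
}

/* completion path, called with the queue lock held */
static void mydrv_complete(struct request_queue *q, struct request *rq)
{
	blk_queue_end_tag(q, rq);	/* release rq->tag */
	/* then complete rq as usual via end_that_request_first/last */
}

On a bus reset, blk_queue_invalidate_tags() (described next) hands everything
that was started back to the request queue so the request_fn sees it again.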
the hardware and software block queue and enable the driver to sanely restart
all the outstanding requests. There's a third helper to do that:
- blk_queue_invalidate_tags(request_queue_t *q)
+ blk_queue_invalidate_tags(struct request_queue *q)
Clear the internal block tag queue and re-add all the pending requests
to the request queue. The driver will receive them again on the
struct bio *biotail DBI Last bio in request
-request_queue_t *q DB Request queue this request belongs to
+struct request_queue *q DB Request queue this request belongs to
struct request_list *rl B Request list this request came from
measured from __make_request() to end_that_request_last()).
Field 9 -- # of I/Os currently in progress
The only field that should go to zero. Incremented as requests are
- given to appropriate request_queue_t and decremented as they finish.
+ given to appropriate struct request_queue and decremented as they finish.
Field 10 -- # of milliseconds spent doing I/Os
This field increases so long as field 9 is nonzero.
Field 11 -- weighted # of milliseconds spent doing I/Os
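All three fields appear verbatim in /proc/diskstats, after the major, minor
and device name and the eight read/write counters. A small user-space sketch
that prints them; it assumes the standard 11-field per-disk line (partition
lines with fewer fields are skipped) and uses "sda" purely as an example
device name.

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *fp = fopen("/proc/diskstats", "r");
	char line[256], name[32];
	unsigned long long f[11];
	int major, minor;

	if (!fp)
		return 1;
	while (fgets(line, sizeof(line), fp)) {
		if (sscanf(line, "%d %d %31s %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu",
			   &major, &minor, name, &f[0], &f[1], &f[2], &f[3],
			   &f[4], &f[5], &f[6], &f[7], &f[8], &f[9], &f[10]) != 14)
			continue;	/* partition line, fewer fields */
		if (!strcmp(name, "sda"))
			printf("in flight %llu, io ms %llu, weighted io ms %llu\n",
			       f[8], f[9], f[10]);
	}
	fclose(fp);
	return 0;
}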
/*
* Mailbox interrupt handler
*/
-static void mbox_txq_fn(request_queue_t * q)
+static void mbox_txq_fn(struct request_queue * q)
{
}
-static void mbox_rxq_fn(request_queue_t * q)
+static void mbox_rxq_fn(struct request_queue * q)
{
}
{
struct request *rq;
mbox_msg_t msg;
- request_queue_t *q = mbox->rxq->queue;
+ struct request_queue *q = mbox->rxq->queue;
disable_mbox_irq(mbox, IRQ_RX);
request_fn_proc * proc,
void (*work) (struct work_struct *))
{
- request_queue_t *q;
+ struct request_queue *q;
struct omap_mbox_queue *mq;
mq = kzalloc(sizeof(struct omap_mbox_queue), GFP_KERNEL);
" Change the ubd device name to \"hd\".\n\n"
);
-static void do_ubd_request(request_queue_t * q);
+static void do_ubd_request(struct request_queue * q);
/* Only changed by ubd_init, which is an initcall. */
int thread_fd = -1;
}
/* Called with dev->lock held */
-static void do_ubd_request(request_queue_t *q)
+static void do_ubd_request(struct request_queue *q)
{
struct io_thread_req *io_req;
struct request *req;
* as_completed_request is to be called when a request has completed and
* returned something to the requesting process, be it an error or data.
*/
-static void as_completed_request(request_queue_t *q, struct request *rq)
+static void as_completed_request(struct request_queue *q, struct request *rq)
{
struct as_data *ad = q->elevator->elevator_data;
* reference unless it replaces the request at some part of the elevator
* (ie. the dispatch queue)
*/
-static void as_remove_queued_request(request_queue_t *q, struct request *rq)
+static void as_remove_queued_request(struct request_queue *q,
+ struct request *rq)
{
const int data_dir = rq_is_sync(rq);
struct as_data *ad = q->elevator->elevator_data;
* read/write expire, batch expire, etc, and moves it to the dispatch
* queue. Returns 1 if a request was found, 0 otherwise.
*/
-static int as_dispatch_request(request_queue_t *q, int force)
+static int as_dispatch_request(struct request_queue *q, int force)
{
struct as_data *ad = q->elevator->elevator_data;
const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
/*
* add rq to rbtree and fifo
*/
-static void as_add_request(request_queue_t *q, struct request *rq)
+static void as_add_request(struct request_queue *q, struct request *rq)
{
struct as_data *ad = q->elevator->elevator_data;
int data_dir;
RQ_SET_STATE(rq, AS_RQ_QUEUED);
}
-static void as_activate_request(request_queue_t *q, struct request *rq)
+static void as_activate_request(struct request_queue *q, struct request *rq)
{
WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED);
RQ_SET_STATE(rq, AS_RQ_REMOVED);
atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched);
}
-static void as_deactivate_request(request_queue_t *q, struct request *rq)
+static void as_deactivate_request(struct request_queue *q, struct request *rq)
{
WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED);
RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
* is not empty - it is used in the block layer to check for plugging and
* merging opportunities
*/
-static int as_queue_empty(request_queue_t *q)
+static int as_queue_empty(struct request_queue *q)
{
struct as_data *ad = q->elevator->elevator_data;
}
static int
-as_merge(request_queue_t *q, struct request **req, struct bio *bio)
+as_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
struct as_data *ad = q->elevator->elevator_data;
sector_t rb_key = bio->bi_sector + bio_sectors(bio);
return ELEVATOR_NO_MERGE;
}
-static void as_merged_request(request_queue_t *q, struct request *req, int type)
+static void as_merged_request(struct request_queue *q, struct request *req,
+ int type)
{
struct as_data *ad = q->elevator->elevator_data;
}
}
-static void as_merged_requests(request_queue_t *q, struct request *req,
+static void as_merged_requests(struct request_queue *q, struct request *req,
struct request *next)
{
/*
spin_unlock_irqrestore(q->queue_lock, flags);
}
-static int as_may_queue(request_queue_t *q, int rw)
+static int as_may_queue(struct request_queue *q, int rw)
{
int ret = ELV_MQUEUE_MAY;
struct as_data *ad = q->elevator->elevator_data;
/*
* initialize elevator private data (as_data).
*/
-static void *as_init_queue(request_queue_t *q)
+static void *as_init_queue(struct request_queue *q)
{
struct as_data *ad;
kfree(bt);
}
-static int blk_trace_remove(request_queue_t *q)
+static int blk_trace_remove(struct request_queue *q)
{
struct blk_trace *bt;
/*
* Setup everything required to start tracing
*/
-static int blk_trace_setup(request_queue_t *q, struct block_device *bdev,
+static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
char __user *arg)
{
struct blk_user_trace_setup buts;
return ret;
}
-static int blk_trace_startstop(request_queue_t *q, int start)
+static int blk_trace_startstop(struct request_queue *q, int start)
{
struct blk_trace *bt;
int ret;
**/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
- request_queue_t *q;
+ struct request_queue *q;
int ret, start = 0;
q = bdev_get_queue(bdev);
* @q: the request queue associated with the device
*
**/
-void blk_trace_shutdown(request_queue_t *q)
+void blk_trace_shutdown(struct request_queue *q)
{
if (q->blk_trace) {
blk_trace_startstop(q, 0);
#define BSG_VERSION "0.4"
struct bsg_device {
- request_queue_t *queue;
+ struct request_queue *queue;
spinlock_t lock;
struct list_head busy_list;
struct list_head done_list;
return ret;
}
-static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
+static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_v4 *hdr, int has_write_perm)
{
memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
* Check if sg_io_v4 from user is allowed and valid
*/
static int
-bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
+bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
int ret = 0;
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
- request_queue_t *q = bd->queue;
+ struct request_queue *q = bd->queue;
struct request *rq, *next_rq = NULL;
int ret, rw;
unsigned int dxfer_len;
* do final setup of a 'bc' and submit the matching 'rq' to the block
* layer for io
*/
-static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
+static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
struct bsg_command *bc, struct request *rq)
{
rq->sense = bc->sense;
bc = NULL;
ret = 0;
while (nr_commands) {
- request_queue_t *q = bd->queue;
+ struct request_queue *q = bd->queue;
bc = bsg_alloc_command(bd);
if (IS_ERR(bc)) {
* Per block device queue structure
*/
struct cfq_data {
- request_queue_t *queue;
+ struct request_queue *queue;
/*
* rr list of queues with requests and the count of them
CFQ_CFQQ_FNS(sync);
#undef CFQ_CFQQ_FNS
-static void cfq_dispatch_insert(request_queue_t *, struct request *);
+static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
struct task_struct *, gfp_t);
static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *,
kblockd_schedule_work(&cfqd->unplug_work);
}
-static int cfq_queue_empty(request_queue_t *q)
+static int cfq_queue_empty(struct request_queue *q)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
return NULL;
}
-static void cfq_activate_request(request_queue_t *q, struct request *rq)
+static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
}
-static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
+static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
}
}
-static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
+static int cfq_merge(struct request_queue *q, struct request **req,
+ struct bio *bio)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct request *__rq;
return ELEVATOR_NO_MERGE;
}
-static void cfq_merged_request(request_queue_t *q, struct request *req,
+static void cfq_merged_request(struct request_queue *q, struct request *req,
int type)
{
if (type == ELEVATOR_FRONT_MERGE) {
}
static void
-cfq_merged_requests(request_queue_t *q, struct request *rq,
+cfq_merged_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
/*
cfq_remove_request(next);
}
-static int cfq_allow_merge(request_queue_t *q, struct request *rq,
+static int cfq_allow_merge(struct request_queue *q, struct request *rq,
struct bio *bio)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
/*
* Move request from internal lists to the request queue dispatch list.
*/
-static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
+static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq = RQ_CFQQ(rq);
return dispatched;
}
-static int cfq_dispatch_requests(request_queue_t *q, int force)
+static int cfq_dispatch_requests(struct request_queue *q, int force)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq;
struct cfq_data *cfqd = cic->key;
if (cfqd) {
- request_queue_t *q = cfqd->queue;
+ struct request_queue *q = cfqd->queue;
spin_lock_irq(q->queue_lock);
__cfq_exit_single_io_context(cfqd, cic);
}
}
-static void cfq_insert_request(request_queue_t *q, struct request *rq)
+static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq = RQ_CFQQ(rq);
cfq_rq_enqueued(cfqd, cfqq, rq);
}
-static void cfq_completed_request(request_queue_t *q, struct request *rq)
+static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
struct cfq_data *cfqd = cfqq->cfqd;
return ELV_MQUEUE_MAY;
}
-static int cfq_may_queue(request_queue_t *q, int rw)
+static int cfq_may_queue(struct request_queue *q, int rw)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct task_struct *tsk = current;
* Allocate cfq data structures associated with this request.
*/
static int
-cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
+cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct task_struct *tsk = current;
{
struct cfq_data *cfqd =
container_of(work, struct cfq_data, unplug_work);
- request_queue_t *q = cfqd->queue;
+ struct request_queue *q = cfqd->queue;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
static void cfq_exit_queue(elevator_t *e)
{
struct cfq_data *cfqd = e->elevator_data;
- request_queue_t *q = cfqd->queue;
+ struct request_queue *q = cfqd->queue;
cfq_shutdown_timer_wq(cfqd);
kfree(cfqd);
}
-static void *cfq_init_queue(request_queue_t *q)
+static void *cfq_init_queue(struct request_queue *q)
{
struct cfq_data *cfqd;
/*
* remove rq from rbtree and fifo.
*/
-static void deadline_remove_request(request_queue_t *q, struct request *rq)
+static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
struct deadline_data *dd = q->elevator->elevator_data;
}
static int
-deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
+deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
struct deadline_data *dd = q->elevator->elevator_data;
struct request *__rq;
return ret;
}
-static void deadline_merged_request(request_queue_t *q, struct request *req,
- int type)
+static void deadline_merged_request(struct request_queue *q,
+ struct request *req, int type)
{
struct deadline_data *dd = q->elevator->elevator_data;
}
static void
-deadline_merged_requests(request_queue_t *q, struct request *req,
+deadline_merged_requests(struct request_queue *q, struct request *req,
struct request *next)
{
/*
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
{
- request_queue_t *q = rq->q;
+ struct request_queue *q = rq->q;
deadline_remove_request(q, rq);
elv_dispatch_add_tail(q, rq);
* deadline_dispatch_requests selects the best request according to
* read/write expire, fifo_batch, etc
*/
-static int deadline_dispatch_requests(request_queue_t *q, int force)
+static int deadline_dispatch_requests(struct request_queue *q, int force)
{
struct deadline_data *dd = q->elevator->elevator_data;
const int reads = !list_empty(&dd->fifo_list[READ]);
return 1;
}
-static int deadline_queue_empty(request_queue_t *q)
+static int deadline_queue_empty(struct request_queue *q)
{
struct deadline_data *dd = q->elevator->elevator_data;
/*
* initialize elevator private data (deadline_data).
*/
-static void *deadline_init_queue(request_queue_t *q)
+static void *deadline_init_queue(struct request_queue *q)
{
struct deadline_data *dd;
*/
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
- request_queue_t *q = rq->q;
+ struct request_queue *q = rq->q;
elevator_t *e = q->elevator;
if (e->ops->elevator_allow_merge_fn)
return e;
}
-static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
+static void *elevator_init_queue(struct request_queue *q,
+ struct elevator_queue *eq)
{
return eq->ops->elevator_init_fn(q);
}
-static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
+static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
void *data)
{
q->elevator = eq;
static struct kobj_type elv_ktype;
-static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
+static elevator_t *elevator_alloc(struct request_queue *q,
+ struct elevator_type *e)
{
elevator_t *eq;
int i;
kfree(e);
}
-int elevator_init(request_queue_t *q, char *name)
+int elevator_init(struct request_queue *q, char *name)
{
struct elevator_type *e = NULL;
struct elevator_queue *eq;
EXPORT_SYMBOL(elevator_exit);
-static void elv_activate_rq(request_queue_t *q, struct request *rq)
+static void elv_activate_rq(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
e->ops->elevator_activate_req_fn(q, rq);
}
-static void elv_deactivate_rq(request_queue_t *q, struct request *rq)
+static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
hlist_del_init(&rq->hash);
}
-static void elv_rqhash_del(request_queue_t *q, struct request *rq)
+static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
if (ELV_ON_HASH(rq))
__elv_rqhash_del(rq);
}
-static void elv_rqhash_add(request_queue_t *q, struct request *rq)
+static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}
-static void elv_rqhash_reposition(request_queue_t *q, struct request *rq)
+static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
__elv_rqhash_del(rq);
elv_rqhash_add(q, rq);
}
-static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset)
+static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
elevator_t *e = q->elevator;
struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
* entry. rq is sorted into the dispatch queue. To be used by
* specific elevators.
*/
-void elv_dispatch_sort(request_queue_t *q, struct request *rq)
+void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
sector_t boundary;
struct list_head *entry;
EXPORT_SYMBOL(elv_dispatch_add_tail);
-int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
+int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
elevator_t *e = q->elevator;
struct request *__rq;
return ELEVATOR_NO_MERGE;
}
-void elv_merged_request(request_queue_t *q, struct request *rq, int type)
+void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
elevator_t *e = q->elevator;
q->last_merge = rq;
}
-void elv_merge_requests(request_queue_t *q, struct request *rq,
+void elv_merge_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
elevator_t *e = q->elevator;
q->last_merge = rq;
}
-void elv_requeue_request(request_queue_t *q, struct request *rq)
+void elv_requeue_request(struct request_queue *q, struct request *rq)
{
/*
* it already went through dequeue, we need to decrement the
elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}
-static void elv_drain_elevator(request_queue_t *q)
+static void elv_drain_elevator(struct request_queue *q)
{
static int printed;
while (q->elevator->ops->elevator_dispatch_fn(q, 1))
}
}
-void elv_insert(request_queue_t *q, struct request *rq, int where)
+void elv_insert(struct request_queue *q, struct request *rq, int where)
{
struct list_head *pos;
unsigned ordseq;
}
}
-void __elv_add_request(request_queue_t *q, struct request *rq, int where,
+void __elv_add_request(struct request_queue *q, struct request *rq, int where,
int plug)
{
if (q->ordcolor)
EXPORT_SYMBOL(__elv_add_request);
-void elv_add_request(request_queue_t *q, struct request *rq, int where,
+void elv_add_request(struct request_queue *q, struct request *rq, int where,
int plug)
{
unsigned long flags;
EXPORT_SYMBOL(elv_add_request);
-static inline struct request *__elv_next_request(request_queue_t *q)
+static inline struct request *__elv_next_request(struct request_queue *q)
{
struct request *rq;
}
}
-struct request *elv_next_request(request_queue_t *q)
+struct request *elv_next_request(struct request_queue *q)
{
struct request *rq;
int ret;
EXPORT_SYMBOL(elv_next_request);
-void elv_dequeue_request(request_queue_t *q, struct request *rq)
+void elv_dequeue_request(struct request_queue *q, struct request *rq)
{
BUG_ON(list_empty(&rq->queuelist));
BUG_ON(ELV_ON_HASH(rq));
EXPORT_SYMBOL(elv_dequeue_request);
-int elv_queue_empty(request_queue_t *q)
+int elv_queue_empty(struct request_queue *q)
{
elevator_t *e = q->elevator;
EXPORT_SYMBOL(elv_queue_empty);
-struct request *elv_latter_request(request_queue_t *q, struct request *rq)
+struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
return NULL;
}
-struct request *elv_former_request(request_queue_t *q, struct request *rq)
+struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
return NULL;
}
-int elv_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
+int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
elevator_t *e = q->elevator;
return 0;
}
-void elv_put_request(request_queue_t *q, struct request *rq)
+void elv_put_request(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
e->ops->elevator_put_req_fn(rq);
}
-int elv_may_queue(request_queue_t *q, int rw)
+int elv_may_queue(struct request_queue *q, int rw)
{
elevator_t *e = q->elevator;
return ELV_MQUEUE_MAY;
}
-void elv_completed_request(request_queue_t *q, struct request *rq)
+void elv_completed_request(struct request_queue *q, struct request *rq)
{
elevator_t *e = q->elevator;
* need for the new one. this way we have a chance of going back to the old
* one, if the new one fails init for some reason.
*/
-static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
+static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
elevator_t *old_elevator, *e;
void *data;
return 0;
}
-ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
+ssize_t elv_iosched_store(struct request_queue *q, const char *name,
+ size_t count)
{
char elevator_name[ELV_NAME_MAX];
size_t len;
return count;
}
-ssize_t elv_iosched_show(request_queue_t *q, char *name)
+ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
elevator_t *e = q->elevator;
struct elevator_type *elv = e->elevator_type;
return len;
}
-struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
+struct request *elv_rb_former_request(struct request_queue *q,
+ struct request *rq)
{
struct rb_node *rbprev = rb_prev(&rq->rb_node);
EXPORT_SYMBOL(elv_rb_former_request);
-struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq)
+struct request *elv_rb_latter_request(struct request_queue *q,
+ struct request *rq)
{
struct rb_node *rbnext = rb_next(&rq->rb_node);
static void blk_unplug_timeout(unsigned long data);
static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
static void init_request_from_bio(struct request *req, struct bio *bio);
-static int __make_request(request_queue_t *q, struct bio *bio);
+static int __make_request(struct request_queue *q, struct bio *bio);
static struct io_context *current_io_context(gfp_t gfp_flags, int node);
/*
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
struct backing_dev_info *ret = NULL;
- request_queue_t *q = bdev_get_queue(bdev);
+ struct request_queue *q = bdev_get_queue(bdev);
if (q)
ret = &q->backing_dev_info;
* cdb from the request data for instance.
*
*/
-void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn)
+void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
q->prep_rq_fn = pfn;
}
* no merge_bvec_fn is defined for a queue, and only the fixed limits are
* honored.
*/
-void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn)
+void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);
-void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn)
+void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
q->softirq_done_fn = fn;
}
* __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
* blk_queue_bounce() to create a buffer in normal memory.
**/
-void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
+void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
{
/*
* set defaults
EXPORT_SYMBOL(blk_queue_make_request);
-static void rq_init(request_queue_t *q, struct request *rq)
+static void rq_init(struct request_queue *q, struct request *rq)
{
INIT_LIST_HEAD(&rq->queuelist);
INIT_LIST_HEAD(&rq->donelist);
* feature should call this function and indicate so.
*
**/
-int blk_queue_ordered(request_queue_t *q, unsigned ordered,
+int blk_queue_ordered(struct request_queue *q, unsigned ordered,
prepare_flush_fn *prepare_flush_fn)
{
if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
* to the block layer by defining it through this call.
*
**/
-void blk_queue_issue_flush_fn(request_queue_t *q, issue_flush_fn *iff)
+void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff)
{
q->issue_flush_fn = iff;
}
/*
* Cache flushing for ordered writes handling
*/
-inline unsigned blk_ordered_cur_seq(request_queue_t *q)
+inline unsigned blk_ordered_cur_seq(struct request_queue *q)
{
if (!q->ordseq)
return 0;
unsigned blk_ordered_req_seq(struct request *rq)
{
- request_queue_t *q = rq->q;
+ struct request_queue *q = rq->q;
BUG_ON(q->ordseq == 0);
return QUEUE_ORDSEQ_DONE;
}
-void blk_ordered_complete_seq(request_queue_t *q, unsigned seq, int error)
+void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
struct request *rq;
int uptodate;
blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
}
-static void queue_flush(request_queue_t *q, unsigned which)
+static void queue_flush(struct request_queue *q, unsigned which)
{
struct request *rq;
rq_end_io_fn *end_io;
elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}
-static inline struct request *start_ordered(request_queue_t *q,
+static inline struct request *start_ordered(struct request_queue *q,
struct request *rq)
{
q->bi_size = 0;
return rq;
}
-int blk_do_ordered(request_queue_t *q, struct request **rqp)
+int blk_do_ordered(struct request_queue *q, struct request **rqp)
{
struct request *rq = *rqp;
int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
{
- request_queue_t *q = bio->bi_private;
+ struct request_queue *q = bio->bi_private;
/*
* This is dry run, restore bio_sector and size. We'll finish
static int ordered_bio_endio(struct request *rq, struct bio *bio,
unsigned int nbytes, int error)
{
- request_queue_t *q = rq->q;
+ struct request_queue *q = rq->q;
bio_end_io_t *endio;
void *private;
* blk_queue_bounce_limit to have lower memory pages allocated as bounce
* buffers for doing I/O to pages residing above @page.
**/
-void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
+void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
{
unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
int dma = 0;
* Enables a low level driver to set an upper limit on the size of
* received requests.
**/
-void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors)
+void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
{
if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
* physical data segments in a request. This would be the largest sized
* scatter list the driver could handle.
**/
-void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments)
+void blk_queue_max_phys_segments(struct request_queue *q,
+ unsigned short max_segments)
{
if (!max_segments) {
max_segments = 1;
* address/length pairs the host adapter can actually give at once
* to the device.
**/
-void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments)
+void blk_queue_max_hw_segments(struct request_queue *q,
+ unsigned short max_segments)
{
if (!max_segments) {
max_segments = 1;
* Enables a low level driver to set an upper limit on the size of a
* coalesced segment
**/
-void blk_queue_max_segment_size(request_queue_t *q, unsigned int max_size)
+void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
if (max_size < PAGE_CACHE_SIZE) {
max_size = PAGE_CACHE_SIZE;
* even internal read-modify-write operations). Usually the default
* of 512 covers most hardware.
**/
-void blk_queue_hardsect_size(request_queue_t *q, unsigned short size)
+void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
{
q->hardsect_size = size;
}
* @t: the stacking driver (top)
* @b: the underlying device (bottom)
**/
-void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
+void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
/* zero is "infinity" */
t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
* @q: the request queue for the device
* @mask: the memory boundary mask
**/
-void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask)
+void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
if (mask < PAGE_CACHE_SIZE - 1) {
mask = PAGE_CACHE_SIZE - 1;
* this is used when building direct io requests for the queue.
*
**/
-void blk_queue_dma_alignment(request_queue_t *q, int mask)
+void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
q->dma_alignment = mask;
}
*
* no locks need be held.
**/
-struct request *blk_queue_find_tag(request_queue_t *q, int tag)
+struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
return blk_map_queue_find_tag(q->queue_tags, tag);
}
* blk_cleanup_queue() will take care of calling this function, if tagging
* has been used. So there's no need to call this directly.
**/
-static void __blk_queue_free_tags(request_queue_t *q)
+static void __blk_queue_free_tags(struct request_queue *q)
{
struct blk_queue_tag *bqt = q->queue_tags;
* This is used to disable tagged queuing to a device, yet leave
* queue in function.
**/
-void blk_queue_free_tags(request_queue_t *q)
+void blk_queue_free_tags(struct request_queue *q)
{
clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_free_tags);
static int
-init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
+init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
struct request **tag_index;
unsigned long *tag_map;
* @depth: the maximum queue depth supported
* @tags: the tag to use
**/
-int blk_queue_init_tags(request_queue_t *q, int depth,
+int blk_queue_init_tags(struct request_queue *q, int depth,
struct blk_queue_tag *tags)
{
int rc;
* Notes:
* Must be called with the queue lock held.
**/
-int blk_queue_resize_tags(request_queue_t *q, int new_depth)
+int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
struct blk_queue_tag *bqt = q->queue_tags;
struct request **tag_index;
* Notes:
* queue lock must be held.
**/
-void blk_queue_end_tag(request_queue_t *q, struct request *rq)
+void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
struct blk_queue_tag *bqt = q->queue_tags;
int tag = rq->tag;
* Notes:
* queue lock must be held.
**/
-int blk_queue_start_tag(request_queue_t *q, struct request *rq)
+int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
struct blk_queue_tag *bqt = q->queue_tags;
int tag;
* Notes:
* queue lock must be held.
**/
-void blk_queue_invalidate_tags(request_queue_t *q)
+void blk_queue_invalidate_tags(struct request_queue *q)
{
struct blk_queue_tag *bqt = q->queue_tags;
struct list_head *tmp, *n;
EXPORT_SYMBOL(blk_dump_rq_flags);
-void blk_recount_segments(request_queue_t *q, struct bio *bio)
+void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
struct bio_vec *bv, *bvprv = NULL;
int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
}
EXPORT_SYMBOL(blk_recount_segments);
-static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
+static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
struct bio *nxt)
{
if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
return 0;
}
-static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
+static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
struct bio *nxt)
{
if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
* map a request to scatterlist, return number of sg entries setup. Caller
* must make sure sg can hold rq->nr_phys_segments entries
*/
-int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg)
+int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+ struct scatterlist *sg)
{
struct bio_vec *bvec, *bvprv;
struct bio *bio;
* specific ones if so desired
*/
-static inline int ll_new_mergeable(request_queue_t *q,
+static inline int ll_new_mergeable(struct request_queue *q,
struct request *req,
struct bio *bio)
{
return 1;
}
-static inline int ll_new_hw_segment(request_queue_t *q,
+static inline int ll_new_hw_segment(struct request_queue *q,
struct request *req,
struct bio *bio)
{
return 1;
}
-int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
+int ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *bio)
{
unsigned short max_sectors;
int len;
}
EXPORT_SYMBOL(ll_back_merge_fn);
-static int ll_front_merge_fn(request_queue_t *q, struct request *req,
+static int ll_front_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio)
{
unsigned short max_sectors;
return ll_new_hw_segment(q, req, bio);
}
-static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
+static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
struct request *next)
{
int total_phys_segments;
* This is called with interrupts off and no requests on the queue and
* with the queue lock held.
*/
-void blk_plug_device(request_queue_t *q)
+void blk_plug_device(struct request_queue *q)
{
WARN_ON(!irqs_disabled());
* remove the queue from the plugged list, if present. called with
* queue lock held and interrupts disabled.
*/
-int blk_remove_plug(request_queue_t *q)
+int blk_remove_plug(struct request_queue *q)
{
WARN_ON(!irqs_disabled());
/*
* remove the plug and let it rip..
*/
-void __generic_unplug_device(request_queue_t *q)
+void __generic_unplug_device(struct request_queue *q)
{
if (unlikely(blk_queue_stopped(q)))
return;
/**
* generic_unplug_device - fire a request queue
- * @q: The &request_queue_t in question
+ * @q: The &struct request_queue in question
*
* Description:
* Linux uses plugging to build bigger request queues before letting
* gets unplugged, the request_fn defined for the queue is invoked and
* transfers started.
**/
-void generic_unplug_device(request_queue_t *q)
+void generic_unplug_device(struct request_queue *q)
{
spin_lock_irq(q->queue_lock);
__generic_unplug_device(q);
static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
struct page *page)
{
- request_queue_t *q = bdi->unplug_io_data;
+ struct request_queue *q = bdi->unplug_io_data;
/*
* devices don't necessarily have an ->unplug_fn defined
static void blk_unplug_work(struct work_struct *work)
{
- request_queue_t *q = container_of(work, request_queue_t, unplug_work);
+ struct request_queue *q =
+ container_of(work, struct request_queue, unplug_work);
blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
q->rq.count[READ] + q->rq.count[WRITE]);
static void blk_unplug_timeout(unsigned long data)
{
- request_queue_t *q = (request_queue_t *)data;
+ struct request_queue *q = (struct request_queue *)data;
blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
q->rq.count[READ] + q->rq.count[WRITE]);
/**
* blk_start_queue - restart a previously stopped queue
- * @q: The &request_queue_t in question
+ * @q: The &struct request_queue in question
*
* Description:
* blk_start_queue() will clear the stop flag on the queue, and call
* the request_fn for the queue if it was in a stopped state when
* entered. Also see blk_stop_queue(). Queue lock must be held.
**/
-void blk_start_queue(request_queue_t *q)
+void blk_start_queue(struct request_queue *q)
{
WARN_ON(!irqs_disabled());
/**
* blk_stop_queue - stop a queue
- * @q: The &request_queue_t in question
+ * @q: The &struct request_queue in question
*
* Description:
* The Linux block layer assumes that a block driver will consume all
* the driver has signalled it's ready to go again. This happens by calling
* blk_start_queue() to restart queue operations. Queue lock must be held.
**/
-void blk_stop_queue(request_queue_t *q)
+void blk_stop_queue(struct request_queue *q)
{
blk_remove_plug(q);
set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
EXPORT_SYMBOL(blk_run_queue);
/**
- * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
+ * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed
* @kobj: the kobj belonging to the request queue to be released
*
* Description:
**/
static void blk_release_queue(struct kobject *kobj)
{
- request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+ struct request_queue *q =
+ container_of(kobj, struct request_queue, kobj);
struct request_list *rl = &q->rq;
blk_sync_queue(q);
kmem_cache_free(requestq_cachep, q);
}
-void blk_put_queue(request_queue_t *q)
+void blk_put_queue(struct request_queue *q)
{
kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);
-void blk_cleanup_queue(request_queue_t * q)
+void blk_cleanup_queue(struct request_queue * q)
{
mutex_lock(&q->sysfs_lock);
set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
EXPORT_SYMBOL(blk_cleanup_queue);
-static int blk_init_free_list(request_queue_t *q)
+static int blk_init_free_list(struct request_queue *q)
{
struct request_list *rl = &q->rq;
return 0;
}
-request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
+struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
return blk_alloc_queue_node(gfp_mask, -1);
}
static struct kobj_type queue_ktype;
-request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
- request_queue_t *q;
+ struct request_queue *q;
q = kmem_cache_alloc_node(requestq_cachep,
gfp_mask | __GFP_ZERO, node_id);
* when the block device is deactivated (such as at module unload).
**/
-request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
+struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
return blk_init_queue_node(rfn, lock, -1);
}
EXPORT_SYMBOL(blk_init_queue);
-request_queue_t *
+struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
- request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+ struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
if (!q)
return NULL;
}
EXPORT_SYMBOL(blk_init_queue_node);
-int blk_get_queue(request_queue_t *q)
+int blk_get_queue(struct request_queue *q)
{
if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
kobject_get(&q->kobj);
EXPORT_SYMBOL(blk_get_queue);
-static inline void blk_free_request(request_queue_t *q, struct request *rq)
+static inline void blk_free_request(struct request_queue *q, struct request *rq)
{
if (rq->cmd_flags & REQ_ELVPRIV)
elv_put_request(q, rq);
}
static struct request *
-blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask)
+blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
{
struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
* ioc_batching returns true if the ioc is a valid batching request and
* should be given priority access to a request.
*/
-static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
+static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
if (!ioc)
return 0;
* is the behaviour we want though - once it gets a wakeup it should be given
* a nice run.
*/
-static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
+static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
if (!ioc || ioc_batching(q, ioc))
return;
ioc->last_waited = jiffies;
}
-static void __freed_request(request_queue_t *q, int rw)
+static void __freed_request(struct request_queue *q, int rw)
{
struct request_list *rl = &q->rq;
* A request has just been released. Account for it, update the full and
* congestion status, wake up any waiters. Called under q->queue_lock.
*/
-static void freed_request(request_queue_t *q, int rw, int priv)
+static void freed_request(struct request_queue *q, int rw, int priv)
{
struct request_list *rl = &q->rq;
* Returns NULL on failure, with queue_lock held.
* Returns !NULL on success, with queue_lock *not held*.
*/
-static struct request *get_request(request_queue_t *q, int rw_flags,
+static struct request *get_request(struct request_queue *q, int rw_flags,
struct bio *bio, gfp_t gfp_mask)
{
struct request *rq = NULL;
*
* Called with q->queue_lock held, and returns with it unlocked.
*/
-static struct request *get_request_wait(request_queue_t *q, int rw_flags,
+static struct request *get_request_wait(struct request_queue *q, int rw_flags,
struct bio *bio)
{
const int rw = rw_flags & 0x01;
return rq;
}
-struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask)
+struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
struct request *rq;
*
* The queue lock must be held with interrupts disabled.
*/
-void blk_start_queueing(request_queue_t *q)
+void blk_start_queueing(struct request_queue *q)
{
if (!blk_queue_plugged(q))
q->request_fn(q);
* more, when that condition happens we need to put the request back
* on the queue. Must be called with queue lock held.
*/
-void blk_requeue_request(request_queue_t *q, struct request *rq)
+void blk_requeue_request(struct request_queue *q, struct request *rq)
{
blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
* of the queue for things like a QUEUE_FULL message from a device, or a
* host that is unable to accept a particular command.
*/
-void blk_insert_request(request_queue_t *q, struct request *rq,
+void blk_insert_request(struct request_queue *q, struct request *rq,
int at_head, void *data)
{
int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
return ret;
}
-static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
+static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
void __user *ubuf, unsigned int len)
{
unsigned long uaddr;
* original bio must be passed back in to blk_rq_unmap_user() for proper
* unmapping.
*/
-int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
- unsigned long len)
+int blk_rq_map_user(struct request_queue *q, struct request *rq,
+ void __user *ubuf, unsigned long len)
{
unsigned long bytes_read = 0;
struct bio *bio = NULL;
* original bio must be passed back in to blk_rq_unmap_user() for proper
* unmapping.
*/
-int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
+int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
struct sg_iovec *iov, int iov_count, unsigned int len)
{
struct bio *bio;
* @len: length of user data
* @gfp_mask: memory allocation flags
*/
-int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
+int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
unsigned int len, gfp_t gfp_mask)
{
struct bio *bio;
* Insert a fully prepared request at the back of the io scheduler queue
* for execution. Don't wait for completion.
*/
-void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
+void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
struct request *rq, int at_head,
rq_end_io_fn *done)
{
* Insert a fully prepared request at the back of the io scheduler queue
* for execution and wait for completion.
*/
-int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
+int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
struct request *rq, int at_head)
{
DECLARE_COMPLETION_ONSTACK(wait);
*/
int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
{
- request_queue_t *q;
+ struct request_queue *q;
if (bdev->bd_disk == NULL)
return -ENXIO;
* queue lock is held and interrupts disabled, as we muck with the
* request queue list.
*/
-static inline void add_request(request_queue_t * q, struct request * req)
+static inline void add_request(struct request_queue * q, struct request * req)
{
drive_stat_acct(req, req->nr_sectors, 1);
/*
* queue lock must be held
*/
-void __blk_put_request(request_queue_t *q, struct request *req)
+void __blk_put_request(struct request_queue *q, struct request *req)
{
if (unlikely(!q))
return;
void blk_put_request(struct request *req)
{
unsigned long flags;
- request_queue_t *q = req->q;
+ struct request_queue *q = req->q;
/*
* Gee, IDE calls in w/ NULL q. Fix IDE and remove the
/*
* Has to be called with the request spinlock acquired
*/
-static int attempt_merge(request_queue_t *q, struct request *req,
+static int attempt_merge(struct request_queue *q, struct request *req,
struct request *next)
{
if (!rq_mergeable(req) || !rq_mergeable(next))
return 1;
}
-static inline int attempt_back_merge(request_queue_t *q, struct request *rq)
+static inline int attempt_back_merge(struct request_queue *q,
+ struct request *rq)
{
struct request *next = elv_latter_request(q, rq);
return 0;
}
-static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
+static inline int attempt_front_merge(struct request_queue *q,
+ struct request *rq)
{
struct request *prev = elv_former_request(q, rq);
req->start_time = jiffies;
}
-static int __make_request(request_queue_t *q, struct bio *bio)
+static int __make_request(struct request_queue *q, struct bio *bio)
{
struct request *req;
int el_ret, nr_sectors, barrier, err;
*/
static inline void __generic_make_request(struct bio *bio)
{
- request_queue_t *q;
+ struct request_queue *q;
sector_t maxsector;
sector_t old_sector;
int ret, nr_sectors = bio_sectors(bio);
struct bio *bio, *prevbio = NULL;
int nr_phys_segs, nr_hw_segs;
unsigned int phys_size, hw_size;
- request_queue_t *q = rq->q;
+ struct request_queue *q = rq->q;
if (!rq->bio)
return;
EXPORT_SYMBOL(end_request);
-void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
+void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
+ struct bio *bio)
{
/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
rq->cmd_flags |= (bio->bi_rw & 3);
sizeof(struct request), 0, SLAB_PANIC, NULL);
requestq_cachep = kmem_cache_create("blkdev_queue",
- sizeof(request_queue_t), 0, SLAB_PANIC, NULL);
+ sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
iocontext_cachep = kmem_cache_create("blkdev_ioc",
sizeof(struct io_context), 0, SLAB_PANIC, NULL);
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
struct queue_sysfs_entry *entry = to_queue(attr);
- request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+ struct request_queue *q =
+ container_of(kobj, struct request_queue, kobj);
ssize_t res;
if (!entry->show)
const char *page, size_t length)
{
struct queue_sysfs_entry *entry = to_queue(attr);
- request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+ struct request_queue *q = container_of(kobj, struct request_queue, kobj);
ssize_t res;
{
int ret;
- request_queue_t *q = disk->queue;
+ struct request_queue *q = disk->queue;
if (!q || !q->request_fn)
return -ENXIO;
void blk_unregister_queue(struct gendisk *disk)
{
- request_queue_t *q = disk->queue;
+ struct request_queue *q = disk->queue;
if (q && q->request_fn) {
elv_unregister_queue(q);
struct list_head queue;
};
-static void noop_merged_requests(request_queue_t *q, struct request *rq,
+static void noop_merged_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
list_del_init(&next->queuelist);
}
-static int noop_dispatch(request_queue_t *q, int force)
+static int noop_dispatch(struct request_queue *q, int force)
{
struct noop_data *nd = q->elevator->elevator_data;
return 0;
}
-static void noop_add_request(request_queue_t *q, struct request *rq)
+static void noop_add_request(struct request_queue *q, struct request *rq)
{
struct noop_data *nd = q->elevator->elevator_data;
list_add_tail(&rq->queuelist, &nd->queue);
}
-static int noop_queue_empty(request_queue_t *q)
+static int noop_queue_empty(struct request_queue *q)
{
struct noop_data *nd = q->elevator->elevator_data;
}
static struct request *
-noop_former_request(request_queue_t *q, struct request *rq)
+noop_former_request(struct request_queue *q, struct request *rq)
{
struct noop_data *nd = q->elevator->elevator_data;
}
static struct request *
-noop_latter_request(request_queue_t *q, struct request *rq)
+noop_latter_request(struct request_queue *q, struct request *rq)
{
struct noop_data *nd = q->elevator->elevator_data;
return list_entry(rq->queuelist.next, struct request, queuelist);
}
-static void *noop_init_queue(request_queue_t *q)
+static void *noop_init_queue(struct request_queue *q)
{
struct noop_data *nd;
return put_user(sg_version_num, p);
}
-static int scsi_get_idlun(request_queue_t *q, int __user *p)
+static int scsi_get_idlun(struct request_queue *q, int __user *p)
{
return put_user(0, p);
}
-static int scsi_get_bus(request_queue_t *q, int __user *p)
+static int scsi_get_bus(struct request_queue *q, int __user *p)
{
return put_user(0, p);
}
-static int sg_get_timeout(request_queue_t *q)
+static int sg_get_timeout(struct request_queue *q)
{
return q->sg_timeout / (HZ / USER_HZ);
}
-static int sg_set_timeout(request_queue_t *q, int __user *p)
+static int sg_set_timeout(struct request_queue *q, int __user *p)
{
int timeout, err = get_user(timeout, p);
return err;
}
-static int sg_get_reserved_size(request_queue_t *q, int __user *p)
+static int sg_get_reserved_size(struct request_queue *q, int __user *p)
{
unsigned val = min(q->sg_reserved_size, q->max_sectors << 9);
return put_user(val, p);
}
-static int sg_set_reserved_size(request_queue_t *q, int __user *p)
+static int sg_set_reserved_size(struct request_queue *q, int __user *p)
{
int size, err = get_user(size, p);
* will always return that we are ATAPI even for a real SCSI drive, I'm not
* so sure this is worth doing anything about (why would you care??)
*/
-static int sg_emulated_host(request_queue_t *q, int __user *p)
+static int sg_emulated_host(struct request_queue *q, int __user *p)
{
return put_user(1, p);
}
}
EXPORT_SYMBOL_GPL(blk_verify_command);
-static int blk_fill_sghdr_rq(request_queue_t *q, struct request *rq,
+static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
struct sg_io_hdr *hdr, int has_write_perm)
{
memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
return r;
}
-static int sg_io(struct file *file, request_queue_t *q,
+static int sg_io(struct file *file, struct request_queue *q,
struct gendisk *bd_disk, struct sg_io_hdr *hdr)
{
unsigned long start_time;
EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
/* Send basic block requests */
-static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int cmd, int data)
+static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
+ int cmd, int data)
{
struct request *rq;
int err;
return err;
}
-static inline int blk_send_start_stop(request_queue_t *q, struct gendisk *bd_disk, int data)
+static inline int blk_send_start_stop(struct request_queue *q,
+ struct gendisk *bd_disk, int data)
{
return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data);
}
static void config_types(void);
static int floppy_open(struct inode *inode, struct file *filp);
static int floppy_release(struct inode *inode, struct file *filp);
-static void do_fd_request(request_queue_t *);
+static void do_fd_request(struct request_queue *);
/************************* End of Prototypes **************************/
}
}
-static void do_fd_request(request_queue_t* q)
+static void do_fd_request(struct request_queue* q)
{
unsigned long flags;
DBG("mfm_request: Dropping out bottom\n");
}
-static void do_mfm_request(request_queue_t *q)
+static void do_mfm_request(struct request_queue *q)
{
DBG("do_mfm_request: about to mfm_request\n");
mfm_request();
* Decrement max hw segments accordingly.
*/
if (dev->class == ATA_DEV_ATAPI) {
- request_queue_t *q = sdev->request_queue;
+ struct request_queue *q = sdev->request_queue;
blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
}
goto repeat;
}
-static void do_fd_request(request_queue_t * q)
+static void do_fd_request(struct request_queue * q)
{
redo_fd_request();
}
u16 maxbcnt;
struct work_struct work;/* disk create work struct */
struct gendisk *gd;
- request_queue_t blkq;
+ struct request_queue blkq;
struct hd_geometry geo;
sector_t ssize;
struct timer_list timer;
}
static int
-aoeblk_make_request(request_queue_t *q, struct bio *bio)
+aoeblk_make_request(struct request_queue *q, struct bio *bio)
{
struct aoedev *d;
struct buf *buf;
}
-void do_fd_request(request_queue_t * q)
+void do_fd_request(struct request_queue * q)
{
unsigned long flags;
static ctlr_info_t *hba[MAX_CTLR];
-static void do_cciss_request(request_queue_t *q);
+static void do_cciss_request(struct request_queue *q);
static irqreturn_t do_cciss_intr(int irq, void *dev_id);
static int cciss_open(struct inode *inode, struct file *filep);
static int cciss_release(struct inode *inode, struct file *filep);
*/
if (h->gendisk[0] != disk) {
if (disk) {
- request_queue_t *q = disk->queue;
+ struct request_queue *q = disk->queue;
if (disk->flags & GENHD_FL_UP)
del_gendisk(disk);
if (q) {
/*
* Get a request and submit it to the controller.
*/
-static void do_cciss_request(request_queue_t *q)
+static void do_cciss_request(struct request_queue *q)
{
ctlr_info_t *h = q->queuedata;
CommandList_struct *c;
do {
drive_info_struct *drv = &(hba[i]->drv[j]);
struct gendisk *disk = hba[i]->gendisk[j];
- request_queue_t *q;
+ struct request_queue *q;
/* Check if the disk was allocated already */
if (!disk){
for (j = 0; j < CISS_MAX_LUN; j++) {
struct gendisk *disk = hba[i]->gendisk[j];
if (disk) {
- request_queue_t *q = disk->queue;
+ struct request_queue *q = disk->queue;
if (disk->flags & GENHD_FL_UP)
del_gendisk(disk);
static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
-static void do_ida_request(request_queue_t *q);
+static void do_ida_request(struct request_queue *q);
static void start_io(ctlr_info_t *h);
static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
/* pdev is NULL for eisa */
static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
{
- request_queue_t *q;
+ struct request_queue *q;
int j;
/*
* are in here (either via the dummy do_ida_request functions or by being
* called from the interrupt handler
*/
-static void do_ida_request(request_queue_t *q)
+static void do_ida_request(struct request_queue *q)
{
ctlr_info_t *h = q->queuedata;
cmdlist_t *c;
static struct request *current_req;
static struct request_queue *floppy_queue;
-static void do_fd_request(request_queue_t * q);
+static void do_fd_request(struct request_queue * q);
#ifndef fd_get_dma_residue
#define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA)
schedule_bh(redo_fd_request);
}
-static void do_fd_request(request_queue_t * q)
+static void do_fd_request(struct request_queue * q)
{
if (max_buffer_sectors == 0) {
printk("VFS: do_fd_request called on non-open device\n");
lguest_send_dma(bd->phys_addr, &ping);
}
-static void do_lgb_request(request_queue_t *q)
+static void do_lgb_request(struct request_queue *q)
{
struct blockdev *bd;
struct request *req;
return bio;
}
-static int loop_make_request(request_queue_t *q, struct bio *old_bio)
+static int loop_make_request(struct request_queue *q, struct bio *old_bio)
{
struct loop_device *lo = q->queuedata;
int rw = bio_rw(old_bio);
/*
* kick off io on the underlying address space
*/
-static void loop_unplug(request_queue_t *q)
+static void loop_unplug(struct request_queue *q)
{
struct loop_device *lo = q->queuedata;
static void nbd_end_request(struct request *req)
{
int uptodate = (req->errors == 0) ? 1 : 0;
- request_queue_t *q = req->q;
+ struct request_queue *q = req->q;
unsigned long flags;
dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
* { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
*/
-static void do_nbd_request(request_queue_t * q)
+static void do_nbd_request(struct request_queue * q)
{
struct request *req;
static int pcd_detect(void);
static void pcd_probe_capabilities(void);
static void do_pcd_read_drq(void);
-static void do_pcd_request(request_queue_t * q);
+static void do_pcd_request(struct request_queue * q);
static void do_pcd_read(void);
struct pcd_unit {
/* I/O request processing */
static struct request_queue *pcd_queue;
-static void do_pcd_request(request_queue_t * q)
+static void do_pcd_request(struct request_queue * q)
{
if (pcd_busy)
return;
/* end of io request engine */
-static void do_pd_request(request_queue_t * q)
+static void do_pd_request(struct request_queue * q)
{
if (pd_req)
return;
#define ATAPI_WRITE_10 0x2a
static int pf_open(struct inode *inode, struct file *file);
-static void do_pf_request(request_queue_t * q);
+static void do_pf_request(struct request_queue * q);
static int pf_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg);
static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
}
}
-static void do_pf_request(request_queue_t * q)
+static void do_pf_request(struct request_queue * q)
{
if (pf_busy)
return;
*/
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
- request_queue_t *q = bdev_get_queue(pd->bdev);
+ struct request_queue *q = bdev_get_queue(pd->bdev);
struct request *rq;
int ret = 0;
* Special care is needed if the underlying block device has a small
* max_phys_segments value.
*/
-static int pkt_set_segment_merging(struct pktcdvd_device *pd, request_queue_t *q)
+static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
/*
{
int ret;
long lba;
- request_queue_t *q;
+ struct request_queue *q;
/*
* We need to re-open the cdrom device without O_NONBLOCK to be able
return 0;
}
-static int pkt_make_request(request_queue_t *q, struct bio *bio)
+static int pkt_make_request(struct request_queue *q, struct bio *bio)
{
struct pktcdvd_device *pd;
char b[BDEVNAME_SIZE];
-static int pkt_merge_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *bvec)
+static int pkt_merge_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *bvec)
{
struct pktcdvd_device *pd = q->queuedata;
sector_t zone = ZONE(bio->bi_sector, pd);
static void pkt_init_queue(struct pktcdvd_device *pd)
{
- request_queue_t *q = pd->disk->queue;
+ struct request_queue *q = pd->disk->queue;
blk_queue_make_request(q, pkt_make_request);
blk_queue_hardsect_size(q, CD_FRAMESIZE);
static int ps2esdi_geninit(void);
-static void do_ps2esdi_request(request_queue_t * q);
+static void do_ps2esdi_request(struct request_queue * q);
static void ps2esdi_readwrite(int cmd, struct request *req);
}
/* strategy routine that handles most of the IO requests */
-static void do_ps2esdi_request(request_queue_t * q)
+static void do_ps2esdi_request(struct request_queue * q)
{
struct request *req;
/* since, this routine is called with interrupts cleared - they
}
static void ps3disk_do_request(struct ps3_storage_device *dev,
- request_queue_t *q)
+ struct request_queue *q)
{
struct request *req;
}
}
-static void ps3disk_request(request_queue_t *q)
+static void ps3disk_request(struct request_queue *q)
{
struct ps3_storage_device *dev = q->queuedata;
struct ps3disk_private *priv = dev->sbd.core.driver_data;
return 0;
}
-static void ps3disk_prepare_flush(request_queue_t *q, struct request *req)
+static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)
{
struct ps3_storage_device *dev = q->queuedata;
req->cmd_type = REQ_TYPE_FLUSH;
}
-static int ps3disk_issue_flush(request_queue_t *q, struct gendisk *gendisk,
+static int ps3disk_issue_flush(struct request_queue *q, struct gendisk *gendisk,
sector_t *sector)
{
struct ps3_storage_device *dev = q->queuedata;
* 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Added devfs support
*
*/
-static int rd_make_request(request_queue_t *q, struct bio *bio)
+static int rd_make_request(struct request_queue *q, struct bio *bio)
{
struct block_device *bdev = bio->bi_bdev;
struct address_space * mapping = bdev->bd_inode->i_mapping;
return err;
}
-static void do_vdc_request(request_queue_t *q)
+static void do_vdc_request(struct request_queue *q)
{
while (1) {
struct request *req = elv_next_request(q);
static void swim3_select(struct floppy_state *fs, int sel);
static void swim3_action(struct floppy_state *fs, int action);
static int swim3_readbit(struct floppy_state *fs, int bit);
-static void do_fd_request(request_queue_t * q);
+static void do_fd_request(struct request_queue * q);
static void start_request(struct floppy_state *fs);
static void set_timeout(struct floppy_state *fs, int nticks,
void (*proc)(unsigned long));
return (stat & DATA) == 0;
}
-static void do_fd_request(request_queue_t * q)
+static void do_fd_request(struct request_queue * q)
{
int i;
for(i=0;i<floppy_count;i++)
unsigned int state;
u32 fw_ver;
- request_queue_t *oob_q;
+ struct request_queue *oob_q;
unsigned int n_oob;
unsigned int hw_sg_used;
unsigned int wait_q_prod;
unsigned int wait_q_cons;
- request_queue_t *wait_q[CARM_MAX_WAIT_Q];
+ struct request_queue *wait_q[CARM_MAX_WAIT_Q];
unsigned int n_msgs;
u64 msg_alloc;
assert(rc == 0);
}
-static inline void carm_push_q (struct carm_host *host, request_queue_t *q)
+static inline void carm_push_q (struct carm_host *host, struct request_queue *q)
{
unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q;
BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */
}
-static inline request_queue_t *carm_pop_q(struct carm_host *host)
+static inline struct request_queue *carm_pop_q(struct carm_host *host)
{
unsigned int idx;
static inline void carm_round_robin(struct carm_host *host)
{
- request_queue_t *q = carm_pop_q(host);
+ struct request_queue *q = carm_pop_q(host);
if (q) {
blk_start_queue(q);
VPRINTK("STARTED QUEUE %p\n", q);
}
}
-static void carm_oob_rq_fn(request_queue_t *q)
+static void carm_oob_rq_fn(struct request_queue *q)
{
struct carm_host *host = q->queuedata;
struct carm_request *crq;
}
}
-static void carm_rq_fn(request_queue_t *q)
+static void carm_rq_fn(struct request_queue *q)
{
struct carm_port *port = q->queuedata;
struct carm_host *host = port->host;
for (i = 0; i < CARM_MAX_PORTS; i++) {
struct gendisk *disk;
- request_queue_t *q;
+ struct request_queue *q;
struct carm_port *port;
port = &host->port[i];
for (i = 0; i < CARM_MAX_PORTS; i++) {
struct gendisk *disk = host->port[i].disk;
if (disk) {
- request_queue_t *q = disk->queue;
+ struct request_queue *q = disk->queue;
if (disk->flags & GENHD_FL_UP)
del_gendisk(disk);
struct carm_host *host;
unsigned int pci_dac;
int rc;
- request_queue_t *q;
+ struct request_queue *q;
unsigned int i;
if (!printed_version++)
{
struct list_head *p;
struct ub_lun *lun;
- request_queue_t *q;
+ struct request_queue *q;
while (!list_empty(&sc->luns)) {
p = sc->luns.next;
* The request function is our main entry point
*/
-static void ub_request_fn(request_queue_t *q)
+static void ub_request_fn(struct request_queue *q)
{
struct ub_lun *lun = q->queuedata;
struct request *rq;
static int ub_probe_lun(struct ub_dev *sc, int lnum)
{
struct ub_lun *lun;
- request_queue_t *q;
+ struct request_queue *q;
struct gendisk *disk;
int rc;
*/
struct bio *bio, *currentbio, **biotail;
- request_queue_t *queue;
+ struct request_queue *queue;
struct mm_page {
dma_addr_t page_dma;
page->biotail = & page->bio;
}
-static void mm_unplug_device(request_queue_t *q)
+static void mm_unplug_device(struct request_queue *q)
{
struct cardinfo *card = q->queuedata;
unsigned long flags;
-- mm_make_request
-----------------------------------------------------------------------------------
*/
-static int mm_make_request(request_queue_t *q, struct bio *bio)
+static int mm_make_request(struct request_queue *q, struct bio *bio)
{
struct cardinfo *card = q->queuedata;
pr_debug("mm_make_request %llu %u\n",
/*
* This is the external request processing routine
*/
-static void do_viodasd_request(request_queue_t *q)
+static void do_viodasd_request(struct request_queue *q)
{
struct request *req;
}
/* do_xd_request: handle an incoming request */
-static void do_xd_request (request_queue_t * q)
+static void do_xd_request (struct request_queue * q)
{
struct request *req;
static u_char xd_detect (u_char *controller, unsigned int *address);
static u_char xd_initdrives (void (*init_drive)(u_char drive));
-static void do_xd_request (request_queue_t * q);
+static void do_xd_request (struct request_queue * q);
static int xd_ioctl (struct inode *inode,struct file *file,unsigned int cmd,unsigned long arg);
static int xd_readwrite (u_char operation,XD_INFO *disk,char *buffer,u_int block,u_int count);
static void xd_recalibrate (u_char drive);
* do_blkif_request
* read a block; request is in a request queue
*/
-static void do_blkif_request(request_queue_t *rq)
+static void do_blkif_request(struct request_queue *rq)
{
struct blkfront_info *info = NULL;
struct request *req;
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
{
- request_queue_t *rq;
+ struct request_queue *rq;
rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
if (rq == NULL)
}
/* Get the next read/write request; ending requests that we don't handle */
-struct request *ace_get_next_request(request_queue_t * q)
+struct request *ace_get_next_request(struct request_queue * q)
{
struct request *req;
/* ---------------------------------------------------------------------
* Block ops
*/
-static void ace_request(request_queue_t * q)
+static void ace_request(struct request_queue * q)
{
struct request *req;
struct ace_device *ace;
static struct block_device_operations z2_fops;
static struct gendisk *z2ram_gendisk;
-static void do_z2_request(request_queue_t *q)
+static void do_z2_request(struct request_queue *q)
{
struct request *req;
while ((req = elv_next_request(q)) != NULL) {
static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
int lba, int nframes)
{
- request_queue_t *q = cdi->disk->queue;
+ struct request_queue *q = cdi->disk->queue;
struct request *rq;
struct bio *bio;
unsigned int len;
static int rwreq;
-static void do_viocd_request(request_queue_t *q)
+static void do_viocd_request(struct request_queue *q)
{
struct request *req;
/*
* standard prep_rq_fn that builds 10 byte cmds
*/
-static int ide_cdrom_prep_fs(request_queue_t *q, struct request *rq)
+static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
{
int hard_sect = queue_hardsect_size(q);
long block = (long)rq->hard_sector / (hard_sect >> 9);
return BLKPREP_OK;
}
-static int ide_cdrom_prep_fn(request_queue_t *q, struct request *rq)
+static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq)
{
if (blk_fs_request(rq))
return ide_cdrom_prep_fs(q, rq);
};
#endif /* CONFIG_IDE_PROC_FS */
-static void idedisk_prepare_flush(request_queue_t *q, struct request *rq)
+static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
{
ide_drive_t *drive = q->queuedata;
rq->buffer = rq->cmd;
}
-static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int idedisk_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
ide_drive_t *drive = q->queuedata;
/*
* Passes the stuff to ide_do_request
*/
-void do_ide_request(request_queue_t *q)
+void do_ide_request(struct request_queue *q)
{
ide_drive_t *drive = q->queuedata;
*/
static int ide_init_queue(ide_drive_t *drive)
{
- request_queue_t *q;
+ struct request_queue *q;
ide_hwif_t *hwif = HWIF(drive);
int max_sectors = 256;
int max_sg_entries = PRD_ENTRIES;
}
}
-static void do_hd_request (request_queue_t * q)
+static void do_hd_request (struct request_queue * q)
{
disable_irq(HD_IRQ);
hd_request();
void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
{
- request_queue_t *q = bdev_get_queue(bdev);
+ struct request_queue *q = bdev_get_queue(bdev);
struct io_restrictions *rs = &ti->limits;
/*
devices = dm_table_get_devices(t);
for (d = devices->next; d != devices; d = d->next) {
struct dm_dev *dd = list_entry(d, struct dm_dev, list);
- request_queue_t *q = bdev_get_queue(dd->bdev);
+ struct request_queue *q = bdev_get_queue(dd->bdev);
r |= bdi_congested(&q->backing_dev_info, bdi_bits);
}
for (d = devices->next; d != devices; d = d->next) {
struct dm_dev *dd = list_entry(d, struct dm_dev, list);
- request_queue_t *q = bdev_get_queue(dd->bdev);
+ struct request_queue *q = bdev_get_queue(dd->bdev);
if (q->unplug_fn)
q->unplug_fn(q);
for (d = devices->next; d != devices; d = d->next) {
struct dm_dev *dd = list_entry(d, struct dm_dev, list);
- request_queue_t *q = bdev_get_queue(dd->bdev);
+ struct request_queue *q = bdev_get_queue(dd->bdev);
int err;
if (!q->issue_flush_fn)
unsigned long flags;
- request_queue_t *queue;
+ struct request_queue *queue;
struct gendisk *disk;
char name[16];
* The request function that just remaps the bio built up by
* dm_merge_bvec.
*/
-static int dm_request(request_queue_t *q, struct bio *bio)
+static int dm_request(struct request_queue *q, struct bio *bio)
{
int r;
int rw = bio_data_dir(bio);
return 0;
}
-static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
+static int dm_flush_all(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
struct mapped_device *md = q->queuedata;
return ret;
}
-static void dm_unplug_all(request_queue_t *q)
+static void dm_unplug_all(struct request_queue *q)
{
struct mapped_device *md = q->queuedata;
struct dm_table *map = dm_get_table(md);
static int __bind(struct mapped_device *md, struct dm_table *t)
{
- request_queue_t *q = md->queue;
+ struct request_queue *q = md->queue;
sector_t size;
size = dm_table_get_size(t);
conf->nfaults = n+1;
}
-static int make_request(request_queue_t *q, struct bio *bio)
+static int make_request(struct request_queue *q, struct bio *bio)
{
mddev_t *mddev = q->queuedata;
conf_t *conf = (conf_t*)mddev->private;
*
* Return amount of bytes we can take at this offset
*/
-static int linear_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int linear_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
{
mddev_t *mddev = q->queuedata;
dev_info_t *dev0;
return maxsectors << 9;
}
-static void linear_unplug(request_queue_t *q)
+static void linear_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
linear_conf_t *conf = mddev_to_conf(mddev);
int i;
for (i=0; i < mddev->raid_disks; i++) {
- request_queue_t *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
+ struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
if (r_queue->unplug_fn)
r_queue->unplug_fn(r_queue);
}
}
-static int linear_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int linear_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
for (i=0; i < mddev->raid_disks && ret == 0; i++) {
struct block_device *bdev = conf->disks[i].rdev->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
int i, ret = 0;
for (i = 0; i < mddev->raid_disks && !ret ; i++) {
- request_queue_t *q = bdev_get_queue(conf->disks[i].rdev->bdev);
+ struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
}
return ret;
return 0;
}
-static int linear_make_request (request_queue_t *q, struct bio *bio)
+static int linear_make_request (struct request_queue *q, struct bio *bio)
{
const int rw = bio_data_dir(bio);
mddev_t *mddev = q->queuedata;
)
-static int md_fail_request (request_queue_t *q, struct bio *bio)
+static int md_fail_request (struct request_queue *q, struct bio *bio)
{
bio_io_error(bio, bio->bi_size);
return 0;
mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)
&& atomic_read(&rdev->nr_pending)) {
- request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+ struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
rcu_read_unlock();
}
-static void multipath_unplug(request_queue_t *q)
+static void multipath_unplug(struct request_queue *q)
{
unplug_slaves(q->queuedata);
}
-static int multipath_make_request (request_queue_t *q, struct bio * bio)
+static int multipath_make_request (struct request_queue *q, struct bio * bio)
{
mddev_t *mddev = q->queuedata;
multipath_conf_t *conf = mddev_to_conf(mddev);
seq_printf (seq, "]");
}
-static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int multipath_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
for (i = 0; i < mddev->raid_disks ; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
- request_queue_t *q = bdev_get_queue(rdev->bdev);
+ struct request_queue *q = bdev_get_queue(rdev->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
/* Just like multipath_map, we just check the
#define MD_DRIVER
#define MD_PERSONALITY
-static void raid0_unplug(request_queue_t *q)
+static void raid0_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
raid0_conf_t *conf = mddev_to_conf(mddev);
int i;
for (i=0; i<mddev->raid_disks; i++) {
- request_queue_t *r_queue = bdev_get_queue(devlist[i]->bdev);
+ struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
if (r_queue->unplug_fn)
r_queue->unplug_fn(r_queue);
}
}
-static int raid0_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid0_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
for (i=0; i<mddev->raid_disks && ret == 0; i++) {
struct block_device *bdev = devlist[i]->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
int i, ret = 0;
for (i = 0; i < mddev->raid_disks && !ret ; i++) {
- request_queue_t *q = bdev_get_queue(devlist[i]->bdev);
+ struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
}
*
* Return amount of bytes we can accept at this offset
*/
-static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int raid0_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
{
mddev_t *mddev = q->queuedata;
sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
return 0;
}
-static int raid0_make_request (request_queue_t *q, struct bio *bio)
+static int raid0_make_request (struct request_queue *q, struct bio *bio)
{
mddev_t *mddev = q->queuedata;
unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects;
for (i=0; i<mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
- request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+ struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
rcu_read_unlock();
}
-static void raid1_unplug(request_queue_t *q)
+static void raid1_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
md_wakeup_thread(mddev->thread);
}
-static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid1_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
for (i = 0; i < mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
- request_queue_t *q = bdev_get_queue(rdev->bdev);
+ struct request_queue *q = bdev_get_queue(rdev->bdev);
/* Note the '|| 1' - when read_balance prefers
* non-congested targets, it can be removed
return NULL;
}
-static int make_request(request_queue_t *q, struct bio * bio)
+static int make_request(struct request_queue *q, struct bio * bio)
{
mddev_t *mddev = q->queuedata;
conf_t *conf = mddev_to_conf(mddev);
* If near_copies == raid_disk, there are no striping issues,
* but in that case, the function isn't called at all.
*/
-static int raid10_mergeable_bvec(request_queue_t *q, struct bio *bio,
+static int raid10_mergeable_bvec(struct request_queue *q, struct bio *bio,
struct bio_vec *bio_vec)
{
mddev_t *mddev = q->queuedata;
for (i=0; i<mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
- request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+ struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
rcu_read_unlock();
}
-static void raid10_unplug(request_queue_t *q)
+static void raid10_unplug(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
md_wakeup_thread(mddev->thread);
}
-static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid10_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
for (i = 0; i < mddev->raid_disks && ret == 0; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
- request_queue_t *q = bdev_get_queue(rdev->bdev);
+ struct request_queue *q = bdev_get_queue(rdev->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits);
}
spin_unlock_irq(&conf->resync_lock);
}
-static int make_request(request_queue_t *q, struct bio * bio)
+static int make_request(struct request_queue *q, struct bio * bio)
{
mddev_t *mddev = q->queuedata;
conf_t *conf = mddev_to_conf(mddev);
}
static void unplug_slaves(mddev_t *mddev);
-static void raid5_unplug_device(request_queue_t *q);
+static void raid5_unplug_device(struct request_queue *q);
static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks,
int pd_idx, int noblock)
for (i=0; i<mddev->raid_disks; i++) {
mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
- request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+ struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
rcu_read_unlock();
}
-static void raid5_unplug_device(request_queue_t *q)
+static void raid5_unplug_device(struct request_queue *q)
{
mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev_to_conf(mddev);
unplug_slaves(mddev);
}
-static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid5_issue_flush(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
mddev_t *mddev = q->queuedata;
mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct block_device *bdev = rdev->bdev;
- request_queue_t *r_queue = bdev_get_queue(bdev);
+ struct request_queue *r_queue = bdev_get_queue(bdev);
if (!r_queue->issue_flush_fn)
ret = -EOPNOTSUPP;
/* We want read requests to align with chunks where possible,
* but write requests don't need to.
*/
-static int raid5_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
+static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec)
{
mddev_t *mddev = q->queuedata;
sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
static int bio_fits_rdev(struct bio *bi)
{
- request_queue_t *q = bdev_get_queue(bi->bi_bdev);
+ struct request_queue *q = bdev_get_queue(bi->bi_bdev);
if ((bi->bi_size>>9) > q->max_sectors)
return 0;
}
-static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
+static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
{
mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev_to_conf(mddev);
}
-static int make_request(request_queue_t *q, struct bio * bi)
+static int make_request(struct request_queue *q, struct bio * bi)
{
mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev_to_conf(mddev);
* Returns 0 on success or negative error code on failure.
*/
-static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk,
+static int i2o_block_issue_flush(struct request_queue * queue, struct gendisk *disk,
sector_t * error_sector)
{
struct i2o_block_device *i2o_blk_dev = queue->queuedata;
{
struct i2o_block_request *ireq = req->special;
struct i2o_block_device *dev = ireq->i2o_blk_dev;
- request_queue_t *q = req->q;
+ struct request_queue *q = req->q;
unsigned long flags;
if (end_that_request_chunk(req, uptodate, nr_bytes)) {
* on any queue on this host, and attempt to issue it. This may
* not be the queue we were asked to process.
*/
-static void mmc_request(request_queue_t *q)
+static void mmc_request(struct request_queue *q)
{
struct mmc_queue *mq = q->queuedata;
struct request *req;
void mmc_cleanup_queue(struct mmc_queue *mq)
{
- request_queue_t *q = mq->queue;
+ struct request_queue *q = mq->queue;
unsigned long flags;
/* Mark that we should start throwing out stragglers */
*/
void mmc_queue_suspend(struct mmc_queue *mq)
{
- request_queue_t *q = mq->queue;
+ struct request_queue *q = mq->queue;
unsigned long flags;
if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
*/
void mmc_queue_resume(struct mmc_queue *mq)
{
- request_queue_t *q = mq->queue;
+ struct request_queue *q = mq->queue;
unsigned long flags;
if (mq->flags & MMC_QUEUE_SUSPENDED) {
static void
__dasd_process_blk_queue(struct dasd_device * device)
{
- request_queue_t *queue;
+ struct request_queue *queue;
struct request *req;
struct dasd_ccw_req *cqr;
int nr_queued;
* Dasd request queue function. Called from ll_rw_blk.c
*/
static void
-do_dasd_request(request_queue_t * queue)
+do_dasd_request(struct request_queue * queue)
{
struct dasd_device *device;
struct dasd_device {
/* Block device stuff. */
struct gendisk *gdp;
- request_queue_t *request_queue;
+ struct request_queue *request_queue;
spinlock_t request_queue_lock;
struct block_device *bdev;
unsigned int devindex;
}
static int
-dcssblk_make_request(request_queue_t *q, struct bio *bio)
+dcssblk_make_request(struct request_queue *q, struct bio *bio)
{
struct dcssblk_dev_info *dev_info;
struct bio_vec *bvec;
/*
* Block device make request function.
*/
-static int xpram_make_request(request_queue_t *q, struct bio *bio)
+static int xpram_make_request(struct request_queue *q, struct bio *bio)
{
xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
struct bio_vec *bvec;
{
struct tape_device * device;
/* Block device request queue. */
- request_queue_t * request_queue;
+ struct request_queue * request_queue;
spinlock_t request_queue_lock;
/* Task to move entries from block request to CCS request queue. */
tapeblock_requeue(struct work_struct *work) {
struct tape_blk_data * blkdat;
struct tape_device * device;
- request_queue_t * queue;
+ struct request_queue * queue;
int nr_queued;
struct request * req;
struct list_head * l;
* Tape request queue function. Called from ll_rw_blk.c
*/
static void
-tapeblock_request_fn(request_queue_t *queue)
+tapeblock_request_fn(struct request_queue *queue)
{
struct tape_device *device;
}
}
-static void jsfd_do_request(request_queue_t *q)
+static void jsfd_do_request(struct request_queue *q)
{
struct request *req;
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
int bytes, int requeue)
{
- request_queue_t *q = cmd->device->request_queue;
+ struct request_queue *q = cmd->device->request_queue;
struct request *req = cmd->request;
unsigned long flags;
{
int result = cmd->result;
int this_count = cmd->request_bufflen;
- request_queue_t *q = cmd->device->request_queue;
+ struct request_queue *q = cmd->device->request_queue;
struct request *req = cmd->request;
int clear_errors = 1;
struct scsi_sense_hdr sshdr;
return BLKPREP_KILL;
}
-static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
+static int scsi_issue_flush_fn(struct request_queue *q, struct gendisk *disk,
sector_t *error_sector)
{
struct scsi_device *sdev = q->queuedata;
/*
* Kill a request for a dead device
*/
-static void scsi_kill_request(struct request *req, request_queue_t *q)
+static void scsi_kill_request(struct request *req, struct request_queue *q)
{
struct scsi_cmnd *cmd = req->special;
struct scsi_device *sdev = cmd->device;
int
scsi_internal_device_block(struct scsi_device *sdev)
{
- request_queue_t *q = sdev->request_queue;
+ struct request_queue *q = sdev->request_queue;
unsigned long flags;
int err = 0;
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
- request_queue_t *q = sdev->request_queue;
+ struct request_queue *q = sdev->request_queue;
int err;
unsigned long flags;
return ret;
}
-static void sd_prepare_flush(request_queue_t *q, struct request *rq)
+static void sd_prepare_flush(struct request_queue *q, struct request *rq)
{
memset(rq->cmd, 0, sizeof(rq->cmd));
rq->cmd_type = REQ_TYPE_BLOCK_PC;
*/
int hard_sector = sector_size;
sector_t sz = (sdkp->capacity/2) * (hard_sector/256);
- request_queue_t *queue = sdp->request_queue;
+ struct request_queue *queue = sdp->request_queue;
sector_t mb = sz;
blk_queue_hardsect_size(queue, hard_sector);
unsigned char *buffer;
int the_result, retries = 3;
int sector_size;
- request_queue_t *queue;
+ struct request_queue *queue;
buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
if (!buffer)
}
}
-inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
+inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
blk_recount_segments(q, bio);
return bio->bi_phys_segments;
}
-inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
+inline int bio_hw_segments(struct request_queue *q, struct bio *bio)
{
if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
blk_recount_segments(q, bio);
*/
void __bio_clone(struct bio *bio, struct bio *bio_src)
{
- request_queue_t *q = bdev_get_queue(bio_src->bi_bdev);
+ struct request_queue *q = bdev_get_queue(bio_src->bi_bdev);
memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
bio_src->bi_max_vecs * sizeof(struct bio_vec));
*/
int bio_get_nr_vecs(struct block_device *bdev)
{
- request_queue_t *q = bdev_get_queue(bdev);
+ struct request_queue *q = bdev_get_queue(bdev);
int nr_pages;
nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
return nr_pages;
}
-static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
+static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
*page, unsigned int len, unsigned int offset,
unsigned short max_sectors)
{
* smaller than PAGE_SIZE, so it is always possible to add a single
* page to an empty bio. This should only be used by REQ_PC bios.
*/
-int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
+int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
unsigned int len, unsigned int offset)
{
return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
* to/from kernel pages as necessary. Must be paired with
* call bio_uncopy_user() on io completion.
*/
-struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
+struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
unsigned int len, int write_to_vm)
{
unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
return ERR_PTR(ret);
}
-static struct bio *__bio_map_user_iov(request_queue_t *q,
+static struct bio *__bio_map_user_iov(struct request_queue *q,
struct block_device *bdev,
struct sg_iovec *iov, int iov_count,
int write_to_vm)
/**
* bio_map_user - map user address into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
* @bdev: destination block device
* @uaddr: start of user address
* @len: length in bytes
* Map the user space address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
-struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
+struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
unsigned long uaddr, unsigned int len, int write_to_vm)
{
struct sg_iovec iov;
/**
* bio_map_user_iov - map user sg_iovec table into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
* @bdev: destination block device
* @iov: the iovec.
* @iov_count: number of elements in the iovec
* Map the user space address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
-struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
+struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
struct sg_iovec *iov, int iov_count,
int write_to_vm)
{
}
-static struct bio *__bio_map_kern(request_queue_t *q, void *data,
+static struct bio *__bio_map_kern(struct request_queue *q, void *data,
unsigned int len, gfp_t gfp_mask)
{
unsigned long kaddr = (unsigned long)data;
/**
* bio_map_kern - map kernel address into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
* @data: pointer to buffer to map
* @len: length in bytes
* @gfp_mask: allocation flags for bio allocation
* Map the kernel address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
-struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
+struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
gfp_t gfp_mask)
{
struct bio *bio;
struct omap_mbox_queue {
spinlock_t lock;
- request_queue_t *queue;
+ struct request_queue *queue;
struct work_struct work;
int (*callback)(void *);
struct omap_mbox *mbox;
struct scsi_ioctl_command;
struct request_queue;
-typedef struct request_queue request_queue_t;
struct elevator_queue;
typedef struct elevator_queue elevator_t;
struct request_pm_state;
struct list_head queuelist;
struct list_head donelist;
- request_queue_t *q;
+ struct request_queue *q;
unsigned int cmd_flags;
enum rq_cmd_type_bits cmd_type;
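For out-of-tree or forthcoming block drivers the conversion is purely
mechanical: every request_queue_t in a prototype or local variable is
spelled struct request_queue instead, and no function call changes. A
minimal sketch, assuming a hypothetical driver "foo" with its own lock
and transfer routine (foo_lock, foo_transfer and do_foo_request are
made-up names, not part of this patch):

#include <linux/blkdev.h>

static DEFINE_SPINLOCK(foo_lock);		/* hypothetical driver lock */

/* Hypothetical driver-specific data mover, shown only as a stub. */
static void foo_transfer(struct request *req)
{
	/* ... move data for req ... */
}

/* The request function now takes struct request_queue * directly. */
static void do_foo_request(struct request_queue *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		if (!blk_fs_request(req)) {
			end_request(req, 0);	/* not a filesystem request */
			continue;
		}
		foo_transfer(req);
		end_request(req, 1);		/* completed successfully */
	}
}

static int __init foo_init(void)
{
	/* blk_init_queue() itself is unchanged; only the spelling of the
	   returned pointer type differs. */
	struct request_queue *q = blk_init_queue(do_foo_request, &foo_lock);

	return q ? 0 : -ENOMEM;
}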
#include <linux/elevator.h>
-typedef void (request_fn_proc) (request_queue_t *q);
-typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
-typedef int (prep_rq_fn) (request_queue_t *, struct request *);
-typedef void (unplug_fn) (request_queue_t *);
+typedef void (request_fn_proc) (struct request_queue *q);
+typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
+typedef int (prep_rq_fn) (struct request_queue *, struct request *);
+typedef void (unplug_fn) (struct request_queue *);
struct bio_vec;
-typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
-typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
-typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
+typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
+typedef int (issue_flush_fn) (struct request_queue *, struct gendisk *, sector_t *);
+typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
typedef void (softirq_done_fn)(struct request *);
enum blk_queue_state {
#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
-extern void blk_queue_bounce(request_queue_t *q, struct bio **bio);
+extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
return 0;
}
-static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
+static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_MMU */
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *);
-extern void __blk_put_request(request_queue_t *, struct request *);
+extern void __blk_put_request(struct request_queue *, struct request *);
extern void blk_end_sync_rq(struct request *rq, int error);
-extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
-extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
-extern void blk_requeue_request(request_queue_t *, struct request *);
-extern void blk_plug_device(request_queue_t *);
-extern int blk_remove_plug(request_queue_t *);
-extern void blk_recount_segments(request_queue_t *, struct bio *);
+extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
+extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
+extern void blk_requeue_request(struct request_queue *, struct request *);
+extern void blk_plug_device(struct request_queue *);
+extern int blk_remove_plug(struct request_queue *);
+extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_cmd_ioctl(struct file *, struct request_queue *,
struct gendisk *, unsigned int, void __user *);
extern int sg_scsi_ioctl(struct file *, struct request_queue *,
/*
* Temporary export, until SCSI gets fixed up.
*/
-extern int ll_back_merge_fn(request_queue_t *, struct request *, struct bio *);
+extern int ll_back_merge_fn(struct request_queue *, struct request *,
+ struct bio *);
/*
* A queue has just exitted congestion. Note this in the global counter of
* congested queues, and wake up anyone who was waiting for requests to be
* put back.
*/
-static inline void blk_clear_queue_congested(request_queue_t *q, int rw)
+static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
{
clear_bdi_congested(&q->backing_dev_info, rw);
}
* A queue has just entered congestion. Flag that in the queue's VM-visible
* state flags and increment the global counter of congested queues.
*/
-static inline void blk_set_queue_congested(request_queue_t *q, int rw)
+static inline void blk_set_queue_congested(struct request_queue *q, int rw)
{
set_bdi_congested(&q->backing_dev_info, rw);
}
-extern void blk_start_queue(request_queue_t *q);
-extern void blk_stop_queue(request_queue_t *q);
+extern void blk_start_queue(struct request_queue *q);
+extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
-extern void __blk_stop_queue(request_queue_t *q);
-extern void blk_run_queue(request_queue_t *);
-extern void blk_start_queueing(request_queue_t *);
-extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long);
+extern void __blk_stop_queue(struct request_queue *q);
+extern void blk_run_queue(struct request_queue *);
+extern void blk_start_queueing(struct request_queue *);
+extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
extern int blk_rq_unmap_user(struct bio *);
-extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
-extern int blk_rq_map_user_iov(request_queue_t *, struct request *,
+extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
+extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
struct sg_iovec *, int, unsigned int);
-extern int blk_execute_rq(request_queue_t *, struct gendisk *,
+extern int blk_execute_rq(struct request_queue *, struct gendisk *,
struct request *, int);
-extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
+extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
struct request *, int, rq_end_io_fn *);
extern int blk_verify_command(unsigned char *, int);
-static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
+static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
return bdev->bd_disk->queue;
}
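The stacking drivers converted above (dm, md, pktcdvd) all reach the
underlying device's queue through bdev_get_queue(). A condensed sketch
of that pattern with the new spelling; example_congested() is a made-up
name used only for illustration here:

#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* Illustrative only: mirrors the dm/md congestion checks in the
 * hunks above. */
static int example_congested(struct block_device *bdev, int bdi_bits)
{
	struct request_queue *q = bdev_get_queue(bdev);

	return bdi_congested(&q->backing_dev_info, bdi_bits);
}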
/*
* Access functions for manipulating queue properties
*/
-extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
+extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
spinlock_t *lock, int node_id);
-extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
-extern void blk_cleanup_queue(request_queue_t *);
-extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
-extern void blk_queue_bounce_limit(request_queue_t *, u64);
-extern void blk_queue_max_sectors(request_queue_t *, unsigned int);
-extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short);
-extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
-extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
-extern void blk_queue_hardsect_size(request_queue_t *, unsigned short);
-extern void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b);
-extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
-extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
-extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
-extern void blk_queue_dma_alignment(request_queue_t *, int);
-extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *);
+extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+extern void blk_cleanup_queue(struct request_queue *);
+extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
+extern void blk_queue_bounce_limit(struct request_queue *, u64);
+extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
+extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
+extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
+extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
+extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
+extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
+extern void blk_queue_dma_alignment(struct request_queue *, int);
+extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
-extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
-extern int blk_do_ordered(request_queue_t *, struct request **);
-extern unsigned blk_ordered_cur_seq(request_queue_t *);
+extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
+extern void blk_queue_issue_flush_fn(struct request_queue *, issue_flush_fn *);
+extern int blk_do_ordered(struct request_queue *, struct request **);
+extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);
-extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
+extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
-extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
+extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
-extern void generic_unplug_device(request_queue_t *);
-extern void __generic_unplug_device(request_queue_t *);
+extern void generic_unplug_device(struct request_queue *);
+extern void __generic_unplug_device(struct request_queue *);
extern long nr_blockdev_pages(void);
-int blk_get_queue(request_queue_t *);
-request_queue_t *blk_alloc_queue(gfp_t);
-request_queue_t *blk_alloc_queue_node(gfp_t, int);
-extern void blk_put_queue(request_queue_t *);
+int blk_get_queue(struct request_queue *);
+struct request_queue *blk_alloc_queue(gfp_t);
+struct request_queue *blk_alloc_queue_node(gfp_t, int);
+extern void blk_put_queue(struct request_queue *);
/*
* tag stuff
#define blk_queue_tag_depth(q) ((q)->queue_tags->busy)
#define blk_queue_tag_queue(q) ((q)->queue_tags->busy < (q)->queue_tags->max_depth)
#define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED)
-extern int blk_queue_start_tag(request_queue_t *, struct request *);
-extern struct request *blk_queue_find_tag(request_queue_t *, int);
-extern void blk_queue_end_tag(request_queue_t *, struct request *);
-extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *);
-extern void blk_queue_free_tags(request_queue_t *);
-extern int blk_queue_resize_tags(request_queue_t *, int);
-extern void blk_queue_invalidate_tags(request_queue_t *);
+extern int blk_queue_start_tag(struct request_queue *, struct request *);
+extern struct request *blk_queue_find_tag(struct request_queue *, int);
+extern void blk_queue_end_tag(struct request_queue *, struct request *);
+extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
+extern void blk_queue_free_tags(struct request_queue *);
+extern int blk_queue_resize_tags(struct request_queue *, int);
+extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);
return bqt->tag_index[tag];
}
-extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
+extern void blk_rq_bio_prep(struct request_queue *, struct request *, struct bio *);
extern int blkdev_issue_flush(struct block_device *, sector_t *);
#define MAX_PHYS_SEGMENTS 128
#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
-static inline int queue_hardsect_size(request_queue_t *q)
+static inline int queue_hardsect_size(struct request_queue *q)
{
int retval = 512;
return queue_hardsect_size(bdev_get_queue(bdev));
}
-static inline int queue_dma_alignment(request_queue_t *q)
+static inline int queue_dma_alignment(struct request_queue *q)
{
int retval = 511;
#if defined(CONFIG_BLK_DEV_IO_TRACE)
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
-extern void blk_trace_shutdown(request_queue_t *);
+extern void blk_trace_shutdown(struct request_queue *);
extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
/**
#ifdef CONFIG_BLOCK
-typedef int (elevator_merge_fn) (request_queue_t *, struct request **,
+typedef int (elevator_merge_fn) (struct request_queue *, struct request **,
struct bio *);
-typedef void (elevator_merge_req_fn) (request_queue_t *, struct request *, struct request *);
+typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *);
-typedef void (elevator_merged_fn) (request_queue_t *, struct request *, int);
+typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int);
-typedef int (elevator_allow_merge_fn) (request_queue_t *, struct request *, struct bio *);
+typedef int (elevator_allow_merge_fn) (struct request_queue *, struct request *, struct bio *);
-typedef int (elevator_dispatch_fn) (request_queue_t *, int);
+typedef int (elevator_dispatch_fn) (struct request_queue *, int);
-typedef void (elevator_add_req_fn) (request_queue_t *, struct request *);
-typedef int (elevator_queue_empty_fn) (request_queue_t *);
-typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *);
-typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *);
-typedef int (elevator_may_queue_fn) (request_queue_t *, int);
+typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
+typedef int (elevator_queue_empty_fn) (struct request_queue *);
+typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
+typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
+typedef int (elevator_may_queue_fn) (struct request_queue *, int);
-typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, gfp_t);
+typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, gfp_t);
typedef void (elevator_put_req_fn) (struct request *);
-typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *);
-typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *);
+typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
+typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
-typedef void *(elevator_init_fn) (request_queue_t *);
+typedef void *(elevator_init_fn) (struct request_queue *);
typedef void (elevator_exit_fn) (elevator_t *);
struct elevator_ops
/*
* block elevator interface
*/
-extern void elv_dispatch_sort(request_queue_t *, struct request *);
-extern void elv_dispatch_add_tail(request_queue_t *, struct request *);
-extern void elv_add_request(request_queue_t *, struct request *, int, int);
-extern void __elv_add_request(request_queue_t *, struct request *, int, int);
-extern void elv_insert(request_queue_t *, struct request *, int);
-extern int elv_merge(request_queue_t *, struct request **, struct bio *);
-extern void elv_merge_requests(request_queue_t *, struct request *,
+extern void elv_dispatch_sort(struct request_queue *, struct request *);
+extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
+extern void elv_add_request(struct request_queue *, struct request *, int, int);
+extern void __elv_add_request(struct request_queue *, struct request *, int, int);
+extern void elv_insert(struct request_queue *, struct request *, int);
+extern int elv_merge(struct request_queue *, struct request **, struct bio *);
+extern void elv_merge_requests(struct request_queue *, struct request *,
struct request *);
-extern void elv_merged_request(request_queue_t *, struct request *, int);
-extern void elv_dequeue_request(request_queue_t *, struct request *);
-extern void elv_requeue_request(request_queue_t *, struct request *);
-extern int elv_queue_empty(request_queue_t *);
+extern void elv_merged_request(struct request_queue *, struct request *, int);
+extern void elv_dequeue_request(struct request_queue *, struct request *);
+extern void elv_requeue_request(struct request_queue *, struct request *);
+extern int elv_queue_empty(struct request_queue *);
extern struct request *elv_next_request(struct request_queue *q);
-extern struct request *elv_former_request(request_queue_t *, struct request *);
-extern struct request *elv_latter_request(request_queue_t *, struct request *);
-extern int elv_register_queue(request_queue_t *q);
-extern void elv_unregister_queue(request_queue_t *q);
-extern int elv_may_queue(request_queue_t *, int);
-extern void elv_completed_request(request_queue_t *, struct request *);
-extern int elv_set_request(request_queue_t *, struct request *, gfp_t);
-extern void elv_put_request(request_queue_t *, struct request *);
+extern struct request *elv_former_request(struct request_queue *, struct request *);
+extern struct request *elv_latter_request(struct request_queue *, struct request *);
+extern int elv_register_queue(struct request_queue *q);
+extern void elv_unregister_queue(struct request_queue *q);
+extern int elv_may_queue(struct request_queue *, int);
+extern void elv_completed_request(struct request_queue *, struct request *);
+extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
+extern void elv_put_request(struct request_queue *, struct request *);
/*
* io scheduler registration
/*
* io scheduler sysfs switching
*/
-extern ssize_t elv_iosched_show(request_queue_t *, char *);
-extern ssize_t elv_iosched_store(request_queue_t *, const char *, size_t);
+extern ssize_t elv_iosched_show(struct request_queue *, char *);
+extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
-extern int elevator_init(request_queue_t *, char *);
+extern int elevator_init(struct request_queue *, char *);
extern void elevator_exit(elevator_t *);
extern int elv_rq_merge_ok(struct request *, struct bio *);
/*
* Helper functions.
*/
-extern struct request *elv_rb_former_request(request_queue_t *, struct request *);
-extern struct request *elv_rb_latter_request(request_queue_t *, struct request *);
+extern struct request *elv_rb_former_request(struct request_queue *, struct request *);
+extern struct request *elv_rb_latter_request(struct request_queue *, struct request *);
/*
* rb support functions.
char name[4]; /* drive name, such as "hda" */
char driver_req[10]; /* requests specific driver */
- request_queue_t *queue; /* request queue */
+ struct request_queue *queue; /* request queue */
struct request *rq; /* current request */
struct ide_drive_s *next; /* circular list of hwgroup drives */
extern int ide_spin_wait_hwgroup(ide_drive_t *);
extern void ide_timer_expiry(unsigned long);
extern irqreturn_t ide_intr(int irq, void *dev_id);
-extern void do_ide_request(request_queue_t *);
+extern void do_ide_request(struct request_queue *);
void ide_init_disk(struct gendisk *, ide_drive_t *);
struct task_struct *lo_thread;
wait_queue_head_t lo_event;
- request_queue_t *lo_queue;
+ struct request_queue *lo_queue;
struct gendisk *lo_disk;
struct list_head lo_list;
};
unsigned int safemode_delay;
struct timer_list safemode_timer;
atomic_t writes_pending;
- request_queue_t *queue; /* for plugging ... */
+ struct request_queue *queue; /* for plugging ... */
atomic_t write_behind; /* outstanding async IO */
unsigned int max_write_behind; /* 0 = sync */
int level;
struct list_head list;
struct module *owner;
- int (*make_request)(request_queue_t *q, struct bio *bio);
+ int (*make_request)(struct request_queue *q, struct bio *bio);
int (*run)(mddev_t *mddev);
int (*stop)(mddev_t *mddev);
void (*status)(struct seq_file *seq, mddev_t *mddev);
static void sd_rescan(struct device *);
static int sd_init_command(struct scsi_cmnd *);
static int sd_issue_flush(struct device *, sector_t *);
-static void sd_prepare_flush(request_queue_t *, struct request *);
+static void sd_prepare_flush(struct request_queue *, struct request *);
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
static void scsi_disk_release(struct class_device *cdev);
static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
return 0;
}
-static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
+static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
mempool_t *pool)
{
struct page *page;
*bio_orig = bio;
}
-void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
+void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
mempool_t *pool;