init_completion(&al_work.event);
al_work.w.cb = w_al_write_transaction;
al_work.w.mdev = mdev;
- drbd_queue_work_front(&mdev->tconn->data.work, &al_work.w);
+ drbd_queue_work_front(&mdev->tconn->sender_work, &al_work.w);
wait_for_completion(&al_work.event);
return al_work.err;
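
A minimal sketch of the queueing side, for orientation while reading the hunks below; it is not part of this patch. The struct drbd_work layout and the q_lock/irqsave details are assumptions inferred from fields the diff references elsewhere (w.cb, w.mdev, resync_work.list, sender_work.q, sender_work.q_wait); only the names drbd_queue_work()/drbd_queue_work_front() and their two arguments are taken from the patch itself.

    /* One queued unit of work: a list node, a callback run by the sender
     * thread, and a back pointer to the device or connection it acts on. */
    struct drbd_work {
            struct list_head list;
            int (*cb)(struct drbd_work *, int cancel);
            union {                         /* assumed; both members are assigned in this diff */
                    struct drbd_conf *mdev;
                    struct drbd_tconn *tconn;
            };
    };

    static inline void drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
    {
            unsigned long flags;

            spin_lock_irqsave(&q->q_lock, flags);  /* some callers run in irq/timer context */
            list_add_tail(&w->list, &q->q);        /* normal priority: append */
            spin_unlock_irqrestore(&q->q_lock, flags);
            wake_up(&q->q_wait);                   /* kick the sender thread */
    }

    static inline void drbd_queue_work_front(struct drbd_work_queue *q, struct drbd_work *w)
    {
            unsigned long flags;

            spin_lock_irqsave(&q->q_lock, flags);
            list_add(&w->list, &q->q);             /* urgent: prepend */
            spin_unlock_irqrestore(&q->q_lock, flags);
            wake_up(&q->q_wait);
    }
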
udw->enr = ext->lce.lc_number;
udw->w.cb = w_update_odbm;
udw->w.mdev = mdev;
- drbd_queue_work_front(&mdev->tconn->data.work, &udw->w);
+ drbd_queue_work_front(&mdev->tconn->sender_work, &udw->w);
} else {
dev_warn(DEV, "Could not kmalloc an udw\n");
}
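
Both call sites above wrap the plain work item in a small per-purpose struct; a sketch of those wrappers follows, with the field sets assumed beyond the members the hunks actually touch (event, err, enr). Embedding struct drbd_work first lets the callback recover its payload with container_of(); for the AL transaction the callback is expected to set ->err and complete ->event, which releases the wait_for_completion() above.

    struct update_al_work {
            struct drbd_work w;
            struct completion event;        /* completed by the w_al_write_transaction callback */
            int err;                        /* result handed back to the submitter */
    };

    struct update_odbm_work {
            struct drbd_work w;
            unsigned int enr;               /* extent number consumed by w_update_odbm */
    };
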
};
struct drbd_socket {
- struct drbd_work_queue work;
struct mutex mutex;
struct socket *socket;
/* this way we get our
struct drbd_thread worker;
struct drbd_thread asender;
cpumask_var_t cpu_mask;
+ struct drbd_work_queue sender_work;
};
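
Only the queue's home changes in this hunk: formerly each struct drbd_socket (data and meta) owned a work queue of its own; now a single sender_work queue sits in the connection object (the worker/asender/cpu_mask fields above presumably belong to struct drbd_tconn). The queue type itself is untouched; a sketch of it, with q_lock assumed, since only q and q_wait are visible elsewhere in this diff:

    struct drbd_work_queue {
            struct list_head q;             /* queued struct drbd_work items */
            spinlock_t q_lock;              /* assumed: protects q */
            wait_queue_head_t q_wait;       /* the sender thread waits here */
    };
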
struct drbd_conf {
wake_up(&mdev->misc_wait);
if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
- drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w);
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
}
}
set_bit(CREATE_BARRIER, &tconn->flags);
}
- drbd_queue_work(&tconn->data.work, &b->w);
+ drbd_queue_work(&tconn->sender_work, &b->w);
}
pn = &b->next;
} else {
D_ASSERT(list_empty(&mdev->read_ee));
D_ASSERT(list_empty(&mdev->net_ee));
D_ASSERT(list_empty(&mdev->resync_reads));
- D_ASSERT(list_empty(&mdev->tconn->data.work.q));
- D_ASSERT(list_empty(&mdev->tconn->meta.work.q));
+ D_ASSERT(list_empty(&mdev->tconn->sender_work.q));
D_ASSERT(list_empty(&mdev->resync_work.list));
D_ASSERT(list_empty(&mdev->unplug_work.list));
D_ASSERT(list_empty(&mdev->go_diskless.list));
/* paranoia asserts */
D_ASSERT(mdev->open_cnt == 0);
- D_ASSERT(list_empty(&mdev->tconn->data.work.q));
/* end paranoia asserts */
/* cleanup stuff that may have been allocated during
init_waitqueue_head(&tconn->ping_wait);
idr_init(&tconn->volumes);
- drbd_init_workqueue(&tconn->data.work);
+ drbd_init_workqueue(&tconn->sender_work);
mutex_init(&tconn->data.mutex);
-
- drbd_init_workqueue(&tconn->meta.work);
mutex_init(&tconn->meta.mutex);
drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
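
For completeness, a sketch of the init helper that now runs once per connection instead of once per socket; the body is an assumption (the q_lock member is the same guess as in the queue sketch above), only the function name and its single argument appear in the hunk.

    static void drbd_init_workqueue(struct drbd_work_queue *wq)
    {
            spin_lock_init(&wq->q_lock);
            INIT_LIST_HEAD(&wq->q);
            init_waitqueue_head(&wq->q_wait);
    }
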
{
D_ASSERT(mdev->state.disk == D_FAILED);
if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
- drbd_queue_work(&mdev->tconn->data.work, &mdev->go_diskless);
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->go_diskless);
}
/**
set_bit(BITMAP_IO, &mdev->flags);
if (atomic_read(&mdev->ap_bio_cnt) == 0) {
if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
- drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w);
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
}
spin_unlock_irq(&mdev->tconn->req_lock);
}
{
struct drbd_conf *mdev = (struct drbd_conf *) data;
- drbd_queue_work_front(&mdev->tconn->data.work, &mdev->md_sync_work);
+ drbd_queue_work_front(&mdev->tconn->sender_work, &mdev->md_sync_work);
}
static int w_md_sync(struct drbd_work *w, int unused)
barr.w.cb = w_prev_work_done;
barr.w.tconn = tconn;
init_completion(&barr.done);
- drbd_queue_work(&tconn->data.work, &barr.w);
+ drbd_queue_work(&tconn->sender_work, &barr.w);
wait_for_completion(&barr.done);
}
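
The hunk above is the workqueue flush: a barrier work item is queued like any other item, so by the time its callback runs, everything queued before it has been processed by the sender thread. A sketch of the pieces it relies on; the struct name and the container_of() plumbing are assumptions, only w_prev_work_done and the w/done members appear in the hunk.

    struct drbd_wq_barrier {
            struct drbd_work w;
            struct completion done;
    };

    static int w_prev_work_done(struct drbd_work *w, int cancel)
    {
            struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);

            complete(&b->done);             /* wake the waiter in the flush function */
            return 0;
    }
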
if (w) {
w->cb = w_ov_finished;
w->mdev = mdev;
- drbd_queue_work_front(&mdev->tconn->data.work, w);
+ drbd_queue_work(&mdev->tconn->sender_work, w);
} else {
dev_err(DEV, "kmalloc(w) failed.");
ov_out_of_sync_print(mdev);
* dec_ap_pending will be done in got_BarrierAck
* or (on connection loss) in tl_clear. */
inc_ap_pending(mdev);
- drbd_queue_work(&tconn->data.work, &b->w);
+ drbd_queue_work(&tconn->sender_work, &b->w);
set_bit(CREATE_BARRIER, &tconn->flags);
}
D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0);
req->rq_state |= RQ_NET_QUEUED;
req->w.cb = w_send_read_req;
- drbd_queue_work(&mdev->tconn->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
break;
case QUEUE_FOR_NET_WRITE:
D_ASSERT(req->rq_state & RQ_NET_PENDING);
req->rq_state |= RQ_NET_QUEUED;
req->w.cb = w_send_dblock;
- drbd_queue_work(&mdev->tconn->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
/* close the epoch, in case it outgrew the limit */
rcu_read_lock();
case QUEUE_FOR_SEND_OOS:
req->rq_state |= RQ_NET_QUEUED;
req->w.cb = w_send_out_of_sync;
- drbd_queue_work(&mdev->tconn->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
break;
case READ_RETRY_REMOTE_CANCELED:
get_ldev(mdev);
req->w.cb = w_restart_disk_io;
- drbd_queue_work(&mdev->tconn->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
break;
case RESEND:
During connection handshake, we ensure that the peer was not rebooted. */
if (!(req->rq_state & RQ_NET_OK)) {
if (req->w.cb) {
- drbd_queue_work(&mdev->tconn->data.work, &req->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &req->w);
rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
}
break;
ascw->w.cb = w_after_state_ch;
ascw->w.mdev = mdev;
ascw->done = done;
- drbd_queue_work(&mdev->tconn->data.work, &ascw->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &ascw->w);
} else {
dev_err(DEV, "Could not kmalloc an ascw\n");
}
acscw->w.cb = w_after_conn_state_ch;
kref_get(&tconn->kref);
acscw->w.tconn = tconn;
- drbd_queue_work(&tconn->data.work, &acscw->w);
+ drbd_queue_work(&tconn->sender_work, &acscw->w);
} else {
conn_err(tconn, "Could not kmalloc an acscw\n");
}
__drbd_chk_io_error(mdev, false);
spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
- drbd_queue_work(&mdev->tconn->data.work, &peer_req->w);
+ drbd_queue_work(&mdev->tconn->sender_work, &peer_req->w);
put_ldev(mdev);
}
struct drbd_conf *mdev = (struct drbd_conf *) data;
if (list_empty(&mdev->resync_work.list))
- drbd_queue_work(&mdev->tconn->data.work, &mdev->resync_work);
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->resync_work);
}
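
This fragment and the md_sync/start_resync fragments elsewhere in the diff follow the classic timer-callback pattern (cast `data` back to mdev, queue a prepared work item): they presumably run in softirq context, must not sleep, and so leave the real work to the sender thread. A sketch of how such a timer would be re-armed; the resync_timer field name and the interval are assumptions, only mdev->resync_work is taken from the hunk above.

    static void example_arm_resync_timer(struct drbd_conf *mdev)
    {
            /* call the timer function again in ~100 ms; it will then queue
             * mdev->resync_work onto the connection's sender_work */
            mod_timer(&mdev->resync_timer, jiffies + HZ / 10);
    }
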
static void fifo_set(struct fifo_buffer *fb, int value)
if (w) {
w->cb = w_resync_finished;
w->mdev = mdev;
- drbd_queue_work(&mdev->tconn->data.work, w);
+ drbd_queue_work(&mdev->tconn->sender_work, w);
return 1;
}
dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
{
struct drbd_conf *mdev = (struct drbd_conf *) data;
- drbd_queue_work(&mdev->tconn->data.work, &mdev->start_resync_work);
+ drbd_queue_work(&mdev->tconn->sender_work, &mdev->start_resync_work);
}
int w_start_resync(struct drbd_work *w, int cancel)
/* as long as we use drbd_queue_work_front(),
* we may only dequeue single work items here, not batches. */
if (list_empty(&work_list))
- dequeue_work_item(&tconn->data.work, &work_list);
+ dequeue_work_item(&tconn->sender_work, &work_list);
/* Still nothing to do? Poke TCP, just in case,
* then wait for new work (or signal). */
drbd_tcp_uncork(tconn->data.socket);
mutex_unlock(&tconn->data.mutex);
- wait_event_interruptible(tconn->data.work.q_wait,
- dequeue_work_item(&tconn->data.work, &work_list));
+ wait_event_interruptible(tconn->sender_work.q_wait,
+ dequeue_work_item(&tconn->sender_work, &work_list));
mutex_lock(&tconn->data.mutex);
if (tconn->data.socket && cork)
list_del_init(&w->list);
w->cb(w, 1);
}
- dequeue_work_batch(&tconn->data.work, &work_list);
+ dequeue_work_batch(&tconn->sender_work, &work_list);
} while (!list_empty(&work_list));
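
The sender loop above takes one item at a time (the earlier comment explains why: drbd_queue_work_front() may prepend urgent work, so a batch could reorder it) and sleeps on sender_work.q_wait when the queue is empty; the final drain uses the batch variant and runs the remaining callbacks. A sketch of the two dequeue helpers, under the same q_lock assumption as before; only their names and the work_list argument come from the hunks.

    static bool dequeue_work_item(struct drbd_work_queue *queue, struct list_head *work_list)
    {
            spin_lock_irq(&queue->q_lock);
            if (!list_empty(&queue->q))
                    list_move(queue->q.next, work_list);    /* take exactly one item */
            spin_unlock_irq(&queue->q_lock);
            return !list_empty(work_list);
    }

    static bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_list)
    {
            spin_lock_irq(&queue->q_lock);
            list_splice_init(&queue->q, work_list);         /* take everything at once */
            spin_unlock_irq(&queue->q_lock);
            return !list_empty(work_list);
    }
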
rcu_read_lock();