extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_conf *, u64,
sector_t, unsigned int,
gfp_t) __must_hold(local);
-extern void drbd_free_some_ee(struct drbd_conf *, struct drbd_peer_request *,
- int);
-#define drbd_free_ee(m,e) drbd_free_some_ee(m, e, 0)
-#define drbd_free_net_ee(m,e) drbd_free_some_ee(m, e, 1)
+extern void __drbd_free_peer_req(struct drbd_conf *, struct drbd_peer_request *,
+ int);
+#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
+#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
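The only difference between the two macros is the is_net argument: drbd_free_peer_req() returns the request's pages to the ordinary receive page pool, while drbd_free_net_peer_req() credits them against the separate accounting used for data the peer may still be reading from. A minimal sketch of the shared helper follows; beyond the EE_HAS_DIGEST check visible in a later hunk, the page-pool and mempool calls are assumptions, not the literal body.

/* Sketch only -- the drbd_pp_free()/drbd_ee_mempool details are assumed,
 * not taken from this patch; is_net merely selects the page accounting. */
void __drbd_free_peer_req(struct drbd_conf *mdev,
			  struct drbd_peer_request *peer_req, int is_net)
{
	if (peer_req->flags & EE_HAS_DIGEST)
		kfree(peer_req->digest);		/* digest attached by the integrity check */
	drbd_pp_free(mdev, peer_req->pages, is_net);	/* is_net: net vs. normal pool count */
	mempool_free(peer_req, drbd_ee_mempool);	/* request object back to its mempool */
}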
extern void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
struct list_head *head);
extern void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
spin_unlock_irq(&mdev->tconn->req_lock);
list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
- drbd_free_net_ee(mdev, peer_req);
+ drbd_free_net_peer_req(mdev, peer_req);
}
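Both reclaim loops in this patch follow the same shape: requests whose disk I/O has finished but whose pages may still be referenced by the TCP send path are collected onto a private list under the req_lock and only then freed with the net-accounting variant. A simplified sketch, assuming an mdev->net_ee list and ignoring the check for pages still in flight:

/* Simplified sketch: the real reclaim only moves entries whose pages are no
 * longer referenced by the network; here the whole list is spliced for brevity. */
static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev)
{
	struct drbd_peer_request *peer_req, *t;
	LIST_HEAD(reclaimed);

	spin_lock_irq(&mdev->tconn->req_lock);
	list_splice_init(&mdev->net_ee, &reclaimed);	/* net_ee name is an assumption */
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(mdev, peer_req);	/* outside the lock, as required */
}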
/**
 You need to hold the req_lock:
  _drbd_wait_ee_list_empty()

 You must not have the req_lock:
- drbd_free_ee()
+ drbd_free_peer_req()
drbd_alloc_peer_req()
drbd_release_ee()
drbd_ee_fix_bhs()
return NULL;
}
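The locking rule spelled out in the comment above explains the pattern that repeats at every call site in this patch: the request is unlinked from its list while tconn->req_lock is held, the lock is dropped, and only then is drbd_free_peer_req() called. A hedged sketch of that caller pattern (the function name is made up):

/* Illustrative only: unlink under req_lock, free after dropping it. */
static void drop_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
{
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&peer_req->w.list);		/* unlink while the lock is held */
	spin_unlock_irq(&mdev->tconn->req_lock);

	drbd_free_peer_req(mdev, peer_req);	/* must not hold req_lock here */
}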
-void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
+void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
int is_net)
{
if (peer_req->flags & EE_HAS_DIGEST)
spin_unlock_irq(&mdev->tconn->req_lock);
list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
- drbd_free_some_ee(mdev, peer_req, is_net);
+ __drbd_free_peer_req(mdev, peer_req, is_net);
count++;
}
return count;
spin_unlock_irq(&mdev->tconn->req_lock);
list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
- drbd_free_net_ee(mdev, peer_req);
+ drbd_free_net_peer_req(mdev, peer_req);
/* possible callbacks here:
* e_end_block, and e_end_resync_block, e_send_discard_write.
err2 = peer_req->w.cb(&peer_req->w, !!err);
if (!err)
err = err2;
- drbd_free_ee(mdev, peer_req);
+ drbd_free_peer_req(mdev, peer_req);
}
wake_up(&mdev->ee_wait);
}
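The hunk above is the done-ee pass the callback comment refers to: every completion callback (e_end_block, e_end_resync_block, e_send_discard_write) still runs after an earlier one failed, only the first error is kept, and every request is freed regardless. Roughly, under those assumptions:

/* Sketch, not the literal function: run all callbacks, remember the first
 * error, free every peer request, then wake waiters on ee_wait. */
static int finish_peer_reqs(struct drbd_conf *mdev, struct list_head *work_list)
{
	struct drbd_peer_request *peer_req, *t;
	int err = 0;

	list_for_each_entry_safe(peer_req, t, work_list, w.list) {
		int err2 = peer_req->w.cb(&peer_req->w, !!err);
		if (!err)
			err = err2;		/* keep only the first failure */
		drbd_free_peer_req(mdev, peer_req);
	}
	wake_up(&mdev->ee_wait);
	return err;
}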
kunmap(page);
if (err) {
- drbd_free_ee(mdev, peer_req);
+ drbd_free_peer_req(mdev, peer_req);
return NULL;
}
ds -= len;
if (memcmp(dig_in, dig_vv, dgs)) {
dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
(unsigned long long)sector, data_size);
- drbd_free_ee(mdev, peer_req);
+ drbd_free_peer_req(mdev, peer_req);
return NULL;
}
}
list_del(&peer_req->w.list);
spin_unlock_irq(&mdev->tconn->req_lock);
- drbd_free_ee(mdev, peer_req);
+ drbd_free_peer_req(mdev, peer_req);
fail:
put_ldev(mdev);
return -EIO;
out_interrupted:
drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + EV_CLEANUP);
put_ldev(mdev);
- drbd_free_ee(mdev, peer_req);
+ drbd_free_peer_req(mdev, peer_req);
return err;
}
out_free_e:
put_ldev(mdev);
- drbd_free_ee(mdev, peer_req);
+ drbd_free_peer_req(mdev, peer_req);
return -EIO;
}
* some distributed deadlock, if the other side blocks on
* congestion as well, because our receiver blocks in
* drbd_pp_alloc due to pp_in_use > max_buffers. */
- drbd_free_ee(mdev, peer_req);
+ drbd_free_peer_req(mdev, peer_req);
peer_req = NULL;
inc_rs_pending(mdev);
err = drbd_send_drequest_csum(mdev, sector, size,
out:
if (peer_req)
- drbd_free_ee(mdev, peer_req);
+ drbd_free_peer_req(mdev, peer_req);
if (unlikely(err))
dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
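The deadlock comment above dictates the ordering in this hunk: the peer request, and with it its page-pool pages, is freed before the potentially blocking send, and the local pointer is cleared so the shared out: path does not free it a second time. A condensed sketch with a hypothetical helper name; the packet type shown is the P_OV_REPLY variant from a later hunk, since the resync-checksum hunk above elides its own:

/* Sketch of the free-before-send ordering; not a literal copy of the hunk. */
static int send_checksum_reply(struct drbd_conf *mdev,
			       struct drbd_peer_request *peer_req,
			       sector_t sector, int size,
			       void *digest, int digest_size)
{
	/* Free first: if the send blocks on congestion, the receiver can still
	 * satisfy drbd_pp_alloc() from the pages we just gave back.  In the
	 * real worker the local pointer is also set to NULL so the shared
	 * out: path does not free it twice. */
	drbd_free_peer_req(mdev, peer_req);
	inc_rs_pending(mdev);
	return drbd_send_drequest_csum(mdev, sector, size,
				       digest, digest_size, P_OV_REPLY);
}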
list_del(&peer_req->w.list);
spin_unlock_irq(&mdev->tconn->req_lock);
- drbd_free_ee(mdev, peer_req);
+ drbd_free_peer_req(mdev, peer_req);
defer:
put_ldev(mdev);
return -EAGAIN;
spin_unlock_irq(&mdev->tconn->req_lock);
wake_up(&drbd_pp_wait);
} else
- drbd_free_ee(mdev, peer_req);
+ drbd_free_peer_req(mdev, peer_req);
}
/**
int err;
if (unlikely(cancel)) {
- drbd_free_ee(mdev, peer_req);
+ drbd_free_peer_req(mdev, peer_req);
dec_unacked(mdev);
return 0;
}
int err;
if (unlikely(cancel)) {
- drbd_free_ee(mdev, peer_req);
+ drbd_free_peer_req(mdev, peer_req);
dec_unacked(mdev);
return 0;
}
int err, eq = 0;
if (unlikely(cancel)) {
- drbd_free_ee(mdev, peer_req);
+ drbd_free_peer_req(mdev, peer_req);
dec_unacked(mdev);
return 0;
}
* some distributed deadlock, if the other side blocks on
* congestion as well, because our receiver blocks in
* drbd_pp_alloc due to pp_in_use > max_buffers. */
- drbd_free_ee(mdev, peer_req);
+ drbd_free_peer_req(mdev, peer_req);
peer_req = NULL;
inc_rs_pending(mdev);
err = drbd_send_drequest_csum(mdev, sector, size, digest, digest_size, P_OV_REPLY);
out:
if (peer_req)
- drbd_free_ee(mdev, peer_req);
+ drbd_free_peer_req(mdev, peer_req);
dec_unacked(mdev);
return err;
}
int err, eq = 0;
if (unlikely(cancel)) {
- drbd_free_ee(mdev, peer_req);
+ drbd_free_peer_req(mdev, peer_req);
dec_unacked(mdev);
return 0;
}
* some distributed deadlock, if the other side blocks on
* congestion as well, because our receiver blocks in
* drbd_pp_alloc due to pp_in_use > max_buffers. */
- drbd_free_ee(mdev, peer_req);
+ drbd_free_peer_req(mdev, peer_req);
if (!eq)
drbd_ov_out_of_sync_found(mdev, sector, size);
else