struct drbd_conf *mdev;
int vnr;
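+ /* the rcu read-side critical section keeps the idr walk safe against
+  * concurrent volume removal (see drbd_delete_device) */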
+ rcu_read_lock();
idr_for_each_entry(&tconn->volumes, mdev, vnr) {
if (get_ldev_if_state(mdev, D_CONSISTENT)) {
fp = max_t(enum drbd_fencing_p, fp, mdev->ldev->dc.fencing);
put_ldev(mdev);
}
}
+ rcu_read_unlock();
return fp;
}
static bool conn_resync_running(struct drbd_tconn *tconn)
{
struct drbd_conf *mdev;
+ bool rv = false;
int vnr;
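+ /* record the result and break instead of returning, so the
+  * rcu_read_unlock() below is always reached */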
+ rcu_read_lock();
idr_for_each_entry(&tconn->volumes, mdev, vnr) {
if (mdev->state.conn == C_SYNC_SOURCE ||
mdev->state.conn == C_SYNC_TARGET ||
mdev->state.conn == C_PAUSED_SYNC_S ||
- mdev->state.conn == C_PAUSED_SYNC_T)
- return true;
+ mdev->state.conn == C_PAUSED_SYNC_T) {
+ rv = true;
+ break;
+ }
}
- return false;
+ rcu_read_unlock();
+
+ return rv;
}
static bool conn_ov_running(struct drbd_tconn *tconn)
{
struct drbd_conf *mdev;
+ bool rv = false;
int vnr;
+ rcu_read_lock();
idr_for_each_entry(&tconn->volumes, mdev, vnr) {
if (mdev->state.conn == C_VERIFY_S ||
- mdev->state.conn == C_VERIFY_T)
- return true;
+ mdev->state.conn == C_VERIFY_T) {
+ rv = true;
+ break;
+ }
}
- return false;
+ rcu_read_unlock();
+
+ return rv;
}
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
goto fail;
}
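+ /* the per-volume sanity checks below run under the rcu read lock;
+  * error paths inside the loop must leave through fail_rcu_unlock */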
+ rcu_read_lock();
idr_for_each_entry(&tconn->volumes, mdev, i) {
if (get_ldev(mdev)) {
enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
put_ldev(mdev);
if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
retcode = ERR_STONITH_AND_PROT_A;
- goto fail;
+ goto fail_rcu_unlock;
}
}
if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
retcode = ERR_DISCARD;
- goto fail;
+ goto fail_rcu_unlock;
}
if (!mdev->bitmap) {
if(drbd_bm_init(mdev)) {
retcode = ERR_NOMEM;
- goto fail;
+ goto fail_rcu_unlock;
}
}
}
+ rcu_read_unlock();
if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) {
retcode = ERR_CONG_NOT_PROTO_A;
retcode = _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
spin_unlock_irq(&tconn->req_lock);
+ rcu_read_lock();
idr_for_each_entry(&tconn->volumes, mdev, i) {
mdev->send_cnt = 0;
mdev->recv_cnt = 0;
kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
}
+ rcu_read_unlock();
conn_reconfig_done(tconn);
drbd_adm_finish(info, retcode);
return 0;
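+ /* error exit for failures detected while the rcu read lock was held */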
+fail_rcu_unlock:
+ rcu_read_unlock();
fail:
kfree(int_dig_in);
kfree(int_dig_vv);
/* synchronize with drbd_new_tconn/drbd_free_tconn */
mutex_lock(&drbd_cfg_mutex);
- /* synchronize with drbd_delete_device */
- rcu_read_lock();
next_tconn:
/* revalidate iterator position */
list_for_each_entry(tmp, &drbd_tconns, all_tconn) {
}
out:
- rcu_read_unlock();
mutex_unlock(&drbd_cfg_mutex);
/* where to start the next iteration */
cb->args[0] = (long)pos;
bool conn_all_vols_unconf(struct drbd_tconn *tconn)
{
struct drbd_conf *mdev;
+ bool rv = true;
int vnr;
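+ /* any volume that is not Diskless/StandAlone/Secondary makes the result false */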
+ rcu_read_lock();
idr_for_each_entry(&tconn->volumes, mdev, vnr) {
if (mdev->state.disk != D_DISKLESS ||
mdev->state.conn != C_STANDALONE ||
- mdev->state.role != R_SECONDARY)
- return false;
+ mdev->state.role != R_SECONDARY) {
+ rv = false;
+ break;
+ }
}
- return true;
+ rcu_read_unlock();
+
+ return rv;
}
/* Unfortunately the states were not correctly ordered, when
struct drbd_conf *mdev;
int vnr;
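+ /* the aggregation helpers below only read per-volume state, so the
+  * rcu read lock around each idr walk suffices */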
+ rcu_read_lock();
idr_for_each_entry(&tconn->volumes, mdev, vnr)
role = max_role(role, mdev->state.role);
+ rcu_read_unlock();
return role;
}
struct drbd_conf *mdev;
int vnr;
+ rcu_read_lock();
idr_for_each_entry(&tconn->volumes, mdev, vnr)
peer = max_role(peer, mdev->state.peer);
+ rcu_read_unlock();
return peer;
}
struct drbd_conf *mdev;
int vnr;
+ rcu_read_lock();
idr_for_each_entry(&tconn->volumes, mdev, vnr)
ds = max_t(enum drbd_disk_state, ds, mdev->state.disk);
+ rcu_read_unlock();
return ds;
}
struct drbd_conf *mdev;
int vnr;
+ rcu_read_lock();
idr_for_each_entry(&tconn->volumes, mdev, vnr)
ds = min_t(enum drbd_disk_state, ds, mdev->state.disk);
+ rcu_read_unlock();
return ds;
}
struct drbd_conf *mdev;
int vnr;
+ rcu_read_lock();
idr_for_each_entry(&tconn->volumes, mdev, vnr)
ds = max_t(enum drbd_disk_state, ds, mdev->state.pdsk);
+ rcu_read_unlock();
return ds;
}
struct drbd_conf *mdev;
int vnr;
+ rcu_read_lock();
idr_for_each_entry(&tconn->volumes, mdev, vnr)
conn = min_t(enum drbd_conns, conn, mdev->state.conn);
+ rcu_read_unlock();
return conn;
}
/* case1: The outdate peer handler is successful: */
if (ns_max.pdsk <= D_OUTDATED) {
tl_clear(tconn);
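+ /* create the pending new current UUIDs (NEW_CUR_UUID) while walking
+  * the volumes under the rcu read lock */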
+ rcu_read_lock();
idr_for_each_entry(&tconn->volumes, mdev, vnr) {
if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
drbd_uuid_new_current(mdev);
clear_bit(NEW_CUR_UUID, &mdev->flags);
}
}
+ rcu_read_unlock();
conn_request_state(tconn,
(union drbd_state) { { .susp_fen = 1 } },
(union drbd_state) { { .susp_fen = 0 } },
}
/* case2: The connection was established again: */
if (ns_min.conn >= C_CONNECTED) {
+ rcu_read_lock();
idr_for_each_entry(&tconn->volumes, mdev, vnr)
clear_bit(NEW_CUR_UUID, &mdev->flags);
+ rcu_read_unlock();
spin_lock_irq(&tconn->req_lock);
_tl_restart(tconn, RESEND);
_conn_request_state(tconn,
struct drbd_conf *mdev;
int vnr, first_vol = 1;
+ rcu_read_lock();
idr_for_each_entry(&tconn->volumes, mdev, vnr) {
os = mdev->state;
if (cs.pdsk != os.pdsk)
flags &= ~CS_DC_PDSK;
}
+ rcu_read_unlock();
*pf |= CS_DC_MASK;
*pf &= flags;
struct drbd_conf *mdev;
int vnr;
+ rcu_read_lock();
idr_for_each_entry(&tconn->volumes, mdev, vnr) {
os = drbd_read_state(mdev);
ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
if (rv < SS_SUCCESS)
break;
}
+ rcu_read_unlock();
if (rv < SS_SUCCESS && flags & CS_VERBOSE)
print_st_err(mdev, os, ns, rv);
if (mask.conn == C_MASK)
tconn->cstate = val.conn;
+ rcu_read_lock();
idr_for_each_entry(&tconn->volumes, mdev, vnr) {
os = drbd_read_state(mdev);
ns = apply_mask_val(os, mask, val);
ns_min.disk = min_t(enum drbd_disk_state, ns.disk, ns_min.disk);
ns_min.pdsk = min_t(enum drbd_disk_state, ns.pdsk, ns_min.pdsk);
}
+ rcu_read_unlock();
ns_min.susp = ns_max.susp = tconn->susp;
ns_min.susp_nod = ns_max.susp_nod = tconn->susp_nod;