}
request = list_entry(pool->prp_req_list.next, struct ptlrpc_request,
- rq_list);
+ rq_list);
list_del_init(&request->rq_list);
spin_unlock(&pool->prp_lock);
LASSERTF((unsigned long)imp > 0x1000, "%p", imp);
LASSERT(imp != LP_POISON);
LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p",
- imp->imp_client);
+ imp->imp_client);
LASSERT(imp->imp_client != LP_POISON);
request->rq_import = class_import_get(imp);
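For reference, the list_entry() used here is the standard <linux/list.h> container_of() idiom: it recovers the ptlrpc_request that embeds the rq_list node. A minimal sketch of the underlying definitions:

	#define list_entry(ptr, type, member) \
		container_of(ptr, type, member)

	/* container_of(): step back from the embedded member to the
	 * enclosing structure by subtracting the member's offset. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))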
RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
list_for_each(tmp, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ list_entry(tmp, struct ptlrpc_request, rq_set_chain);
LASSERT(req->rq_phase == expected_phase);
n++;
list_for_each_safe(tmp, next, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ list_entry(tmp, struct ptlrpc_request, rq_set_chain);
list_del_init(&req->rq_set_chain);
LASSERT(req->rq_phase == expected_phase);
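The _safe iterator is required in this hunk because list_del_init() unlinks the node the loop is standing on; list_for_each_safe() pre-fetches the next pointer so deleting the current entry is harmless. From <linux/list.h>:

	#define list_for_each_safe(pos, n, head) \
		for (pos = (head)->next, n = pos->next; pos != (head); \
		     pos = n, n = pos->next)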
struct ptlrpc_request *last;
last = list_entry(imp->imp_replay_list.prev,
- struct ptlrpc_request,
- rq_replay_list);
+ struct ptlrpc_request,
+ rq_replay_list);
/*
* Requests with rq_replay stay on the list even if no
* commit is expected.
INIT_LIST_HEAD(&comp_reqs);
list_for_each_safe(tmp, next, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ list_entry(tmp, struct ptlrpc_request, rq_set_chain);
struct obd_import *imp = req->rq_import;
int unregistered = 0;
int rc = 0;
*/
list_del_init(&req->rq_list);
list_add_tail(&req->rq_list,
- &imp->
- imp_delayed_list);
+ &imp->imp_delayed_list);
spin_unlock(&imp->imp_lock);
continue;
}
if (status != 0) {
req->rq_status = status;
ptlrpc_rqphase_move(req,
- RQ_PHASE_INTERPRET);
+ RQ_PHASE_INTERPRET);
spin_unlock(&imp->imp_lock);
goto interpret;
}
list_del_init(&req->rq_list);
list_add_tail(&req->rq_list,
- &imp->imp_sending_list);
+ &imp->imp_sending_list);
spin_unlock(&imp->imp_lock);
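These requeue paths use list_del_init() rather than plain list_del() on purpose: the node is re-initialized to point at itself, so a later list_empty(&req->rq_list) test, or a repeated list_del_init(), stays well defined. Roughly:

	static inline void list_del_init(struct list_head *entry)
	{
		__list_del(entry->prev, entry->next);
		INIT_LIST_HEAD(entry);	/* entry->next = entry->prev = entry */
	}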
/* A timeout expired. See which reqs it applies to... */
list_for_each(tmp, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ list_entry(tmp, struct ptlrpc_request, rq_set_chain);
/* don't expire request waiting for context */
if (req->rq_wait_ctx)
list_for_each(tmp, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ list_entry(tmp, struct ptlrpc_request, rq_set_chain);
if (req->rq_phase != RQ_PHASE_RPC &&
req->rq_phase != RQ_PHASE_UNREGISTERING)
else
list_for_each(tmp, &set->set_requests) {
req = list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ rq_set_chain);
if (req->rq_phase == RQ_PHASE_NEW)
(void)ptlrpc_send_new_req(req);
}
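The RQ_PHASE_* checks above and below track a request's position in its life cycle; as a rough guide to the values appearing in this patch (usual forward progression, ignoring resend paths):

	/*
	 * RQ_PHASE_NEW            queued on a set, not yet sent
	 * RQ_PHASE_RPC            sent, waiting for the reply
	 * RQ_PHASE_BULK           bulk transfer in flight
	 * RQ_PHASE_INTERPRET      reply interpret callback running
	 * RQ_PHASE_COMPLETE       done, ready to leave the set
	 * RQ_PHASE_UNREGISTERING  waiting for reply-buffer MD unlink
	 */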
if (rc == 0 && atomic_read(&set->set_remaining) == 0) {
list_for_each(tmp, &set->set_requests) {
req = list_entry(tmp, struct ptlrpc_request,
- rq_set_chain);
+ rq_set_chain);
spin_lock(&req->rq_lock);
req->rq_invalid_rqset = 1;
spin_unlock(&req->rq_lock);
ptlrpc_request_addref(req);
list_for_each_prev(tmp, &imp->imp_replay_list) {
struct ptlrpc_request *iter =
- list_entry(tmp, struct ptlrpc_request,
- rq_replay_list);
+ list_entry(tmp, struct ptlrpc_request, rq_replay_list);
/*
* We may have duplicate transnos if we create and then
list_for_each_safe(pos, tmp, &set->set_requests) {
struct ptlrpc_request *req =
- list_entry(pos, struct ptlrpc_request,
- rq_set_chain);
+ list_entry(pos, struct ptlrpc_request, rq_set_chain);
spin_lock(&req->rq_lock);
if (req->rq_phase != RQ_PHASE_RPC) {
return;
if (!strncmp(*uuid_start + *uuid_len - strlen(UUID_STR),
- UUID_STR, strlen(UUID_STR)))
+ UUID_STR, strlen(UUID_STR)))
*uuid_len -= strlen(UUID_STR);
}
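This strncmp() asks whether the name ends in the UUID_STR suffix and, if so, trims *uuid_len to drop it. Assuming UUID_STR is "_UUID", for example:

	/* "lustre-OST0001_UUID", *uuid_len == 19
	 *   -> *uuid_len == 14, i.e. "lustre-OST0001" */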
rc = 0;
} else {
list_for_each_safe(tmp, n,
- &imp->imp_sending_list) {
+ &imp->imp_sending_list) {
req = list_entry(tmp,
- struct ptlrpc_request,
- rq_list);
+ struct ptlrpc_request,
+ rq_list);
DEBUG_REQ(D_ERROR, req,
"still on sending list");
}
list_for_each_safe(tmp, n,
- &imp->imp_delayed_list) {
+ &imp->imp_delayed_list) {
req = list_entry(tmp,
- struct ptlrpc_request,
- rq_list);
+ struct ptlrpc_request,
+ rq_list);
DEBUG_REQ(D_ERROR, req,
"still on delayed list");
}
*/
if ((conn->oic_last_attempt == 0) ||
cfs_time_beforeq_64(conn->oic_last_attempt,
- imp->imp_last_success_conn)) {
+ imp->imp_last_success_conn)) {
imp_conn = conn;
tried_all = 0;
break;
spin_lock(&imp->imp_lock);
list_del(&imp->imp_conn_current->oic_item);
- list_add(&imp->imp_conn_current->oic_item,
- &imp->imp_conn_list);
+ list_add(&imp->imp_conn_current->oic_item, &imp->imp_conn_list);
imp->imp_last_success_conn =
imp->imp_conn_current->oic_last_attempt;
*/
class_import_get(imp);
task = kthread_run(ptlrpc_invalidate_import_thread, imp,
- "ll_imp_inval");
+ "ll_imp_inval");
if (IS_ERR(task)) {
class_import_put(imp);
CERROR("error starting invalidate thread: %d\n", rc);
struct req_msg_field RMF_TGTUUID =
DEFINE_MSGF("tgtuuid", RMF_F_STRING, sizeof(struct obd_uuid) - 1, NULL,
- NULL);
+ NULL);
EXPORT_SYMBOL(RMF_TGTUUID);
struct req_msg_field RMF_CLUUID =
DEFINE_MSGF("cluuid", RMF_F_STRING, sizeof(struct obd_uuid) - 1, NULL,
- NULL);
+ NULL);
EXPORT_SYMBOL(RMF_CLUUID);
struct req_msg_field RMF_STRING =
struct req_msg_field RMF_EAVALS_LENS =
DEFINE_MSGF("eavals_lens", RMF_F_STRUCT_ARRAY, sizeof(__u32),
- lustre_swab_generic_32s, NULL);
+ lustre_swab_generic_32s, NULL);
EXPORT_SYMBOL(RMF_EAVALS_LENS);
struct req_msg_field RMF_OBD_ID =
struct req_format RQF_MGS_TARGET_REG =
DEFINE_REQ_FMT0("MGS_TARGET_REG", mgs_target_info_only,
- mgs_target_info_only);
+ mgs_target_info_only);
EXPORT_SYMBOL(RQF_MGS_TARGET_REG);
struct req_format RQF_MGS_SET_INFO =
DEFINE_REQ_FMT0("MGS_SET_INFO", mgs_set_info,
- mgs_set_info);
+ mgs_set_info);
EXPORT_SYMBOL(RQF_MGS_SET_INFO);
struct req_format RQF_MGS_CONFIG_READ =
DEFINE_REQ_FMT0("MGS_CONFIG_READ", mgs_config_read_client,
- mgs_config_read_server);
+ mgs_config_read_server);
EXPORT_SYMBOL(RQF_MGS_CONFIG_READ);
struct req_format RQF_SEQ_QUERY =
struct req_format RQF_OST_SET_GRANT_INFO =
DEFINE_REQ_FMT0("OST_SET_GRANT_INFO", ost_grant_shrink_client,
- ost_body_only);
+ ost_body_only);
EXPORT_SYMBOL(RQF_OST_SET_GRANT_INFO);
struct req_format RQF_OST_GET_INFO_GENERIC =
DEFINE_REQ_FMT0("OST_GET_INFO", ost_get_info_generic_client,
- ost_get_info_generic_server);
+ ost_get_info_generic_server);
EXPORT_SYMBOL(RQF_OST_GET_INFO_GENERIC);
struct req_format RQF_OST_GET_INFO_LAST_ID =
DEFINE_REQ_FMT0("OST_GET_INFO_LAST_ID", ost_get_info_generic_client,
- ost_get_last_id_server);
+ ost_get_last_id_server);
EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_ID);
struct req_format RQF_OST_GET_INFO_LAST_FID =
DEFINE_REQ_FMT0("OST_GET_INFO_LAST_FID", obd_set_info_client,
- ost_get_last_fid_server);
+ ost_get_last_fid_server);
EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_FID);
struct req_format RQF_OST_SET_INFO_LAST_FID =
DEFINE_REQ_FMT0("OST_SET_INFO_LAST_FID", obd_set_info_client,
- empty);
+ empty);
EXPORT_SYMBOL(RQF_OST_SET_INFO_LAST_FID);
struct req_format RQF_OST_GET_INFO_FIEMAP =
DEFINE_REQ_FMT0("OST_GET_INFO_FIEMAP", ost_get_fiemap_client,
- ost_get_fiemap_server);
+ ost_get_fiemap_server);
EXPORT_SYMBOL(RQF_OST_GET_INFO_FIEMAP);
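For readers unfamiliar with these tables: each RQF_* request format pairs an array of RMF_* field descriptors for the client-visible request message with one for the reply. A hypothetical format following the same pattern (example_client and RQF_EXAMPLE are invented for illustration):

	static const struct req_msg_field *example_client[] = {
		&RMF_PTLRPC_BODY,
		&RMF_TGTUUID,
	};

	struct req_format RQF_EXAMPLE =
		DEFINE_REQ_FMT0("EXAMPLE", example_client, empty);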
#if !defined(__REQ_LAYOUT_USER__)
pill->rc_area[RCL_SERVER], NULL);
if (rc != 0) {
DEBUG_REQ(D_ERROR, pill->rc_req,
- "Cannot pack %d fields in format `%s': ",
- count, fmt->rf_name);
+ "Cannot pack %d fields in format `%s': ",
+ count, fmt->rf_name);
}
return rc;
}
int offset;
offset = field->rmf_offset[pill->rc_fmt->rf_idx][loc];
- LASSERTF(offset > 0, "%s:%s, off=%d, loc=%d\n",
- pill->rc_fmt->rf_name,
- field->rmf_name, offset, loc);
+ LASSERTF(offset > 0, "%s:%s, off=%d, loc=%d\n", pill->rc_fmt->rf_name,
+ field->rmf_name, offset, loc);
offset--;
LASSERT(0 <= offset && offset < REQ_MAX_FIELD_NR);
msg = __req_msg(pill, loc);
len = lustre_msg_buflen(msg, offset);
LASSERTF(newlen <= len, "%s:%s, oldlen=%d, newlen=%d\n",
- fmt->rf_name, field->rmf_name, len, newlen);
+ fmt->rf_name, field->rmf_name, len, newlen);
if (loc == RCL_CLIENT)
pill->rc_req->rq_reqlen = lustre_shrink_msg(msg, offset, newlen,
* \param[out] info Holds returned status information
*/
static void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_pol_info *info)
+ struct ptlrpc_nrs_pol_info *info)
{
assert_spin_locked(&policy->pol_nrs->nrs_lock);
pol_idx = 0;
- list_for_each_entry(policy, &nrs->nrs_policy_list,
- pol_list) {
+ list_for_each_entry(policy, &nrs->nrs_policy_list, pol_list) {
LASSERT(pol_idx < num_pols);
nrs_policy_get_info_locked(policy, &tmp);
* active: 0
*/
seq_printf(m, "%s\n",
- !hp ? "\nregular_requests:" : "high_priority_requests:");
+ !hp ? "\nregular_requests:" : "high_priority_requests:");
for (pol_idx = 0; pol_idx < num_pols; pol_idx++) {
seq_printf(m, " - name: %s\n"
if (AT_OFF) {
seq_printf(m, "adaptive timeouts off, using obd_timeout %u\n",
- obd_timeout);
+ obd_timeout);
return 0;
}
s2dhms(&ts, ktime_get_real_seconds() - worstt);
seq_printf(m, "%10s : cur %3u worst %3u (at %lld, "
- DHMS_FMT" ago) ", "service",
- cur, worst, (s64)worstt, DHMS_VARS(&ts));
+ DHMS_FMT " ago) ", "service",
+ cur, worst, (s64)worstt, DHMS_VARS(&ts));
lprocfs_at_hist_helper(m, &svcpt->scp_at_estimate);
}
EXPORT_SYMBOL(lprocfs_rd_pinger_recov);
int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer,
- size_t count, loff_t *off)
+ size_t count, loff_t *off)
{
struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
struct client_obd *cli = &obd->u.cli;
*/
LASSERT(!request->rq_receiving_reply);
LASSERT(!((lustre_msg_get_flags(request->rq_reqmsg) & MSG_REPLAY) &&
- (request->rq_import->imp_state == LUSTRE_IMP_FULL)));
+ (request->rq_import->imp_state == LUSTRE_IMP_FULL)));
if (unlikely(obd && obd->obd_fail)) {
CDEBUG(D_HA, "muting rpc for failed imp obd %s\n",
- obd->obd_name);
+ obd->obd_name);
/* this prevents us from waiting in ptlrpc_queue_wait */
spin_lock(&request->rq_lock);
request->rq_err = 1;
policy->pol_ref--;
if (unlikely(policy->pol_ref == 0 &&
- policy->pol_state == NRS_POL_STATE_STOPPING))
+ policy->pol_state == NRS_POL_STATE_STOPPING))
nrs_policy_stop0(policy);
}
* Find and return a policy by name.
*/
static struct ptlrpc_nrs_policy *nrs_policy_find_locked(struct ptlrpc_nrs *nrs,
- char *name)
+ char *name)
{
struct ptlrpc_nrs_policy *tmp;
*/
if (unlikely(list_empty(&policy->pol_list_queued)))
list_add_tail(&policy->pol_list_queued,
- &policy->pol_nrs->nrs_policy_queued);
+ &policy->pol_nrs->nrs_policy_queued);
}
/**
nrs = nrs_svcpt2nrs(svcpt, hp);
nrs->nrs_stopping = 1;
- list_for_each_entry_safe(policy, tmp, &nrs->nrs_policy_list,
- pol_list) {
+ list_for_each_entry_safe(policy, tmp, &nrs->nrs_policy_list, pol_list) {
rc = nrs_policy_unregister(nrs, policy->pol_desc->pd_name);
LASSERT(rc == 0);
}
LASSERT(conf->nc_ops);
LASSERT(conf->nc_compat);
LASSERT(ergo(conf->nc_compat == nrs_policy_compat_one,
- conf->nc_compat_svc_name));
+ conf->nc_compat_svc_name));
LASSERT(ergo((conf->nc_flags & PTLRPC_NRS_FL_REG_EXTERN) != 0,
conf->nc_owner));
policy->pol_nrs->nrs_req_queued);
list_move_tail(&policy->pol_list_queued,
- &policy->pol_nrs->nrs_policy_queued);
+ &policy->pol_nrs->nrs_policy_queued);
}
}
* Always try to drain requests from all NRS policies even if they are
* inactive, because the user can change policy status at runtime.
*/
- list_for_each_entry(policy, &nrs->nrs_policy_queued,
- pol_list_queued) {
+ list_for_each_entry(policy, &nrs->nrs_policy_queued, pol_list_queued) {
nrq = nrs_request_get(policy, peek, force);
if (nrq) {
if (likely(!peek)) {
struct ptlrpc_nrs_pol_desc *desc;
struct ptlrpc_nrs_pol_desc *tmp;
- list_for_each_entry_safe(desc, tmp, &nrs_core.nrs_policies,
- pd_list) {
+ list_for_each_entry_safe(desc, tmp, &nrs_core.nrs_policies, pd_list) {
list_del_init(&desc->pd_list);
kfree(desc);
}
nrq = unlikely(list_empty(&head->fh_list)) ? NULL :
list_entry(head->fh_list.next, struct ptlrpc_nrs_request,
- nr_u.fifo.fr_list);
+ nr_u.fifo.fr_list);
if (likely(!peek && nrq)) {
struct ptlrpc_request *req = container_of(nrq,
}
rs = list_entry(svcpt->scp_rep_idle.next,
- struct ptlrpc_reply_state, rs_list);
+ struct ptlrpc_reply_state, rs_list);
list_del(&rs->rs_list);
spin_unlock(&svcpt->scp_rep_lock);
CDEBUG(D_OTHER, "\tlmm_stripe_size: %#x\n", lum->lmm_stripe_size);
CDEBUG(D_OTHER, "\tlmm_stripe_count: %#x\n", lum->lmm_stripe_count);
CDEBUG(D_OTHER, "\tlmm_stripe_offset/lmm_layout_gen: %#x\n",
- lum->lmm_stripe_offset);
+ lum->lmm_stripe_offset);
}
static void lustre_swab_lmm_oi(struct ost_id *oi)
list_for_each(iter, &pinger_imports) {
struct obd_import *imp =
list_entry(iter, struct obd_import,
- imp_pinger_chain);
+ imp_pinger_chain);
ptlrpc_pinger_process_import(imp, this_ping);
/* obd_timeout might have changed */
* be called when timeout happens.
*/
static struct timeout_item *ptlrpc_new_timeout(int time,
- enum timeout_event event, timeout_cb_t cb, void *data)
+ enum timeout_event event,
+ timeout_cb_t cb, void *data)
{
struct timeout_item *ti;
if (likely(!list_empty(&src->set_new_requests))) {
list_for_each_safe(pos, tmp, &src->set_new_requests) {
req = list_entry(pos, struct ptlrpc_request,
- rq_set_chain);
+ rq_set_chain);
req->rq_set = des;
}
- list_splice_init(&src->set_new_requests,
- &des->set_requests);
+ list_splice_init(&src->set_new_requests, &des->set_requests);
rc = atomic_read(&src->set_new_count);
atomic_add(rc, &des->set_remaining);
atomic_set(&src->set_new_count, 0);
spin_lock(&set->set_new_req_lock);
if (likely(!list_empty(&set->set_new_requests))) {
list_splice_init(&set->set_new_requests,
- &set->set_requests);
+ &set->set_requests);
atomic_add(atomic_read(&set->set_new_count),
- &set->set_remaining);
+ &set->set_remaining);
atomic_set(&set->set_new_count, 0);
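list_splice_init() transplants the whole set_new_requests list onto set_requests with O(1) pointer surgery and re-initializes the source head, so the counter transfer just above keeps set_remaining consistent. Net effect:

	/* before: set_new_requests = [n1, n2], set_requests = [o1]
	 * after:  set_new_requests = [],      set_requests = [n1, n2, o1]
	 * (list_splice_init() inserts at the head of the destination) */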
/*
* Need to calculate its timeout.
/* Replay all the committed open requests on committed_list first */
if (!list_empty(&imp->imp_committed_list)) {
tmp = imp->imp_committed_list.prev;
- req = list_entry(tmp, struct ptlrpc_request,
- rq_replay_list);
+ req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
/* The last request on committed_list hasn't been replayed */
if (req->rq_transno > last_transno) {
return -1;
}
- list_for_each_entry_safe(req, next, &imp->imp_sending_list,
- rq_list) {
+ list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) {
LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON,
"req %p bad\n", req);
LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
- req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
+ req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
req_off_ctx_list(req, ctx);
sptlrpc_req_replace_dead_ctx(req);
ctx = req->rq_cli_ctx;
msglen + sizeof(struct ptlrpc_reply_state)) {
/* Just return failure if the size is too big */
CERROR("size of message is too big (%zd), %d allowed",
- msglen + sizeof(struct ptlrpc_reply_state),
- svcpt->scp_service->srv_max_reply_size);
+ msglen + sizeof(struct ptlrpc_reply_state),
+ svcpt->scp_service->srv_max_reply_size);
return -ENOMEM;
}
sptlrpc_rule_set_free(&conf->sc_rset);
list_for_each_entry_safe(conf_tgt, conf_tgt_next,
- &conf->sc_tgts, sct_list) {
+ &conf->sc_tgts, sct_list) {
sptlrpc_rule_set_free(&conf_tgt->sct_rset);
list_del(&conf_tgt->sct_list);
kfree(conf_tgt);
while (!list_empty(&sec_gc_ctx_list)) {
ctx = list_entry(sec_gc_ctx_list.next,
- struct ptlrpc_cli_ctx, cc_gc_chain);
+ struct ptlrpc_cli_ctx, cc_gc_chain);
list_del_init(&ctx->cc_gc_chain);
spin_unlock(&sec_gc_ctx_list_lock);
if (unlikely(sec->ps_gc_next == 0)) {
CDEBUG(D_SEC, "sec %p(%s) has 0 gc time\n",
- sec, sec->ps_policy->sp_name);
+ sec, sec->ps_policy->sp_name);
return;
}
}
rqbd = list_entry(svcpt->scp_rqbd_idle.next,
- struct ptlrpc_request_buffer_desc,
- rqbd_list);
+ struct ptlrpc_request_buffer_desc,
+ rqbd_list);
list_del(&rqbd->rqbd_list);
/* assume we will post successfully */
*/
while (svcpt->scp_hist_nrqbds > svc->srv_hist_nrqbds_cpt_max) {
rqbd = list_entry(svcpt->scp_hist_rqbds.next,
- struct ptlrpc_request_buffer_desc,
- rqbd_list);
+ struct ptlrpc_request_buffer_desc,
+ rqbd_list);
list_del(&rqbd->rqbd_list);
svcpt->scp_hist_nrqbds--;
*/
list_for_each(tmp, &rqbd->rqbd_reqs) {
req = list_entry(tmp, struct ptlrpc_request,
- rq_list);
+ rq_list);
/* Track the highest culled req seq */
if (req->rq_history_seq >
svcpt->scp_hist_seq_culled) {
list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
req = list_entry(rqbd->rqbd_reqs.next,
- struct ptlrpc_request,
- rq_list);
+ struct ptlrpc_request,
+ rq_list);
list_del(&req->rq_list);
ptlrpc_server_free_request(req);
}
*/
LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) ==
0);
- list_add_tail(&rqbd->rqbd_list,
- &svcpt->scp_rqbd_idle);
+ list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
}
spin_unlock(&svcpt->scp_lock);
/* latest rpcs will have the latest deadlines in the list,
* so search backward.
*/
- list_for_each_entry_reverse(rq,
- &array->paa_reqs_array[index],
- rq_timed_list) {
+ list_for_each_entry_reverse(rq, &array->paa_reqs_array[index],
+ rq_timed_list) {
if (req->rq_deadline >= rq->rq_deadline) {
list_add(&req->rq_timed_list,
- &rq->rq_timed_list);
+ &rq->rq_timed_list);
break;
}
}
/* Add the request at the head of the list */
if (list_empty(&req->rq_timed_list))
- list_add(&req->rq_timed_list,
- &array->paa_reqs_array[index]);
+ list_add(&req->rq_timed_list, &array->paa_reqs_array[index]);
spin_lock(&req->rq_lock);
req->rq_at_linked = 1;
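The reverse walk above keeps the paa_reqs_array bucket sorted by rq_deadline with O(1) work in the common case, since new requests usually carry the latest deadline. The same pattern in generic form (structure and field names invented):

	list_for_each_entry_reverse(cur, head, node) {
		if (nreq->deadline >= cur->deadline) {
			list_add(&nreq->node, &cur->node); /* insert after cur */
			return;
		}
	}
	list_add(&nreq->node, head); /* earliest deadline: new list head */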
count = array->paa_count;
while (count > 0) {
count -= array->paa_reqs_count[index];
- list_for_each_entry_safe(rq, n,
- &array->paa_reqs_array[index],
- rq_timed_list) {
+ list_for_each_entry_safe(rq, n, &array->paa_reqs_array[index],
+ rq_timed_list) {
if (rq->rq_deadline > now + at_early_margin) {
/* update the earliest deadline */
if (deadline == -1 ||
*/
while (!list_empty(&work_list)) {
rq = list_entry(work_list.next, struct ptlrpc_request,
- rq_timed_list);
+ rq_timed_list);
list_del_init(&rq->rq_timed_list);
if (ptlrpc_at_send_early_reply(rq) == 0)
}
spin_lock_bh(&req->rq_export->exp_rpc_lock);
- list_add(&req->rq_exp_list,
- &req->rq_export->exp_hp_rpcs);
+ list_add(&req->rq_exp_list, &req->rq_export->exp_hp_rpcs);
spin_unlock_bh(&req->rq_export->exp_rpc_lock);
}
}
req = list_entry(svcpt->scp_req_incoming.next,
- struct ptlrpc_request, rq_list);
+ struct ptlrpc_request, rq_list);
list_del_init(&req->rq_list);
svcpt->scp_nreqs_incoming--;
/* Consider this still a "queued" request as far as stats are
continue;
CERROR("Failed to post rqbd for %s on CPT %d: %d\n",
- svc->srv_name, svcpt->scp_cpt, rc);
+ svc->srv_name, svcpt->scp_cpt, rc);
goto out_srv_fini;
}
while (!list_empty(&replies)) {
struct ptlrpc_reply_state *rs;
- rs = list_entry(replies.prev,
- struct ptlrpc_reply_state,
- rs_list);
+ rs = list_entry(replies.prev, struct ptlrpc_reply_state,
+ rs_list);
list_del_init(&rs->rs_list);
ptlrpc_handle_rs(rs);
}
if (!hrp->hrp_thrs)
continue; /* uninitialized */
wait_event(ptlrpc_hr.hr_waitq,
- atomic_read(&hrp->hrp_nstopped) ==
- atomic_read(&hrp->hrp_nstarted));
+ atomic_read(&hrp->hrp_nstopped) ==
+ atomic_read(&hrp->hrp_nstarted));
}
}
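The hrp_nstarted/hrp_nstopped counters form a simple start/stop barrier: each reply-handler thread bumps hrp_nstarted as it comes up and hrp_nstopped on its way out, waking ptlrpc_hr.hr_waitq both times, so this loop can sleep until every thread that started has also stopped. The thread side presumably reduces to something like this (hypothetical skeleton, not the exact ptlrpc_hr_main()):

	static int example_hr_main(void *arg)
	{
		struct ptlrpc_hr_partition *hrp = arg;

		atomic_inc(&hrp->hrp_nstarted);
		wake_up(&ptlrpc_hr.hr_waitq);

		/* ... handle replies until told to stop ... */

		atomic_inc(&hrp->hrp_nstopped);
		wake_up(&ptlrpc_hr.hr_waitq);
		return 0;
	}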
struct task_struct *task;
task = kthread_run(ptlrpc_hr_main,
- &hrp->hrp_thrs[j],
- "ptlrpc_hr%02d_%03d",
- hrp->hrp_cpt,
- hrt->hrt_id);
+ &hrp->hrp_thrs[j],
+ "ptlrpc_hr%02d_%03d",
+ hrp->hrp_cpt, hrt->hrt_id);
if (IS_ERR(task)) {
rc = PTR_ERR(task);
break;
}
}
wait_event(ptlrpc_hr.hr_waitq,
- atomic_read(&hrp->hrp_nstarted) == j);
+ atomic_read(&hrp->hrp_nstarted) == j);
if (rc < 0) {
CERROR("cannot start reply handler thread %d:%d: rc = %d\n",
while (!list_empty(&svcpt->scp_threads)) {
thread = list_entry(svcpt->scp_threads.next,
- struct ptlrpc_thread, t_link);
+ struct ptlrpc_thread, t_link);
if (thread_is_stopped(thread)) {
list_del(&thread->t_link);
list_add(&thread->t_link, &zombie);
spin_lock(&svcpt->scp_rep_lock);
while (!list_empty(&svcpt->scp_rep_active)) {
rs = list_entry(svcpt->scp_rep_active.next,
- struct ptlrpc_reply_state, rs_list);
+ struct ptlrpc_reply_state, rs_list);
spin_lock(&rs->rs_lock);
ptlrpc_schedule_difficult_reply(rs);
spin_unlock(&rs->rs_lock);
*/
while (!list_empty(&svcpt->scp_req_incoming)) {
req = list_entry(svcpt->scp_req_incoming.next,
- struct ptlrpc_request, rq_list);
+ struct ptlrpc_request, rq_list);
list_del(&req->rq_list);
svcpt->scp_nreqs_incoming--;
while (!list_empty(&svcpt->scp_rqbd_idle)) {
rqbd = list_entry(svcpt->scp_rqbd_idle.next,
- struct ptlrpc_request_buffer_desc,
- rqbd_list);
+ struct ptlrpc_request_buffer_desc,
+ rqbd_list);
ptlrpc_free_rqbd(rqbd);
}
ptlrpc_wait_replies(svcpt);
while (!list_empty(&svcpt->scp_rep_idle)) {
rs = list_entry(svcpt->scp_rep_idle.next,
- struct ptlrpc_reply_state,
- rs_list);
+ struct ptlrpc_reply_state,
+ rs_list);
list_del(&rs->rs_list);
kvfree(rs);
}