static const char *nfsd4_op_name(unsigned opnum);
/*
+ * This is a replay of a compound for which no cache entry pages
+ * were used. Encode the sequence operation; if cachethis is FALSE,
+ * encode the retry_uncached_rep error on the next operation.
+ */
+static __be32
+nfsd4_enc_uncached_replay(struct nfsd4_compoundargs *args,
+ struct nfsd4_compoundres *resp)
+{
+ struct nfsd4_op *op;
+
+ dprintk("--> %s resp->opcnt %d ce_cachethis %u\n", __func__,
+ resp->opcnt, resp->cstate.slot->sl_cache_entry.ce_cachethis);
+
+ /* Encode the replayed sequence operation */
+ BUG_ON(resp->opcnt != 1);
+ op = &args->ops[resp->opcnt - 1];
+ nfsd4_encode_operation(resp, op);
+
+ /* Return nfserr_retry_uncached_rep in the next operation. */
+ if (resp->cstate.slot->sl_cache_entry.ce_cachethis == 0) {
+ op = &args->ops[resp->opcnt++];
+ op->status = nfserr_retry_uncached_rep;
+ nfsd4_encode_operation(resp, op);
+ }
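+ /* Return the status of the last operation encoded: the SEQUENCE op, or the uncached-reply error */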
+ return op->status;
+}
+
+/*
* Enforce NFSv4.1 COMPOUND ordering rules.
*
* TODO:
dprintk("nfsv4 compound op #%d/%d: %d (%s)\n",
resp->opcnt, args->opcnt, op->opnum,
nfsd4_op_name(op->opnum));
-
/*
* The XDR decode routines may have pre-set op->status;
* for example, if there is a miscellaneous XDR error
/* Only from SEQUENCE or CREATE_SESSION */
if (resp->cstate.status == nfserr_replay_cache) {
dprintk("%s NFS4.1 replay from cache\n", __func__);
- status = op->status;
+ if (nfsd4_not_cached(resp))
+ status = nfsd4_enc_uncached_replay(args, resp);
+ else
+ status = op->status;
goto out;
}
if (op->status == nfserr_replay_me) {
/* Don't cache a failed OP_SEQUENCE. */
if (resp->opcnt == 1 && op->opnum == OP_SEQUENCE && resp->cstate.status)
return;
+
nfsd4_release_respages(entry->ce_respages, entry->ce_resused);
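+ /* Cache the op count and compound status in the slot even when no reply pages are kept */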
+ entry->ce_opcnt = resp->opcnt;
+ entry->ce_status = resp->cstate.status;
+
+ /*
+ * Don't need a page to cache just the sequence operation - the slot
+ * does this for us!
+ */
+
+ if (nfsd4_not_cached(resp)) {
+ entry->ce_resused = 0;
+ entry->ce_rpchdrlen = 0;
+ dprintk("%s Just cache SEQUENCE. ce_cachethis %d\n", __func__,
+ resp->cstate.slot->sl_cache_entry.ce_cachethis);
+ return;
+ }
entry->ce_resused = rqstp->rq_resused;
if (entry->ce_resused > NFSD_PAGES_PER_SLOT + 1)
entry->ce_resused = NFSD_PAGES_PER_SLOT + 1;
nfsd4_copy_pages(entry->ce_respages, rqstp->rq_respages,
entry->ce_resused);
- entry->ce_status = resp->cstate.status;
entry->ce_datav.iov_base = resp->cstate.statp;
entry->ce_datav.iov_len = resv->iov_len - ((char *)resp->cstate.statp -
(char *)page_address(rqstp->rq_respages[0]));
- entry->ce_opcnt = resp->opcnt;
/* Current request rpc header length*/
entry->ce_rpchdrlen = (char *)resp->cstate.statp -
(char *)page_address(rqstp->rq_respages[0]);
* cached page. Replace any further replay pages from the cache.
*/
__be32
-nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp)
+nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
+ struct nfsd4_sequence *seq)
{
struct nfsd4_cache_entry *entry = &resp->cstate.slot->sl_cache_entry;
__be32 status;
dprintk("--> %s entry %p\n", __func__, entry);
+ /*
+ * If this is just the sequence operation, no page was kept
+ * in the cache entry: the reply can be rebuilt from the slot
+ * info in struct nfsd4_sequence, which nfsd4_sequence() has
+ * already checked against the slot.
+ *
+ * This occurs when seq->cachethis is FALSE, or when the client
+ * session inactivity timer fires and a solo sequence operation
+ * is sent (lease renewal).
+ */
+ if (seq && nfsd4_not_cached(resp)) {
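+ /* Rebuild the SEQUENCE reply from the slot; maxslots carries the session's fore channel slot count */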
+ seq->maxslots = resp->cstate.session->se_fnumslots;
+ return nfs_ok;
+ }
if (!nfsd41_copy_replay_data(resp, entry)) {
/*
cstate->slot = slot;
cstate->status = status;
/* Return the cached reply status */
- status = nfsd4_replay_cache_entry(resp);
+ status = nfsd4_replay_cache_entry(resp, NULL);
goto out;
} else if (cr_ses->seqid != conf->cl_slot.sl_seqid + 1) {
status = nfserr_seq_misordered;
slot->sl_inuse = true;
cstate->slot = slot;
+ /* Ensure a page is used for the cache */
+ slot->sl_cache_entry.ce_cachethis = 1;
out:
nfs4_unlock_state();
dprintk("%s returns %d\n", __func__, ntohl(status));
cstate->slot = slot;
cstate->session = session;
/* Return the cached reply status and set cstate->status
- * for nfsd4_svc_encode_compoundres processing*/
- status = nfsd4_replay_cache_entry(resp);
+ * for nfsd4_svc_encode_compoundres processing */
+ status = nfsd4_replay_cache_entry(resp, seq);
cstate->status = nfserr_replay_cache;
goto replay_cache;
}
/* Success! bump slot seqid */
slot->sl_inuse = true;
slot->sl_seqid = seq->seqid;
+ slot->sl_cache_entry.ce_cachethis = seq->cachethis;
+ /* Always set the cache entry cachethis for solo sequence */
+ if (nfsd4_is_solo_sequence(resp))
+ slot->sl_cache_entry.ce_cachethis = 1;
cstate->slot = slot;
cstate->session = session;
struct nfsd4_compound_state cstate;
};
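+/* A solo sequence is a compound with a single operation, e.g. the lone SEQUENCE sent for lease renewal */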
+static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp)
+{
+ struct nfsd4_compoundargs *args = resp->rqstp->rq_argp;
+ return args->opcnt == 1;
+}
+
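+/* No reply pages are cached: the client did not ask to cache this reply, or it is a solo SEQUENCE */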
+static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp)
+{
+ return !resp->cstate.slot->sl_cache_entry.ce_cachethis ||
+ nfsd4_is_solo_sequence(resp);
+}
+
#define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs)
static inline void
struct nfsd4_compound_state *,
struct nfsd4_setclientid_confirm *setclientid_confirm);
extern void nfsd4_store_cache_entry(struct nfsd4_compoundres *resp);
-extern __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp);
+extern __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
+ struct nfsd4_sequence *seq);
extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp,
struct nfsd4_compound_state *,
struct nfsd4_exchange_id *);