Fix typos in comments within lustre/include.
Signed-off-by: Masanari Iida <standby24x7@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
/**
* Update Lock Value Block Operations (LVBO) on a resource taking into account
- * data from reqest \a r
+ * data from request \a r
*/
static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
struct ptlrpc_request *r, int increase)
#define ldlm_clear_test_lock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 19)
/**
- * Immediatelly cancel such locks when they block some other locks. Send
+ * Immediately cancel such locks when they block some other locks. Send
* cancel notification to original lock holder, but expect no reply. This
* is for clients (like liblustre) that cannot be expected to reliably
- * response to blocking AST. */
+ * respond to blocking AST. */
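For reference, lock-flag helpers like ldlm_clear_test_lock above follow a
set/clear/test macro pattern over a 64-bit flag word. A minimal sketch
(illustrative names; the real macros live in the LDLM flag headers):

#define LDLM_SET_FLAG_SKETCH(_l, _b)   ((_l)->l_flags |= (_b))
#define LDLM_CLEAR_FLAG_SKETCH(_l, _b) ((_l)->l_flags &= ~(_b))
#define LDLM_TEST_FLAG_SKETCH(_l, _b)  (((_l)->l_flags & (_b)) != 0)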
/**
* A lock contributes to the known minimum size (KMS) calculation until it
- * has finished the part of its cancelation that performs write back on its
+ * has finished the part of its cancellation that performs write back on its
* dirty pages. It can remain on the granted list during this whole time.
* Threads racing to update the KMS after performing their writeback need
* to know to exclude each other's locks from the calculation as they walk
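A minimal sketch of the exclusion the comment describes (simplified types
and names, not the Lustre implementation): each thread walks the granted
list and skips its own lock when computing a KMS candidate.

#include <linux/list.h>
#include <linux/types.h>

struct kms_lock_sketch {
	struct list_head kl_link;	/* position on the granted list */
	__u64		 kl_end;	/* last byte covered by the lock */
};

static __u64 kms_candidate_sketch(struct list_head *granted,
				  struct kms_lock_sketch *own)
{
	struct kms_lock_sketch *lck;
	__u64 kms = 0;

	list_for_each_entry(lck, granted, kl_link) {
		if (lck == own)			/* exclude our own lock */
			continue;
		if (lck->kl_end + 1 > kms)	/* KMS is one past the end */
			kms = lck->kl_end + 1;
	}
	return kms;
}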
struct obd_import *exp_imp_reverse;
struct nid_stat *exp_nid_stats;
struct lprocfs_stats *exp_md_stats;
- /** Active connetion */
+ /** Active connection */
struct ptlrpc_connection *exp_connection;
- /** Connection count value from last succesful reconnect rpc */
+ /** Connection count value from last successful reconnect rpc */
__u32 exp_conn_cnt;
/** Hash list of all ldlm locks granted on this export */
struct cfs_hash *exp_lock_hash;
struct mutex lcs_mutex;
/*
- * Range of allowed for allocation sequeces. When using lu_client_seq on
+ * Range of sequences allowed for allocation. When using lu_client_seq on
* clients, this contains meta-sequence range. And for servers this
* contains super-sequence range.
*/
/* LUSTRE_SEQ_SERVER or LUSTRE_SEQ_CONTROLLER */
enum lu_mgr_type lss_type;
- /* Client interafce to request controller */
+ /* Client interface to request controller */
struct lu_client_seq *lss_cli;
/* Mutex for protecting allocation */
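A hedged sketch of the range idea above (assumed field names; the real
type is the Lustre sequence-range structure):

#include <linux/types.h>

struct seq_range_sketch {
	__u64 sr_start;	/* first sequence available for allocation */
	__u64 sr_end;	/* one past the last available sequence */
};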
* finally, when we replace ost_id with FID in data stack.
*
* Currently, resid from the old client, whose res[0] = object_id,
- * res[1] = object_seq, is just oposite with Metatdata
+ * res[1] = object_seq, is just the opposite of the Metadata
* resid, where, res[0] = fid->f_seq, res[1] = fid->f_oid.
- * To unifiy the resid identification, we will reverse the data
- * resid to keep it same with Metadata resid, i.e.
+ * To unify the resid identification, we will reverse the data
+ * resid to keep it the same as the Metadata resid, i.e.
*
* For resid from the old client,
* res[0] = objid, res[1] = 0, still keep the original order,
- * for compatiblity.
+ * for compatibility.
*
* For new resid
* res will be built from normal FID directly, i.e. res[0] = f_seq,
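The mapping described above, as a hedged sketch (field layout follows the
comment; the real helpers are the fid/ostid functions in the Lustre
headers):

#include <linux/types.h>

struct fid_sketch {
	__u64 f_seq;	/* sequence number */
	__u32 f_oid;	/* object id within the sequence */
	__u32 f_ver;	/* version */
};

/* new-style resid: sequence first, object id second, matching metadata */
static inline void fid_to_resid_sketch(const struct fid_sketch *fid,
				       __u64 res[2])
{
	res[0] = fid->f_seq;
	res[1] = fid->f_oid;
}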
lnet_handle_md_t rs_md_h;
atomic_t rs_refcount;
- /** Context for the sevice thread */
+ /** Context for the service thread */
struct ptlrpc_svc_ctx *rs_svc_ctx;
/** Reply buffer (actually sent to the client), encoded if needed */
struct lustre_msg *rs_repbuf; /* wrapper */
spinlock_t prp_lock;
/** list of ptlrpc_request structs */
struct list_head prp_req_list;
- /** Maximum message size that would fit into a rquest from this pool */
+ /** Maximum message size that would fit into a request from this pool */
int prp_rq_size;
/** Function to allocate more requests for this pool */
void (*prp_populate)(struct ptlrpc_request_pool *, int);
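A minimal sketch of the pool mechanics described above (simplified types,
not the ptlrpc definitions): a spinlock-protected free list that a
populate callback refills on demand.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct req_sketch {
	struct list_head rs_link;
};

struct req_pool_sketch {
	spinlock_t	 rp_lock;
	struct list_head rp_free;	/* free requests */
	int		 rp_size;	/* max message size per request */
	void		(*rp_populate)(struct req_pool_sketch *, int);
};

/* populate callback: allocate `num` requests onto the free list */
static void pool_populate_sketch(struct req_pool_sketch *pool, int num)
{
	while (num-- > 0) {
		struct req_sketch *req = kzalloc(sizeof(*req), GFP_NOFS);

		if (!req)
			return;
		spin_lock(&pool->rp_lock);
		list_add_tail(&req->rs_link, &pool->rp_free);
		spin_unlock(&pool->rp_lock);
	}
}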
*/
enum nrs_orr_supp od_supp;
/**
- * Round Robin quantum; the maxium number of RPCs that each request
+ * Round Robin quantum; the maximum number of RPCs that each request
* batch for each object or OST can have in a scheduling round.
*/
__u16 od_quantum;
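A toy illustration of the quantum (not the NRS scheduler itself): serve at
most od_quantum RPCs from one object's batch per round, then rotate to the
next batch.

#include <linux/list.h>
#include <linux/types.h>

struct batched_req_sketch {
	struct list_head br_link;
};

static int serve_batch_sketch(struct list_head *batch, __u16 quantum)
{
	int served = 0;

	while (!list_empty(batch) && served < quantum) {
		struct batched_req_sketch *req =
			list_first_entry(batch, struct batched_req_sketch,
					 br_link);

		list_del(&req->br_link);
		/* ...dispatch req here... */
		served++;
	}
	return served;
}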
*/
struct nrs_fifo_req fifo;
/**
- * CRR-N request defintion
+ * CRR-N request definition
*/
struct nrs_crrn_req crr;
/** ORR and TRR share the same request definition */
* requests in time
*/
struct list_head rq_timed_list;
- /** server-side history, used for debuging purposes. */
+ /** server-side history, used for debugging purposes. */
struct list_head rq_history_list;
/** server-side per-export list */
struct list_head rq_exp_list;
enum rq_phase rq_phase; /* one of RQ_PHASE_* */
enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
atomic_t rq_refcount;/* client-side refcount for SENT race,
- server-side refcounf for multiple replies */
+ server-side refcount for multiple replies */
/** Portal to which this request would be sent */
short rq_request_portal; /* XXX FIXME bug 249 */
/** xid */
__u64 rq_xid;
/**
- * List item to for replay list. Not yet commited requests get linked
+ * List item for the replay list. Not yet committed requests get linked
* there.
* Also see \a rq_replay comment above.
*/
__attribute__ ((format (printf, 3, 4)));
/**
- * Helper that decides if we need to print request accordig to current debug
+ * Helper that decides if we need to print request according to current debug
* level settings
*/
#define debug_req(msgdata, mask, cdls, req, fmt, a...) \
} while(0)
/**
- * This is the debug print function you need to use to print request sturucture
+ * This is the debug print function you need to use to print request structure
* content into lustre debug log.
* for most callers (level is a constant) this is resolved at compile time */
#define DEBUG_REQ(level, req, fmt, args...) \
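A sketch of the "resolved at compile time" pattern (illustrative mask
names; the real macro consults the libcfs debug masks): with a constant
level, the first operand of || is itself a constant, so the compiler can
drop the runtime mask test for always-printed levels.

#include <linux/printk.h>

#define CANNOT_MASK_SKETCH	0x1	/* e.g. error-level messages */
extern unsigned int debug_mask_sketch;	/* runtime-tunable mask */

/* `req` is unused in this simplified sketch */
#define DEBUG_REQ_SKETCH(level, req, fmt, args...)			\
do {									\
	if (((level) & CANNOT_MASK_SKETCH) ||				\
	    (debug_mask_sketch & (level)))				\
		printk(fmt "\n", ##args);				\
} while (0)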
*
* lustre/include/md_object.h
*
- * Extention of lu_object.h for metadata objects
+ * Extension of lu_object.h for metadata objects
*/
#ifndef _LUSTRE_MD_OBJECT_H
/* statfs data specific for every OSC, if needed at all. */
struct obd_statfs *oi_osfs;
/* An update callback which is called to update some data on upper
- * level. E.g. it is used for update lsm->lsm_oinfo at every recieved
+ * level. E.g. it is used to update lsm->lsm_oinfo at every received
* request in osc level for enqueue requests. It is also possible to
* update some caller data from LOV layer if needed. */
obd_enqueue_update_f oi_cb_up;
}
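A sketch of how such an update callback might be invoked on reply
(illustrative names; the real typedef is obd_enqueue_update_f in the
Lustre headers):

typedef int (*update_cb_sketch_t)(void *cookie, int rc);

/* called by the lower layer once a reply for the enqueue arrives */
static int notify_upper_sketch(update_cb_sketch_t cb, void *cookie, int rc)
{
	return cb ? cb(cookie, rc) : rc;
}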
struct md_op_data {
- struct lu_fid op_fid1; /* operation fid1 (usualy parent) */
- struct lu_fid op_fid2; /* operation fid2 (usualy child) */
+ struct lu_fid op_fid1; /* operation fid1 (usually parent) */
+ struct lu_fid op_fid2; /* operation fid2 (usually child) */
struct lu_fid op_fid3; /* 2 extra fids to find conflicting */
struct lu_fid op_fid4; /* to the operation locks. */
mdsno_t op_mds; /* what mds server open will go to */