This patch fixes spelling typos in lustre/include.
Signed-off-by: Masanari Iida <standby24x7@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
* calls. To achieve this, every layer can implement ->clo_fits_into() method,
* that is called by lock matching code (cl_lock_lookup()), and that can be
* used to selectively disable matching of certain locks for certain IOs. For
- * exmaple, lov layer implements lov_lock_fits_into() that allow multi-stripe
+ * example, lov layer implements lov_lock_fits_into() that allows multi-stripe
* locks to be matched only for truncates and O_APPEND writes.
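As a rough, self-contained sketch of the selective matching described above (the structures and names below are made up; they are not the real cl_lock or cl_io definitions), a lookup loop simply skips any lock whose per-layer hook rejects the current IO:

#include <stddef.h>

struct io;                              /* opaque stand-in for the IO being matched */

struct lock {
        struct lock *next;
        /* per-layer hook: nonzero if this lock may serve @io */
        int (*fits_into)(const struct lock *lock, const struct io *io);
};

/* Lookup skips any lock whose fits_into() hook vetoes the current IO. */
static struct lock *lock_lookup(struct lock *head, const struct io *io)
{
        struct lock *lck;

        for (lck = head; lck != NULL; lck = lck->next) {
                if (lck->fits_into && !lck->fits_into(lck, io))
                        continue;       /* layer disabled matching for this IO */
                return lck;
        }
        return NULL;
}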
*
* Interaction with DLM
* Check if layout changed after the IO finishes. Mainly for HSM
- * requirement. If IO occurs to openning files, it doesn't need to
- * verify layout because HSM won't release openning files.
+ * requirement. If IO occurs to opening files, it doesn't need to
+ * verify layout because HSM won't release opening files.
- * Right now, only two opertaions need to verify layout: glimpse
+ * Right now, only two operations need to verify layout: glimpse
* and setattr.
*/
ci_verify_layout:1,
*/
struct cl_req {
enum cl_req_type crq_type;
- /** A list of pages being transfered */
+ /** A list of pages being transferred */
struct list_head crq_pages;
/** Number of pages in cl_req::crq_pages */
unsigned crq_nrpages;
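A minimal sketch of how the two fields above relate (the struct and field names here are illustrative, not the real cl_page linkage; crq_pages is assumed to have been set up with INIT_LIST_HEAD): the count is kept in step with the list as pages are added.

#include <linux/list.h>

struct page_in_flight {
        struct list_head pif_linkage;   /* hypothetical linkage into crq_pages */
};

struct req_sketch {
        struct list_head crq_pages;     /* pages being transferred */
        unsigned int     crq_nrpages;   /* mirrors the length of crq_pages */
};

static void req_add_page(struct req_sketch *req, struct page_in_flight *pg)
{
        list_add_tail(&pg->pif_linkage, &req->crq_pages);
        req->crq_nrpages++;             /* keep the count in step with the list */
}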
*
* - call chains have no non-lustre portions inserted between lustre code.
*
- * On a client both these assumtpion fails, because every user thread can
+ * On a client both these assumptions fail, because every user thread can
* potentially execute lustre code as part of a system call, and lustre calls
* into VFS or MM that call back into lustre.
*
* and on newer kernels this header is shared as _ASM_GENERIC_IOCTL_H.
*
* We can avoid any problems with the kernel header being included again by
- * defining _ASM_I386_IOCTL_H here so that a later occurence of <asm/ioctl.h>
+ * defining _ASM_I386_IOCTL_H here so that a later occurrence of <asm/ioctl.h>
* does not include the kernel's ioctl.h after this one. b=14746 */
#define _ASM_I386_IOCTL_H
#define _ASM_GENERIC_IOCTL_H
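The guard trick above can be demonstrated with a self-contained example (all EXAMPLE_* names are made up; this is not the kernel header itself): claiming a header's include guard before the header is seen turns its body into a no-op.

#include <stdio.h>

#define _EXAMPLE_IOCTL_H        /* pre-claim the guard, as done above for <asm/ioctl.h> */

/* Stand-in for the guarded body of the kernel header: */
#ifndef _EXAMPLE_IOCTL_H
#define _EXAMPLE_IOCTL_H
#define EXAMPLE_FROM_KERNEL 1
#endif

int main(void)
{
#ifdef EXAMPLE_FROM_KERNEL
        printf("kernel header definitions were used\n");
#else
        printf("kernel header body was skipped; local definitions win\n");
#endif
        return 0;
}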
/**
* True, if \a io is a normal io, False for splice_{read,write}.
- * must be impementated in arch specific code.
+ * must be implemented in arch specific code.
*/
int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);
};
typedef enum {
- /** invalide type */
+ /** invalid type */
LDLM_NS_TYPE_UNKNOWN = 0,
/** mdc namespace */
LDLM_NS_TYPE_MDC,
* Currently, resid from the old client, whose res[0] = object_id,
- * res[1] = object_seq, is just opposite with Metatdata
+ * res[1] = object_seq, is just opposite with Metadata
* resid, where, res[0] = fid->f_seq, res[1] = fid->f_oid.
- * To unifiy the resid identification, we will reverse the data
+ * To unify the resid identification, we will reverse the data
* resid to keep it same with Metadata resid, i.e.
*
* For resid from the old client,
};
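A minimal sketch of the reversal described in the resid comment above, using an illustrative structure rather than the real Lustre resource name type: the two slots are simply swapped so that slot 0 carries the sequence, matching the metadata resid layout.

struct res_name_sketch {
        unsigned long long res[2];
};

/* Old data resid: res[0] = object_id, res[1] = object_seq.
 * Reverse it so res[0] = seq and res[1] = oid, as for metadata resids. */
static void unify_old_client_resid(struct res_name_sketch *name)
{
        unsigned long long object_id  = name->res[0];
        unsigned long long object_seq = name->res[1];

        name->res[0] = object_seq;
        name->res[1] = object_id;
}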
/**
- * Defintion of PortalRPC import structure.
+ * Definition of PortalRPC import structure.
* Imports are representing client-side view to remote target.
*/
struct obd_import {
* after a check to save on unnecessary replay list iterations
*/
int imp_last_generation_checked;
- /** Last tranno we replayed */
+ /** Last transno we replayed */
__u64 imp_last_replay_transno;
/** Last transno committed on remote side */
__u64 imp_peer_committed_transno;
struct lustre_handle imp_remote_handle;
/** When to perform next ping. time in jiffies. */
cfs_time_t imp_next_ping;
- /** When we last succesfully connected. time in 64bit jiffies */
+ /** When we last successfully connected. time in 64bit jiffies */
__u64 imp_last_success_conn;
/** List of all possible connection for import. */
imp_no_lock_replay:1,
/* recovery by versions was failed */
imp_vbr_failed:1,
- /* force an immidiate ping */
+ /* force an immediate ping */
imp_force_verify:1,
/* force a scheduled ping */
imp_force_next_verify:1,
/* need IR MNE swab */
imp_need_mne_swab:1,
/* import must be reconnected instead of
- * chouse new connection */
+ * choosing a new connection */
imp_force_reconnect:1,
/* import has tried to connect with server */
imp_connect_tried:1;
*/
struct lu_buf *ld_buf;
/**
- * The matched header, entry and its lenght in the EA
+ * The matched header, entry and its length in the EA
*/
struct link_ea_header *ld_leh;
struct link_ea_entry *ld_lee;
/** Size of the reply message */
int rs_repdata_len; /* wrapper msg length */
/**
- * Actual reply message. Its content is encrupted (if needed) to
+ * Actual reply message. Its content is encrypted (if needed) to
* produce reply buffer for actual sending. In simple case
- * of no network encryption we jus set \a rs_repbuf to \a rs_msg
+ * of no network encryption we just set \a rs_repbuf to \a rs_msg
*/
struct lustre_msg *rs_msg; /* reply message */
*/
struct module *nc_owner;
/**
- * Policy registration flags; a bitmast of \e nrs_policy_flags
+ * Policy registration flags; a bitmask of \e nrs_policy_flags
*/
unsigned nc_flags;
};
* (i.e. when ->ldo_recovery_complete is called). This is used
* to notify the qsd layer that quota should now be enforced
* again via the qsd_op_begin/end functions. The last step of the
- * reintegration prodecure (namely usage reconciliation) will be
+ * reintegration procedure (namely usage reconciliation) will be
* completed during start.
*
* - qsd_fini(): is used to release a qsd_instance structure allocated with
/**
* Called then the reference of \a ctx dropped to 0. The policy module
* is supposed to destroy this context or whatever else according to
- * its cache maintainance mechamism.
+ * its cache maintenance mechanism.
*
* \param sync if zero, we shouldn't wait for the context being
* destroyed completely.
void sptlrpc_sec_put(struct ptlrpc_sec *sec);
/*
- * internal apis which only used by policy impelentation
+ * internal apis which are only used by policy implementation
*/
int sptlrpc_get_next_secid(void);
void sptlrpc_sec_destroy(struct ptlrpc_sec *sec);
{
/* returns: 0 on healthy
* >0 on unhealthy + reason code/flag
- * however the only suppored reason == 1 right now
+ * however the only supported reason == 1 right now
* We'll need to define some better reasons
* or flags in the future.
* <0 on error
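Purely as an illustration of the convention spelled out above (the caller below is hypothetical, not part of the obd API), a user of the health check would interpret the return value like this:

#include <stdio.h>

static void report_health(int rc)
{
        if (rc == 0)
                printf("healthy\n");
        else if (rc > 0)
                printf("unhealthy, reason %d\n", rc);   /* only reason 1 is defined today */
        else
                printf("health check failed: %d\n", rc);
}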
*
* Be very careful when changing this value, especially when decreasing it,
* since vmalloc in Linux doesn't perform well on multi-cores system, calling
- * vmalloc in critical path would hurt peformance badly. See LU-66.
+ * vmalloc in critical path would hurt performance badly. See LU-66.
*/
#define OBD_ALLOC_BIG (4 * PAGE_CACHE_SIZE)
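A minimal sketch of how such a threshold is typically applied (this is not the actual OBD allocation macro; the helper below is made up): requests above OBD_ALLOC_BIG fall back to vmalloc, while smaller ones stay on the kmalloc fast path.

#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *obd_alloc_sketch(size_t size)
{
        if (size > OBD_ALLOC_BIG)
                return vmalloc(size);           /* large: virtually contiguous is enough */
        return kmalloc(size, GFP_NOFS);         /* small: fast, physically contiguous */
}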