Fix spelling typos in comments within lustre/include.
Signed-off-by: Masanari Iida <standby24x7@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
int
cfs_cpt_number(struct cfs_cpt_table *cptab);
/**
- * return number of HW cores or hypter-threadings in a CPU partition \a cpt
+ * return number of HW cores or hyper-threadings in a CPU partition \a cpt
*/
int cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt);
/**
 * bind current thread on a CPU partition \a cpt of \a cptab
 */
int cfs_cpt_bind(struct cfs_cpt_table *cptab, int cpt);
/**
- * add \a cpu to CPU partion @cpt of \a cptab, return 1 for success,
+ * add \a cpu to CPU partition @cpt of \a cptab, return 1 for success,
* otherwise 0 is returned
*/
int cfs_cpt_set_cpu(struct cfs_cpt_table *cptab, int cpt, int cpu);
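For orientation, a minimal usage sketch of the partition-table helpers documented above, assuming a cfs_cpt_table set up elsewhere (allocation is outside this hunk); only the signatures shown here are relied on:

/* Illustrative only: report the weight of each CPU partition, then
 * try to add CPU 0 to partition 0.  "cptab" is assumed to come from
 * the CPT allocation API, which this hunk does not show. */
static void cpt_usage_sketch(struct cfs_cpt_table *cptab)
{
	int ncpt = cfs_cpt_number(cptab);	/* partitions in the table */
	int cpt;

	for (cpt = 0; cpt < ncpt; cpt++)
		pr_info("cpt %d: %d HW threads\n",
			cpt, cfs_cpt_weight(cptab, cpt));

	if (!cfs_cpt_set_cpu(cptab, 0, 0))	/* 1 on success, 0 otherwise */
		pr_warn("failed to add CPU 0 to partition 0\n");
}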
struct cfs_crypto_hash_type {
char *cht_name; /**< hash algorithm name, equal to
* format name for crypto api */
- unsigned int cht_key; /**< init key by default (vaild for
+ unsigned int cht_key; /**< init key by default (valid for
* 4 bytes context like crc32, adler */
unsigned int cht_size; /**< hash digest size */
};
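To illustrate how the three fields are meant to be filled, a hypothetical entry for crc32 could look like this (the entry name and values are assumptions for illustration, not taken from the patch):

/* Hypothetical table entry: 4-byte digest with a default initial
 * key, matching the cht_key comment above. */
static const struct cfs_crypto_hash_type hash_type_crc32_example = {
	.cht_name = "crc32",	/* crypto api format name */
	.cht_key  = ~0U,	/* default init key for the 4-byte context */
	.cht_size = 4,		/* digest size in bytes */
};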
/*
* Ideally we would use HAVE_HASH_LONG for this, but on linux we configure
* the linux kernel and user space at the same time, so we need to differentiate
- * between them explicitely. If this is not needed on other architectures, then
- * we'll need to move the functions to archi specific headers.
+ * between them explicitly. If this is not needed on other architectures, then
+ * we'll need to move the functions to architecture specific headers.
*/
#include <linux/hash.h>
/**
* cfs_hash_bucket is a container of:
- * - lock, couter ...
+ * - lock, counter ...
* - array of hash-head starting from hsb_head[0], hash-head can be one of
* . cfs_hash_head_t
* . cfs_hash_head_dep_t
CFS_HASH_NO_BKTLOCK = 1 << 1,
/** rwlock to protect bucket */
CFS_HASH_RW_BKTLOCK = 1 << 2,
- /** spinlcok to protect bucket */
+ /** spinlock to protect bucket */
CFS_HASH_SPIN_BKTLOCK = 1 << 3,
/** always add new item to tail */
CFS_HASH_ADD_TAIL = 1 << 4,
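A short sketch of how these flags are combined by a caller; the particular combination below is an assumed example (the hash creation call itself is not part of this hunk):

/* Assumed example: per-bucket spinlocks plus FIFO (add-to-tail)
 * insertion.  Only one of the *_BKTLOCK locking modes is picked. */
unsigned int hash_flags_example = CFS_HASH_SPIN_BKTLOCK | CFS_HASH_ADD_TAIL;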
#define MAX_PORTALS 64
/* these are only used by code with LNET_USE_LIB_FREELIST, but we still
- * exported them to !LNET_USE_LIB_FREELIST for easy implemetation */
+ * exported them to !LNET_USE_LIB_FREELIST for easy implementation */
#define LNET_FL_MAX_MES 2048
#define LNET_FL_MAX_MDS 2048
#define LNET_FL_MAX_EQS 512
unsigned int msg_receiving:1; /* being received */
unsigned int msg_txcredit:1; /* taken an NI send credit */
unsigned int msg_peertxcredit:1; /* taken a peer send credit */
- unsigned int msg_rtrcredit:1; /* taken a globel router credit */
+ unsigned int msg_rtrcredit:1; /* taken a global router credit */
unsigned int msg_peerrtrcredit:1; /* taken a peer router credit */
unsigned int msg_onactivelist:1; /* on the activelist */
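A rough, illustrative sketch of the bookkeeping these bits support: once a send completes, whatever credits the message had taken are handed back and the flags are cleared (the helper below is hypothetical, not from the patch):

/* Hypothetical helper: clear the credit flags after the matching
 * NI / peer / router credits have been returned. */
static void msg_clear_credit_flags(lnet_msg_t *msg)
{
	msg->msg_txcredit      = 0;	/* NI send credit returned */
	msg->msg_peertxcredit  = 0;	/* peer send credit returned */
	msg->msg_rtrcredit     = 0;	/* global router credit returned */
	msg->msg_peerrtrcredit = 0;	/* peer router credit returned */
}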
/* Start receiving 'mlen' bytes of payload data, skipping the following
* 'rlen' - 'mlen' bytes. 'private' is the 'private' passed to
- * lnet_parse(). Return non-zero for immedaite failure, otherwise
+ * lnet_parse(). Return non-zero for immediate failure, otherwise
* complete later with lnet_finalize(). This also gives back a receive
* credit if the LND does flow control. */
int (*lnd_recv)(struct lnet_ni *ni, void *private, lnet_msg_t *msg,
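To make the mlen/rlen contract concrete, a standalone toy in plain C (not an actual LND callback): of the rlen bytes presented, only the first mlen are delivered and the rest are skipped; a real LND would additionally report failure with a non-zero return or complete later via lnet_finalize() as described above.

#include <string.h>

/* Toy illustration of the receive contract: copy the wanted mlen
 * bytes and discard the remaining rlen - mlen.  The caller is
 * assumed to guarantee mlen <= rlen. */
static size_t toy_recv(char *dst, const char *wire, size_t mlen, size_t rlen)
{
	memcpy(dst, wire, mlen);	/* deliver the payload */
	return rlen - mlen;		/* bytes deliberately skipped */
}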
unsigned int mt_cpt;
unsigned int mt_portal; /* portal index */
/* match table is set as "enabled" if there's non-exhausted MD
- * attached on mt_mhash, it's only valide for wildcard portal */
+ * attached on mt_mhash, it's only valid for wildcard portal */
unsigned int mt_enabled;
/* bitmap to flag whether MEs on mt_hash are exhausted or not */
__u64 mt_exhausted[LNET_MT_EXHAUSTED_BMAP];
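For illustration, testing one hash position against the exhausted bitmap above amounts to a per-bit lookup roughly like the following (hypothetical helper, assuming 64 bits per __u64 word from <linux/types.h>):

/* Hypothetical helper: non-zero if hash position "pos" is flagged
 * as exhausted in the mt_exhausted bitmap. */
static int mt_pos_exhausted(const __u64 *exhausted, unsigned int pos)
{
	return !!(exhausted[pos / 64] & (1ULL << (pos % 64)));
}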
/* add stat in session */
typedef struct {
int lstio_sta_key; /* IN: session key */
- int lstio_sta_timeout; /* IN: timeout for stat requst */
+ int lstio_sta_timeout; /* IN: timeout for stat request */
int lstio_sta_nmlen; /* IN: group name length */
char *lstio_sta_namep; /* IN: group name */
int lstio_sta_count; /* IN: # of pid */
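As a usage sketch, a caller would fill the IN fields shown above along these lines (the variable names and values are assumptions for illustration; "stat_args" stands for a variable of the typedef'd struct):

/* Assumed example: query statistics for group "clients" with a
 * 30 second timeout in the current session. */
stat_args.lstio_sta_key     = session_key;		/* session key from setup */
stat_args.lstio_sta_timeout = 30;			/* stat request timeout, seconds */
stat_args.lstio_sta_namep   = "clients";		/* group name */
stat_args.lstio_sta_nmlen   = sizeof("clients") - 1;	/* group name length */
stat_args.lstio_sta_count   = 0;			/* # of pids to follow */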
* address of an array of lnet_kiov_t and the length field specifies
* the number of entries in the array. The length can't be bigger
* than LNET_MAX_IOV. The lnet_kiov_t is used to describe page-based
- * fragments that are not necessarily mapped in virtal memory.
+ * fragments that are not necessarily mapped in virtual memory.
* - LNET_MD_IOVEC bit set: The start field points to the starting
* address of an array of struct iovec and the length field specifies
* the number of entries in the array. The length can't be bigger