rpcauth_gc_credcache(auth, &free);
hlist_for_each_safe(pos, next, &cache->hashtable[nr]) {
struct rpc_cred *entry;
- entry = hlist_entry(pos, struct rpc_cred, cr_hash);
+ entry = hlist_entry(pos, struct rpc_cred, cr_hash);
if (entry->cr_ops->crmatch(acred, entry, flags)) {
hlist_del(&entry->cr_hash);
cred = entry;
* linux/net/sunrpc/auth_gss/auth_gss.c
*
* RPCSEC_GSS client authentication.
- *
+ *
* Copyright (c) 2000 The Regents of the University of Michigan.
* All rights reserved.
*
* as it is passed to gssd to signal the use of
* machine creds should be part of the shared rpc interface */
-#define CA_RUN_AS_MACHINE 0x00000200
+#define CA_RUN_AS_MACHINE 0x00000200
/* dump the buffer in `emacs-hexl' style */
#define isprint(c) ((c > 0x1f) && (c < 0x7f))
}
}
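/*
 * A minimal sketch (not the dump routine from this file) of the
 * `emacs-hexl' style output the isprint() macro above supports:
 * offset, up to sixteen hex bytes, then their printable characters.
 */
static void example_hexl_dump(const unsigned char *buf, int len)
{
	int i, j;

	for (i = 0; i < len; i += 16) {
		printk("%08x:", i);
		for (j = i; j < i + 16 && j < len; j++)
			printk(" %02x", buf[j]);
		printk("  ");
		for (j = i; j < i + 16 && j < len; j++)
			printk("%c", isprint(buf[j]) ? buf[j] : '.');
		printk("\n");
	}
}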
-/*
- * NOTE: we have the opportunity to use different
+/*
+ * NOTE: we have the opportunity to use different
* parameters based on the input flavor (which must be a pseudoflavor)
*/
static struct rpc_auth *
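/*
 * A minimal sketch (assumption, not the body of the function above) of
 * how a pseudoflavor can drive parameter selection: the existing
 * gss_mech_switch helpers map it to a mechanism and a service level.
 * example_flavor_to_service() is a hypothetical name.
 */
static int example_flavor_to_service(rpc_authflavor_t flavor)
{
	struct gss_api_mech *mech;
	u32 service;

	mech = gss_mech_get_by_pseudoflavor(flavor);
	if (mech == NULL)
		return -EOPNOTSUPP;	/* no mechanism registered for it */
	service = gss_pseudoflavor_to_service(mech, flavor);
	gss_mech_put(mech);
	return service;
}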
flav = ntohl(*p++);
if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
- goto out_bad;
+ goto out_bad;
if (flav != RPC_AUTH_GSS)
goto out_bad;
seq = htonl(task->tk_rqstp->rq_seqno);
*integ_len = htonl(integ_buf.len);
/* guess whether we're in the head or the tail: */
- if (snd_buf->page_len || snd_buf->tail[0].iov_len)
+ if (snd_buf->page_len || snd_buf->tail[0].iov_len)
iov = snd_buf->tail;
else
iov = snd_buf->head;
maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
/* RPC_SLACK_SPACE should prevent this ever happening: */
BUG_ON(snd_buf->len > snd_buf->buflen);
- status = -EIO;
+ status = -EIO;
/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
* done anyway, so it's safe to put the request on the wire: */
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
status = gss_wrap_req_integ(cred, ctx, encode,
rqstp, p, obj);
break;
- case RPC_GSS_SVC_PRIVACY:
+ case RPC_GSS_SVC_PRIVACY:
status = gss_wrap_req_priv(cred, ctx, encode,
rqstp, p, obj);
break;
if (status)
goto out;
break;
- case RPC_GSS_SVC_PRIVACY:
+ case RPC_GSS_SVC_PRIVACY:
status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
if (status)
goto out;
status);
return status;
}
-
+
static struct rpc_authops authgss_ops = {
.owner = THIS_MODULE,
.au_flavor = RPC_AUTH_GSS,
/*
* Copyright 1993 by OpenVision Technologies, Inc.
- *
+ *
* Permission to use, copy, modify, distribute, and sell this software
* and its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appears in all copies and
* without specific, written prior permission. OpenVision makes no
* representations about the suitability of this software for any
* purpose. It is provided "as is" without express or implied warranty.
- *
+ *
* OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
return(G_BAD_TOK_HEADER);
if (*buf++ != 0x06)
return(G_BAD_TOK_HEADER);
-
+
if ((toksize-=1) < 0)
return(G_BAD_TOK_HEADER);
toid.len = *buf++;
toid.data = buf;
buf+=toid.len;
- if (! g_OID_equal(&toid, mech))
+ if (! g_OID_equal(&toid, mech))
ret = G_WRONG_MECH;
-
+
/* G_WRONG_MECH is not returned immediately because it's more important
to return G_BAD_TOK_HEADER if the token header is in fact bad */
int length)
{
u32 ret = -EINVAL;
- struct scatterlist sg[1];
+ struct scatterlist sg[1];
u8 local_iv[16] = {0};
struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
if (crypto_blkcipher_ivsize(tfm) > 16) {
dprintk("RPC: gss_k5encrypt: tfm iv size to large %d\n",
- crypto_blkcipher_ivsize(tfm));
+ crypto_blkcipher_ivsize(tfm));
goto out;
}
/*
* Copyright 1993 by OpenVision Technologies, Inc.
- *
+ *
* Permission to use, copy, modify, distribute, and sell this software
* and its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appears in all copies and
* without specific, written prior permission. OpenVision makes no
* representations about the suitability of this software for any
* purpose. It is provided "as is" without express or implied warranty.
- *
+ *
* OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
*
* J. Bruce Fields <bfields@umich.edu>
*
- * Redistribution and use in source and binary forms, with or without
+ * Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
+ * notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
/* count trailing 0's */
for(i = in->len; i > 0; i--) {
- if (*ptr == 0) {
+ if (*ptr == 0) {
ptr--;
elen--;
} else
/*
* decode_asn1_bitstring()
- *
+ *
* decode a bitstring into a buffer of the expected length.
* enclen = bit string length
 * explen = expected length (defined in rfc)
return 1;
}
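/*
 * A plausible shape for the decode described above (a sketch, with
 * GFP_KERNEL assumed): copy the enclen encoded bytes into a zeroed
 * buffer of the RFC-defined explen, so short encodings end up
 * zero-padded; returns 1 on success, 0 on allocation failure.
 */
static int example_decode_bitstring(struct xdr_netobj *out, char *in,
				    int enclen, int explen)
{
	out->data = kzalloc(explen, GFP_KERNEL);
	if (out->data == NULL)
		return 0;
	out->len = explen;
	memcpy(out->data, in, enclen);	/* bytes beyond enclen stay zero */
	return 1;
}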
-/*
+/*
* SPKMInnerContextToken choice SPKM_MIC asn1 token layout
- *
+ *
* contextid is always 16 bytes plain data. max asn1 bitstring len = 17.
*
* tokenlen = pos[0] to end of token (max pos[45] with MD5 cksum)
* pos value
* ----------
* [0] a4 SPKM-MIC tag
- * [1] ?? innertoken length (max 44)
- *
- *
- * tok_hdr piece of checksum data starts here
+ * [1] ?? innertoken length (max 44)
+ *
+ *
+ * tok_hdr piece of checksum data starts here
*
- * the maximum mic-header len = 9 + 17 = 26
+ * the maximum mic-header len = 9 + 17 = 26
* mic-header
* ----------
- * [2] 30 SEQUENCE tag
- * [3] ?? mic-header length: (max 23) = TokenID + ContextID
+ * [2] 30 SEQUENCE tag
+ * [3] ?? mic-header length: (max 23) = TokenID + ContextID
*
* TokenID - all fields constant and can be hardcoded
* -------
* [4] 02 Type 2
- * [5] 02 Length 2
+ * [5] 02 Length 2
* [6][7] 01 01 TokenID (SPKM_MIC_TOK)
*
* ContextID - encoded length not constant, calculated
* [10] ?? ctxzbit
* [11] contextid
*
- * mic_header piece of checksum data ends here.
+ * mic_header piece of checksum data ends here.
*
* int-cksum - encoded length not constant, calculated
* ---------
* [??] 03 Type 3
- * [??] ?? encoded length
- * [??] ?? md5zbit
+ * [??] ?? encoded length
+ * [??] ?? md5zbit
* [??] int-cksum (NID_md5 = 16)
*
- * maximum SPKM-MIC innercontext token length =
- * 10 + encoded contextid_size(17 max) + 2 + encoded
+ * maximum SPKM-MIC innercontext token length =
+ * 10 + encoded contextid_size(17 max) + 2 + encoded
 * cksum_size (17 max for NID_md5) = 46
*/
/*
* spkm3_mic_innercontext_token()
*
- * *tokp points to the beginning of the SPKM_MIC token described
- * in rfc 2025, section 3.2.1:
+ * *tokp points to the beginning of the SPKM_MIC token described
+ * in rfc 2025, section 3.2.1:
*
* toklen is the inner token length
*/
/* spkm3 innercontext token preamble */
if ((ptr[0] != 0xa4) || (ptr[2] != 0x30)) {
- dprintk("RPC: BAD SPKM ictoken preamble\n");
+ dprintk("RPC: BAD SPKM ictoken preamble\n");
goto out;
}
goto out;
/*
- * in the current implementation: the optional int-alg is not present
- * so the default int-alg (md5) is used the optional snd-seq field is
- * also not present
+ * in the current implementation, the optional int-alg is not present,
+ * so the default int-alg (md5) is used; the optional snd-seq field is
+ * also not present
*/
if (*mic_hdrlen != 6 + ctxelen) {
goto out;
}
/* checksum */
- *cksum = (&ptr[10] + ctxelen); /* ctxelen includes ptr[10] */
+ *cksum = (&ptr[10] + ctxelen); /* ctxelen includes ptr[10] */
ret = GSS_S_COMPLETE;
out:
/*
* spkm3_read_token()
- *
+ *
* only SPKM_MIC_TOK with md5 intg-alg is supported
*/
u32
}
static void rsi_request(struct cache_detail *cd,
- struct cache_head *h,
- char **bpp, int *blen)
+ struct cache_head *h,
+ char **bpp, int *blen)
{
struct rsi *rsii = container_of(h, struct rsi, h);
static int rsi_parse(struct cache_detail *cd,
- char *mesg, int mlen)
+ char *mesg, int mlen)
{
/* context token expiry major minor context token */
char *buf = mesg;
*
* A table is then only scanned if the current time is at least
* the nextcheck time.
- *
+ *
*/
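/*
 * A minimal sketch (assumption, illustrative name) of the nextcheck
 * test described above: a table is only worth scanning once the
 * current time has reached its nextcheck stamp.
 */
static int example_table_due(struct cache_detail *cd)
{
	return get_seconds() >= cd->nextcheck;
}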
static LIST_HEAD(cache_list);
struct proc_dir_entry *p;
cd->proc_ent->owner = cd->owner;
cd->channel_ent = cd->content_ent = NULL;
-
- p = create_proc_entry("flush", S_IFREG|S_IRUSR|S_IWUSR,
- cd->proc_ent);
+
+ p = create_proc_entry("flush", S_IFREG|S_IRUSR|S_IWUSR,
+ cd->proc_ent);
cd->flush_ent = p;
- if (p) {
- p->proc_fops = &cache_flush_operations;
- p->owner = cd->owner;
- p->data = cd;
- }
-
+ if (p) {
+ p->proc_fops = &cache_flush_operations;
+ p->owner = cd->owner;
+ p->data = cd;
+ }
+
if (cd->cache_request || cd->cache_parse) {
p = create_proc_entry("channel", S_IFREG|S_IRUSR|S_IWUSR,
cd->proc_ent);
p->data = cd;
}
}
- if (cd->cache_show) {
- p = create_proc_entry("content", S_IFREG|S_IRUSR|S_IWUSR,
- cd->proc_ent);
+ if (cd->cache_show) {
+ p = create_proc_entry("content", S_IFREG|S_IRUSR|S_IWUSR,
+ cd->proc_ent);
cd->content_ent = p;
- if (p) {
- p->proc_fops = &content_file_operations;
- p->owner = cd->owner;
- p->data = cd;
- }
- }
+ if (p) {
+ p->proc_fops = &content_file_operations;
+ p->owner = cd->owner;
+ p->data = cd;
+ }
+ }
}
rwlock_init(&cd->hash_lock);
INIT_LIST_HEAD(&cd->queue);
current_index++;
/* find a cleanable entry in the bucket and clean it, or set to next bucket */
-
+
if (current_detail && current_index < current_detail->hash_size) {
struct cache_head *ch, **cp;
struct cache_detail *d;
-
+
		write_lock(&current_detail->hash_lock);
/* Ok, now to clean this strand */
-
+
cp = & current_detail->hash_table[current_index];
ch = *cp;
for (; ch; cp= & ch->next, ch= *cp) {
}
-/*
+/*
* Clean all caches promptly. This just calls cache_clean
- * repeatedly until we are sure that every cache has had a chance to
+ * repeatedly until we are sure that every cache has had a chance to
* be fully cleaned
*/
void cache_flush(void)
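/*
 * A plausible body for the routine above (a sketch of the pattern the
 * comment describes, not quoted from this patch): call cache_clean()
 * until it reports nothing left to do, then once more in case entries
 * became cleanable during the first pass.
 */
static void example_cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}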
* All deferred requests are stored in a hash table,
* indexed by "struct cache_head *".
* As it may be wasteful to store a whole request
- * structure, we allow the request to provide a
+ * structure, we allow the request to provide a
* deferred form, which must contain a
 * 'struct cache_deferred_req'.
* This cache_deferred_req contains a method to allow
INIT_LIST_HEAD(&pending);
spin_lock(&cache_defer_lock);
-
+
lp = cache_defer_hash[hash].next;
if (lp) {
while (lp != &cache_defer_hash[hash]) {
INIT_LIST_HEAD(&pending);
spin_lock(&cache_defer_lock);
-
+
list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
if (dreq->owner == owner) {
list_del(&dreq->hash);
* On write, an update request is processed
* Poll works if anything to read, and always allows write
*
- * Implemented by linked list of requests. Each open file has
+ * Implemented by linked list of requests. Each open file has
 * a ->private that also exists in this list. New requests are added
 * to the end and may wake up any preceding readers.
* New readers are added to the head. If, on read, an item is found with
* Messages are, like requests, separated into fields by
* spaces and dequotes as \xHEXSTRING or embedded \nnn octal
*
- * Message is
+ * Message is
* reply cachename expiry key ... content....
*
- * key and content are both parsed by cache
+ * key and content are both parsed by cache
*/
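/*
 * For illustration only (hypothetical address, expiry, and domain): an
 * update for the auth.unix.ip cache, whose parser appears later in
 * this patch, might be written to its channel as
 *
 *	nfsd 10.0.0.1 1500000000 example.domain
 *
 * i.e. class, key (the address), expiry in seconds since the epoch,
 * then content (the domain); binary fields would be quoted as
 * \xHEXSTRING.
 */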
#define isodigit(c) (isdigit(c) && c <= '7')
unsigned hash, entry;
struct cache_head *ch;
struct cache_detail *cd = ((struct handle*)m->private)->cd;
-
+
read_lock(&cd->hash_lock);
if (!n--)
do {
hash++;
n += 1LL<<32;
- } while(hash < cd->hash_size &&
+ } while(hash < cd->hash_size &&
cd->hash_table[hash]==NULL);
if (hash >= cd->hash_size)
return NULL;
rpc_shutdown_client(clnt);
clnt = ERR_PTR(err);
}
-out:
+out:
return clnt;
}
* sleeps on RPC calls
*/
#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))
-
+
static void rpc_save_sigmask(sigset_t *oldset, int intr)
{
unsigned long sigallow = sigmask(SIGKILL);
int status;
/* If this client is slain all further I/O fails */
- if (clnt->cl_dead)
+ if (clnt->cl_dead)
return -EIO;
BUG_ON(flags & RPC_TASK_ASYNC);
/* If this client is slain all further I/O fails */
status = -EIO;
- if (clnt->cl_dead)
+ if (clnt->cl_dead)
goto out_release;
flags |= RPC_TASK_ASYNC;
goto out_release;
/* Mask signals on GSS_AUTH upcalls */
- rpc_task_sigmask(task, &oldset);
+ rpc_task_sigmask(task, &oldset);
rpc_call_setup(task, msg, 0);
else
rpc_put_task(task);
- rpc_restore_sigmask(&oldset);
+ rpc_restore_sigmask(&oldset);
return status;
out_release:
rpc_release_calldata(tk_ops, data);
struct rpc_xprt *xprt = task->tk_xprt;
unsigned int bufsiz;
- dprintk("RPC: %4d call_allocate (status %d)\n",
+ dprintk("RPC: %4d call_allocate (status %d)\n",
task->tk_pid, task->tk_status);
task->tk_action = call_bind;
if (req->rq_buffer)
if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL)
return;
- printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);
+ printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);
if (RPC_IS_ASYNC(task) || !signalled()) {
xprt_release(task);
kxdrproc_t encode;
__be32 *p;
- dprintk("RPC: %4d call_encode (status %d)\n",
+ dprintk("RPC: %4d call_encode (status %d)\n",
task->tk_pid, task->tk_status);
/* Default buffer setup */
struct rpc_clnt *clnt = task->tk_client;
int status = task->tk_status;
- dprintk("RPC: %5u call_connect_status (status %d)\n",
+ dprintk("RPC: %5u call_connect_status (status %d)\n",
task->tk_pid, task->tk_status);
task->tk_status = 0;
static void
call_transmit(struct rpc_task *task)
{
- dprintk("RPC: %4d call_transmit (status %d)\n",
+ dprintk("RPC: %4d call_transmit (status %d)\n",
task->tk_pid, task->tk_status);
task->tk_action = call_status;
if (req->rq_received > 0 && !req->rq_bytes_sent)
task->tk_status = req->rq_received;
- dprintk("RPC: %4d call_status (status %d)\n",
+ dprintk("RPC: %4d call_status (status %d)\n",
task->tk_pid, task->tk_status);
status = task->tk_status;
kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode;
__be32 *p;
- dprintk("RPC: %4d call_decode (status %d)\n",
+ dprintk("RPC: %4d call_decode (status %d)\n",
task->tk_pid, task->tk_status);
if (task->tk_flags & RPC_CALL_MAJORSEEN) {
call_refreshresult(struct rpc_task *task)
{
int status = task->tk_status;
- dprintk("RPC: %4d call_refreshresult (status %d)\n",
+ dprintk("RPC: %4d call_refreshresult (status %d)\n",
task->tk_pid, task->tk_status);
task->tk_status = 0;
static struct rpc_procinfo pmap_procedures[] = {
[PMAP_SET] = {
.p_proc = PMAP_SET,
- .p_encode = (kxdrproc_t) xdr_encode_mapping,
+ .p_encode = (kxdrproc_t) xdr_encode_mapping,
.p_decode = (kxdrproc_t) xdr_decode_bool,
.p_bufsiz = 4,
.p_count = 1,
},
[PMAP_UNSET] = {
.p_proc = PMAP_UNSET,
- .p_encode = (kxdrproc_t) xdr_encode_mapping,
+ .p_encode = (kxdrproc_t) xdr_encode_mapping,
.p_decode = (kxdrproc_t) xdr_decode_bool,
.p_bufsiz = 4,
.p_count = 1,
* Scheduling for synchronous and asynchronous RPC requests.
*
* Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
- *
+ *
* TCP NFS related read + write fixes
* (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
*/
/*
* Make an RPC task runnable.
*
- * Note: If the task is ASYNC, this must be called with
+ * Note: If the task is ASYNC, this must be called with
* the spinlock held to protect the wait queue operation.
*/
static void rpc_make_runnable(struct rpc_task *task)
if (RPC_DO_CALLBACK(task)) {
/* Define a callback save pointer */
void (*save_callback)(struct rpc_task *);
-
- /*
+
+ /*
* If a callback exists, save it, reset it,
* call it.
* The save is needed to stop from resetting
sk_list);
svc_close_socket(svsk);
}
-
+
cache_clean_deferred(serv);
/* Unregister service with the portmapper */
{
int pages;
int arghi;
-
+
pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
				 * We assume each is at most one page
*/
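	/*
	 * Worked example (illustrative numbers): with PAGE_SIZE 4096
	 * and size 6000, pages = 6000/4096 + 1 = 2.
	 */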
if (pool != NULL)
return pool;
- return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
+ return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}
/*
spin_lock_bh(&pool->sp_lock);
} else {
/* choose a pool in round-robin fashion */
- for (i = 0; i < serv->sv_nrpools; i++) {
- pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
+ for (i = 0; i < serv->sv_nrpools; i++) {
+ pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
spin_lock_bh(&pool->sp_lock);
- if (!list_empty(&pool->sp_all_threads))
- goto found_pool;
+ if (!list_empty(&pool->sp_all_threads))
+ goto found_pool;
spin_unlock_bh(&pool->sp_lock);
- }
+ }
return NULL;
}
rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
list_del_init(&rqstp->rq_all);
task = rqstp->rq_task;
- }
+ }
spin_unlock_bh(&pool->sp_lock);
return task;
/*
* Register an RPC service with the local portmapper.
- * To unregister a service, call this routine with
+ * To unregister a service, call this routine with
* proto and port == 0.
*/
int
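/*
 * Usage sketch of the convention above: withdrawing all of a service's
 * portmap entries (as svc_destroy() does, per the "Unregister service
 * with the portmapper" comment earlier in this patch) passes zeros for
 * both arguments:
 *
 *	svc_register(serv, 0, 0);
 */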
goto err_short_len;
/* setup response xdr_buf.
- * Initially it has just one page
+ * Initially it has just one page
*/
rqstp->rq_resused = 1;
resv->iov_base = page_address(rqstp->rq_respages[0]);
memset(rqstp->rq_argp, 0, procp->pc_argsize);
memset(rqstp->rq_resp, 0, procp->pc_ressize);
- /* un-reserve some of the out-queue now that we have a
+ /* un-reserve some of the out-queue now that we have a
* better idea of reply size
*/
if (procp->pc_xdrressize)
* linux/net/sunrpc/svcauth.c
*
* The generic interface for RPC authentication on the server side.
- *
+ *
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*
* CHANGES
int rv = 0;
rqstp->rq_authop = NULL;
-
+
if (aops) {
rv = aops->release(rqstp);
module_put(aops->owner);
char text_addr[20];
struct ip_map *im = container_of(h, struct ip_map, h);
__be32 addr = im->m_addr.s_addr;
-
+
snprintf(text_addr, 20, "%u.%u.%u.%u",
ntohl(addr) >> 24 & 0xff,
ntohl(addr) >> 16 & 0xff,
if (sscanf(buf, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) != 4)
return -EINVAL;
-
+
expiry = get_expiry(&mesg);
	if (expiry == 0)
return -EINVAL;
/* class addr domain */
addr = im->m_addr;
- if (test_bit(CACHE_VALID, &h->flags) &&
+ if (test_bit(CACHE_VALID, &h->flags) &&
!test_bit(CACHE_NEGATIVE, &h->flags))
dom = im->m_client->h.name;
);
return 0;
}
-
+
struct cache_detail ip_map_cache = {
.owner = THIS_MODULE,
int auth_unix_forget_old(struct auth_domain *dom)
{
struct unix_domain *udom;
-
+
if (dom->flavour != &svcauth_unix)
return -EINVAL;
udom = container_of(dom, struct unix_domain, h);
if (argv->iov_len < 3*4)
return SVC_GARBAGE;
- if (svc_getu32(argv) != 0) {
+ if (svc_getu32(argv) != 0) {
dprintk("svc: bad null cred\n");
*authp = rpc_autherr_badcred;
return SVC_DENIED;
* providing that certain rules are followed:
*
* SK_CONN, SK_DATA, can be set or cleared at any time.
- * after a set, svc_sock_enqueue must be called.
+ * after a set, svc_sock_enqueue must be called.
* after a clear, the socket must be read/accepted
* if this succeeds, it must be set again.
 * SK_CLOSE can be set at any time. It is never cleared.
svsk->sk_sk, rqstp);
svc_thread_dequeue(pool, rqstp);
if (rqstp->rq_sock)
- printk(KERN_ERR
+ printk(KERN_ERR
"svc_sock_enqueue: server %p, rq_sock=%p!\n",
rqstp, rqstp->rq_sock);
rqstp->rq_sock = svsk;
if (xdr->tail[0].iov_len) {
result = kernel_sendpage(sock, rqstp->rq_respages[0],
((unsigned long)xdr->tail[0].iov_base)
- & (PAGE_SIZE-1),
+ & (PAGE_SIZE-1),
xdr->tail[0].iov_len, 0);
if (result > 0)
tv.tv_sec = xtime.tv_sec;
tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
skb_set_timestamp(skb, &tv);
- /* Don't enable netstamp, sunrpc doesn't
+ /* Don't enable netstamp, sunrpc doesn't
need that much accuracy */
}
skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
return 0;
}
local_bh_enable();
- skb_free_datagram(svsk->sk_sk, skb);
+ skb_free_datagram(svsk->sk_sk, skb);
} else {
/* we can use it in-place */
rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
svsk->sk_sendto = svc_udp_sendto;
	/* initial setting must have enough space to
- * receive and respond to one request.
+ * receive and respond to one request.
* svc_udp_recvfrom will re-adjust if necessary
*/
svc_sock_setbufsize(svsk->sk_sock,
if (ntohs(sin.sin_port) >= 1024) {
dprintk(KERN_WARNING
"%s: connect from unprivileged port: %u.%u.%u.%u:%d\n",
- serv->sv_name,
+ serv->sv_name,
NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
}
* on the number of threads which will access the socket.
*
* rcvbuf just needs to be able to hold a few requests.
- * Normally they will be removed from the queue
+ * Normally they will be removed from the queue
	 * as soon as a complete request arrives.
*/
svc_sock_setbufsize(svsk->sk_sock,
if (len < want) {
dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
- len, want);
+ len, want);
svc_sock_received(svsk);
return -EAGAIN; /* record header not complete */
}
tp->nonagle = 1; /* disable Nagle's algorithm */
	/* initial setting must have enough space to
- * receive and respond to one request.
+ * receive and respond to one request.
* svc_tcp_recvfrom will re-adjust if necessary
*/
svc_sock_setbufsize(svsk->sk_sock,
set_bit(SK_CHNGBUF, &svsk->sk_flags);
set_bit(SK_DATA, &svsk->sk_flags);
- if (sk->sk_state != TCP_ESTABLISHED)
+ if (sk->sk_state != TCP_ESTABLISHED)
set_bit(SK_CLOSE, &svsk->sk_flags);
}
}
spin_lock_bh(&serv->sv_lock);
list_for_each(le, &serv->sv_permsocks) {
- struct svc_sock *svsk =
+ struct svc_sock *svsk =
list_entry(le, struct svc_sock, sk_list);
set_bit(SK_CHNGBUF, &svsk->sk_flags);
}
rqstp, timeout);
if (rqstp->rq_sock)
- printk(KERN_ERR
+ printk(KERN_ERR
"svc_recv: service %p, socket not NULL!\n",
rqstp);
if (waitqueue_active(&rqstp->rq_wait))
- printk(KERN_ERR
+ printk(KERN_ERR
"svc_recv: service %p, wait queue active!\n",
rqstp);
return len;
}
-/*
+/*
* Drop request
*/
void
if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
list_del_init(&svsk->sk_list);
- /*
+ /*
* We used to delete the svc_sock from whichever list
	 * its sk_ready node was on, but we don't actually
* need to. This is because the only time we're called
}
/*
- * Handle defer and revisit of requests
+ * Handle defer and revisit of requests
*/
static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
{
struct svc_deferred_req *dr = NULL;
-
+
if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
return NULL;
spin_lock_bh(&svsk->sk_defer_lock);
sunrpc_table[0].de->owner = THIS_MODULE;
#endif
}
-
+
}
void
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dodebug
- },
+ },
{
.ctl_name = CTL_NFSDEBUG,
.procname = "nfs_debug",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dodebug
- },
+ },
{
.ctl_name = CTL_NFSDDEBUG,
.procname = "nfsd_debug",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dodebug
- },
+ },
{
.ctl_name = CTL_NLMDEBUG,
.procname = "nlm_debug",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dodebug
- },
+ },
{ .ctl_name = 0 }
};
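/*
 * Sketch of how a table like this gets hooked up (assuming the
 * two-argument register_sysctl_table() of trees that still use
 * .ctl_name, and a sunrpc_table_header variable to remember it):
 *
 *	sunrpc_table_header = register_sysctl_table(sunrpc_table, 0);
 */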
* @buf: xdr_buf
* @len: bytes to remove from buf->head[0]
*
- * Shrinks XDR buffer's header kvec buf->head[0] by
+ * Shrinks XDR buffer's header kvec buf->head[0] by
* 'len' bytes. The extra data is not lost, but is instead
* moved into the inlined pages and/or the tail.
*/
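/*
 * Toy illustration (not the kernel routine): shrinking a header kvec
 * by len bytes conceptually copies the surplus off the end of head[0]
 * into the next segment, then shortens head[0] itself.
 */
static void example_shrink_head(struct kvec *head, char *spill, size_t len)
{
	size_t keep = head->iov_len - len;

	memcpy(spill, (char *)head->iov_base + keep, len);
	head->iov_len = keep;
}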
* @buf: xdr_buf
* @len: bytes to remove from buf->pages
*
- * Shrinks XDR buffer's page array buf->pages by
+ * Shrinks XDR buffer's page array buf->pages by
* 'len' bytes. The extra data is not lost, but is instead
* moved into the tail.
*/
int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
- int (*actor)(struct scatterlist *, void *), void *data)
+ int (*actor)(struct scatterlist *, void *), void *data)
{
int i, ret = 0;
unsigned page_len, thislen, page_offset;
/*
* xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
* @task: task whose timeout is to be set
- *
+ *
* Set a request's retransmit timeout using the RTT estimator.
*/
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
*/
void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr)
{
- to->to_initval =
+ to->to_initval =
to->to_increment = incr;
to->to_maxval = to->to_initval + (incr * retr);
to->to_retries = retr;
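/*
 * Worked example (illustrative values): xprt_set_timeout(to, 2, 5 * HZ)
 * gives to_initval = to_increment = 5*HZ and to_maxval = 5*HZ + 2*5*HZ
 * = 15*HZ, i.e. a linear backoff of 5s, 10s, 15s over two retries.
 */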