static inline u16 inet6_tw_offset(const struct proto *prot)
{
- return prot->twsk_obj_size - sizeof(struct inet6_timewait_sock);
+ return prot->twsk_prot->twsk_obj_size -
+ sizeof(struct inet6_timewait_sock);
}
static inline struct inet6_timewait_sock *inet6_twsk(const struct sock *sk)
#include <net/sock.h>
#include <net/tcp_states.h>
+#include <net/timewait_sock.h>
#include <asm/atomic.h>
printk(KERN_DEBUG "%s timewait_sock %p released\n",
tw->tw_prot->name, tw);
#endif
- kmem_cache_free(tw->tw_prot->twsk_slab, tw);
+ kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
module_put(owner);
}
}
extern int sk_wait_data(struct sock *sk, long *timeo);
struct request_sock_ops;
+struct timewait_sock_ops;
/* Networking protocol blocks we attach to sockets.
* socket layer -> transport layer interface
kmem_cache_t *slab;
unsigned int obj_size;
- kmem_cache_t *twsk_slab;
- unsigned int twsk_obj_size;
atomic_t *orphan_count;
struct request_sock_ops *rsk_prot;
+ struct timewait_sock_ops *twsk_prot;
struct module *owner;
extern void tcp_rcv_space_adjust(struct sock *sk);
+extern int tcp_twsk_unique(struct sock *sk,
+ struct sock *sktw, void *twp);
+
static inline void tcp_dec_quickack_mode(struct sock *sk,
const unsigned int pkts)
{
--- /dev/null
+++ b/include/net/timewait_sock.h
+/*
+ * NET Generic infrastructure for Network protocols.
+ *
+ * Authors: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _TIMEWAIT_SOCK_H
+#define _TIMEWAIT_SOCK_H
+
+#include <linux/slab.h>
+#include <net/sock.h>
+
+struct timewait_sock_ops {
+ kmem_cache_t *twsk_slab;
+ unsigned int twsk_obj_size;
+ int (*twsk_unique)(struct sock *sk,
+ struct sock *sktw, void *twp);
+};
+
+static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
+{
+ if (sk->sk_prot->twsk_prot->twsk_unique != NULL)
+ return sk->sk_prot->twsk_prot->twsk_unique(sk, sktw, twp);
+ return 0;
+}
+
+#endif /* _TIMEWAIT_SOCK_H */
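
For orientation, a minimal sketch of how a protocol is expected to use the new header; the example_* identifiers are illustrative only and do not appear in the patch. The protocol defines a static struct timewait_sock_ops, leaves twsk_slab NULL (proto_register() creates it from twsk_obj_size under the name tw_sock_<name>), and points its struct proto at the ops via .twsk_prot; generic lookup code then dispatches through twsk_unique(), as the TCP hunks further down do.

#include <linux/module.h>
#include <net/sock.h>
#include <net/inet_timewait_sock.h>
#include <net/timewait_sock.h>

/* Hypothetical hook: decide whether a new socket may reuse sktw's binding. */
static int example_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	return 0;	/* never reuse; a real protocol would inspect its timewait state */
}

static struct timewait_sock_ops example_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct inet_timewait_sock),
	.twsk_unique	= example_twsk_unique,
	/* .twsk_slab stays NULL; proto_register() would create "tw_sock_EXAMPLE" */
};

static struct proto example_prot = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	/* ... remaining struct proto members elided in this sketch ... */
	.twsk_prot	= &example_timewait_sock_ops,
};

Registering with proto_register(&example_prot, 1) would then allocate the timewait slab, and established-hash code in the style of __tcp_v4_check_established() would call twsk_unique(sk, sk2, twp) instead of open-coding the per-protocol reuse check.
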
}
}
- if (prot->twsk_obj_size) {
+ if (prot->twsk_prot != NULL) {
static const char mask[] = "tw_sock_%s";
timewait_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
		if (timewait_sock_slab_name == NULL)
goto out_free_request_sock_slab;
sprintf(timewait_sock_slab_name, mask, prot->name);
- prot->twsk_slab = kmem_cache_create(timewait_sock_slab_name,
- prot->twsk_obj_size,
- 0, SLAB_HWCACHE_ALIGN,
- NULL, NULL);
- if (prot->twsk_slab == NULL)
+ prot->twsk_prot->twsk_slab =
+ kmem_cache_create(timewait_sock_slab_name,
+ prot->twsk_prot->twsk_obj_size,
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (prot->twsk_prot->twsk_slab == NULL)
goto out_free_timewait_sock_slab_name;
}
}
prot->rsk_prot->slab = NULL;
}
- if (prot->twsk_slab != NULL) {
- const char *name = kmem_cache_name(prot->twsk_slab);
+ if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
+ const char *name = kmem_cache_name(prot->twsk_prot->twsk_slab);
- kmem_cache_destroy(prot->twsk_slab);
+ kmem_cache_destroy(prot->twsk_prot->twsk_slab);
kfree(name);
- prot->twsk_slab = NULL;
+ prot->twsk_prot->twsk_slab = NULL;
}
}
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/sock.h>
+#include <net/timewait_sock.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
.send_reset = dccp_v4_ctl_send_reset,
};
+static struct timewait_sock_ops dccp_timewait_sock_ops = {
+ .twsk_obj_size = sizeof(struct inet_timewait_sock),
+};
+
struct proto dccp_prot = {
.name = "DCCP",
.owner = THIS_MODULE,
.max_header = MAX_DCCP_HEADER,
.obj_size = sizeof(struct dccp_sock),
.rsk_prot = &dccp_request_sock_ops,
- .twsk_obj_size = sizeof(struct inet_timewait_sock),
+ .twsk_prot = &dccp_timewait_sock_ops,
};
+
+EXPORT_SYMBOL_GPL(dccp_prot);
.send_reset = dccp_v6_ctl_send_reset,
};
+static struct timewait_sock_ops dccp6_timewait_sock_ops = {
+ .twsk_obj_size = sizeof(struct dccp6_timewait_sock),
+};
+
static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
struct ipv6_pinfo *np = inet6_sk(sk);
.max_header = MAX_DCCP_HEADER,
.obj_size = sizeof(struct dccp6_sock),
.rsk_prot = &dccp6_request_sock_ops,
- .twsk_obj_size = sizeof(struct dccp6_timewait_sock),
+ .twsk_prot = &dccp6_timewait_sock_ops,
};
static struct inet6_protocol dccp_v6_protocol = {
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
- struct inet_timewait_sock *tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_slab,
- SLAB_ATOMIC);
+ struct inet_timewait_sock *tw =
+ kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
+ SLAB_ATOMIC);
if (tw != NULL) {
const struct inet_sock *inet = inet_sk(sk);
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
+#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <linux/inet.h>
skb->h.th->source);
}
+int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
+{
+ const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ /* With PAWS, it is safe from the viewpoint
+ of data integrity. Even without PAWS it is safe provided sequence
+ spaces do not overlap i.e. at data rates <= 80Mbit/sec.
+
+ Actually, the idea is close to VJ's one, only timestamp cache is
+ held not per host, but per port pair and TW bucket is used as state
+ holder.
+
+ If TW bucket has been already destroyed we fall back to VJ's scheme
+ and use initial timestamp retrieved from peer table.
+ */
+ if (tcptw->tw_ts_recent_stamp &&
+ (twp == NULL || (sysctl_tcp_tw_reuse &&
+ xtime.tv_sec - tcptw->tw_ts_recent_stamp > 1))) {
+ tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
+ if (tp->write_seq == 0)
+ tp->write_seq = 1;
+ tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
+ tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
+ sock_hold(sktw);
+ return 1;
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(tcp_twsk_unique);
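
A brief aside on the arithmetic in tcp_twsk_unique() above, purely as an illustration (the helper below is hypothetical and not part of the patch): write_seq is pushed a full 65535-byte window plus 2 past the TIME-WAIT socket's last SND.NXT, presumably so the reused connection's sequence space cannot overlap anything the old connection could still have in flight, and a wrap to exactly 0 is nudged to 1 because tcp_v4_connect() treats a zero write_seq as a request to pick a fresh ISN.

#include <linux/types.h>

static inline u32 example_reuse_write_seq(u32 tw_snd_nxt)
{
	u32 write_seq = tw_snd_nxt + 65535 + 2;	/* skip past the old send window */

	return write_seq ? write_seq : 1;	/* 0 would make connect() choose a new ISN */
}

/* e.g. example_reuse_write_seq(4294901759u) wraps to 0 and is therefore returned as 1 */
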
+
/* called with local bh disabled */
static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
struct inet_timewait_sock **twp)
tw = inet_twsk(sk2);
if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) {
- const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2);
- struct tcp_sock *tp = tcp_sk(sk);
-
- /* With PAWS, it is safe from the viewpoint
- of data integrity. Even without PAWS it
- is safe provided sequence spaces do not
- overlap i.e. at data rates <= 80Mbit/sec.
-
- Actually, the idea is close to VJ's one,
- only timestamp cache is held not per host,
- but per port pair and TW bucket is used
- as state holder.
-
- If TW bucket has been already destroyed we
- fall back to VJ's scheme and use initial
- timestamp retrieved from peer table.
- */
- if (tcptw->tw_ts_recent_stamp &&
- (!twp || (sysctl_tcp_tw_reuse &&
- xtime.tv_sec -
- tcptw->tw_ts_recent_stamp > 1))) {
- tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
- if (tp->write_seq == 0)
- tp->write_seq = 1;
- tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
- tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
- sock_hold(sk2);
+ if (twsk_unique(sk, sk2, twp))
goto unique;
- } else
+ else
goto not_unique;
}
}
.send_reset = tcp_v4_send_reset,
};
+static struct timewait_sock_ops tcp_timewait_sock_ops = {
+ .twsk_obj_size = sizeof(struct tcp_timewait_sock),
+ .twsk_unique = tcp_twsk_unique,
+};
+
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct inet_request_sock *ireq;
.sysctl_rmem = sysctl_tcp_rmem,
.max_header = MAX_TCP_HEADER,
.obj_size = sizeof(struct tcp_sock),
- .twsk_obj_size = sizeof(struct tcp_timewait_sock),
+ .twsk_prot = &tcp_timewait_sock_ops,
.rsk_prot = &tcp_request_sock_ops,
};
#include <net/addrconf.h>
#include <net/snmp.h>
#include <net/dsfield.h>
+#include <net/timewait_sock.h>
#include <asm/uaccess.h>
ipv6_addr_equal(&tw6->tw_v6_daddr, saddr) &&
ipv6_addr_equal(&tw6->tw_v6_rcv_saddr, daddr) &&
sk2->sk_bound_dev_if == sk->sk_bound_dev_if) {
- const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2);
- struct tcp_sock *tp = tcp_sk(sk);
-
- if (tcptw->tw_ts_recent_stamp &&
- (!twp ||
- (sysctl_tcp_tw_reuse &&
- xtime.tv_sec - tcptw->tw_ts_recent_stamp > 1))) {
- /* See comment in tcp_ipv4.c */
- tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
- if (!tp->write_seq)
- tp->write_seq = 1;
- tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
- tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
- sock_hold(sk2);
+ if (twsk_unique(sk, sk2, twp))
goto unique;
- } else
+ else
goto not_unique;
}
}
.send_reset = tcp_v6_send_reset
};
+static struct timewait_sock_ops tcp6_timewait_sock_ops = {
+ .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
+ .twsk_unique = tcp_twsk_unique,
+};
+
static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
struct ipv6_pinfo *np = inet6_sk(sk);
.sysctl_rmem = sysctl_tcp_rmem,
.max_header = MAX_TCP_HEADER,
.obj_size = sizeof(struct tcp6_sock),
- .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
+ .twsk_prot = &tcp6_timewait_sock_ops,
.rsk_prot = &tcp6_request_sock_ops,
};