4 #include <linux/percpu_counter.h>
8 struct list_head lru_list;
11 /* The percpu_counter "mem" needs to be cacheline aligned.
12 * mem.count must not share a cacheline with other writers
14 struct percpu_counter mem ____cacheline_aligned_in_smp;
22 struct inet_frag_queue {
24 struct timer_list timer; /* when will this queue expire? */
25 struct list_head lru_list; /* lru list member */
26 struct hlist_node list;
28 struct sk_buff *fragments; /* list of received fragments */
29 struct sk_buff *fragments_tail;
31 int len; /* total length of orig datagram */
33 __u8 last_in; /* first/last segment arrived? */
35 #define INET_FRAG_COMPLETE 4
36 #define INET_FRAG_FIRST_IN 2
37 #define INET_FRAG_LAST_IN 1
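/* Illustrative use of the flags above (a hedged sketch modelled on the IPv4
 * reassembly caller, not part of this header): a queue is reassembled once
 * both boundary fragments have been seen and every byte in between has
 * arrived, roughly
 *
 *	if (q->last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 *	    <all bytes of the original datagram received>)
 *		... reassemble and hand the skb up ...
 *
 * INET_FRAG_COMPLETE marks a queue that has been killed and must not accept
 * any further fragments.
 */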
41 struct netns_frags *net;
44 #define INETFRAGS_HASHSZ 1024
47 * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
48 * rounded up (SKB_TRUESIZE(0) + sizeof(struct ipq or
51 #define INETFRAGS_MAXDEPTH 128
53 struct inet_frag_bucket {
54 struct hlist_head chain;
55 spinlock_t chain_lock;
59 struct inet_frag_bucket hash[INETFRAGS_HASHSZ];
60 /* This rwlock is a global lock (separate per IPv4, IPv6 and
61 * netfilter). Important to keep this on a separate cacheline.
62 * It is primarily a rebuild protection rwlock.
64 rwlock_t lock ____cacheline_aligned_in_smp;
66 struct timer_list secret_timer;
70 unsigned int (*hashfn)(struct inet_frag_queue *);
71 bool (*match)(struct inet_frag_queue *q, void *arg);
72 void (*constructor)(struct inet_frag_queue *q,
74 void (*destructor)(struct inet_frag_queue *);
75 void (*skb_free)(struct sk_buff *);
76 void (*frag_expire)(unsigned long data);
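/* Each reassembly user (IPv4 fragmentation, IPv6 reassembly, the netfilter
 * IPv6 defrag code) supplies its own hooks here before calling
 * inet_frags_init(). A hedged sketch of the IPv4 wiring (callback names
 * recalled from the IPv4 code, not defined in this header):
 *
 *	ip4_frags.hashfn      = ip4_hashfn;
 *	ip4_frags.constructor = ip4_frag_init;
 *	ip4_frags.destructor  = ip4_frag_free;
 *	ip4_frags.match       = ip4_frag_match;
 *	ip4_frags.frag_expire = ip_expire;
 *	inet_frags_init(&ip4_frags);
 */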
79 void inet_frags_init(struct inet_frags *);
80 void inet_frags_fini(struct inet_frags *);
82 void inet_frags_init_net(struct netns_frags *nf);
83 void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
85 void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
86 void inet_frag_destroy(struct inet_frag_queue *q,
87 struct inet_frags *f, int *work);
88 int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
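/* Typical lookup pattern (a hedged sketch based on the IPv4 caller; the
 * error handling and pr_fmt() argument are illustrative):
 *
 *	q = inet_frag_find(nf, f, &key, hash);
 *	if (IS_ERR_OR_NULL(q)) {
 *		inet_frag_maybe_warn_overflow(q, pr_fmt());
 *		return NULL;
 *	}
 *
 * inet_frag_find() returns a matching queue, creating one if none exists,
 * and fails with ERR_PTR(-ENOBUFS) once a hash chain grows beyond
 * INETFRAGS_MAXDEPTH entries; inet_frag_maybe_warn_overflow() emits a
 * rate-limited warning for that overflow case.
 */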
89 struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
90 struct inet_frags *f, void *key, unsigned int hash)
92 void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
95 static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
97 if (atomic_dec_and_test(&q->refcnt))
98 inet_frag_destroy(q, f, NULL);
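/* Hedged usage sketch: protocol code embeds the inet_frag_queue in its own
 * queue structure and drops references through a small wrapper, e.g. IPv4's
 * (illustrative) helper:
 *
 *	static inline void ipq_put(struct ipq *ipq)
 *	{
 *		inet_frag_put(&ipq->q, &ip4_frags);
 *	}
 */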
101 /* Memory Tracking Functions. */
103 /* The default percpu_counter batch size is not big enough to scale to
104 * the sizes seen in fragmentation memory accounting.
105 * The memory accounted for a full 64K fragmented datagram is approx:
106 * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
108 static unsigned int frag_percpu_counter_batch = 130000;
110 static inline int frag_mem_limit(struct netns_frags *nf)
112 return percpu_counter_read(&nf->mem);
115 static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
117 __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
120 static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
122 __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
125 static inline void init_frag_mem_limit(struct netns_frags *nf)
127 percpu_counter_init(&nf->mem, 0);
130 static inline int sum_frag_mem_limit(struct netns_frags *nf)
135 res = percpu_counter_sum_positive(&nf->mem);
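/* Hedged sketch of how the accounting helpers above are used (caller shapes
 * are illustrative of the IPv4 path, not defined in this header): every
 * queued skb adds its truesize, every freed skb subtracts it, eviction
 * decisions use the cheap, batch-lagged frag_mem_limit() read against the
 * per-netns thresholds, and reporting uses the exact but more expensive
 * sum_frag_mem_limit():
 *
 *	add_frag_mem_limit(&qp->q, skb->truesize);	(fragment queued)
 *	sub_frag_mem_limit(&qp->q, skb->truesize);	(fragment freed)
 *	if (frag_mem_limit(nf) > nf->high_thresh)	(start evicting)
 */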
141 static inline void inet_frag_lru_move(struct inet_frag_queue *q)
143 spin_lock(&q->net->lru_lock);
144 if (!list_empty(&q->lru_list))
145 list_move_tail(&q->lru_list, &q->net->lru_list);
146 spin_unlock(&q->net->lru_lock);
149 static inline void inet_frag_lru_del(struct inet_frag_queue *q)
151 spin_lock(&q->net->lru_lock);
152 list_del_init(&q->lru_list);
154 spin_unlock(&q->net->lru_lock);
157 static inline void inet_frag_lru_add(struct netns_frags *nf,
158 struct inet_frag_queue *q)
160 spin_lock(&nf->lru_lock);
161 list_add_tail(&q->lru_list, &nf->lru_list);
163 spin_unlock(&nf->lru_lock);
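/* Hedged summary of the intended LRU lifecycle (based on how the callers
 * are expected to use these helpers; not spelled out in this header):
 * inet_frag_alloc() puts a new queue on the list with inet_frag_lru_add(),
 * every newly received fragment refreshes its position via
 * inet_frag_lru_move(), and inet_frag_kill() removes it with
 * inet_frag_lru_del(). The evictor reclaims queues from the head of
 * nf->lru_list, i.e. the least recently updated ones first.
 */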
166 /* RFC 3168 support:
167 * We want to check the ECN values of all fragments to detect invalid combinations.
168 * In ipq->ecn, we store the OR of the ip4_frag_ecn() values of all received fragments.
170 #define IPFRAG_ECN_NOT_ECT 0x01 /* one frag had ECN_NOT_ECT */
171 #define IPFRAG_ECN_ECT_1 0x02 /* one frag had ECN_ECT_1 */
172 #define IPFRAG_ECN_ECT_0 0x04 /* one frag had ECN_ECT_0 */
173 #define IPFRAG_ECN_CE 0x08 /* one frag had ECN_CE */
175 extern const u8 ip_frag_ecn_table[16];
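/* Hedged usage sketch: at reassembly time the OR-ed IPFRAG_ECN_* bits index
 * this table to pick the ECN codepoint for the rebuilt header, with an
 * out-of-range value marking invalid mixes (e.g. CE together with not-ECT),
 * roughly:
 *
 *	ecn = ip_frag_ecn_table[ipq->ecn];
 *	if (unlikely(ecn == 0xff))
 *		goto drop;	(invalid combination, discard the datagram)
 *
 * The table itself is defined in the IPv4 fragmentation code and is also
 * used by the IPv6 reassembly paths.
 */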