/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (C) 2006-2010 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <linux/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");
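/* ipt_alloc_initial_table() builds a table's initial ruleset (one accepting
 * policy entry per hook plus the terminating ERROR entry) from the generic
 * template in xt_repldata.h; the "ipt"/"IPT" arguments select the IPv4
 * structure and symbol prefixes that the template macro expands.
 */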
void *ipt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ipt, IPT);
}
EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

	if (NF_INVF(ipinfo, IPT_INV_SRCIP,
		    (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
	    NF_INVF(ipinfo, IPT_INV_DSTIP,
		    (ip->daddr & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr))
		return false;

	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);
	if (NF_INVF(ipinfo, IPT_INV_VIA_IN, ret != 0))
		return false;

	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);
	if (NF_INVF(ipinfo, IPT_INV_VIA_OUT, ret != 0))
		return false;

	/* Check specific protocol */
	if (ipinfo->proto &&
	    NF_INVF(ipinfo, IPT_INV_PROTO, ip->protocol != ipinfo->proto))
		return false;

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (NF_INVF(ipinfo, IPT_INV_FRAG,
		    (ipinfo->flags & IPT_F_FRAG) && !isfrag))
		return false;

	return true;
}
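/* Example: "-s 10.0.0.0/8" is stored as src.s_addr == 10.0.0.0 with
 * smsk.s_addr == 255.0.0.0, so the test above masks the packet's source
 * address before comparing; "! -s" additionally sets IPT_INV_SRCIP, which
 * NF_INVF() uses to invert the result of that comparison.
 */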
static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK)
		return false;
	if (ip->invflags & ~IPT_INV_MASK)
		return false;

	return true;
}

static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}
/* Performance critical */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}

/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_entry *e)
{
	static const struct ipt_ip uncond;

	return e->target_offset == sizeof(struct ipt_entry) &&
	       memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
}
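/* A chain policy set with e.g. "iptables -P INPUT DROP" is represented by
 * exactly such an entry: a zeroed ipt_ip (matches every packet), no match
 * extensions (target_offset == sizeof(struct ipt_entry)), and a standard
 * target carrying the verdict.
 */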
/* for const-correctness */
static inline const struct xt_entry_target *
ipt_get_target_c(const struct ipt_entry *e)
{
	return ipt_get_target((struct ipt_entry *)e);
}
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]	= "PREROUTING",
	[NF_INET_LOCAL_IN]	= "INPUT",
	[NF_INET_FORWARD]	= "FORWARD",
	[NF_INET_LOCAL_OUT]	= "OUTPUT",
	[NF_INET_POST_ROUTING]	= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static const struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_DEFAULT_MASK,
		},
	},
};
/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ipt_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (unconditional(s) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
static void trace_packet(struct net *net,
			 const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ipt_entry *e)
{
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ipt_entry *iter;
	unsigned int rulenum = 0;

	root = get_entry(private->entries, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo,
		     "TRACE: %s:%s:%s:%u ",
		     tablename, chainname, comment, rulenum);
}
#endif
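/* With tracing enabled ("iptables -t raw -j TRACE"), each rule the packet
 * traverses is logged in the format above, e.g.:
 *   TRACE: filter:INPUT:rule:2    (matched rule 2 of the INPUT chain)
 *   TRACE: filter:INPUT:policy:3  (fell through to the chain policy)
 */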
static inline
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     const struct nf_hook_state *state,
	     struct xt_table *table)
{
	unsigned int hook = state->hook;
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ipt_entry *e, **jumpstack;
	unsigned int stackidx, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	stackidx = 0;
	ip = ip_hdr(skb);
	indev = state->in ? state->in->name : nulldevname;
	outdev = state->out ? state->out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports). If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	acpar.thoff   = ip_hdrlen(skb);
	acpar.hotdrop = false;
	acpar.state   = state;

	WARN_ON(!(table->valid_hooks & (1 << hook)));
	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = READ_ONCE(table->private); /* Address dependency. */
	cpu        = smp_processor_id();
	table_base = private->entries;
	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];

	/* Switch to alternate jumpstack if we're being invoked via TEE.
	 * TEE issues XT_CONTINUE verdict on original skb so we must not
	 * clobber the jumpstack.
	 *
	 * For recursion via REJECT or SYNPROXY the stack will be clobbered
	 * but it is no problem since absolute verdict is issued by these.
	 */
	if (static_key_false(&xt_tee_enabled))
		jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);

	e = get_entry(table_base, private->hook_entry[hook]);

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;
		struct xt_counters *counter;

		WARN_ON(!e);
		if (!ip_packet_match(ip, indev, outdev,
				     &e->ip, acpar.fragoff)) {
 no_match:
			e = ipt_next_entry(e);
			continue;
		}

		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		counter = xt_get_this_cpu_counter(&e->counters);
		ADD_COUNTER(*counter, skb->len, 1);

		t = ipt_get_target(e);
		WARN_ON(!t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(state->net, skb, hook, state->in,
				     state->out, table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
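			/* Negative verdicts encode an absolute verdict or
			 * XT_RETURN; non-negative values are jump offsets
			 * into the rule blob.  E.g. userspace stores ACCEPT
			 * as -NF_ACCEPT - 1 == -2, which the decode below,
			 * (unsigned int)(-v) - 1, maps back to NF_ACCEPT.
			 */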
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				if (stackidx == 0) {
					e = get_entry(table_base,
						      private->underflow[hook]);
				} else {
					e = jumpstack[--stackidx];
					e = ipt_next_entry(e);
				}
				continue;
			}
			if (table_base + v != ipt_next_entry(e) &&
			    !(e->ip.flags & IPT_F_GOTO))
				jumpstack[stackidx++] = e;

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		if (verdict == XT_CONTINUE) {
			/* Target might have changed stuff. */
			ip = ip_hdr(skb);
			e = ipt_next_entry(e);
		} else {
			/* Verdict */
			break;
		}
	} while (!acpar.hotdrop);

	xt_write_recseq_end(addend);
	local_bh_enable();

	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
}
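/* A table module dispatches its netfilter hooks here.  Sketch modelled on
 * iptable_filter (the hook function name is illustrative):
 *
 *	static unsigned int
 *	iptable_filter_hook(void *priv, struct sk_buff *skb,
 *			    const struct nf_hook_state *state)
 *	{
 *		return ipt_do_table(skb, state, state->net->ipv4.iptable_filter);
 *	}
 */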
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0,
		   unsigned int *offsets)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = entry0 + pos;

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ipt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS))
				return 0;

			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((unconditional(e) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1)
					return 0;

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = entry0 + pos;
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = entry0 + pos + size;
				if (pos + size >= newinfo->size)
					return 0;
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					/* This a jump; chase it. */
					if (!xt_find_jump_offset(offsets, newpos,
								 newinfo->number))
						return 0;
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
					if (newpos >= newinfo->size)
						return 0;
				}
				e = entry0 + newpos;
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
next:		;
	}
	return 1;
}
static void cleanup_match(struct xt_entry_match *m, struct net *net)
{
	struct xt_mtdtor_param par;

	par.net       = net;
	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
}
static int
check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	const struct ipt_ip *ip = par->entryinfo;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	return xt_check_match(par, m->u.match_size - sizeof(*m),
			      ip->proto, ip->invflags & IPT_INV_PROTO);
}
static int
find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	struct xt_match *match;
	int ret;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match))
		return PTR_ERR(match);
	m->u.kernel.match = match;

	ret = check_match(m, par);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
static int check_target(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.net       = net,
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};

	return xt_check_target(&par, t->u.target_size - sizeof(*t),
			       e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
}
static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
		 unsigned int size,
		 struct xt_percpu_counter_alloc_state *alloc_state)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
		return -ENOMEM;

	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;

	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}

	xt_percpu_counter_free(&e->counters);

	return ret;
}
static bool check_underflow(const struct ipt_entry *e)
{
	const struct xt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(e))
		return false;
	t = ipt_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct xt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}
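/* The base chain's policy (its "underflow" entry) must therefore be an
 * unconditional ACCEPT or DROP; e.g. policy ACCEPT is stored as verdict
 * -NF_ACCEPT - 1 == -2, which the -verdict - 1 above decodes to NF_ACCEPT.
 */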
static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;
	int err;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit)
		return -EINVAL;

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target))
		return -EINVAL;

	if (!ip_checkentry(&e->ip))
		return -EINVAL;

	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
				     e->next_offset);
	if (err)
		return err;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e))
				return -EINVAL;

			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
static void
cleanup_entry(struct ipt_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ipt_get_target(e);

	par.net      = net;
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	xt_percpu_counter_free(&e->counters);
}
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ipt_replace *repl)
{
	struct xt_percpu_counter_alloc_state alloc_state = { 0 };
	struct ipt_entry *iter;
	unsigned int *offsets;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	offsets = xt_alloc_entry_offsets(newinfo->number);
	if (!offsets)
		return -ENOMEM;
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			goto out_free;
		if (i < repl->num_entries)
			offsets[i] = (void *)iter - entry0;
		++i;
		if (strcmp(ipt_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	ret = -EINVAL;
	if (i != repl->num_entries)
		goto out_free;

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF)
			goto out_free;
		if (newinfo->underflow[i] == 0xFFFFFFFF)
			goto out_free;
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
		ret = -ELOOP;
		goto out_free;
	}
	kvfree(offsets);

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size,
				       &alloc_state);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	return ret;
 out_free:
	kvfree(offsets);
	return ret;
}
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries, t->size) {
			struct xt_counters *tmp;
			u64 bcnt, pcnt;
			unsigned int start;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			do {
				start = read_seqcount_begin(s);
				bcnt = tmp->bcnt;
				pcnt = tmp->pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i; /* macro does multi eval of i */
		}
	}
}
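/* The seqcount retry loop above yields a tear-free snapshot of each 64-bit
 * per-cpu counter pair even while other CPUs are still updating them under
 * xt_write_recseq_begin()/xt_write_recseq_end().
 */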
static void get_old_counters(const struct xt_table_info *t,
			     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu, i;

	for_each_possible_cpu(cpu) {
		i = 0;
		xt_entry_foreach(iter, t->entries, t->size) {
			const struct xt_counters *tmp;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			ADD_COUNTER(counters[i], tmp->bcnt, tmp->pcnt);
			++i; /* macro does multi eval of i */
		}
	}
}
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vzalloc(countersize);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	loc_cpu_entry = private->entries;

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = loc_cpu_entry + off;
		if (copy_to_user(userptr + off, e, sizeof(*e))) {
			ret = -EFAULT;
			goto free_counters;
		}
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (xt_match_to_user(m, userptr + off + i)) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target_c(e);
		if (xt_target_to_user(t, userptr + off + e->target_offset)) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
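/* Positive standard verdicts are byte offsets into the rule blob.  Because
 * a native ipt_entry is larger than its 32-bit compat counterpart, every
 * offset shifts during translation; xt_compat_calc_jump() returns the
 * accumulated size delta at a given offset so jumps keep pointing at the
 * same rule in the other layout.
 */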
static int compat_calc_entry(const struct ipt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ipt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ipt_entry *iter;
	const void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries;
	xt_compat_init_offsets(AF_INET, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo))
		return -EINVAL;

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = xt_request_find_table_lock(net, AF_INET, name);
	if (!IS_ERR(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = PTR_ERR(t);
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}
static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get))
		return -EINVAL;
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size)
		return -EINVAL;
	get.name[sizeof(get.name) - 1] = '\0';

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (!IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else
			ret = -EAGAIN;

		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = PTR_ERR(t);

	return ret;
}
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	struct ipt_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = xt_request_find_table_lock(net, AF_INET, name);
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	get_old_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
do_add_counters(struct net *net, const void __user *user,
		unsigned int len, int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	struct ipt_entry *iter;
	unsigned int addend;

	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
	if (IS_ERR(paddc))
		return PTR_ERR(paddc);

	t = xt_find_table_lock(net, AF_INET, tmp.name);
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != tmp.num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	addend = xt_write_recseq_begin();
	xt_entry_foreach(iter, private->entries, private->size) {
		struct xt_counters *tmp;

		tmp = xt_get_this_cpu_counter(&iter->counters);
		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[XT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct xt_counters * */
	struct compat_ipt_entry	entries[0];
};
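/* Mirrors struct ipt_replace for 32-bit userspace on a 64-bit kernel: the
 * layout differs only where pointer width matters (compat_uptr_t counters
 * and the smaller compat_ipt_entry array), which is why each entry must be
 * re-sized when translated between the two layouts.
 */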
static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = *dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
static int
compat_find_calc_match(struct xt_entry_match *m,
		       const struct ipt_ip *ip,
		       int *size)
{
	struct xt_match *match;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match))
		return PTR_ERR(match);

	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);
	return 0;
}
static void compat_release_entry(struct compat_ipt_entry *e)
{
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		module_put(ematch->u.kernel.match->me);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
}
static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off;

	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit)
		return -EINVAL;

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target))
		return -EINVAL;

	if (!ip_checkentry(&e->ip))
		return -EINVAL;

	ret = xt_compat_check_entry_offsets(e, e->elems,
					    e->target_offset, e->next_offset);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, &e->ip, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
static void
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct ipt_entry *de;
	unsigned int origsize;
	int h;
	struct xt_entry_match *ematch;

	origsize = *size;
	de = *dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e)
		xt_compat_match_from_user(ematch, dstptr, size);

	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
}
static int
translate_compat_table(struct net *net,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       const struct compat_ipt_replace *compatr)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ipt_entry *iter0;
	struct ipt_replace repl;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = compatr->size;
	info->number = compatr->num_entries;

	j = 0;
	xt_compat_lock(AF_INET);
	xt_compat_init_offsets(AF_INET, compatr->num_entries);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, compatr->size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + compatr->size);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != compatr->num_entries)
		goto out_unlock;

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = compatr->num_entries;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = compatr->hook_entry[i];
		newinfo->underflow[i] = compatr->underflow[i];
	}
	entry1 = newinfo->entries;
	pos = entry1;
	size = compatr->size;
	xt_entry_foreach(iter0, entry0, compatr->size)
		compat_copy_entry_from_user(iter0, &pos, &size,
					    newinfo, entry1);

	/* all module references in entry0 are now gone.
	 * entry1/newinfo contains a 64bit ruleset that looks exactly as
	 * generated by 64bit userspace.
	 *
	 * Call standard translate_table() to validate all hook_entrys,
	 * underflows, check for loops, etc.
	 */
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);

	memcpy(&repl, compatr, sizeof(*compatr));

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		repl.hook_entry[i] = newinfo->hook_entry[i];
		repl.underflow[i] = newinfo->underflow[i];
	}

	repl.num_counters = 0;
	repl.counters = NULL;
	repl.size = newinfo->size;
	ret = translate_table(net, newinfo, entry1, &repl);
	if (ret)
		goto free_newinfo;

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	xt_entry_foreach(iter0, entry0, compatr->size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
}
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		ret = -EINVAL;
	}

	return ret;
}
struct compat_ipt_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	unsigned int i = 0;
	struct ipt_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, private->entries, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get))
		return -EINVAL;

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size)
		return -EINVAL;

	get.name[sizeof(get.name) - 1] = '\0';

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (!IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size)
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		else if (!ret)
			ret = -EAGAIN;

		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = PTR_ERR(t);

	xt_compat_unlock(AF_INET);
	return ret;
}
static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		ret = -EINVAL;
	}

	return ret;
}
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		ret = -EINVAL;
	}

	return ret;
}
static void __ipt_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ipt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries;
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
int ipt_register_table(struct net *net, const struct xt_table *table,
		       const struct ipt_replace *repl,
		       const struct nf_hook_ops *ops, struct xt_table **res)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	/* set res now, will see skbs right after nf_register_net_hooks */
	WRITE_ONCE(*res, new_table);

	ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
	if (ret != 0) {
		__ipt_unregister_table(net, new_table);
		*res = NULL;
	}

	return ret;

out_free:
	xt_free_table_info(newinfo);
	return ret;
}
void ipt_unregister_table(struct net *net, struct xt_table *table,
			  const struct nf_hook_ops *ops)
{
	nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
	__ipt_unregister_table(net, table);
}
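/* Typical usage (sketch modelled on iptable_filter; the repl/ops values are
 * illustrative): build an initial ruleset from the template, then register
 * it together with the hook ops covering the table's valid hooks:
 *
 *	struct ipt_replace *repl = ipt_alloc_initial_table(&packet_filter);
 *	if (!repl)
 *		return -ENOMEM;
 *	err = ipt_register_table(net, &packet_filter, repl, filter_ops,
 *				 &net->ipv4.iptable_filter);
 *	kfree(repl);
 */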
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
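/* E.g. "--icmp-type echo-request" stores test_type == 8 with a full code
 * range of 0..0xFF; a test_type of 0xFF acts as the "any ICMP" wildcard.
 * The final XOR implements "! --icmp-type".
 */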
static bool
icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}
static int icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
}
static struct xt_target ipt_builtin_tg[] __read_mostly = {
	{
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name             = XT_ERROR_TARGET,
		.target           = ipt_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV4,
	},
};
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
static struct xt_match ipt_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp",
		.match      = icmp_match,
		.matchsize  = sizeof(struct ipt_icmp),
		.checkentry = icmp_checkentry,
		.proto      = IPPROTO_ICMP,
		.family     = NFPROTO_IPV4,
	},
};
static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};
static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	return 0;

err5:
	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
err4:
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}
static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	unregister_pernet_subsys(&ip_tables_net_ops);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);