/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Author:
 *	Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 *	- The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;
	return NULL;
}
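
/*
 * Illustrative sketch, not part of this file: SKF_NET_OFF and SKF_LL_OFF
 * are negative base offsets, so a classic BPF instruction such as
 *
 *	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9)
 *
 * loads the byte at offset 9 into the network header (the IPv4 protocol
 * field) no matter how much link-layer header precedes it; the helper
 * above resolves such offsets against skb_network_header() and
 * skb_mac_header().
 */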

static inline void *load_pointer(const struct sk_buff *skb, int k,
				 unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);
	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = SK_RUN_FILTER(filter, skb);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter);
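
/*
 * Illustrative sketch, not part of this file: if the attached program
 * returns 48, sk_filter() trims the skb to its first 48 bytes via
 * pskb_trim(); if it returns 0, the caller sees -EPERM and tosses the
 * packet; a return of 0xffff (the tcpdump convention) keeps any
 * normally-sized packet whole.
 */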

/**
 *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@fentry: filter to apply
 *
 * Decode and apply filter instructions to the skb->data. Return length
 * to keep, 0 for none. @skb is the data we are filtering, @fentry is
 * the array of filter instructions. Because all jumps are guaranteed to
 * land before the last instruction, and the last instruction is
 * guaranteed to be a RET, we don't need to check flen. (We used to pass
 * the length of the filter to this function.)
 */
unsigned int sk_run_filter(const struct sk_buff *skb,
			   const struct sock_filter *fentry)
{
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	u32 tmp;
	int k;

	/*
	 * Process array of filter instructions.
	 */
	for (;; fentry++) {
#if defined(CONFIG_X86_32)
#define	K (fentry->k)
#else
		const u32 K = fentry->k;
#endif

		switch (fentry->code) {
		case BPF_S_ALU_ADD_X:
			A += X;
			continue;
		case BPF_S_ALU_ADD_K:
			A += K;
			continue;
		case BPF_S_ALU_SUB_X:
			A -= X;
			continue;
		case BPF_S_ALU_SUB_K:
			A -= K;
			continue;
		case BPF_S_ALU_MUL_X:
			A *= X;
			continue;
		case BPF_S_ALU_MUL_K:
			A *= K;
			continue;
		case BPF_S_ALU_DIV_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_S_ALU_DIV_K:
			A /= K;
			continue;
		case BPF_S_ALU_MOD_X:
			if (X == 0)
				return 0;
			A %= X;
			continue;
		case BPF_S_ALU_MOD_K:
			A %= K;
			continue;
		case BPF_S_ALU_AND_X:
			A &= X;
			continue;
		case BPF_S_ALU_AND_K:
			A &= K;
			continue;
		case BPF_S_ANC_ALU_XOR_X:
		case BPF_S_ALU_XOR_X:
			A ^= X;
			continue;
		case BPF_S_ALU_XOR_K:
			A ^= K;
			continue;
		case BPF_S_ALU_LSH_X:
			A <<= X;
			continue;
		case BPF_S_ALU_LSH_K:
			A <<= K;
			continue;
		case BPF_S_ALU_RSH_X:
			A >>= X;
			continue;
		case BPF_S_ALU_RSH_K:
			A >>= K;
			continue;
		case BPF_S_JMP_JGT_K:
			fentry += (A > K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			fentry += (A >= K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			fentry += (A == K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			fentry += (A & K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			fentry += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			fentry += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			fentry += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			fentry += (A & X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_LD_W_ABS:
			k = K;
load_w:
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be32(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_H_ABS:
			k = K;
load_h:
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be16(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_B_ABS:
			k = K;
load_b:
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			return 0;
		case BPF_S_LDX_W_LEN:
			X = skb->len;
			continue;
		case BPF_S_LD_W_IND:
			k = X + K;
			goto load_w;
		case BPF_S_LD_H_IND:
			k = X + K;
			goto load_h;
		case BPF_S_LD_B_IND:
			k = X + K;
			goto load_b;
		case BPF_S_LDX_B_MSH:
			ptr = load_pointer(skb, K, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_S_RET_K:
			return K;
		case BPF_S_RET_A:
			return A;
		case BPF_S_ANC_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case BPF_S_ANC_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case BPF_S_ANC_IFINDEX:
			if (!skb->dev)
				return 0;
			A = skb->dev->ifindex;
			continue;
		case BPF_S_ANC_MARK:
			A = skb->mark;
			continue;
		case BPF_S_ANC_QUEUE:
			A = skb->queue_mapping;
			continue;
		case BPF_S_ANC_HATYPE:
			if (!skb->dev)
				return 0;
			A = skb->dev->type;
			continue;
		case BPF_S_ANC_RXHASH:
			A = skb->rxhash;
			continue;
		case BPF_S_ANC_CPU:
			A = raw_smp_processor_id();
			continue;
		case BPF_S_ANC_VLAN_TAG:
			A = vlan_tx_tag_get(skb);
			continue;
		case BPF_S_ANC_VLAN_TAG_PRESENT:
			A = !!vlan_tx_tag_present(skb);
			continue;
		case BPF_S_ANC_PAY_OFFSET:
			A = __skb_get_poff(skb);
			continue;
		case BPF_S_ANC_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (skb->len < sizeof(struct nlattr))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case BPF_S_ANC_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (skb->len < sizeof(struct nlattr))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = (struct nlattr *)&skb->data[A];
			if (nla->nla_len > skb->len - A)
				return 0;

			nla = nla_find_nested(nla, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
#ifdef CONFIG_SECCOMP_FILTER
		case BPF_S_ANC_SECCOMP_LD_W:
			A = seccomp_bpf_load(fentry->k);
			continue;
#endif
		default:
			WARN_RATELIMIT(1, "Unknown code:%u jt:%u jf:%u k:%u\n",
				       fentry->code, fentry->jt,
				       fentry->jf, fentry->k);
			return 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sk_run_filter);
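
/*
 * Illustrative sketch, not part of this file: sk_run_filter() consumes
 * programs such as this classic "ARP only" filter, once sk_chk_filter()
 * has remapped the user-visible opcodes to BPF_S_* values. It loads the
 * EtherType halfword at offset 12, compares it with ETH_P_ARP, and
 * returns 0xffff (keep the packet) or 0 (drop it):
 *
 *	struct sock_filter arp_only[] = {
 *		BPF_STMT(BPF_LD  | BPF_H | BPF_ABS, 12),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_ARP, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),
 *		BPF_STMT(BPF_RET | BPF_K, 0),
 *	};
 */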

/*
 * A BPF program is able to use 16 cells of memory to store intermediate
 * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
 * As we don't want to clear the mem[] array for each packet going through
 * sk_run_filter(), we check that a filter loaded by a user never tries to
 * read a cell that was not previously written, and we check all branches
 * to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);
	masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_S_ST:
		case BPF_S_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_S_JMP_JA:
			/* a jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* a jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
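
/*
 * Illustrative sketch, not part of this file: check_load_and_stores()
 * rejects the two-instruction program below with -EINVAL because it
 * loads scratch cell M[0] without ever storing to it; prefixing it
 * with BPF_STMT(BPF_ST, 0) marks cell 0 as written and makes it pass:
 *
 *	BPF_STMT(BPF_LD | BPF_MEM, 0),
 *	BPF_STMT(BPF_RET | BPF_A, 0),
 */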

/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward, as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
{
	/*
	 * Valid instructions are initialized to non-0.
	 * Invalid instructions are initialized to 0.
	 */
	static const u8 codes[] = {
		[BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K,
		[BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X,
		[BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K,
		[BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X,
		[BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
		[BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
		[BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
		[BPF_ALU|BPF_MOD|BPF_K]  = BPF_S_ALU_MOD_K,
		[BPF_ALU|BPF_MOD|BPF_X]  = BPF_S_ALU_MOD_X,
		[BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
		[BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
		[BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
		[BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
		[BPF_ALU|BPF_XOR|BPF_K]  = BPF_S_ALU_XOR_K,
		[BPF_ALU|BPF_XOR|BPF_X]  = BPF_S_ALU_XOR_X,
		[BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
		[BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
		[BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
		[BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X,
		[BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG,
		[BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS,
		[BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS,
		[BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS,
		[BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN,
		[BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND,
		[BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND,
		[BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND,
		[BPF_LD|BPF_IMM]         = BPF_S_LD_IMM,
		[BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN,
		[BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH,
		[BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM,
		[BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX,
		[BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA,
		[BPF_RET|BPF_K]          = BPF_S_RET_K,
		[BPF_RET|BPF_A]          = BPF_S_RET_A,
		[BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K,
		[BPF_LD|BPF_MEM]         = BPF_S_LD_MEM,
		[BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM,
		[BPF_ST]                 = BPF_S_ST,
		[BPF_STX]                = BPF_S_STX,
		[BPF_JMP|BPF_JA]         = BPF_S_JMP_JA,
		[BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K,
		[BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X,
		[BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K,
		[BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X,
		[BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K,
		[BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X,
		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
	};
	int pc;
	bool anc_found;

	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;

		anc_found = false;
		if (code >= ARRAY_SIZE(codes))
			return -EINVAL;
		code = codes[code];
		if (!code)
			return -EINVAL;

		/* Some instructions need special checks */
		switch (code) {
		case BPF_S_ALU_DIV_K:
		case BPF_S_ALU_MOD_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
			/* check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_S_JMP_JA:
			/*
			 * Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* for conditionals both must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_S_LD_W_ABS:
		case BPF_S_LD_H_ABS:
		case BPF_S_LD_B_ABS:
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE:	\
				code = BPF_S_ANC_##CODE;	\
				anc_found = true;		\
				break
			switch (ftest->k) {
			ANCILLARY(PROTOCOL);
			ANCILLARY(PKTTYPE);
			ANCILLARY(IFINDEX);
			ANCILLARY(NLATTR);
			ANCILLARY(NLATTR_NEST);
			ANCILLARY(MARK);
			ANCILLARY(QUEUE);
			ANCILLARY(HATYPE);
			ANCILLARY(RXHASH);
			ANCILLARY(CPU);
			ANCILLARY(ALU_XOR_X);
			ANCILLARY(VLAN_TAG);
			ANCILLARY(VLAN_TAG_PRESENT);
			ANCILLARY(PAY_OFFSET);
			}

			/* ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
		ftest->code = code;
	}

	/* last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_S_RET_K:
	case BPF_S_RET_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
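
/*
 * Illustrative sketch, not part of this file: ancillary loads are plain
 * BPF_LD | BPF_W | BPF_ABS instructions whose offset lies at or above
 * SKF_AD_OFF; the ANCILLARY() expansion above remaps them to BPF_S_ANC_*
 * codes. For example,
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_PROTOCOL)
 *
 * becomes BPF_S_ANC_PROTOCOL and loads skb->protocol into the
 * accumulator at run time.
 */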

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	bpf_jit_free(fp);
}
EXPORT_SYMBOL(sk_filter_release_rcu);

static int __sk_prepare_filter(struct sk_filter *fp)
{
	int err;

	fp->bpf_func = sk_run_filter;

	err = sk_chk_filter(fp->insns, fp->len);
	if (err)
		return err;

	bpf_jit_compile(fp);
	return 0;
}

/**
 *	sk_unattached_filter_create - create an unattached filter
 *	@fprog: the filter program
 *	@pfp: the unattached filter that is created
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter,
 * a negative errno code is returned. On success the return is zero.
 */
int sk_unattached_filter_create(struct sk_filter **pfp,
				struct sock_fprog *fprog)
{
	struct sk_filter *fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	memcpy(fp->insns, fprog->filter, fsize);

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;

	err = __sk_prepare_filter(fp);
	if (err)
		goto free_mem;

	*pfp = fp;
	return 0;
free_mem:
	kfree(fp);
	return err;
}
EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
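
/*
 * Illustrative sketch, not part of this file: a minimal in-kernel user
 * could build an accept-everything filter like this. Note that, unlike
 * sk_attach_filter(), the instructions are copied with memcpy(), so
 * fprog->filter must point at kernel memory:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = insns };
 *	struct sk_filter *fp;
 *	int err = sk_unattached_filter_create(&fp, &prog);
 */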

void sk_unattached_filter_destroy(struct sk_filter *fp)
{
	sk_filter_release(fp);
}
EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter, a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	unsigned int sk_fsize = sk_filter_size(fprog->len);
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, sk_fsize);
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;

	err = __sk_prepare_filter(fp);
	if (err) {
		sk_filter_uncharge(sk, fp);
		return err;
	}

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);
	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
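
/*
 * Illustrative sketch, not part of this file: userspace reaches
 * sk_attach_filter() through setsockopt(SO_ATTACH_FILTER):
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),
 *	};
 *	struct sock_fprog prog = {
 *		.len = sizeof(insns) / sizeof(insns[0]),
 *		.filter = insns,
 *	};
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 *
 * A later setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, NULL, 0) ends up
 * in sk_detach_filter() below.
 */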

int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);

void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
{
	static const u16 decodes[] = {
		[BPF_S_ALU_ADD_K]	= BPF_ALU|BPF_ADD|BPF_K,
		[BPF_S_ALU_ADD_X]	= BPF_ALU|BPF_ADD|BPF_X,
		[BPF_S_ALU_SUB_K]	= BPF_ALU|BPF_SUB|BPF_K,
		[BPF_S_ALU_SUB_X]	= BPF_ALU|BPF_SUB|BPF_X,
		[BPF_S_ALU_MUL_K]	= BPF_ALU|BPF_MUL|BPF_K,
		[BPF_S_ALU_MUL_X]	= BPF_ALU|BPF_MUL|BPF_X,
		[BPF_S_ALU_DIV_X]	= BPF_ALU|BPF_DIV|BPF_X,
		[BPF_S_ALU_MOD_K]	= BPF_ALU|BPF_MOD|BPF_K,
		[BPF_S_ALU_MOD_X]	= BPF_ALU|BPF_MOD|BPF_X,
		[BPF_S_ALU_AND_K]	= BPF_ALU|BPF_AND|BPF_K,
		[BPF_S_ALU_AND_X]	= BPF_ALU|BPF_AND|BPF_X,
		[BPF_S_ALU_OR_K]	= BPF_ALU|BPF_OR|BPF_K,
		[BPF_S_ALU_OR_X]	= BPF_ALU|BPF_OR|BPF_X,
		[BPF_S_ALU_XOR_K]	= BPF_ALU|BPF_XOR|BPF_K,
		[BPF_S_ALU_XOR_X]	= BPF_ALU|BPF_XOR|BPF_X,
		[BPF_S_ALU_LSH_K]	= BPF_ALU|BPF_LSH|BPF_K,
		[BPF_S_ALU_LSH_X]	= BPF_ALU|BPF_LSH|BPF_X,
		[BPF_S_ALU_RSH_K]	= BPF_ALU|BPF_RSH|BPF_K,
		[BPF_S_ALU_RSH_X]	= BPF_ALU|BPF_RSH|BPF_X,
		[BPF_S_ALU_NEG]		= BPF_ALU|BPF_NEG,
		[BPF_S_LD_W_ABS]	= BPF_LD|BPF_W|BPF_ABS,
		[BPF_S_LD_H_ABS]	= BPF_LD|BPF_H|BPF_ABS,
		[BPF_S_LD_B_ABS]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_PROTOCOL]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_PKTTYPE]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_IFINDEX]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_NLATTR]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_NLATTR_NEST]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_MARK]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_QUEUE]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_HATYPE]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_RXHASH]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_CPU]		= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_ALU_XOR_X]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_SECCOMP_LD_W] = BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_VLAN_TAG]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_PAY_OFFSET]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_LD_W_LEN]	= BPF_LD|BPF_W|BPF_LEN,
		[BPF_S_LD_W_IND]	= BPF_LD|BPF_W|BPF_IND,
		[BPF_S_LD_H_IND]	= BPF_LD|BPF_H|BPF_IND,
		[BPF_S_LD_B_IND]	= BPF_LD|BPF_B|BPF_IND,
		[BPF_S_LD_IMM]		= BPF_LD|BPF_IMM,
		[BPF_S_LDX_W_LEN]	= BPF_LDX|BPF_W|BPF_LEN,
		[BPF_S_LDX_B_MSH]	= BPF_LDX|BPF_B|BPF_MSH,
		[BPF_S_LDX_IMM]		= BPF_LDX|BPF_IMM,
		[BPF_S_MISC_TAX]	= BPF_MISC|BPF_TAX,
		[BPF_S_MISC_TXA]	= BPF_MISC|BPF_TXA,
		[BPF_S_RET_K]		= BPF_RET|BPF_K,
		[BPF_S_RET_A]		= BPF_RET|BPF_A,
		[BPF_S_ALU_DIV_K]	= BPF_ALU|BPF_DIV|BPF_K,
		[BPF_S_LD_MEM]		= BPF_LD|BPF_MEM,
		[BPF_S_LDX_MEM]		= BPF_LDX|BPF_MEM,
		[BPF_S_ST]		= BPF_ST,
		[BPF_S_STX]		= BPF_STX,
		[BPF_S_JMP_JA]		= BPF_JMP|BPF_JA,
		[BPF_S_JMP_JEQ_K]	= BPF_JMP|BPF_JEQ|BPF_K,
		[BPF_S_JMP_JEQ_X]	= BPF_JMP|BPF_JEQ|BPF_X,
		[BPF_S_JMP_JGE_K]	= BPF_JMP|BPF_JGE|BPF_K,
		[BPF_S_JMP_JGE_X]	= BPF_JMP|BPF_JGE|BPF_X,
		[BPF_S_JMP_JGT_K]	= BPF_JMP|BPF_JGT|BPF_K,
		[BPF_S_JMP_JGT_X]	= BPF_JMP|BPF_JGT|BPF_X,
		[BPF_S_JMP_JSET_K]	= BPF_JMP|BPF_JSET|BPF_K,
		[BPF_S_JMP_JSET_X]	= BPF_JMP|BPF_JSET|BPF_X,
	};
	u16 code;

	code = filt->code;

	to->code = decodes[code];
	to->jt = filt->jt;
	to->jf = filt->jf;
	to->k = filt->k;
}

int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len)
{
	struct sk_filter *filter;
	int i, ret;

	lock_sock(sk);
	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	ret = 0;
	if (!filter)
		goto out;
	ret = filter->len;
	if (!len)
		goto out;
	ret = -EINVAL;
	if (len < filter->len)
		goto out;

	ret = -EFAULT;
	for (i = 0; i < filter->len; i++) {
		struct sock_filter fb;

		sk_decode_filter(&filter->insns[i], &fb);
		if (copy_to_user(&ubuf[i], &fb, sizeof(fb)))
			goto out;
	}

	ret = filter->len;
out:
	release_sock(sk);
	return ret;
}
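
/*
 * Illustrative sketch, not part of this file: userspace reaches
 * sk_get_filter() through getsockopt(SO_GET_FILTER). Passing a zero
 * option length first returns the instruction count, so retrieval is
 * typically a two-step call (the length counts instructions, not
 * bytes):
 *
 *	socklen_t len = 0;
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &len);
 *	struct sock_filter *insns = calloc(len, sizeof(*insns));
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, insns, &len);
 */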