bpf: add skb->queue_mapping write access from tc clsact
author    Jesper Dangaard Brouer <brouer@redhat.com>
          Tue, 19 Feb 2019 18:53:02 +0000 (19:53 +0100)
committer Daniel Borkmann <daniel@iogearbox.net>
          Tue, 19 Feb 2019 20:56:05 +0000 (21:56 +0100)
The skb->queue_mapping field already has read access, via __sk_buff->queue_mapping.

This patch allows BPF tc qdisc clsact write access to the queue_mapping via
tc_cls_act_is_valid_access.  It also handles that the reserved value
NO_QUEUE_MAPPING is not allowed to be written; such a store is skipped.

It is already possible to change this via the TC filter action skbedit,
see tc-skbedit(8).  Due to the lack of TC examples, let's show one:

  # tc qdisc  add  dev ixgbe1 clsact
  # tc filter add  dev ixgbe1 ingress matchall action skbedit queue_mapping 5
  # tc filter list dev ixgbe1 ingress

A common mistake is overlooking that XPS (Transmit Packet Steering) takes
precedence over setting skb->queue_mapping.  XPS is configured per DEVICE
via /sys/class/net/DEVICE/queues/tx-*/xps_cpus with a CPU hex mask.  To
disable it, set the mask to 00.
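E.g. XPS can be disabled on all TX queues of ixgbe1 (the device from the
example above) like this:

  # for f in /sys/class/net/ixgbe1/queues/tx-*/xps_cpus; do echo 00 > $f; done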

The purpose of changing skb->queue_mapping is to influence the selection of
the net_device "txq" (struct netdev_queue), which in turn influences the
selection of the qdisc "root_lock" (via txq->qdisc->q.lock) and of
txq->_xmit_lock.  When using the MQ qdisc, each txq->qdisc points to a
different qdisc with its own locks and HARD_TX_LOCK (txq->_xmit_lock),
allowing for CPU scalability.

Due to the lack of TC examples, let's also show how to attach clsact BPF
programs:

 # tc qdisc  add  dev ixgbe2 clsact
 # tc filter add  dev ixgbe2 egress bpf da obj XXX_kern.o sec tc_qmap2cpu
 # tc filter list dev ixgbe2 egress
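
For reference, a minimal sketch of what a program behind such a
"tc_qmap2cpu" section could look like (illustrative only, not the actual
XXX_kern.o above; the helper choice and the cpu + 1 offset are assumptions):

  /* SPDX-License-Identifier: GPL-2.0 */
  #include <linux/bpf.h>
  #include <linux/pkt_cls.h>
  #include <bpf/bpf_helpers.h>

  /* Steer each packet to the TX queue matching the transmitting CPU.
   * On the TX path a queue_mapping of 0 means "not recorded", and a
   * recorded value is interpreted as queue + 1, hence the cpu + 1.
   */
  SEC("tc_qmap2cpu")
  int tc_qmap2cpu_prog(struct __sk_buff *skb)
  {
          skb->queue_mapping = bpf_get_smp_processor_id() + 1;
          return TC_ACT_OK;
  }

  char _license[] SEC("license") = "GPL";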

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
diff --git a/net/core/filter.c b/net/core/filter.c
index b584cb42a8037301e81ba3eea5cada8c18ad82bb..85749f6ec789e3df41584da06d0608886db826d1 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -6279,6 +6279,7 @@ static bool tc_cls_act_is_valid_access(int off, int size,
                case bpf_ctx_range(struct __sk_buff, tc_classid):
                case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
                case bpf_ctx_range(struct __sk_buff, tstamp):
+               case bpf_ctx_range(struct __sk_buff, queue_mapping):
                        break;
                default:
                        return false;
@@ -6683,9 +6684,18 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct __sk_buff, queue_mapping):
-               *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
-                                     bpf_target_off(struct sk_buff, queue_mapping, 2,
-                                                    target_size));
+               if (type == BPF_WRITE) {
+                       *insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1);
+                       *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
+                                             bpf_target_off(struct sk_buff,
+                                                            queue_mapping,
+                                                            2, target_size));
+               } else {
+                       *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
+                                             bpf_target_off(struct sk_buff,
+                                                            queue_mapping,
+                                                            2, target_size));
+               }
                break;
 
        case offsetof(struct __sk_buff, vlan_present):