bpf: Use migrate_disable/enable in array macros and cgroup/lirc code.
author David Miller <davem@davemloft.net>
Mon, 24 Feb 2020 14:01:46 +0000 (15:01 +0100)
committer Alexei Starovoitov <ast@kernel.org>
Tue, 25 Feb 2020 00:20:09 +0000 (16:20 -0800)
Replace the preemption disable/enable pairs with migrate_disable/enable() to
reflect the actual requirement and to allow PREEMPT_RT to substitute them
with a true migration-disable mechanism which does not disable
preemption.

This also covers the code paths that go via __bpf_prog_run_save_cb().
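
For illustration, a minimal sketch of the invocation pattern the conversion
moves to (the helper name bpf_prog_run_migrate_protected() is made up for
this example and not part of the patch):

  #include <linux/filter.h>
  #include <linux/preempt.h>
  #include <linux/rcupdate.h>
  #include <linux/skbuff.h>

  static u32 bpf_prog_run_migrate_protected(const struct bpf_prog *prog,
                                            struct sk_buff *skb)
  {
          u32 res;

          migrate_disable();      /* keep the task on this CPU so per-CPU
                                   * scratch data used by the program stays
                                   * consistent */
          rcu_read_lock();        /* protect the prog lookup/lifetime */
          res = BPF_PROG_RUN(prog, skb);
          rcu_read_unlock();
          migrate_enable();

          return res;
  }

On non-RT kernels migrate_disable()/migrate_enable() currently map to
preempt_disable()/preempt_enable(), so there is no functional change; on
PREEMPT_RT only migration is disabled and the section stays preemptible.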

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200224145643.998293311@linutronix.de
include/linux/bpf.h
include/linux/filter.h

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 49b1a70..76b3a0e 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -885,7 +885,7 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
                struct bpf_prog *_prog;                 \
                struct bpf_prog_array *_array;          \
                u32 _ret = 1;                           \
-               preempt_disable();                      \
+               migrate_disable();                      \
                rcu_read_lock();                        \
                _array = rcu_dereference(array);        \
                if (unlikely(check_non_null && !_array))\
@@ -898,7 +898,7 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
                }                                       \
 _out:                                                  \
                rcu_read_unlock();                      \
-               preempt_enable();                       \
+               migrate_enable();                       \
                _ret;                                   \
         })
 
@@ -932,7 +932,7 @@ _out:                                                       \
                u32 ret;                                \
                u32 _ret = 1;                           \
                u32 _cn = 0;                            \
-               preempt_disable();                      \
+               migrate_disable();                      \
                rcu_read_lock();                        \
                _array = rcu_dereference(array);        \
                _item = &_array->items[0];              \
@@ -944,7 +944,7 @@ _out:                                                       \
                        _item++;                        \
                }                                       \
                rcu_read_unlock();                      \
-               preempt_enable();                       \
+               migrate_enable();                       \
                if (_ret)                               \
                        _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);  \
                else                                    \
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 9270de2..43b5e45 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -677,6 +677,7 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
        return qdisc_skb_cb(skb)->data;
 }
 
+/* Must be invoked with migration disabled */
 static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
                                         struct sk_buff *skb)
 {
@@ -702,9 +703,9 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
 {
        u32 res;
 
-       preempt_disable();
+       migrate_disable();
        res = __bpf_prog_run_save_cb(prog, skb);
-       preempt_enable();
+       migrate_enable();
        return res;
 }