	memcpy(dst->fp_items, src->fp_items, sizeof(src->fp_items));
}
+static inline void bpf_mprog_entry_clear(struct bpf_mprog_entry *dst)
+{
+	memset(dst->fp_items, 0, sizeof(dst->fp_items));
+}
+
+static inline void bpf_mprog_clear_all(struct bpf_mprog_entry *entry,
+				       struct bpf_mprog_entry **entry_new)
+{
+	struct bpf_mprog_entry *peer;
+
+	peer = bpf_mprog_peer(entry);
+	bpf_mprog_entry_clear(peer);
+	peer->parent->count = 0;
+	*entry_new = peer;
+}
+
static inline void bpf_mprog_entry_grow(struct bpf_mprog_entry *entry, int idx)
{
	int total = bpf_mprog_total(entry);
void tcx_uninstall(struct net_device *dev, bool ingress)
{
+	struct bpf_mprog_entry *entry, *entry_new = NULL;
	struct bpf_tuple tuple = {};
-	struct bpf_mprog_entry *entry;
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;
+	bool active;

	entry = tcx_entry_fetch(dev, ingress);
	if (!entry)
		return;
-	tcx_entry_update(dev, NULL, ingress);
+	active = tcx_entry(entry)->miniq_active;
+	if (active)
+		bpf_mprog_clear_all(entry, &entry_new);
+	tcx_entry_update(dev, entry_new, ingress);
	tcx_entry_sync();
	bpf_mprog_foreach_tuple(entry, fp, cp, tuple) {
		if (tuple.link)
			tcx_link(tuple.link)->dev = NULL;
		else
			bpf_prog_put(tuple.prog);
		tcx_skeys_dec(ingress);
	}
-	WARN_ON_ONCE(tcx_entry(entry)->miniq_active);
-	tcx_entry_free(entry);
+	if (!active)
+		tcx_entry_free(entry);
}
int tcx_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)