Revert "netfilter: xt_quota: fix the behavior of xt_quota module"
author Pablo Neira Ayuso <pablo@netfilter.org>
Fri, 19 Oct 2018 09:48:24 +0000 (11:48 +0200)
committer Pablo Neira Ayuso <pablo@netfilter.org>
Fri, 19 Oct 2018 12:00:34 +0000 (14:00 +0200)
This reverts commit e9837e55b0200da544a095a1fca36efd7fd3ba30.

When talking to Maze and Chenbo, we agreed to hold this back for now
due to problems in the ruleset listing path on 32-bit arches.

Signed-off-by: Maciej Żenczykowski <maze@google.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
include/uapi/linux/netfilter/xt_quota.h
net/netfilter/xt_quota.c

diff --git a/include/uapi/linux/netfilter/xt_quota.h b/include/uapi/linux/netfilter/xt_quota.h
index d72fd52..f3ba5d9 100644
--- a/include/uapi/linux/netfilter/xt_quota.h
+++ b/include/uapi/linux/netfilter/xt_quota.h
@@ -15,11 +15,9 @@ struct xt_quota_info {
        __u32 flags;
        __u32 pad;
        __aligned_u64 quota;
-#ifdef __KERNEL__
-       atomic64_t counter;
-#else
-       __aligned_u64 remain;
-#endif
+
+       /* Used internally by the kernel */
+       struct xt_quota_priv    *master;
 };
 
 #endif /* _XT_QUOTA_H */
diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c
index fceae24..10d61a6 100644
--- a/net/netfilter/xt_quota.c
+++ b/net/netfilter/xt_quota.c
 #include <linux/netfilter/xt_quota.h>
 #include <linux/module.h>
 
+struct xt_quota_priv {
+       spinlock_t      lock;
+       uint64_t        quota;
+};
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
 MODULE_DESCRIPTION("Xtables: countdown quota match");
@@ -21,48 +26,54 @@ static bool
 quota_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
        struct xt_quota_info *q = (void *)par->matchinfo;
-       u64 current_count = atomic64_read(&q->counter);
+       struct xt_quota_priv *priv = q->master;
        bool ret = q->flags & XT_QUOTA_INVERT;
-       u64 old_count, new_count;
-
-       do {
-               if (current_count == 1)
-                       return ret;
-               if (current_count <= skb->len) {
-                       atomic64_set(&q->counter, 1);
-                       return ret;
-               }
-               old_count = current_count;
-               new_count = current_count - skb->len;
-               current_count = atomic64_cmpxchg(&q->counter, old_count,
-                                                new_count);
-       } while (current_count != old_count);
-       return !ret;
+
+       spin_lock_bh(&priv->lock);
+       if (priv->quota >= skb->len) {
+               priv->quota -= skb->len;
+               ret = !ret;
+       } else {
+               /* we do not allow even small packets from now on */
+               priv->quota = 0;
+       }
+       spin_unlock_bh(&priv->lock);
+
+       return ret;
 }
 
 static int quota_mt_check(const struct xt_mtchk_param *par)
 {
        struct xt_quota_info *q = par->matchinfo;
 
-       BUILD_BUG_ON(sizeof(atomic64_t) != sizeof(__u64));
-
        if (q->flags & ~XT_QUOTA_MASK)
                return -EINVAL;
-       if (atomic64_read(&q->counter) > q->quota + 1)
-               return -ERANGE;
 
-       if (atomic64_read(&q->counter) == 0)
-               atomic64_set(&q->counter, q->quota + 1);
+       q->master = kmalloc(sizeof(*q->master), GFP_KERNEL);
+       if (q->master == NULL)
+               return -ENOMEM;
+
+       spin_lock_init(&q->master->lock);
+       q->master->quota = q->quota;
        return 0;
 }
 
+static void quota_mt_destroy(const struct xt_mtdtor_param *par)
+{
+       const struct xt_quota_info *q = par->matchinfo;
+
+       kfree(q->master);
+}
+
 static struct xt_match quota_mt_reg __read_mostly = {
        .name       = "quota",
        .revision   = 0,
        .family     = NFPROTO_UNSPEC,
        .match      = quota_mt,
        .checkentry = quota_mt_check,
+       .destroy    = quota_mt_destroy,
        .matchsize  = sizeof(struct xt_quota_info),
+       .usersize   = offsetof(struct xt_quota_info, master),
        .me         = THIS_MODULE,
 };
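
The restored .usersize = offsetof(struct xt_quota_info, master) is what keeps the
kernel-private state out of the ruleset listing path mentioned in the revert
rationale: when a rule is dumped, only the first usersize bytes of the match data
are copied back to userspace and the kernel-only tail is cleared, so the master
pointer is never exposed. Below is a minimal userspace sketch of that idea;
dump_match_info() is a hypothetical stand-in, not the actual xtables copy routine.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* User-visible layout restored by this revert (userspace view). */
struct xt_quota_priv;

struct xt_quota_info {
        uint32_t flags;
        uint32_t pad;
        uint64_t quota;
        /* Used internally by the kernel */
        struct xt_quota_priv *master;
};

/*
 * Hypothetical stand-in for the listing path: copy only the user-visible
 * prefix (usersize bytes) of the match data and zero the kernel-only tail.
 */
static void dump_match_info(void *dst, const void *kinfo,
                            size_t matchsize, size_t usersize)
{
        memcpy(dst, kinfo, usersize);
        memset((char *)dst + usersize, 0, matchsize - usersize);
}

int main(void)
{
        struct xt_quota_info k = {
                .quota  = 1024,
                .master = (struct xt_quota_priv *)0x1, /* kernel-only state */
        };
        struct xt_quota_info u;

        dump_match_info(&u, &k, sizeof(k),
                        offsetof(struct xt_quota_info, master));

        /* u.quota is still 1024, but u.master is now NULL. */
        return u.master != NULL;
}

The reverted change took the opposite approach and kept the live counter (an
atomic64_t under #ifdef __KERNEL__) inside the UAPI struct itself, which is the
layout tied to the 32-bit ruleset-listing problems described above.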