diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index c15d394..ccd6d8b 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -49,9 +49,38 @@ static inline void sock_update_classid(struct sock *sk)
 	if (classid != sk->sk_classid)
 		sk->sk_classid = classid;
 }
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+	u32 classid = task_cls_state(current)->classid;
+
+	/* Due to the nature of the classifier it is required to ignore all
+	 * packets originating from softirq context as accessing `current'
+	 * would lead to false results.
+	 *
+	 * This test assumes that all callers of dev_queue_xmit() explicitly
+	 * disable bh. Knowing this, it is possible to detect softirq based
+	 * calls by looking at the number of nested bh disable calls because
+	 * softirqs always disable bh.
+	 */
+	if (in_serving_softirq()) {
+		/* If there is an sk_classid we'll use that. */
+		if (!skb->sk)
+			return 0;
+
+		classid = skb->sk->sk_classid;
+	}
+
+	return classid;
+}
 #else /* !CONFIG_CGROUP_NET_CLASSID */
 static inline void sock_update_classid(struct sock *sk)
 {
 }
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+	return 0;
+}
 #endif /* CONFIG_CGROUP_NET_CLASSID */
 #endif /* _NET_CLS_CGROUP_H */
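
For context, the helper added above is intended to be called from a tc
classifier's classify hook on the transmit path, where `current' is the task
that generated the packet. Below is a minimal, hypothetical sketch of such a
caller, assuming the kernel headers <net/cls_cgroup.h> and <net/pkt_cls.h>;
the function name example_classify and the simplified match logic are
illustrative placeholders, not the actual cls_cgroup classifier code.

#include <linux/skbuff.h>
#include <net/cls_cgroup.h>
#include <net/pkt_cls.h>

/* Hypothetical classify hook: look up the net_cls classid for the current
 * task (or, in softirq context, for the originating socket) and report it
 * as the tc class to use.  Returns -1 when no classid has been configured.
 */
static int example_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	u32 classid = task_get_classid(skb);

	if (!classid)
		return -1;		/* no cgroup classid set, no match */

	res->classid = classid;		/* hand the classid back to the qdisc */
	res->class = 0;
	return 0;
}

The point of factoring the lookup into task_get_classid() is that the softirq
handling (falling back to the socket's cached sk_classid when `current' is not
meaningful) lives in one place, so every caller gets the same behaviour rather
than duplicating the check.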