/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/cgroup-defs.h - basic definitions for cgroup
 *
 * This file provides basic types and interfaces.  Include this file directly
 * only if necessary to avoid cyclic dependencies.
 */
#ifndef _LINUX_CGROUP_DEFS_H
#define _LINUX_CGROUP_DEFS_H
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
#include <linux/bpf-cgroup.h>
#include <linux/psi_types.h>
#ifdef CONFIG_CGROUPS

struct cgroup;
struct cgroup_subsys;
struct cgroup_taskset;
struct kernfs_node;
struct kernfs_ops;
struct kernfs_open_file;
struct seq_file;
struct poll_table_struct;
#define MAX_CGROUP_TYPE_NAMELEN 32
#define MAX_CGROUP_ROOT_NAMELEN 64
#define MAX_CFTYPE_NAME 64
/* define the enumeration of all cgroup subsystems */
#define SUBSYS(_x) _x ## _cgrp_id,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
	CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS
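
/*
 * Illustrative expansion, not part of the original header: each SUBSYS()
 * entry pulled in from cgroup_subsys.h becomes one enumerator.  Assuming
 * that file contains SUBSYS(cpu) and SUBSYS(memory), the enum above would
 * expand roughly to:
 *
 *	enum cgroup_subsys_id {
 *		cpu_cgrp_id,
 *		memory_cgrp_id,
 *		CGROUP_SUBSYS_COUNT,
 *	};
 */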
/* bits in struct cgroup_subsys_state flags field */
enum {
	CSS_NO_REF	= (1 << 0), /* no reference counting for this css */
	CSS_ONLINE	= (1 << 1), /* between ->css_online() and ->css_offline() */
	CSS_RELEASED	= (1 << 2), /* refcnt reached zero, released */
	CSS_VISIBLE	= (1 << 3), /* css is visible to userland */
	CSS_DYING	= (1 << 4), /* css is dying */
};
/* bits in struct cgroup flags field */
enum {
	/* Control Group requires release notifications to userspace */
	CGRP_NOTIFY_ON_RELEASE,

	/*
	 * Clone the parent's configuration when creating a new child
	 * cpuset cgroup.  For historical reasons, this option can be
	 * specified at mount time and thus is implemented here.
	 */
	CGRP_CPUSET_CLONE_CHILDREN,

	/* Control group has to be frozen. */
	CGRP_FREEZE,

	/* Cgroup is frozen. */
	CGRP_FROZEN,

	/* Control group has to be killed. */
	CGRP_KILL,
};
/* cgroup_root->flags */
enum {
	CGRP_ROOT_NOPREFIX	= (1 << 1), /* mounted subsystems have no named prefix */
	CGRP_ROOT_XATTR		= (1 << 2), /* supports extended attributes */

	/*
	 * Consider namespaces as delegation boundaries.  If this flag is
	 * set, controller specific interface files in a namespace root
	 * aren't writeable from inside the namespace.
	 */
	CGRP_ROOT_NS_DELEGATE	= (1 << 3),

	/* Enable cpuset controller in v1 cgroup to use v2 behavior. */
	CGRP_ROOT_CPUSET_V2_MODE = (1 << 4),

	/* Enable legacy local memory.events. */
	CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 5),

	/* Enable recursive subtree protection. */
	CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 6),
};
/* cftype->flags */
enum {
	CFTYPE_ONLY_ON_ROOT	= (1 << 0),	/* only create on root cgrp */
	CFTYPE_NOT_ON_ROOT	= (1 << 1),	/* don't create on root cgrp */
	CFTYPE_NS_DELEGATABLE	= (1 << 2),	/* writeable beyond delegation boundaries */

	CFTYPE_NO_PREFIX	= (1 << 3),	/* (DON'T USE FOR NEW FILES) no subsys prefix */
	CFTYPE_WORLD_WRITABLE	= (1 << 4),	/* (DON'T USE FOR NEW FILES) S_IWUGO */
	CFTYPE_DEBUG		= (1 << 5),	/* create when cgroup_debug */
	CFTYPE_PRESSURE		= (1 << 6),	/* only if pressure feature is enabled */

	/* internal flags, do not use outside cgroup core proper */
	__CFTYPE_ONLY_ON_DFL	= (1 << 16),	/* only on default hierarchy */
	__CFTYPE_NOT_ON_DFL	= (1 << 17),	/* not on default hierarchy */
};
/*
 * cgroup_file is the handle for a file instance created in a cgroup which
 * is used, for example, to generate file changed notifications.  This can
 * be obtained by setting cftype->file_offset.
 */
struct cgroup_file {
	/* do not access any fields from outside cgroup core */
	struct kernfs_node *kn;
	unsigned long notified_at;
	struct timer_list notify_timer;
};
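
/*
 * Illustrative sketch, not part of the original header: a controller
 * wanting change notifications on one of its interface files embeds a
 * cgroup_file in its per-cgroup state (the "foo" names are hypothetical)
 * and sets that file's cftype->file_offset to
 * offsetof(struct foo_cgroup, events_file).  cgroup core then records the
 * handle here and the controller can kick readers with
 * cgroup_file_notify(&foo->events_file).
 *
 *	struct foo_cgroup {
 *		struct cgroup_subsys_state css;		// must come first
 *		struct cgroup_file events_file;		// filled in by cgroup core
 *		u64 weight;				// controller-private state
 *	};
 */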
/*
 * Per-subsystem/per-cgroup state maintained by the system.  This is the
 * fundamental structural building block that controllers deal with.
 *
 * Fields marked with "PI:" are public and immutable and may be accessed
 * directly without synchronization.
 */
struct cgroup_subsys_state {
	/* PI: the cgroup that this css is attached to */
	struct cgroup *cgroup;

	/* PI: the cgroup subsystem that this css is attached to */
	struct cgroup_subsys *ss;

	/* reference count - access via css_[try]get() and css_put() */
	struct percpu_ref refcnt;

	/* siblings list anchored at the parent's ->children */
	struct list_head sibling;
	struct list_head children;

	/* flush target list anchored at cgrp->rstat_css_list */
	struct list_head rstat_css_node;

	/*
	 * PI: Subsys-unique ID.  0 is unused and root is always 1.  The
	 * matching css can be looked up using css_from_id().
	 */
	int id;

	unsigned int flags;

	/*
	 * Monotonically increasing unique serial number which defines a
	 * uniform order among all csses.  It's guaranteed that all
	 * ->children lists are in the ascending order of ->serial_nr and
	 * used to allow interrupting and resuming iterations.
	 */
	u64 serial_nr;

	/*
	 * Incremented by online self and children.  Used to guarantee that
	 * parents are not offlined before their children.
	 */
	atomic_t online_cnt;

	/* percpu_ref killing and RCU release */
	struct work_struct destroy_work;
	struct rcu_work destroy_rwork;

	/*
	 * PI: the parent css.  Placed here for cache proximity to following
	 * fields of the containing structure.
	 */
	struct cgroup_subsys_state *parent;
};
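
/*
 * Illustrative sketch, not part of the original header: controllers
 * recover their own state from a css with container_of(), continuing the
 * hypothetical foo_cgroup wrapper from above:
 *
 *	static inline struct foo_cgroup *css_to_foo(struct cgroup_subsys_state *css)
 *	{
 *		return container_of(css, struct foo_cgroup, css);
 *	}
 */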
/*
 * A css_set is a structure holding pointers to a set of
 * cgroup_subsys_state objects. This saves space in the task struct
 * object and speeds up fork()/exit(), since a single inc/dec and a
 * list_add()/del() can bump the reference count on the entire cgroup
 * set for a task.
 */
struct css_set {
	/*
	 * Set of subsystem states, one for each subsystem. This array is
	 * immutable after creation apart from the init_css_set during
	 * subsystem registration (at boot time).
	 */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
	/* reference count */
	refcount_t refcount;

	/*
	 * For a domain cgroup, the following points to self.  If threaded,
	 * to the matching cset of the nearest domain ancestor.  The
	 * dom_cset provides access to the domain cgroup and its csses to
	 * which domain level resource consumptions should be charged.
	 */
	struct css_set *dom_cset;

	/* the default cgroup associated with this css_set */
	struct cgroup *dfl_cgrp;

	/* internal task count, protected by css_set_lock */
	int nr_tasks;
	/*
	 * Lists running through all tasks using this cgroup group.
	 * mg_tasks lists tasks which belong to this cset but are in the
	 * process of being migrated out or in.  Protected by
	 * css_set_rwsem, but, during migration, once tasks are moved to
	 * mg_tasks, it can be read safely while holding cgroup_mutex.
	 */
	struct list_head tasks;
	struct list_head mg_tasks;
	struct list_head dying_tasks;

	/* all css_task_iters currently walking this cset */
	struct list_head task_iters;
	/*
	 * On the default hierarchy, ->subsys[ssid] may point to a css
	 * attached to an ancestor instead of the cgroup this css_set is
	 * associated with.  The following node is anchored at
	 * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
	 * iterate through all css's attached to a given cgroup.
	 */
	struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];

	/* all threaded csets whose ->dom_cset points to this cset */
	struct list_head threaded_csets;
	struct list_head threaded_csets_node;
	/*
	 * List running through all cgroup groups in the same hash
	 * slot. Protected by css_set_lock
	 */
	struct hlist_node hlist;

	/*
	 * List of cgrp_cset_links pointing at cgroups referenced from this
	 * css_set.  Protected by css_set_lock.
	 */
	struct list_head cgrp_links;

	/*
	 * List of csets participating in the on-going migration either as
	 * source or destination.  Protected by cgroup_mutex.
	 */
	struct list_head mg_preload_node;
	struct list_head mg_node;
	/*
	 * If this cset is acting as the source of migration the following
	 * two fields are set.  mg_src_cgrp and mg_dst_cgrp are
	 * respectively the source and destination cgroups of the on-going
	 * migration.  mg_dst_cset is the destination cset the target tasks
	 * on this cset should be migrated to.  Protected by cgroup_mutex.
	 */
	struct cgroup *mg_src_cgrp;
	struct cgroup *mg_dst_cgrp;
	struct css_set *mg_dst_cset;

	/* dead and being drained, ignore for migration */
	bool dead;

	/* For RCU-protected deletion */
	struct rcu_head rcu_head;
};
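
/*
 * Illustrative sketch, not part of the original header: with every
 * subsystem pointer gathered in one css_set, resolving a task's css for a
 * given controller is a single array dereference, roughly:
 *
 *	struct cgroup_subsys_state *css =
 *		task_css_set(task)->subsys[foo_cgrp_id];
 *
 * (The real helper, task_css(), also encodes the RCU and locking rules
 * around task->cgroups.)
 */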
struct cgroup_base_stat {
	struct task_cputime cputime;
};
/*
 * rstat - cgroup scalable recursive statistics.  Accounting is done
 * per-cpu in cgroup_rstat_cpu which is then lazily propagated up the
 * hierarchy on reads.
 *
 * When a stat gets updated, the cgroup_rstat_cpu and its ancestors are
 * linked into the updated tree.  On the following read, propagation only
 * considers and consumes the updated tree.  This makes reading O(the
 * number of descendants which have been active since last read) instead of
 * O(the total number of descendants).
 *
 * This is important because there can be a lot of (draining) cgroups which
 * aren't active and stat may be read frequently.  The combination can
 * become very expensive.  By propagating selectively, increasing reading
 * frequency decreases the cost of each read.
 *
 * This struct hosts both the fields which implement the above -
 * updated_children and updated_next - and the fields which track basic
 * resource statistics on top of it - bsync, bstat and last_bstat.
 */
struct cgroup_rstat_cpu {
	/*
	 * ->bsync protects ->bstat.  These are the only fields which get
	 * updated in the hot path.
	 */
	struct u64_stats_sync bsync;
	struct cgroup_base_stat bstat;

	/*
	 * Snapshots at the last reading.  These are used to calculate the
	 * deltas to propagate to the global counters.
	 */
	struct cgroup_base_stat last_bstat;

	/*
	 * Child cgroups with stat updates on this cpu since the last read
	 * are linked on the parent's ->updated_children through
	 * ->updated_next.
	 *
	 * In addition to being more compact, singly-linked list pointing
	 * to the cgroup makes it unnecessary for each per-cpu struct to
	 * point back to the associated cgroup.
	 *
	 * Protected by per-cpu cgroup_rstat_cpu_lock.
	 */
	struct cgroup *updated_children;	/* terminated by self cgroup */
	struct cgroup *updated_next;		/* NULL iff not on the list */
};
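
/*
 * Illustrative sketch, not part of the original header: on flush, each
 * per-cpu delta against the last snapshot is folded into the cgroup-level
 * counters, roughly (pretending bstat were a single u64):
 *
 *	delta = rstatc->bstat - rstatc->last_bstat;	// change on this cpu
 *	rstatc->last_bstat = rstatc->bstat;		// snapshot for next flush
 *	cgrp->bstat += delta;				// fold into global counter
 *
 * Only cgroups on the updated tree are visited, which is what makes reads
 * O(active descendants) rather than O(all descendants).
 */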
struct cgroup_freezer_state {
	/* Should the cgroup and its descendants be frozen. */
	bool freeze;

	/* Should the cgroup actually be frozen? */
	int e_freeze;

	/* Fields below are protected by css_set_lock */

	/* Number of frozen descendant cgroups */
	int nr_frozen_descendants;

	/*
	 * Number of tasks, which are counted as frozen:
	 * frozen, SIGSTOPped, and PTRACEd.
	 */
	int nr_frozen_tasks;
};
struct cgroup {
	/* self css with NULL ->ss, points back to this cgroup */
	struct cgroup_subsys_state self;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * The depth this cgroup is at.  The root is at depth zero and each
	 * step down the hierarchy increments the level.  This along with
	 * ancestor_ids[] can determine whether a given cgroup is a
	 * descendant of another without traversing the hierarchy.
	 */
	int level;
	/* Maximum allowed descent tree depth */
	int max_depth;
	/*
	 * Keep track of total numbers of visible and dying descent cgroups.
	 * Dying cgroups are cgroups which were deleted by a user,
	 * but are still existing because someone else is holding a reference.
	 * max_descendants is a maximum allowed number of descent cgroups.
	 *
	 * nr_descendants and nr_dying_descendants are protected
	 * by cgroup_mutex and css_set_lock. It's fine to read them holding
	 * any of cgroup_mutex and css_set_lock; for writing both locks
	 * should be held.
	 */
	int nr_descendants;
	int nr_dying_descendants;
	int max_descendants;
	/*
	 * Each non-empty css_set associated with this cgroup contributes
	 * one to nr_populated_csets.  The counter is zero iff this cgroup
	 * doesn't have any tasks.
	 *
	 * All children which have non-zero nr_populated_csets and/or
	 * nr_populated_children of their own contribute one to either
	 * nr_populated_domain_children or nr_populated_threaded_children
	 * depending on their type.  Each counter is zero iff all cgroups
	 * of the type in the subtree proper don't have any tasks.
	 */
	int nr_populated_csets;
	int nr_populated_domain_children;
	int nr_populated_threaded_children;

	int nr_threaded_children;	/* # of live threaded child cgroups */
	struct kernfs_node *kn;		/* cgroup kernfs entry */
	struct cgroup_file procs_file;	/* handle for "cgroup.procs" */
	struct cgroup_file events_file;	/* handle for "cgroup.events" */
	/*
	 * The bitmask of subsystems enabled on the child cgroups.
	 * ->subtree_control is the one configured through
	 * "cgroup.subtree_control" while ->subtree_ss_mask is the effective
	 * one which may have more subsystems enabled.  Controller knobs
	 * are made available iff it's enabled in ->subtree_control.
	 */
	u16 subtree_control;
	u16 subtree_ss_mask;
	u16 old_subtree_control;
	u16 old_subtree_ss_mask;
	/* Private pointers for each registered subsystem */
	struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];

	struct cgroup_root *root;

	/*
	 * List of cgrp_cset_links pointing at css_sets with tasks in this
	 * cgroup.  Protected by css_set_lock.
	 */
	struct list_head cset_links;
	/*
	 * On the default hierarchy, a css_set for a cgroup with some
	 * subsys disabled will point to css's which are associated with
	 * the closest ancestor which has the subsys enabled.  The
	 * following lists all css_sets which point to this cgroup's css
	 * for the given subsystem.
	 */
	struct list_head e_csets[CGROUP_SUBSYS_COUNT];
	/*
	 * If !threaded, self.  If threaded, it points to the nearest
	 * domain ancestor.  Inside a threaded subtree, cgroups are exempt
	 * from process granularity and no-internal-task constraint.
	 * Domain level resource consumptions which aren't tied to a
	 * specific task are charged to the dom_cgrp.
	 */
	struct cgroup *dom_cgrp;
	struct cgroup *old_dom_cgrp;	/* used while enabling threaded */

	/* per-cpu recursive resource statistics */
	struct cgroup_rstat_cpu __percpu *rstat_cpu;
	struct list_head rstat_css_list;
	/* cgroup basic resource statistics */
	struct cgroup_base_stat last_bstat;
	struct cgroup_base_stat bstat;
	struct prev_cputime prev_cputime;	/* for printing out cputime */

	/*
	 * list of pidlists, up to two for each namespace (one for procs, one
	 * for tasks); created on demand.
	 */
	struct list_head pidlists;
	struct mutex pidlist_mutex;

	/* used to wait for offlining of csses */
	wait_queue_head_t offline_waitq;

	/* used to schedule release agent */
	struct work_struct release_agent_work;

	/* used to track pressure stalls */
	struct psi_group psi;

	/* used to store eBPF programs */
	struct cgroup_bpf bpf;

	/* If there is block congestion on this cgroup. */
	atomic_t congestion_count;

	/* Used to store internal freezer state */
	struct cgroup_freezer_state freezer;
	/* ids of the ancestors at each level including self */
	u64 ancestor_ids[];
};
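
/*
 * Illustrative sketch, not part of the original header: ->level together
 * with ancestor_ids[] permits an O(1) descendancy test, roughly what
 * cgroup_is_descendant() does:
 *
 *	static bool is_descendant(struct cgroup *cgrp, struct cgroup *anc)
 *	{
 *		if (cgrp->root != anc->root || cgrp->level < anc->level)
 *			return false;
 *		return cgrp->ancestor_ids[anc->level] == cgroup_id(anc);
 *	}
 */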
/*
 * A cgroup_root represents the root of a cgroup hierarchy, and may be
 * associated with a kernfs_root to form an active hierarchy.  This is
 * internal to cgroup core.  Don't access directly from controllers.
 */
struct cgroup_root {
	struct kernfs_root *kf_root;

	/* The bitmask of subsystems attached to this hierarchy */
	unsigned int subsys_mask;

	/* Unique id for this hierarchy. */
	int hierarchy_id;

	/* The root cgroup.  Root is destroyed on its release. */
	struct cgroup cgrp;

	/* for cgrp->ancestor_ids[0] */
	u64 cgrp_ancestor_id_storage;
	/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
	atomic_t nr_cgrps;

	/* A list running through the active hierarchies */
	struct list_head root_list;

	/* Hierarchy-specific flags */
	unsigned int flags;

	/* The path to use for release notifications. */
	char release_agent_path[PATH_MAX];

	/* The name for this hierarchy - may be empty */
	char name[MAX_CGROUP_ROOT_NAMELEN];
};
/*
 * struct cftype: handler definitions for cgroup control files
 *
 * When reading/writing to a file:
 *	- the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
 *	- the 'cftype' of the file is file->f_path.dentry->d_fsdata
 */
struct cftype {
	/*
	 * By convention, the name should begin with the name of the
	 * subsystem, followed by a period.  Zero length string indicates
	 * end of cftype array.
	 */
	char name[MAX_CFTYPE_NAME];
	unsigned long private;

	/*
	 * The maximum length of string, excluding trailing nul, that can
	 * be passed to write.  If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
	 */
	size_t max_write_len;

	/* CFTYPE_* flags */
	unsigned int flags;
	/*
	 * If non-zero, should contain the offset from the start of css to
	 * a struct cgroup_file field.  cgroup will record the handle of
	 * the created file into it.  The recorded handle can be used as
	 * long as the containing css remains accessible.
	 */
	unsigned int file_offset;

	/*
	 * Fields used for internal bookkeeping.  Initialized automatically
	 * during registration.
	 */
	struct cgroup_subsys *ss;	/* NULL for cgroup core files */
	struct list_head node;		/* anchored at ss->cfts */
	struct kernfs_ops *kf_ops;
	int (*open)(struct kernfs_open_file *of);
	void (*release)(struct kernfs_open_file *of);

	/*
	 * read_u64() is a shortcut for the common case of returning a
	 * single integer. Use it in place of read()
	 */
	u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
	/*
	 * read_s64() is a signed version of read_u64()
	 */
	s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);

	/* generic seq_file read interface */
	int (*seq_show)(struct seq_file *sf, void *v);

	/* optional ops, implement all or none */
	void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
	void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
	void (*seq_stop)(struct seq_file *sf, void *v);
	/*
	 * write_u64() is a shortcut for the common case of accepting
	 * a single integer (as parsed by simple_strtoull) from
	 * userspace. Use in place of write(); return 0 or error.
	 */
	int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
			 u64 val);
	/*
	 * write_s64() is a signed version of write_u64()
	 */
	int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
			 s64 val);

	/*
	 * write() is the generic write callback which maps directly to
	 * kernfs write operation and overrides all other operations.
	 * Maximum write size is determined by ->max_write_len.  Use
	 * of_css/cft() to access the associated css and cft.
	 */
	ssize_t (*write)(struct kernfs_open_file *of,
			 char *buf, size_t nbytes, loff_t off);
	__poll_t (*poll)(struct kernfs_open_file *of,
			 struct poll_table_struct *pt);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lock_class_key lockdep_key;
#endif
};
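
/*
 * Illustrative sketch, not part of the original header: a minimal knob for
 * the hypothetical "foo" controller using the read_u64/write_u64 shortcuts
 * (css_to_foo() is the hypothetical accessor sketched earlier):
 *
 *	static u64 foo_weight_read(struct cgroup_subsys_state *css,
 *				   struct cftype *cft)
 *	{
 *		return css_to_foo(css)->weight;
 *	}
 *
 *	static int foo_weight_write(struct cgroup_subsys_state *css,
 *				    struct cftype *cft, u64 val)
 *	{
 *		if (val < 1 || val > 10000)
 *			return -ERANGE;
 *		css_to_foo(css)->weight = val;
 *		return 0;
 *	}
 *
 *	static struct cftype foo_files[] = {
 *		{
 *			.name		= "foo.weight",
 *			.read_u64	= foo_weight_read,
 *			.write_u64	= foo_weight_write,
 *		},
 *		{ }	// zero length name terminates the array
 *	};
 */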
/*
 * Control Group subsystem type.
 * See Documentation/admin-guide/cgroup-v1/cgroups.rst for details
 */
struct cgroup_subsys {
	struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
	int (*css_online)(struct cgroup_subsys_state *css);
	void (*css_offline)(struct cgroup_subsys_state *css);
	void (*css_released)(struct cgroup_subsys_state *css);
	void (*css_free)(struct cgroup_subsys_state *css);
	void (*css_reset)(struct cgroup_subsys_state *css);
	void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
	int (*css_extra_stat_show)(struct seq_file *seq,
				   struct cgroup_subsys_state *css);
	int (*can_attach)(struct cgroup_taskset *tset);
	void (*cancel_attach)(struct cgroup_taskset *tset);
	void (*attach)(struct cgroup_taskset *tset);
	void (*post_attach)(void);
	int (*can_fork)(struct task_struct *task,
			struct css_set *cset);
	void (*cancel_fork)(struct task_struct *task, struct css_set *cset);
	void (*fork)(struct task_struct *task);
	void (*exit)(struct task_struct *task);
	void (*release)(struct task_struct *task);
	void (*bind)(struct cgroup_subsys_state *root_css);

	bool early_init:1;
	/*
	 * If %true, the controller, on the default hierarchy, doesn't show
	 * up in "cgroup.controllers" or "cgroup.subtree_control", is
	 * implicitly enabled on all cgroups on the default hierarchy, and
	 * bypasses the "no internal process" constraint.  This is for
	 * utility type controllers which are transparent to userland.
	 *
	 * An implicit controller can be stolen from the default hierarchy
	 * anytime and thus must be okay with offline csses from previous
	 * hierarchies coexisting with csses for the current one.
	 */
	bool implicit_on_dfl:1;
	/*
	 * If %true, the controller supports threaded mode on the default
	 * hierarchy.  In a threaded subtree, both process granularity and
	 * the no-internal-process constraint are ignored and a threaded
	 * controller should be able to handle that.
	 *
	 * Note that as an implicit controller is automatically enabled on
	 * all cgroups on the default hierarchy, it should also be
	 * threaded.  implicit && !threaded is not supported.
	 */
	bool threaded:1;
	/* the following two fields are initialized automatically during boot */
	int id;
	const char *name;

	/* optional, initialized automatically during boot if not set */
	const char *legacy_name;
	/* link to parent, protected by cgroup_lock() */
	struct cgroup_root *root;

	/* idr for css->id */
	struct idr css_idr;
	/*
	 * List of cftypes.  Each entry is the first entry of an array
	 * terminated by zero length name.
	 */
	struct list_head cfts;

	/*
	 * Base cftypes which are automatically registered.  The two can
	 * point to the same array.
	 */
	struct cftype *dfl_cftypes;	/* for the default hierarchy */
	struct cftype *legacy_cftypes;	/* for the legacy hierarchies */
	/*
	 * A subsystem may depend on other subsystems.  When such subsystem
	 * is enabled on a cgroup, the depended-upon subsystems are enabled
	 * together if available.  Subsystems enabled due to dependency are
	 * not visible to userland until explicitly enabled.  The following
	 * specifies the mask of subsystems that this one depends on.
	 */
	unsigned int depends_on;
};
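
/*
 * Illustrative sketch, not part of the original header: a minimal
 * controller instance wiring the callbacks above together.  The symbol
 * must be named <name>_cgrp_subsys to match the SUBSYS() registration in
 * cgroup_subsys.h; all "foo" names are hypothetical.
 *
 *	struct cgroup_subsys foo_cgrp_subsys = {
 *		.css_alloc	= foo_css_alloc,
 *		.css_free	= foo_css_free,
 *		.dfl_cftypes	= foo_files,
 *		.legacy_cftypes	= foo_files,
 *	};
 */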
extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
/**
 * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
 * @tsk: target task
 *
 * Allows cgroup operations to synchronize against threadgroup changes
 * using a percpu_rw_semaphore.
 */
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	percpu_down_read(&cgroup_threadgroup_rwsem);
}
/**
 * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
 * @tsk: target task
 *
 * Counterpart of cgroup_threadgroup_change_begin().
 */
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
	percpu_up_read(&cgroup_threadgroup_rwsem);
}
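
/*
 * Illustrative usage, not part of the original header: paths that change a
 * task's threadgroup bracket the change so cgroup migration observes
 * threadgroups atomically:
 *
 *	cgroup_threadgroup_change_begin(tsk);
 *	// ... link or unlink a thread in tsk's threadgroup ...
 *	cgroup_threadgroup_change_end(tsk);
 */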
#else	/* CONFIG_CGROUPS */

#define CGROUP_SUBSYS_COUNT 0

static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	might_sleep();
}

static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}

#endif	/* CONFIG_CGROUPS */
#ifdef CONFIG_SOCK_CGROUP_DATA

/*
 * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
 * per-socket cgroup information except for memcg association.
 *
 * On legacy hierarchies, net_prio and net_cls controllers directly set
 * attributes on each sock which can then be tested by the network layer.
 * On the default hierarchy, each sock is associated with the cgroup it was
 * created in and the networking layer can match the cgroup directly.
 *
 * To avoid carrying all three cgroup related fields separately in sock,
 * sock_cgroup_data overloads (prioidx, classid) and the cgroup pointer.
 * On boot, sock_cgroup_data records the cgroup that the sock was created
 * in so that cgroup2 matches can be made; however, once either net_prio or
 * net_cls starts being used, the area is overridden to carry prioidx and/or
 * classid.  The two modes are distinguished by whether the lowest bit is
 * set.  Clear bit indicates cgroup pointer while set bit prioidx and
 * classid.
 *
 * While userland may start using net_prio or net_cls at any time, once
 * either is used, cgroup2 matching no longer works.  There is no reason to
 * mix the two and this is in line with how legacy and v2 compatibility is
 * handled.  On mode switch, cgroup references which are already being
 * pointed to by socks may be leaked.  While this can be remedied by adding
 * synchronization around sock_cgroup_data, given that the number of leaked
 * cgroups is bound and highly unlikely to be high, this seems to be the
 * right solution.
 */
struct sock_cgroup_data {
	union {
#ifdef __LITTLE_ENDIAN
		struct {
			u8	is_data : 1;
			u8	no_refcnt : 1;
			u8	unused : 6;
			u8	padding;
			u16	prioidx;
			u32	classid;
		} __packed;
#else
		struct {
			u32	classid;
			u16	prioidx;
			u8	padding;
			u8	unused : 6;
			u8	no_refcnt : 1;
			u8	is_data : 1;
		} __packed;
#endif
		u64	val;
	};
};
/*
 * There's a theoretical window where the following accessors race with
 * updaters and return part of the previous pointer as the prioidx or
 * classid.  Such races are short-lived and the result isn't critical.
 */
static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
	/* fallback to 1 which is always the ID of the root cgroup */
	return (skcd->is_data & 1) ? skcd->prioidx : 1;
}

static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
	/* fallback to 0 which is the unconfigured default classid */
	return (skcd->is_data & 1) ? skcd->classid : 0;
}
/*
 * If invoked concurrently, the updaters may clobber each other.  The
 * caller is responsible for synchronization.
 */
static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
					   u16 prioidx)
{
	struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};

	if (sock_cgroup_prioidx(&skcd_buf) == prioidx)
		return;

	if (!(skcd_buf.is_data & 1)) {
		skcd_buf.val = 0;
		skcd_buf.is_data = 1;
	}

	skcd_buf.prioidx = prioidx;
	WRITE_ONCE(skcd->val, skcd_buf.val);	/* see sock_cgroup_ptr() */
}
static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
					   u32 classid)
{
	struct sock_cgroup_data skcd_buf = {{ .val = READ_ONCE(skcd->val) }};

	if (sock_cgroup_classid(&skcd_buf) == classid)
		return;

	if (!(skcd_buf.is_data & 1)) {
		skcd_buf.val = 0;
		skcd_buf.is_data = 1;
	}

	skcd_buf.classid = classid;
	WRITE_ONCE(skcd->val, skcd_buf.val);	/* see sock_cgroup_ptr() */
}
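
/*
 * Illustrative usage, not part of the original header: the first write
 * from net_prio (or net_cls) flips the sock into "data" mode, after which
 * the accessors return the stored values instead of the fallbacks:
 *
 *	struct sock_cgroup_data *skcd = &sk->sk_cgrp_data;
 *
 *	sock_cgroup_set_prioidx(skcd, 42);		// lowest bit now set
 *	WARN_ON(sock_cgroup_prioidx(skcd) != 42);
 *	WARN_ON(sock_cgroup_classid(skcd) != 0);	// still unconfigured
 */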
#else	/* CONFIG_SOCK_CGROUP_DATA */

struct sock_cgroup_data {
};

#endif	/* CONFIG_SOCK_CGROUP_DATA */
#endif	/* _LINUX_CGROUP_DEFS_H */