#include <linux/types.h>
#include <linux/threads.h>
#include <linux/atomic.h>
+#include <linux/cpumask.h>
#include <asm/page.h>
#endif
};
+/* Track pages that require TLB flushes */
+struct tlbflush_unmap_batch {
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+ /*
+ * Each bit set is a CPU that potentially has a TLB entry for one of
+ * the PFNs being flushed. See set_tlb_ubc_flush_pending().
+ */
+ struct cpumask cpumask;
+
+ /* True if any bit in cpumask is set */
+ bool flush_required;
+
+ /*
+ * If true then the PTE was dirty when unmapped. The entry must be
+ * flushed before IO is initiated or a stale TLB entry potentially
+ * allows an update without redirtying the page.
+ */
+ bool writable;
+#endif
+};
+
#endif /* _LINUX_MM_TYPES_TASK_H */
struct wake_q_node *next;
};
-/* Track pages that require TLB flushes */
-struct tlbflush_unmap_batch {
- /*
- * Each bit set is a CPU that potentially has a TLB entry for one of
- * the PFNs being flushed. See set_tlb_ubc_flush_pending().
- */
- struct cpumask cpumask;
-
- /* True if any bit in cpumask is set */
- bool flush_required;
-
- /*
- * If true then the PTE was dirty when unmapped. The entry must be
- * flushed before IO is initiated or a stale TLB entry potentially
- * allows an update without redirtying the page.
- */
- bool writable;
-};
-
struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */
-#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
struct tlbflush_unmap_batch tlb_ubc;
-#endif
struct rcu_head rcu;
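
For reference, the cpumask comment above points at set_tlb_ubc_flush_pending()
in mm/rmap.c, which is what populates these fields when a page is unmapped. A
minimal sketch of that path, assuming the 4.x-era signature (exact parameters
and details differ between kernel versions, so treat it as illustrative only):

	static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
	{
		struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

		/* Record every CPU that may hold a stale TLB entry for this mm. */
		cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
		tlb_ubc->flush_required = true;

		/*
		 * A dirty PTE must be flushed before I/O is initiated;
		 * otherwise a stale writable TLB entry could modify the page
		 * without redirtying it.
		 */
		if (writable)
			tlb_ubc->writable = true;
	}

The accumulated cpumask is later consumed (and flush_required/writable
cleared) by try_to_unmap_flush(), which issues one batched flush rather than
one flush per unmapped page.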