2 #define TRACE_SYSTEM kmem
4 #if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
7 #include <linux/types.h>
8 #include <linux/tracepoint.h>
9 #include <linux/version.h>
10 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
11 #include <trace/events/gfpflags.h>
/*
 * Event class for kmalloc-style allocation tracepoints.
 * Captures the caller's return address (call_site), the returned object
 * pointer, the size the caller asked for vs. the size actually handed out
 * by the allocator, and the GFP flags used for the allocation.
 *
 * NOTE(review): this extraction has elided lines (original line numbers are
 * fused at the start of each line and are non-contiguous) — e.g. the rest of
 * the TP_PROTO argument list and the tp_assign(ptr, ptr) line are missing.
 * Verify against the upstream lttng-modules kmem.h before editing.
 */
14 DECLARE_EVENT_CLASS(kmem_alloc,
16 TP_PROTO(unsigned long call_site,
22 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
/* Fields recorded in the ring buffer; __field_hex renders as hex. */
25 __field_hex( unsigned long, call_site )
26 __field_hex( const void *, ptr )
27 __field( size_t, bytes_req )
28 __field( size_t, bytes_alloc )
29 __field( gfp_t, gfp_flags )
/* Fast-assign section: copy tracepoint arguments into the event fields. */
33 tp_assign(call_site, call_site)
35 tp_assign(bytes_req, bytes_req)
36 tp_assign(bytes_alloc, bytes_alloc)
37 tp_assign(gfp_flags, gfp_flags)
/* Human-readable rendering; show_gfp_flags() decodes gfp_flags to names. */
40 TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
45 show_gfp_flags(__entry->gfp_flags))
/*
 * kmalloc tracepoint: an instance of the kmem_alloc event class.
 * DEFINE_EVENT_MAP presumably maps the kernel tracepoint name to an
 * LTTng event name (the mapped-name argument is on an elided line) —
 * TODO confirm against upstream.
 */
48 DEFINE_EVENT_MAP(kmem_alloc, kmalloc,
52 TP_PROTO(unsigned long call_site, const void *ptr,
53 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
55 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
/* kmem_cache_alloc tracepoint: same payload/format as the kmem_alloc class. */
58 DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
60 TP_PROTO(unsigned long call_site, const void *ptr,
61 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
63 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
/*
 * Event class for NUMA-node-aware allocation tracepoints.
 * Same payload as kmem_alloc plus a node argument (visible in TP_ARGS and
 * the "node=%d" format); the __field/tp_assign lines for node, the ptr
 * assignment, and the final printk argument are on elided lines —
 * NOTE(review): verify against upstream before editing.
 */
66 DECLARE_EVENT_CLASS(kmem_alloc_node,
68 TP_PROTO(unsigned long call_site,
75 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
/* Recorded fields (node field line elided in this extraction). */
78 __field_hex( unsigned long, call_site )
79 __field_hex( const void *, ptr )
80 __field( size_t, bytes_req )
81 __field( size_t, bytes_alloc )
82 __field( gfp_t, gfp_flags )
/* Copy arguments into the event record. */
87 tp_assign(call_site, call_site)
89 tp_assign(bytes_req, bytes_req)
90 tp_assign(bytes_alloc, bytes_alloc)
91 tp_assign(gfp_flags, gfp_flags)
95 TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
100 show_gfp_flags(__entry->gfp_flags),
/* kmalloc_node tracepoint: node-aware instance of kmem_alloc_node. */
104 DEFINE_EVENT_MAP(kmem_alloc_node, kmalloc_node,
108 TP_PROTO(unsigned long call_site, const void *ptr,
109 size_t bytes_req, size_t bytes_alloc,
110 gfp_t gfp_flags, int node),
112 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
/* kmem_cache_alloc_node tracepoint: slab-cache variant of kmem_alloc_node. */
115 DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
117 TP_PROTO(unsigned long call_site, const void *ptr,
118 size_t bytes_req, size_t bytes_alloc,
119 gfp_t gfp_flags, int node),
121 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
/*
 * Event class for free-side tracepoints: records only the caller address
 * and the pointer being freed.
 * NOTE(review): the tp_assign(ptr, ptr) line appears elided here.
 */
124 DECLARE_EVENT_CLASS(kmem_free,
126 TP_PROTO(unsigned long call_site, const void *ptr),
128 TP_ARGS(call_site, ptr),
131 __field_hex( unsigned long, call_site )
132 __field_hex( const void *, ptr )
136 tp_assign(call_site, call_site)
140 TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
/* kfree tracepoint: instance of the kmem_free event class. */
143 DEFINE_EVENT_MAP(kmem_free, kfree,
147 TP_PROTO(unsigned long call_site, const void *ptr),
149 TP_ARGS(call_site, ptr)
/* kmem_cache_free tracepoint: slab-cache free, same payload as kmem_free. */
152 DEFINE_EVENT(kmem_free, kmem_cache_free,
154 TP_PROTO(unsigned long call_site, const void *ptr),
156 TP_ARGS(call_site, ptr)
/*
 * Page-free tracepoint, available since 2.6.32.  The kernel renamed it
 * from mm_page_free_direct to mm_page_free in 3.3, hence the nested
 * version check selecting the event name.
 * NOTE(review): the #else between the two TRACE_EVENT name lines and the
 * closing lines of the invocation are elided in this extraction.
 */
159 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
160 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
161 TRACE_EVENT(mm_page_free,
163 TRACE_EVENT(mm_page_free_direct,
166 TP_PROTO(struct page *page, unsigned int order),
168 TP_ARGS(page, order),
171 __field_hex( struct page *, page )
172 __field( unsigned int, order )
176 tp_assign(page, page)
177 tp_assign(order, order)
/* pfn is derived at print time from the recorded struct page pointer. */
180 TP_printk("page=%p pfn=%lu order=%d",
182 page_to_pfn(__entry->page),
/*
 * Batched page-free tracepoint; renamed from mm_pagevec_free to
 * mm_page_free_batched in kernel 3.3 (version check selects the name).
 * Records the page and the "cold" hint; order is always 0 per the format.
 * NOTE(review): TP_ARGS and the __field(int, cold) lines are elided here.
 */
186 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
187 TRACE_EVENT(mm_page_free_batched,
189 TRACE_EVENT(mm_pagevec_free,
192 TP_PROTO(struct page *page, int cold),
197 __field_hex( struct page *, page )
202 tp_assign(page, page)
203 tp_assign(cold, cold)
206 TP_printk("page=%p pfn=%lu order=0 cold=%d",
208 page_to_pfn(__entry->page),
/*
 * Page-allocator allocation tracepoint.
 * Records the allocated page, its order, the GFP flags, and the
 * migratetype it was taken from.  The printk guards page_to_pfn() with a
 * NULL check because the allocator can report a failed (NULL) allocation.
 */
212 TRACE_EVENT(mm_page_alloc,
214 TP_PROTO(struct page *page, unsigned int order,
215 gfp_t gfp_flags, int migratetype),
217 TP_ARGS(page, order, gfp_flags, migratetype),
220 __field_hex( struct page *, page )
221 __field( unsigned int, order )
222 __field( gfp_t, gfp_flags )
223 __field( int, migratetype )
227 tp_assign(page, page)
228 tp_assign(order, order)
229 tp_assign(gfp_flags, gfp_flags)
230 tp_assign(migratetype, migratetype)
233 TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
/* NULL page => print pfn 0 rather than dereferencing. */
235 __entry->page ? page_to_pfn(__entry->page) : 0,
237 __entry->migratetype,
238 show_gfp_flags(__entry->gfp_flags))
/*
 * Event class shared by per-cpu page list tracepoints
 * (zone-locked alloc and pcpu drain): page, order, migratetype.
 * The "percpu_refill" printk argument is computed at print time from a
 * line elided in this extraction — NOTE(review): verify against upstream.
 */
241 DECLARE_EVENT_CLASS(mm_page,
243 TP_PROTO(struct page *page, unsigned int order, int migratetype),
245 TP_ARGS(page, order, migratetype),
248 __field_hex( struct page *, page )
249 __field( unsigned int, order )
250 __field( int, migratetype )
254 tp_assign(page, page)
255 tp_assign(order, order)
256 tp_assign(migratetype, migratetype)
259 TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
/* NULL-safe pfn, as in mm_page_alloc. */
261 __entry->page ? page_to_pfn(__entry->page) : 0,
263 __entry->migratetype,
/* Zone-lock-held allocation tracepoint: instance of the mm_page class. */
267 DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
269 TP_PROTO(struct page *page, unsigned int order, int migratetype),
271 TP_ARGS(page, order, migratetype)
/*
 * Per-cpu page drain tracepoint: reuses the mm_page class fields but
 * overrides the output format (no percpu_refill column).
 * order's prototype type changed from int to unsigned int in 2.6.33,
 * hence the version-gated TP_PROTO pair (the #else is elided here).
 */
274 DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,
276 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
277 TP_PROTO(struct page *page, unsigned int order, int migratetype),
279 TP_PROTO(struct page *page, int order, int migratetype),
282 TP_ARGS(page, order, migratetype),
284 TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
285 __entry->page, page_to_pfn(__entry->page),
286 __entry->order, __entry->migratetype)
/*
 * External-fragmentation fallback tracepoint, kernel >= 3.12 variant.
 * Fired when an allocation falls back from its preferred migratetype;
 * since 3.12 the kernel passes change_ownership explicitly instead of
 * it being derived from the migratetypes (contrast the pre-3.12 variant
 * below, which computes it as alloc_migratetype == fallback_migratetype).
 * NOTE(review): the TP_ARGS opening line and parts of TP_PROTO/printk
 * argument lists are elided in this extraction.
 */
289 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))
291 TRACE_EVENT(mm_page_alloc_extfrag,
293 TP_PROTO(struct page *page,
294 int alloc_order, int fallback_order,
295 int alloc_migratetype, int fallback_migratetype,
296 int change_ownership),
299 alloc_order, fallback_order,
300 alloc_migratetype, fallback_migratetype,
304 __field_hex( struct page *, page )
305 __field( int, alloc_order )
306 __field( int, fallback_order )
307 __field( int, alloc_migratetype )
308 __field( int, fallback_migratetype )
309 __field( int, change_ownership )
313 tp_assign(page, page)
314 tp_assign(alloc_order, alloc_order)
315 tp_assign(fallback_order, fallback_order)
316 tp_assign(alloc_migratetype, alloc_migratetype)
317 tp_assign(fallback_migratetype, fallback_migratetype)
318 tp_assign(change_ownership, change_ownership)
321 TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
323 page_to_pfn(__entry->page),
324 __entry->alloc_order,
325 __entry->fallback_order,
326 __entry->alloc_migratetype,
/* Wait — extraction shows 327/328; keep as-is. */
327 __entry->alloc_migratetype,
/*
 * External-fragmentation fallback tracepoint, kernel < 3.12 variant.
 * No change_ownership argument from the kernel; it is synthesized at
 * print time as (alloc_migratetype == fallback_migratetype), and
 * "fragmenting" as (fallback_order < pageblock_order).
 */
333 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
335 TRACE_EVENT(mm_page_alloc_extfrag,
337 TP_PROTO(struct page *page,
338 int alloc_order, int fallback_order,
339 int alloc_migratetype, int fallback_migratetype),
342 alloc_order, fallback_order,
343 alloc_migratetype, fallback_migratetype),
346 __field_hex( struct page *, page )
347 __field( int, alloc_order )
348 __field( int, fallback_order )
349 __field( int, alloc_migratetype )
350 __field( int, fallback_migratetype )
354 tp_assign(page, page)
355 tp_assign(alloc_order, alloc_order)
356 tp_assign(fallback_order, fallback_order)
357 tp_assign(alloc_migratetype, alloc_migratetype)
358 tp_assign(fallback_migratetype, fallback_migratetype)
361 TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
363 page_to_pfn(__entry->page),
364 __entry->alloc_order,
365 __entry->fallback_order,
367 __entry->alloc_migratetype,
368 __entry->fallback_migratetype,
/* fragmenting: fallback taken from a smaller-than-pageblock chunk. */
369 __entry->fallback_order < pageblock_order,
/* change_ownership derived, not passed, on pre-3.12 kernels. */
370 __entry->alloc_migratetype == __entry->fallback_migratetype)
373 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */
377 #endif /* _TRACE_KMEM_H */
379 /* This part must be outside protection */
380 #include "../../../probes/define_trace.h"