#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem

#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KMEM_H

#include <linux/types.h>
#include <linux/tracepoint.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
#include <trace/events/gfpflags.h>
#endif

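/*
 * Slab allocation events: one event class shared by the kmalloc and
 * kmem_cache_alloc tracepoints. Each event records the caller address,
 * the returned pointer, the requested and actually allocated sizes, and
 * the GFP flags used for the allocation.
 */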
DECLARE_EVENT_CLASS(kmem_alloc,

        TP_PROTO(unsigned long call_site,
                 const void *ptr,
                 size_t bytes_req,
                 size_t bytes_alloc,
                 gfp_t gfp_flags),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

        TP_STRUCT__entry(
                __field_hex(    unsigned long,  call_site       )
                __field_hex(    const void *,   ptr             )
                __field(        size_t,         bytes_req       )
                __field(        size_t,         bytes_alloc     )
                __field(        gfp_t,          gfp_flags       )
        ),

        TP_fast_assign(
                tp_assign(call_site, call_site)
                tp_assign(ptr, ptr)
                tp_assign(bytes_req, bytes_req)
                tp_assign(bytes_alloc, bytes_alloc)
                tp_assign(gfp_flags, gfp_flags)
        ),

        TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
                __entry->call_site,
                __entry->ptr,
                __entry->bytes_req,
                __entry->bytes_alloc,
                show_gfp_flags(__entry->gfp_flags))
)

DEFINE_EVENT_MAP(kmem_alloc, kmalloc,

        kmem_kmalloc,

        TP_PROTO(unsigned long call_site, const void *ptr,
                 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
)

DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,

        TP_PROTO(unsigned long call_site, const void *ptr,
                 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
)

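/*
 * NUMA-aware variant of kmem_alloc, shared by kmalloc_node and
 * kmem_cache_alloc_node. It records the same fields plus the NUMA node
 * the allocation was requested from.
 */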
DECLARE_EVENT_CLASS(kmem_alloc_node,

        TP_PROTO(unsigned long call_site,
                 const void *ptr,
                 size_t bytes_req,
                 size_t bytes_alloc,
                 gfp_t gfp_flags,
                 int node),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

        TP_STRUCT__entry(
                __field_hex(    unsigned long,  call_site       )
                __field_hex(    const void *,   ptr             )
                __field(        size_t,         bytes_req       )
                __field(        size_t,         bytes_alloc     )
                __field(        gfp_t,          gfp_flags       )
                __field(        int,            node            )
        ),

        TP_fast_assign(
                tp_assign(call_site, call_site)
                tp_assign(ptr, ptr)
                tp_assign(bytes_req, bytes_req)
                tp_assign(bytes_alloc, bytes_alloc)
                tp_assign(gfp_flags, gfp_flags)
                tp_assign(node, node)
        ),

        TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
                __entry->call_site,
                __entry->ptr,
                __entry->bytes_req,
                __entry->bytes_alloc,
                show_gfp_flags(__entry->gfp_flags),
                __entry->node)
)

DEFINE_EVENT_MAP(kmem_alloc_node, kmalloc_node,

        kmem_kmalloc_node,

        TP_PROTO(unsigned long call_site, const void *ptr,
                 size_t bytes_req, size_t bytes_alloc,
                 gfp_t gfp_flags, int node),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
)

DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,

        TP_PROTO(unsigned long call_site, const void *ptr,
                 size_t bytes_req, size_t bytes_alloc,
                 gfp_t gfp_flags, int node),

        TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
)

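/*
 * Slab free events: kfree and kmem_cache_free record only the caller
 * address and the pointer being freed.
 */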
DECLARE_EVENT_CLASS(kmem_free,

        TP_PROTO(unsigned long call_site, const void *ptr),

        TP_ARGS(call_site, ptr),

        TP_STRUCT__entry(
                __field_hex(    unsigned long,  call_site       )
                __field_hex(    const void *,   ptr             )
        ),

        TP_fast_assign(
                tp_assign(call_site, call_site)
                tp_assign(ptr, ptr)
        ),

        TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
)

DEFINE_EVENT_MAP(kmem_free, kfree,

        kmem_kfree,

        TP_PROTO(unsigned long call_site, const void *ptr),

        TP_ARGS(call_site, ptr)
)

DEFINE_EVENT(kmem_free, kmem_cache_free,

        TP_PROTO(unsigned long call_site, const void *ptr),

        TP_ARGS(call_site, ptr)
)

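/*
 * Page allocator events, available on kernels 2.6.32 and later. The event
 * names depend on the kernel version: mm_page_free and mm_page_free_batched
 * on 3.3.0 and later, mm_page_free_direct and mm_pagevec_free before that.
 */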
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
TRACE_EVENT(mm_page_free,
#else
TRACE_EVENT(mm_page_free_direct,
#endif

        TP_PROTO(struct page *page, unsigned int order),

        TP_ARGS(page, order),

        TP_STRUCT__entry(
                __field_hex(    struct page *,  page            )
                __field(        unsigned int,   order           )
        ),

        TP_fast_assign(
                tp_assign(page, page)
                tp_assign(order, order)
        ),

        TP_printk("page=%p pfn=%lu order=%d",
                        __entry->page,
                        page_to_pfn(__entry->page),
                        __entry->order)
)

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
TRACE_EVENT(mm_page_free_batched,
#else
TRACE_EVENT(mm_pagevec_free,
#endif

        TP_PROTO(struct page *page, int cold),

        TP_ARGS(page, cold),

        TP_STRUCT__entry(
                __field_hex(    struct page *,  page            )
                __field(        int,            cold            )
        ),

        TP_fast_assign(
                tp_assign(page, page)
                tp_assign(cold, cold)
        ),

        TP_printk("page=%p pfn=%lu order=0 cold=%d",
                        __entry->page,
                        page_to_pfn(__entry->page),
                        __entry->cold)
)

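/*
 * Page allocation event: records the page pointer, order, GFP flags and
 * migrate type. The pfn is printed as 0 when no page was returned
 * (page == NULL).
 */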
TRACE_EVENT(mm_page_alloc,

        TP_PROTO(struct page *page, unsigned int order,
                        gfp_t gfp_flags, int migratetype),

        TP_ARGS(page, order, gfp_flags, migratetype),

        TP_STRUCT__entry(
                __field_hex(    struct page *,  page            )
                __field(        unsigned int,   order           )
                __field(        gfp_t,          gfp_flags       )
                __field(        int,            migratetype     )
        ),

        TP_fast_assign(
                tp_assign(page, page)
                tp_assign(order, order)
                tp_assign(gfp_flags, gfp_flags)
                tp_assign(migratetype, migratetype)
        ),

        TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
                __entry->page,
                __entry->page ? page_to_pfn(__entry->page) : 0,
                __entry->order,
                __entry->migratetype,
                show_gfp_flags(__entry->gfp_flags))
)

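/*
 * Event class shared by mm_page_alloc_zone_locked and mm_page_pcpu_drain.
 * The percpu_refill value printed by the default template is simply
 * "order == 0".
 */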
DECLARE_EVENT_CLASS(mm_page,

        TP_PROTO(struct page *page, unsigned int order, int migratetype),

        TP_ARGS(page, order, migratetype),

        TP_STRUCT__entry(
                __field_hex(    struct page *,  page            )
                __field(        unsigned int,   order           )
                __field(        int,            migratetype     )
        ),

        TP_fast_assign(
                tp_assign(page, page)
                tp_assign(order, order)
                tp_assign(migratetype, migratetype)
        ),

        TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
                __entry->page,
                __entry->page ? page_to_pfn(__entry->page) : 0,
                __entry->order,
                __entry->migratetype,
                __entry->order == 0)
)

DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,

        TP_PROTO(struct page *page, unsigned int order, int migratetype),

        TP_ARGS(page, order, migratetype)
)

DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
        TP_PROTO(struct page *page, unsigned int order, int migratetype),
#else
        TP_PROTO(struct page *page, int order, int migratetype),
#endif

        TP_ARGS(page, order, migratetype),

        TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
                __entry->page, page_to_pfn(__entry->page),
                __entry->order, __entry->migratetype)
)

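/*
 * External fragmentation event. On kernels 3.12.0 and later the tracepoint
 * takes an explicit change_ownership argument; on older kernels TP_printk
 * derives it by comparing the allocation and fallback migrate types.
 */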
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0))

TRACE_EVENT(mm_page_alloc_extfrag,

        TP_PROTO(struct page *page,
                        int alloc_order, int fallback_order,
                        int alloc_migratetype, int fallback_migratetype,
                        int change_ownership),

        TP_ARGS(page,
                alloc_order, fallback_order,
                alloc_migratetype, fallback_migratetype,
                change_ownership),

        TP_STRUCT__entry(
                __field_hex(    struct page *,  page                    )
                __field(        int,            alloc_order             )
                __field(        int,            fallback_order          )
                __field(        int,            alloc_migratetype       )
                __field(        int,            fallback_migratetype    )
                __field(        int,            change_ownership        )
        ),

        TP_fast_assign(
                tp_assign(page, page)
                tp_assign(alloc_order, alloc_order)
                tp_assign(fallback_order, fallback_order)
                tp_assign(alloc_migratetype, alloc_migratetype)
                tp_assign(fallback_migratetype, fallback_migratetype)
                tp_assign(change_ownership, change_ownership)
        ),

        TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
                __entry->page,
                page_to_pfn(__entry->page),
                __entry->alloc_order,
                __entry->fallback_order,
                pageblock_order,
                __entry->alloc_migratetype,
                __entry->fallback_migratetype,
                __entry->fallback_order < pageblock_order,
                __entry->change_ownership)
)

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */

TRACE_EVENT(mm_page_alloc_extfrag,

        TP_PROTO(struct page *page,
                        int alloc_order, int fallback_order,
                        int alloc_migratetype, int fallback_migratetype),

        TP_ARGS(page,
                alloc_order, fallback_order,
                alloc_migratetype, fallback_migratetype),

        TP_STRUCT__entry(
                __field_hex(    struct page *,  page                    )
                __field(        int,            alloc_order             )
                __field(        int,            fallback_order          )
                __field(        int,            alloc_migratetype       )
                __field(        int,            fallback_migratetype    )
        ),

        TP_fast_assign(
                tp_assign(page, page)
                tp_assign(alloc_order, alloc_order)
                tp_assign(fallback_order, fallback_order)
                tp_assign(alloc_migratetype, alloc_migratetype)
                tp_assign(fallback_migratetype, fallback_migratetype)
        ),

        TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
                __entry->page,
                page_to_pfn(__entry->page),
                __entry->alloc_order,
                __entry->fallback_order,
                pageblock_order,
                __entry->alloc_migratetype,
                __entry->fallback_migratetype,
                __entry->fallback_order < pageblock_order,
                __entry->alloc_migratetype == __entry->fallback_migratetype)
)

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,0)) */

#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) */

#endif /* _TRACE_KMEM_H */

/* This part must be outside protection */
#include "../../../probes/define_trace.h"