#include <linux/sched.h>
#include <linux/pm_runtime.h>
+#include <trace/events/power.h>
#include "power.h"
static int rpm_resume(struct device *dev, int rpmflags);
/*
 * Close out time accounting for the state being left, report the new state
 * on the runtime_pm_status tracepoint, then commit it.
 * NOTE(review): the tracepoint fires with the *new* status argument but
 * before dev->power.runtime_status is actually written; callers hold
 * dev->power.lock, so no concurrent reader should observe the window —
 * confirm against the locking rules in runtime.c.
 */
static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
update_pm_runtime_accounting(dev);
+ trace_runtime_pm_status(dev, status);
dev->power.runtime_status = status;
}
might_sleep_if(!(rpmflags & RPM_ASYNC));
if (rpmflags & RPM_GET_PUT) {
+ trace_runtime_pm_usage(dev,
+ atomic_read(&dev->power.usage_count)-1);
if (!atomic_dec_and_test(&dev->power.usage_count))
return 0;
}
might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
if (rpmflags & RPM_GET_PUT) {
+ trace_runtime_pm_usage(dev,
+ atomic_read(&dev->power.usage_count)-1);
if (!atomic_dec_and_test(&dev->power.usage_count))
return 0;
}
if (rpmflags & RPM_GET_PUT)
atomic_inc(&dev->power.usage_count);
+ trace_runtime_pm_usage(dev, atomic_read(&dev->power.usage_count));
spin_lock_irqsave(&dev->power.lock, flags);
retval = rpm_resume(dev, rpmflags);
dev->power.runtime_auto = false;
atomic_inc(&dev->power.usage_count);
+ trace_runtime_pm_usage(dev, atomic_read(&dev->power.usage_count));
rpm_resume(dev, 0);
out:
goto out;
dev->power.runtime_auto = true;
+ trace_runtime_pm_usage(dev, atomic_read(&dev->power.usage_count)-1);
if (atomic_dec_and_test(&dev->power.usage_count))
rpm_idle(dev, RPM_AUTO);
/* If it used to be allowed then prevent it. */
if (!old_use || old_delay >= 0) {
atomic_inc(&dev->power.usage_count);
+ trace_runtime_pm_usage(dev,
+ atomic_read(&dev->power.usage_count));
rpm_resume(dev, 0);
}
}
else {
/* If it used to be prevented then allow it. */
- if (old_use && old_delay < 0)
+ if (old_use && old_delay < 0) {
atomic_dec(&dev->power.usage_count);
-
+ trace_runtime_pm_usage(dev,
+ atomic_read(&dev->power.usage_count));
+ }
/* Maybe we can autosuspend now. */
rpm_idle(dev, RPM_AUTO);
}
#include <linux/device.h>
#include <linux/pm.h>
+#include <trace/events/power.h>
#include <linux/jiffies.h>
/*
 * Bump the device's runtime PM usage count without resuming the device,
 * and report the new count on the runtime_pm_usage tracepoint.
 * NOTE(review): the traced value comes from a separate atomic_read() after
 * the increment, so under concurrent get/put it may not be the exact value
 * this increment produced — acceptable for tracing, but worth confirming.
 */
static inline void pm_runtime_get_noresume(struct device *dev)
{
atomic_inc(&dev->power.usage_count);
+ trace_runtime_pm_usage(dev, atomic_read(&dev->power.usage_count));
}
/*
 * Drop the device's runtime PM usage count without triggering an idle
 * check, and report the resulting count on the runtime_pm_usage tracepoint.
 * atomic_add_unless() refuses to decrement below 0, so a count of 0 is
 * traced as 0 rather than going negative.
 * NOTE(review): the traced value is re-read after the (conditional)
 * decrement and can race with other CPUs — trace data is best-effort here.
 */
static inline void pm_runtime_put_noidle(struct device *dev)
{
atomic_add_unless(&dev->power.usage_count, -1, 0);
+ trace_runtime_pm_usage(dev, atomic_read(&dev->power.usage_count));
}
static inline bool device_run_wake(struct device *dev)
#include <linux/ktime.h>
#include <linux/tracepoint.h>
+#include <linux/device.h>
DECLARE_EVENT_CLASS(cpu,
TP_ARGS(name, state, cpu_id)
);
+#ifdef CONFIG_PM_RUNTIME
+/* Build a { RPM_<status>, "<status>" } pair for __print_symbolic(). */
+#define rpm_status_name(status) { RPM_##status, #status }
+/* Render a numeric rpm_status value as its symbolic name in trace output. */
+#define show_rpm_status_name(val) \
+ __print_symbolic(val, \
+ rpm_status_name(SUSPENDED), \
+ rpm_status_name(SUSPENDING), \
+ rpm_status_name(RESUMING), \
+ rpm_status_name(ACTIVE) \
+ )
+/*
+ * runtime_pm_status - emitted when a device's runtime PM status changes.
+ * Records the driver name, the device name and the new rpm_status value,
+ * printed symbolically (SUSPENDED/SUSPENDING/RESUMING/ACTIVE).
+ */
+TRACE_EVENT(runtime_pm_status,
+
+ TP_PROTO(struct device *dev, int status),
+
+ TP_ARGS(dev, status),
+
+ TP_STRUCT__entry(
+ __string(devname, dev_name(dev))
+ __string(drivername, dev_driver_string(dev))
+ __field(u32, status)
+ ),
+
+ TP_fast_assign(
+ __assign_str(devname, dev_name(dev));
+ __assign_str(drivername, dev_driver_string(dev));
+ __entry->status = status;
+ ),
+
+ TP_printk("driver=%s dev=%s status=%s", __get_str(drivername),
+ __get_str(devname), show_rpm_status_name(__entry->status))
+);
+/*
+ * runtime_pm_usage - emitted when a device's runtime PM usage count is
+ * changed. Records the driver name, the device name and the new count.
+ * The count is stored and printed as a signed int: several call sites pass
+ * atomic_read(&...usage_count) - 1, which is conceptually signed, and the
+ * previous __field(u32, usage) printed with %d was a format/type mismatch.
+ */
+TRACE_EVENT(runtime_pm_usage,
+
+ TP_PROTO(struct device *dev, int usage),
+
+ TP_ARGS(dev, usage),
+
+ TP_STRUCT__entry(
+ __string(devname, dev_name(dev))
+ __string(drivername, dev_driver_string(dev))
+ __field(int, usage)
+ ),
+
+ TP_fast_assign(
+ __assign_str(devname, dev_name(dev));
+ __assign_str(drivername, dev_driver_string(dev));
+ __entry->usage = usage;
+ ),
+
+ TP_printk("driver=%s dev=%s usage=%d", __get_str(drivername),
+ __get_str(devname), __entry->usage)
+);
+#endif /* CONFIG_PM_RUNTIME */
+
#endif /* _TRACE_POWER_H */
/* This part must be outside protection */
#endif
EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle);
+EXPORT_TRACEPOINT_SYMBOL_GPL(power_frequency);
+#ifdef CONFIG_PM_RUNTIME
+EXPORT_TRACEPOINT_SYMBOL_GPL(runtime_pm_usage);
+#endif