*/
static enum bfs_result __bfs(struct lock_list *source_entry,
void *data,
- int (*match)(struct lock_list *entry, void *data),
+ bool (*match)(struct lock_list *entry, void *data),
struct lock_list **target_entry,
int offset)
{
static inline enum bfs_result
__bfs_forwards(struct lock_list *src_entry,
void *data,
- int (*match)(struct lock_list *entry, void *data),
+ bool (*match)(struct lock_list *entry, void *data),
struct lock_list **target_entry)
{
return __bfs(src_entry, data, match, target_entry,
static inline enum bfs_result
__bfs_backwards(struct lock_list *src_entry,
void *data,
- int (*match)(struct lock_list *entry, void *data),
+ bool (*match)(struct lock_list *entry, void *data),
struct lock_list **target_entry)
{
return __bfs(src_entry, data, match, target_entry,
print_circular_bug_entry(entry, depth);
}
-static inline int class_equal(struct lock_list *entry, void *data)
+static inline bool class_equal(struct lock_list *entry, void *data)
{
return entry->class == data;
}
WARN(1, "lockdep bfs error:%d\n", ret);
}
/*
 * BFS match callback used for counting dependencies: bumps the counter
 * that @data points at for every entry visited, and always returns
 * false so the search never stops early on a "match" — the walk covers
 * the whole reachable sub-graph.
 */
static bool noop_count(struct lock_list *entry, void *data)
{
	(*(unsigned long *)data)++;
	return false;
}
static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
#ifdef CONFIG_TRACE_IRQFLAGS
-static inline int usage_accumulate(struct lock_list *entry, void *mask)
+static inline bool usage_accumulate(struct lock_list *entry, void *mask)
{
*(unsigned long *)mask |= entry->class->usage_mask;
- return 0;
+ return false;
}
/*
* without creating any illegal irq-safe -> irq-unsafe lock dependency.
*/
-static inline int usage_match(struct lock_list *entry, void *mask)
+static inline bool usage_match(struct lock_list *entry, void *mask)
{
- return entry->class->usage_mask & *(unsigned long *)mask;
+ return !!(entry->class->usage_mask & *(unsigned long *)mask);
}
/*