Boolean values can be placed in debugfs with:
struct dentry *debugfs_create_bool(const char *name, umode_t mode,
- struct dentry *parent, u32 *value);
+ struct dentry *parent, bool *value);
A read on the resulting file will yield either Y (for non-zero values) or
N, followed by a newline. If written to, it will accept either upper- or
lower-case values, or 1 or 0.
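A minimal usage sketch against the new prototype (the flag name, mode and
NULL parent below are illustrative only, not taken from any driver):

	static bool my_flag = true;

	static int __init my_debugfs_init(void)
	{
		/* Creates <debugfs>/my_flag backed directly by the bool;
		 * reads yield "Y\n" or "N\n", writes flip the flag. */
		debugfs_create_bool("my_flag", 0644, NULL, &my_flag);
		return 0;
	}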
* Allow root to disable self-hosted debug from userspace.
* This is useful if you want to connect an external JTAG debugger.
*/
-static u32 debug_enabled = 1;
+static bool debug_enabled = true;
static int create_debug_debugfs_entry(void)
{
static int __init early_debug_disable(char *buf)
{
- debug_enabled = 0;
+ debug_enabled = false;
return 0;
}
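A helper like this only matters if it runs before the debug hardware is
first programmed, so it is registered as an early boot parameter; a
one-line sketch, assuming arm64's "nodebugmon" parameter name:

	early_param("nodebugmon", early_debug_disable);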
unsigned long gpe;
unsigned long command_addr;
unsigned long data_addr;
- u32 global_lock;
+ bool global_lock;
unsigned long flags;
unsigned long reference_count;
struct mutex mutex;
unsigned int num_reg_defaults_raw;
/* if set, only the cache is modified not the HW */
- u32 cache_only;
+ bool cache_only;
/* if set, only the HW is modified not the cache */
- u32 cache_bypass;
+ bool cache_bypass;
/* if set, remember to free reg_defaults_raw */
bool cache_free;
const void *reg_defaults_raw;
void *cache;
/* if set, the cache contains newer data than the HW */
- u32 cache_dirty;
+ bool cache_dirty;
/* if set, the HW registers are known to match map->reg_defaults */
bool no_sync_defaults;
if (ret > 0 && val == map->reg_defaults[ret].def)
continue;
- map->cache_bypass = 1;
+ map->cache_bypass = true;
ret = _regmap_write(map, i, val);
- map->cache_bypass = 0;
+ map->cache_bypass = false;
if (ret)
return ret;
dev_dbg(map->dev, "Synced register %#x, value %#x\n",
return -ENOMEM;
if (!map->reg_defaults_raw) {
- u32 cache_bypass = map->cache_bypass;
+ bool cache_bypass = map->cache_bypass;
dev_warn(map->dev, "No cache defaults, reading back from HW\n");
/* Bypass the cache access until data is read from HW */
- map->cache_bypass = 1;
+ map->cache_bypass = true;
tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
if (!tmp_buf) {
ret = -ENOMEM;
if (!regcache_reg_needs_sync(map, reg, val))
continue;
- map->cache_bypass = 1;
+ map->cache_bypass = true;
ret = _regmap_write(map, reg, val);
- map->cache_bypass = 0;
+ map->cache_bypass = false;
if (ret) {
dev_err(map->dev, "Unable to sync register %#x. %d\n",
reg, ret);
int ret = 0;
unsigned int i;
const char *name;
- unsigned int bypass;
+ bool bypass;
BUG_ON(!map->cache_ops);
map->async = true;
/* Apply any patch first */
- map->cache_bypass = 1;
+ map->cache_bypass = true;
for (i = 0; i < map->patch_regs; i++) {
ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
if (ret != 0) {
goto out;
}
}
- map->cache_bypass = 0;
+ map->cache_bypass = false;
if (map->cache_ops->sync)
ret = map->cache_ops->sync(map, 0, map->max_register);
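The bypass local saves the caller's bypass setting across the sync;
condensed, the idiom now reads (paraphrased, not a verbatim hunk):

	bool bypass = map->cache_bypass;	/* remember user setting */

	map->cache_bypass = true;		/* patch goes straight to HW */
	/* ... _regmap_write() each patch register ... */
	map->cache_bypass = false;

	/* ... cache_ops->sync() ... */
	map->cache_bypass = bypass;		/* restore on the way out */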
{
int ret = 0;
const char *name;
- unsigned int bypass;
+ bool bypass;
BUG_ON(!map->cache_ops);
if (!regcache_reg_needs_sync(map, regtmp, val))
continue;
- map->cache_bypass = 1;
+ map->cache_bypass = true;
ret = _regmap_write(map, regtmp, val);
- map->cache_bypass = 0;
+ map->cache_bypass = false;
if (ret != 0) {
dev_err(map->dev, "Unable to sync register %#x. %d\n",
regtmp, ret);
dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
count * val_bytes, count, base, cur - map->reg_stride);
- map->cache_bypass = 1;
+ map->cache_bypass = true;
ret = _regmap_raw_write(map, base, *data, count * val_bytes);
if (ret)
dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
base, cur - map->reg_stride, ret);
- map->cache_bypass = 0;
+ map->cache_bypass = false;
*data = NULL;
spinlock_t hci_ibs_lock; /* HCI_IBS state lock */
u8 tx_ibs_state; /* HCI_IBS transmit side power state*/
u8 rx_ibs_state; /* HCI_IBS receive side power state */
- u32 tx_vote; /* Clock must be on for TX */
- u32 rx_vote; /* Clock must be on for RX */
+ bool tx_vote; /* Clock must be on for TX */
+ bool rx_vote; /* Clock must be on for RX */
struct timer_list tx_idle_timer;
u32 tx_idle_delay;
struct timer_list wake_retrans_timer;
to handle */
LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
we find in ACPI */
-u32 amd_iommu_unmap_flush; /* if true, flush on every unmap */
+bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
system */
* If true, the addresses will be flushed at unmap time, not when
* they are reused
*/
-extern u32 amd_iommu_unmap_flush;
+extern bool amd_iommu_unmap_flush;
/* Smallest max PASID supported by any IOMMU in the system */
extern u32 amd_iommu_max_pasid;
DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX);
unsigned long me_client_index;
- u32 allow_fixed_address;
+ bool allow_fixed_address;
struct mei_cl wd_cl;
enum mei_wd_states wd_state;
bool tid_release_task_busy;
struct dentry *debugfs_root;
- u32 use_bd; /* Use SGE Back Door intfc for reading SGE Contexts */
- u32 trace_rss; /* 1 implies that different RSS flit per filter is
+ bool use_bd; /* Use SGE Back Door intfc for reading SGE Contexts */
+ bool trace_rss; /* if set, a different RSS flit is used per
* filter; otherwise the default RSS flit is used
* for all 4 filters.
*/
bool monitor_started;
unsigned int filter_flags;
unsigned long dev_flags;
- u32 dfs_block_radar_events;
+ bool dfs_block_radar_events;
/* protected by conf_mutex */
bool radar_enabled;
u8 ah_retry_long;
u8 ah_retry_short;
- u32 ah_use_32khz_clock;
+ bool ah_use_32khz_clock;
u8 ah_coverage_class;
bool ah_ack_bitrate_high;
ah->config.dma_beacon_response_time = 1;
ah->config.sw_beacon_response_time = 6;
- ah->config.cwm_ignore_extcca = 0;
+ ah->config.cwm_ignore_extcca = false;
ah->config.analog_shiftreg = 1;
ah->config.rx_intr_mitigation = true;
struct ath9k_ops_config {
int dma_beacon_response_time;
int sw_beacon_response_time;
- u32 cwm_ignore_extcca;
+ bool cwm_ignore_extcca;
u32 pcie_waen;
u8 analog_shiftreg;
u32 ofdm_trig_low;
u32 ofdm_trig_high;
u32 cck_trig_high;
u32 cck_trig_low;
- u32 enable_paprd;
+ bool enable_paprd;
int serialize_regmode;
bool rx_intr_mitigation;
bool tx_intr_mitigation;
e->dyn_debug_dentries[id] = d; \
} while (0)
- add_dyn_dbg("debug_xmitpower", B43_DBG_XMITPOWER, 0);
- add_dyn_dbg("debug_dmaoverflow", B43_DBG_DMAOVERFLOW, 0);
- add_dyn_dbg("debug_dmaverbose", B43_DBG_DMAVERBOSE, 0);
- add_dyn_dbg("debug_pwork_fast", B43_DBG_PWORK_FAST, 0);
- add_dyn_dbg("debug_pwork_stop", B43_DBG_PWORK_STOP, 0);
- add_dyn_dbg("debug_lo", B43_DBG_LO, 0);
- add_dyn_dbg("debug_firmware", B43_DBG_FIRMWARE, 0);
- add_dyn_dbg("debug_keys", B43_DBG_KEYS, 0);
- add_dyn_dbg("debug_verbose_stats", B43_DBG_VERBOSESTATS, 0);
+ add_dyn_dbg("debug_xmitpower", B43_DBG_XMITPOWER, false);
+ add_dyn_dbg("debug_dmaoverflow", B43_DBG_DMAOVERFLOW, false);
+ add_dyn_dbg("debug_dmaverbose", B43_DBG_DMAVERBOSE, false);
+ add_dyn_dbg("debug_pwork_fast", B43_DBG_PWORK_FAST, false);
+ add_dyn_dbg("debug_pwork_stop", B43_DBG_PWORK_STOP, false);
+ add_dyn_dbg("debug_lo", B43_DBG_LO, false);
+ add_dyn_dbg("debug_firmware", B43_DBG_FIRMWARE, false);
+ add_dyn_dbg("debug_keys", B43_DBG_KEYS, false);
+ add_dyn_dbg("debug_verbose_stats", B43_DBG_VERBOSESTATS, false);
#undef add_dyn_dbg
}
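For reference, add_dyn_dbg() is a thin wrapper around debugfs_create_bool(),
which is why its initstate argument becomes false above; a reconstruction of
its likely shape (mode and error handling may differ from the real file):

	#define add_dyn_dbg(name, id, initstate) do {		\
		e->dyn_debug[id] = (initstate);			\
		d = debugfs_create_bool(name, 0600, e->subdir,	\
					&(e->dyn_debug[id]));	\
		if (!IS_ERR(d))					\
			e->dyn_debug_dentries[id] = d;		\
		} while (0)

The b43legacy variant below follows the same pattern.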
u32 shm32read_addr_next;
/* Enabled/Disabled list for the dynamic debugging features. */
- u32 dyn_debug[__B43_NR_DYNDBG];
+ bool dyn_debug[__B43_NR_DYNDBG];
/* Dentries for the dynamic debugging entries. */
struct dentry *dyn_debug_dentries[__B43_NR_DYNDBG];
};
e->dyn_debug_dentries[id] = d; \
} while (0)
- add_dyn_dbg("debug_xmitpower", B43legacy_DBG_XMITPOWER, 0);
- add_dyn_dbg("debug_dmaoverflow", B43legacy_DBG_DMAOVERFLOW, 0);
- add_dyn_dbg("debug_dmaverbose", B43legacy_DBG_DMAVERBOSE, 0);
- add_dyn_dbg("debug_pwork_fast", B43legacy_DBG_PWORK_FAST, 0);
- add_dyn_dbg("debug_pwork_stop", B43legacy_DBG_PWORK_STOP, 0);
+ add_dyn_dbg("debug_xmitpower", B43legacy_DBG_XMITPOWER, false);
+ add_dyn_dbg("debug_dmaoverflow", B43legacy_DBG_DMAOVERFLOW, false);
+ add_dyn_dbg("debug_dmaverbose", B43legacy_DBG_DMAVERBOSE, false);
+ add_dyn_dbg("debug_pwork_fast", B43legacy_DBG_PWORK_FAST, false);
+ add_dyn_dbg("debug_pwork_stop", B43legacy_DBG_PWORK_STOP, false);
#undef add_dyn_dbg
}
struct b43legacy_txstatus_log txstatlog;
/* Enabled/Disabled list for the dynamic debugging features. */
- u32 dyn_debug[__B43legacy_NR_DYNDBG];
+ bool dyn_debug[__B43legacy_NR_DYNDBG];
/* Dentries for the dynamic debugging entries. */
struct dentry *dyn_debug_dentries[__B43legacy_NR_DYNDBG];
};
#endif /* CONFIG_IWLEGACY_DEBUGFS */
struct work_struct txpower_work;
- u32 disable_sens_cal;
- u32 disable_chain_noise_cal;
- u32 disable_tx_power_cal;
+ bool disable_sens_cal;
+ bool disable_chain_noise_cal;
+ bool disable_tx_power_cal;
struct work_struct run_time_calib_work;
struct timer_list stats_periodic;
struct timer_list watchdog;
const struct iwl_fw_bcast_filter *bcast_filters;
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct {
- u32 override; /* u32 for debugfs_create_bool */
+ bool override;
struct iwl_bcast_filter_cmd cmd;
} dbgfs_bcast_filtering;
#endif
bool disable_power_off;
bool disable_power_off_d3;
- u32 scan_iter_notif_enabled; /* must be u32 for debugfs_create_bool */
+ bool scan_iter_notif_enabled;
struct debugfs_blob_wrapper nvm_hw_blob;
struct debugfs_blob_wrapper nvm_sw_blob;
int n_nd_channels;
bool net_detect;
#ifdef CONFIG_IWLWIFI_DEBUGFS
- u32 d3_wake_sysassert; /* must be u32 for debugfs_create_bool */
+ bool d3_wake_sysassert;
bool d3_test_active;
bool store_d3_resume_sram;
void *d3_resume_sram;
trc->max_idx = (tbuf_sz / SNIC_TRC_ENTRY_SZ);
trc->rd_idx = trc->wr_idx = 0;
- trc->enable = 1;
+ trc->enable = true;
SNIC_INFO("Trace Facility Enabled.\n Trace Buffer SZ %lu Pages.\n",
tbuf_sz / PAGE_SIZE);
ret = 0;
{
struct snic_trc *trc = &snic_glob->trc;
- trc->enable = 0;
+ trc->enable = false;
snic_trc_debugfs_term();
if (trc->buf) {
u32 max_idx; /* Max Index into trace buffer */
u32 rd_idx;
u32 wr_idx;
- u32 enable; /* Control Variable for Tracing */
+ bool enable; /* Control Variable for Tracing */
struct dentry *trc_enable; /* debugfs file object */
struct dentry *trc_file;
struct uwb_dbg {
struct uwb_pal pal;
- u32 accept;
+ bool accept;
struct list_head rsvs;
struct dentry *root_d;
size_t count, loff_t *ppos)
{
char buf[3];
- u32 *val = file->private_data;
+ bool *val = file->private_data;
if (*val)
buf[0] = 'Y';
char buf[32];
size_t buf_size;
bool bv;
- u32 *val = file->private_data;
+ bool *val = file->private_data;
buf_size = min(count, (sizeof(buf)-1));
if (copy_from_user(buf, user_buf, buf_size))
* code.
*/
struct dentry *debugfs_create_bool(const char *name, umode_t mode,
- struct dentry *parent, u32 *value)
+ struct dentry *parent, bool *value)
{
return debugfs_create_file(name, mode, parent, value, &fops_bool);
}
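fops_bool ties the two handlers shown earlier into one file_operations
table; its definition in the same file is along these lines (quoted from
memory, details may differ):

	static const struct file_operations fops_bool = {
		.read =		read_file_bool,
		.write =	write_file_bool,
		.open =		simple_open,
		.llseek =	default_llseek,
	};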
struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
struct dentry *parent, atomic_t *value);
struct dentry *debugfs_create_bool(const char *name, umode_t mode,
- struct dentry *parent, u32 *value);
+ struct dentry *parent, bool *value);
struct dentry *debugfs_create_blob(const char *name, umode_t mode,
struct dentry *parent,
static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode,
struct dentry *parent,
- u32 *value)
+ bool *value)
{
return ERR_PTR(-ENODEV);
}
#ifdef CONFIG_EDAC_DEBUG
struct dentry *debugfs;
u8 fake_inject_layer[EDAC_MAX_LAYERS];
- u32 fake_inject_ue;
+ bool fake_inject_ue;
u16 fake_inject_count;
#endif
};
atomic_t times;
atomic_t space;
unsigned long verbose;
- u32 task_filter;
+ bool task_filter;
unsigned long stacktrace_depth;
unsigned long require_start;
unsigned long require_end;
static struct {
struct fault_attr attr;
- u32 ignore_private;
+ bool ignore_private;
} fail_futex = {
.attr = FAULT_ATTR_INITIALIZER,
- .ignore_private = 0,
+ .ignore_private = false,
};
static int __init setup_fail_futex(char *str)
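The ignore_private flag is what ultimately lands in debugfs; a condensed
sketch of the hookup, assuming the usual fault-injection debugfs setup
(mode and error handling simplified):

	static int __init fail_futex_debugfs(void)
	{
		struct dentry *dir;

		dir = fault_create_debugfs_attr("fail_futex", NULL,
						&fail_futex.attr);
		if (IS_ERR(dir))
			return PTR_ERR(dir);

		/* Now takes a bool pointer. */
		debugfs_create_bool("ignore-private", 0600, dir,
				    &fail_futex.ignore_private);
		return 0;
	}
	late_initcall(fail_futex_debugfs);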
static DEFINE_SPINLOCK(free_entries_lock);
/* Global disable flag - will be set in case of an error */
-static u32 global_disable __read_mostly;
+static bool global_disable __read_mostly;
/* Early initialization disable flag, set at the end of dma_debug_init */
static bool dma_debug_initialized __read_mostly;
static struct {
struct fault_attr attr;
- u32 ignore_gfp_wait;
- int cache_filter;
+ bool ignore_gfp_wait;
+ bool cache_filter;
} failslab = {
.attr = FAULT_ATTR_INITIALIZER,
- .ignore_gfp_wait = 1,
- .cache_filter = 0,
+ .ignore_gfp_wait = true,
+ .cache_filter = false,
};
bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags)
static struct {
struct fault_attr attr;
- u32 ignore_gfp_highmem;
- u32 ignore_gfp_wait;
+ bool ignore_gfp_highmem;
+ bool ignore_gfp_wait;
u32 min_order;
} fail_page_alloc = {
.attr = FAULT_ATTR_INITIALIZER,
- .ignore_gfp_wait = 1,
- .ignore_gfp_highmem = 1,
+ .ignore_gfp_wait = true,
+ .ignore_gfp_highmem = true,
.min_order = 1,
};
int fw;
int fw_ver;
- u32 running;
+ bool running;
struct list_head ctl_list;