Commit 621a5f7a authored by Viresh Kumar, committed by Greg Kroah-Hartman

debugfs: Pass bool pointer to debugfs_create_bool()



It's a bit odd that debugfs_create_bool() takes 'u32 *' as an argument,
when all it needs is a boolean pointer.

It would be better to update this API to make it accept 'bool *'
instead, as that will make it more consistent and often more convenient.
On top of that, a bool takes just a byte.

That required updating all user sites as well, in the same commit that
updates the API. The regmap core was also using
debugfs_{read|write}_file_bool() directly, and its variable types are
updated to bool as well.
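
For illustration, a converted call site ends up looking roughly like the
sketch below (the foo_* names are hypothetical and not part of this patch):

	#include <linux/debugfs.h>
	#include <linux/init.h>

	/* Previously the backing variable had to be declared as:
	 *	static u32 foo_verbose;
	 * purely to match the old prototype. With this change a plain
	 * bool is what debugfs_create_bool() expects.
	 */
	static bool foo_verbose = true;

	static int __init foo_debugfs_init(void)
	{
		/* Creates a Y/N file at the debugfs root backed by 'foo_verbose' */
		debugfs_create_bool("foo_verbose", 0644, NULL, &foo_verbose);
		return 0;
	}
	fs_initcall(foo_debugfs_init);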

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Acked-by: Mark Brown <broonie@kernel.org>
Acked-by: Charles Keepax <ckeepax@opensource.wolfsonmicro.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 6e58f752
@@ -105,7 +105,7 @@ a variable of type size_t.
 Boolean values can be placed in debugfs with:
     struct dentry *debugfs_create_bool(const char *name, umode_t mode,
-				       struct dentry *parent, u32 *value);
+				       struct dentry *parent, bool *value);
 A read on the resulting file will yield either Y (for non-zero values) or
 N, followed by a newline. If written to, it will accept either upper- or
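A minimal sketch of the documented behaviour under the new prototype (the
directory and file names below are only examples, not taken from the patch):

	#include <linux/debugfs.h>

	static bool example_enabled;

	static void example_debugfs_setup(void)
	{
		struct dentry *dir = debugfs_create_dir("example", NULL);

		/* Reading the file yields "Y" or "N" plus a newline; accepted
		 * boolean spellings written to it update 'example_enabled'. */
		debugfs_create_bool("enabled", 0600, dir, &example_enabled);
	}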
@@ -58,7 +58,7 @@ static u32 mdscr_read(void)
  * Allow root to disable self-hosted debug from userspace.
  * This is useful if you want to connect an external JTAG debugger.
  */
-static u32 debug_enabled = 1;
+static bool debug_enabled = true;
 static int create_debug_debugfs_entry(void)
 {
@@ -69,7 +69,7 @@ fs_initcall(create_debug_debugfs_entry);
 static int __init early_debug_disable(char *buf)
 {
-	debug_enabled = 0;
+	debug_enabled = false;
 	return 0;
 }
@@ -138,7 +138,7 @@ struct acpi_ec {
 	unsigned long gpe;
 	unsigned long command_addr;
 	unsigned long data_addr;
-	u32 global_lock;
+	bool global_lock;
 	unsigned long flags;
 	unsigned long reference_count;
 	struct mutex mutex;
@@ -122,9 +122,9 @@ struct regmap {
 	unsigned int num_reg_defaults_raw;
 	/* if set, only the cache is modified not the HW */
-	u32 cache_only;
+	bool cache_only;
 	/* if set, only the HW is modified not the cache */
-	u32 cache_bypass;
+	bool cache_bypass;
 	/* if set, remember to free reg_defaults_raw */
 	bool cache_free;
@@ -132,7 +132,7 @@ struct regmap {
 	const void *reg_defaults_raw;
 	void *cache;
 	/* if set, the cache contains newer data than the HW */
-	u32 cache_dirty;
+	bool cache_dirty;
 	/* if set, the HW registers are known to match map->reg_defaults */
 	bool no_sync_defaults;
@@ -355,9 +355,9 @@ static int regcache_lzo_sync(struct regmap *map, unsigned int min,
 		if (ret > 0 && val == map->reg_defaults[ret].def)
 			continue;
-		map->cache_bypass = 1;
+		map->cache_bypass = true;
 		ret = _regmap_write(map, i, val);
-		map->cache_bypass = 0;
+		map->cache_bypass = false;
 		if (ret)
 			return ret;
 		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
@@ -54,11 +54,11 @@ static int regcache_hw_init(struct regmap *map)
 		return -ENOMEM;
 	if (!map->reg_defaults_raw) {
-		u32 cache_bypass = map->cache_bypass;
+		bool cache_bypass = map->cache_bypass;
 		dev_warn(map->dev, "No cache defaults, reading back from HW\n");
 		/* Bypass the cache access till data read from HW*/
-		map->cache_bypass = 1;
+		map->cache_bypass = true;
 		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
 		if (!tmp_buf) {
 			ret = -ENOMEM;
@@ -285,9 +285,9 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
 		if (!regcache_reg_needs_sync(map, reg, val))
 			continue;
-		map->cache_bypass = 1;
+		map->cache_bypass = true;
 		ret = _regmap_write(map, reg, val);
-		map->cache_bypass = 0;
+		map->cache_bypass = false;
 		if (ret) {
 			dev_err(map->dev, "Unable to sync register %#x. %d\n",
 				reg, ret);
@@ -315,7 +315,7 @@ int regcache_sync(struct regmap *map)
 	int ret = 0;
 	unsigned int i;
 	const char *name;
-	unsigned int bypass;
+	bool bypass;
 	BUG_ON(!map->cache_ops);
@@ -333,7 +333,7 @@ int regcache_sync(struct regmap *map)
 	map->async = true;
 	/* Apply any patch first */
-	map->cache_bypass = 1;
+	map->cache_bypass = true;
 	for (i = 0; i < map->patch_regs; i++) {
 		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
 		if (ret != 0) {
@@ -342,7 +342,7 @@ int regcache_sync(struct regmap *map)
 			goto out;
 		}
 	}
-	map->cache_bypass = 0;
+	map->cache_bypass = false;
 	if (map->cache_ops->sync)
 		ret = map->cache_ops->sync(map, 0, map->max_register);
@@ -384,7 +384,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
 {
 	int ret = 0;
 	const char *name;
-	unsigned int bypass;
+	bool bypass;
 	BUG_ON(!map->cache_ops);
@@ -637,11 +637,11 @@ static int regcache_sync_block_single(struct regmap *map, void *block,
 		if (!regcache_reg_needs_sync(map, regtmp, val))
 			continue;
-		map->cache_bypass = 1;
+		map->cache_bypass = true;
 		ret = _regmap_write(map, regtmp, val);
-		map->cache_bypass = 0;
+		map->cache_bypass = false;
 		if (ret != 0) {
 			dev_err(map->dev, "Unable to sync register %#x. %d\n",
 				regtmp, ret);
@@ -668,14 +668,14 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
 	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
 		count * val_bytes, count, base, cur - map->reg_stride);
-	map->cache_bypass = 1;
+	map->cache_bypass = true;
 	ret = _regmap_raw_write(map, base, *data, count * val_bytes);
 	if (ret)
 		dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
 			base, cur - map->reg_stride, ret);
-	map->cache_bypass = 0;
+	map->cache_bypass = false;
 	*data = NULL;
@@ -80,8 +80,8 @@ struct qca_data {
 	spinlock_t hci_ibs_lock; /* HCI_IBS state lock */
 	u8 tx_ibs_state; /* HCI_IBS transmit side power state*/
 	u8 rx_ibs_state; /* HCI_IBS receive side power state */
-	u32 tx_vote; /* Clock must be on for TX */
-	u32 rx_vote; /* Clock must be on for RX */
+	bool tx_vote; /* Clock must be on for TX */
+	bool rx_vote; /* Clock must be on for RX */
 	struct timer_list tx_idle_timer;
 	u32 tx_idle_delay;
 	struct timer_list wake_retrans_timer;
@@ -138,7 +138,7 @@ u16 amd_iommu_last_bdf; /* largest PCI device id we have
 					   to handle */
 LIST_HEAD(amd_iommu_unity_map);	/* a list of required unity mappings
 					   we find in ACPI */
-u32 amd_iommu_unmap_flush;	/* if true, flush on every unmap */
+bool amd_iommu_unmap_flush;	/* if true, flush on every unmap */
 LIST_HEAD(amd_iommu_list);	/* list of all AMD IOMMUs in the
 					   system */
@@ -674,7 +674,7 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
  * If true, the addresses will be flushed on unmap time, not when
  * they are reused
  */
-extern u32 amd_iommu_unmap_flush;
+extern bool amd_iommu_unmap_flush;
 /* Smallest max PASID supported by any IOMMU in the system */
 extern u32 amd_iommu_max_pasid;
@@ -528,7 +528,7 @@ struct mei_device {
 	DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX);
 	unsigned long me_client_index;
-	u32 allow_fixed_address;
+	bool allow_fixed_address;
 	struct mei_cl wd_cl;
 	enum mei_wd_states wd_state;
@@ -767,8 +767,8 @@ struct adapter {
 	bool tid_release_task_busy;
 	struct dentry *debugfs_root;
-	u32 use_bd; /* Use SGE Back Door intfc for reading SGE Contexts */
-	u32 trace_rss; /* 1 implies that different RSS flit per filter is
+	bool use_bd; /* Use SGE Back Door intfc for reading SGE Contexts */
+	bool trace_rss; /* 1 implies that different RSS flit per filter is
			* used per filter else if 0 default RSS flit is
			* used for all 4 filters.
			*/
@@ -680,7 +680,7 @@ struct ath10k {
 	bool monitor_started;
 	unsigned int filter_flags;
 	unsigned long dev_flags;
-	u32 dfs_block_radar_events;
+	bool dfs_block_radar_events;
 	/* protected by conf_mutex */
 	bool radar_enabled;
@@ -1367,7 +1367,7 @@ struct ath5k_hw {
 	u8 ah_retry_long;
 	u8 ah_retry_short;
-	u32 ah_use_32khz_clock;
+	bool ah_use_32khz_clock;
 	u8 ah_coverage_class;
 	bool ah_ack_bitrate_high;
@@ -385,7 +385,7 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
 	ah->config.dma_beacon_response_time = 1;
 	ah->config.sw_beacon_response_time = 6;
-	ah->config.cwm_ignore_extcca = 0;
+	ah->config.cwm_ignore_extcca = false;
 	ah->config.analog_shiftreg = 1;
 	ah->config.rx_intr_mitigation = true;
@@ -332,14 +332,14 @@ enum ath9k_hw_hang_checks {
 struct ath9k_ops_config {
 	int dma_beacon_response_time;
 	int sw_beacon_response_time;
-	u32 cwm_ignore_extcca;
+	bool cwm_ignore_extcca;
 	u32 pcie_waen;
 	u8 analog_shiftreg;
 	u32 ofdm_trig_low;
 	u32 ofdm_trig_high;
 	u32 cck_trig_high;
 	u32 cck_trig_low;
-	u32 enable_paprd;
+	bool enable_paprd;
 	int serialize_regmode;
 	bool rx_intr_mitigation;
 	bool tx_intr_mitigation;
@@ -676,15 +676,15 @@ static void b43_add_dynamic_debug(struct b43_wldev *dev)
 		e->dyn_debug_dentries[id] = d; \
 	} while (0)
-	add_dyn_dbg("debug_xmitpower", B43_DBG_XMITPOWER, 0);
-	add_dyn_dbg("debug_dmaoverflow", B43_DBG_DMAOVERFLOW, 0);
-	add_dyn_dbg("debug_dmaverbose", B43_DBG_DMAVERBOSE, 0);
-	add_dyn_dbg("debug_pwork_fast", B43_DBG_PWORK_FAST, 0);
-	add_dyn_dbg("debug_pwork_stop", B43_DBG_PWORK_STOP, 0);
-	add_dyn_dbg("debug_lo", B43_DBG_LO, 0);
-	add_dyn_dbg("debug_firmware", B43_DBG_FIRMWARE, 0);
-	add_dyn_dbg("debug_keys", B43_DBG_KEYS, 0);
-	add_dyn_dbg("debug_verbose_stats", B43_DBG_VERBOSESTATS, 0);
+	add_dyn_dbg("debug_xmitpower", B43_DBG_XMITPOWER, false);
+	add_dyn_dbg("debug_dmaoverflow", B43_DBG_DMAOVERFLOW, false);
+	add_dyn_dbg("debug_dmaverbose", B43_DBG_DMAVERBOSE, false);
+	add_dyn_dbg("debug_pwork_fast", B43_DBG_PWORK_FAST, false);
+	add_dyn_dbg("debug_pwork_stop", B43_DBG_PWORK_STOP, false);
+	add_dyn_dbg("debug_lo", B43_DBG_LO, false);
+	add_dyn_dbg("debug_firmware", B43_DBG_FIRMWARE, false);
+	add_dyn_dbg("debug_keys", B43_DBG_KEYS, false);
+	add_dyn_dbg("debug_verbose_stats", B43_DBG_VERBOSESTATS, false);
 #undef add_dyn_dbg
 }
@@ -68,7 +68,7 @@ struct b43_dfsentry {
 	u32 shm32read_addr_next;
 	/* Enabled/Disabled list for the dynamic debugging features. */
-	u32 dyn_debug[__B43_NR_DYNDBG];
+	bool dyn_debug[__B43_NR_DYNDBG];
 	/* Dentries for the dynamic debugging entries. */
 	struct dentry *dyn_debug_dentries[__B43_NR_DYNDBG];
 };
@@ -369,11 +369,11 @@ static void b43legacy_add_dynamic_debug(struct b43legacy_wldev *dev)
 		e->dyn_debug_dentries[id] = d; \
 	} while (0)
-	add_dyn_dbg("debug_xmitpower", B43legacy_DBG_XMITPOWER, 0);
-	add_dyn_dbg("debug_dmaoverflow", B43legacy_DBG_DMAOVERFLOW, 0);
-	add_dyn_dbg("debug_dmaverbose", B43legacy_DBG_DMAVERBOSE, 0);
-	add_dyn_dbg("debug_pwork_fast", B43legacy_DBG_PWORK_FAST, 0);
-	add_dyn_dbg("debug_pwork_stop", B43legacy_DBG_PWORK_STOP, 0);
+	add_dyn_dbg("debug_xmitpower", B43legacy_DBG_XMITPOWER, false);
+	add_dyn_dbg("debug_dmaoverflow", B43legacy_DBG_DMAOVERFLOW, false);
+	add_dyn_dbg("debug_dmaverbose", B43legacy_DBG_DMAVERBOSE, false);
+	add_dyn_dbg("debug_pwork_fast", B43legacy_DBG_PWORK_FAST, false);
+	add_dyn_dbg("debug_pwork_stop", B43legacy_DBG_PWORK_STOP, false);
 #undef add_dyn_dbg
 }
@@ -47,7 +47,7 @@ struct b43legacy_dfsentry {
 	struct b43legacy_txstatus_log txstatlog;
 	/* Enabled/Disabled list for the dynamic debugging features. */
-	u32 dyn_debug[__B43legacy_NR_DYNDBG];
+	bool dyn_debug[__B43legacy_NR_DYNDBG];
 	/* Dentries for the dynamic debugging entries. */
 	struct dentry *dyn_debug_dentries[__B43legacy_NR_DYNDBG];
 };
@@ -1425,9 +1425,9 @@ struct il_priv {
 #endif /* CONFIG_IWLEGACY_DEBUGFS */
 	struct work_struct txpower_work;
-	u32 disable_sens_cal;
-	u32 disable_chain_noise_cal;
-	u32 disable_tx_power_cal;
+	bool disable_sens_cal;
+	bool disable_chain_noise_cal;
+	bool disable_tx_power_cal;
 	struct work_struct run_time_calib_work;
 	struct timer_list stats_periodic;
 	struct timer_list watchdog;