/*
 * ads7846_get_value - extract the ADC sample from a completed SPI message
 * @ts: driver state (used only to distinguish the chip model)
 * @m:  the just-finished spi_message whose last transfer holds the sample
 *
 * The ADS7845 places the big-endian result one byte into the RX buffer;
 * all other supported models place it at the start of the buffer.
 *
 * Return: the conversion result, normalized to 12 bits.
 */
static int ads7846_get_value(struct ads7846 *ts, struct spi_message *m)
{
	int value;
	struct spi_transfer *t =
		list_entry(m->transfers.prev, struct spi_transfer, transfer_list);

	if (ts->model == 7845) {
		value = be16_to_cpup((__be16 *)&(((char *)t->rx_buf)[1]));
	} else {
		/*
		 * adjust: on-wire is a must-ignore bit, a BE12 value, then
		 * padding; built from two 8 bit values written msb-first.
		 */
		value = be16_to_cpup((__be16 *)t->rx_buf);
	}

	/* enforce ADC output is 12 bits width */
	return (value >> 3) & 0xfff;
}
static void ads7846_update_value(struct spi_message *m, int val)
static struct spi_driver ads7846_driver = {
.driver = {
.name = "ads7846",
--- -- .owner = THIS_MODULE,
.pm = &ads7846_pm,
.of_match_table = of_match_ptr(ads7846_dt_ids),
},
} while (gpio_get_value(pdata->gpio));
}
-----static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
+++++static void pcap_irq_handler(struct irq_desc *desc)
{
struct pcap_chip *pcap = irq_desc_get_handler_data(desc);
.remove = ezx_pcap_remove,
.driver = {
.name = "ezx-pcap",
--- -- .owner = THIS_MODULE,
},
};
{ .compatible = "micrel,ks8851" },
{ }
};
+++++MODULE_DEVICE_TABLE(of, ks8851_match_table);
static struct spi_driver ks8851_driver = {
.driver = {
.name = "ks8851",
.of_match_table = ks8851_match_table,
--- -- .owner = THIS_MODULE,
.pm = &ks8851_pm_ops,
},
.probe = ks8851_probe,
* GNU General Public License for more details.
*/
+++++ #include <linux/bitops.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
| QUARK_X1000_SSCR1_TFT \
| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
/* LPSS private-register bits (offsets are per-config, see lpss_config) */
#define LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE	BIT(24)
#define LPSS_CS_CONTROL_SW_MODE			BIT(0)
#define LPSS_CS_CONTROL_CS_HIGH			BIT(1)
#define LPSS_CS_CONTROL_CS_SEL_SHIFT		8
#define LPSS_CS_CONTROL_CS_SEL_MASK		(3 << LPSS_CS_CONTROL_CS_SEL_SHIFT)
#define LPSS_CAPS_CS_EN_SHIFT			9
#define LPSS_CAPS_CS_EN_MASK			(0xf << LPSS_CAPS_CS_EN_SHIFT)
struct lpss_config {
/* LPSS offset from drv_data->ioaddr */
int reg_general;
int reg_ssp;
int reg_cs_ctrl;
+++++ int reg_capabilities;
/* FIFO thresholds */
u32 rx_threshold;
u32 tx_threshold_lo;
.reg_general = 0x08,
.reg_ssp = 0x0c,
.reg_cs_ctrl = 0x18,
+++++ .reg_capabilities = -1,
.rx_threshold = 64,
.tx_threshold_lo = 160,
.tx_threshold_hi = 224,
.reg_general = 0x08,
.reg_ssp = 0x0c,
.reg_cs_ctrl = 0x18,
+++++ .reg_capabilities = -1,
.rx_threshold = 64,
.tx_threshold_lo = 160,
.tx_threshold_hi = 224,
.reg_general = -1,
.reg_ssp = 0x20,
.reg_cs_ctrl = 0x24,
+++++ .reg_capabilities = 0xfc,
.rx_threshold = 1,
.tx_threshold_lo = 32,
.tx_threshold_hi = 56,
},
+++++ { /* LPSS_BXT_SSP */
+++++ .offset = 0x200,
+++++ .reg_general = -1,
+++++ .reg_ssp = 0x20,
+++++ .reg_cs_ctrl = 0x24,
+++++ .reg_capabilities = 0xfc,
+++++ .rx_threshold = 1,
+++++ .tx_threshold_lo = 16,
+++++ .tx_threshold_hi = 48,
+++++ },
};
static inline const struct lpss_config
case LPSS_LPT_SSP:
case LPSS_BYT_SSP:
case LPSS_SPT_SSP:
+++++ case LPSS_BXT_SSP:
return true;
default:
return false;
drv_data->lpss_base = drv_data->ioaddr + config->offset;
/* Enable software chip select control */
----- value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH;
+++++ value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
+++++ value &= ~(LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH);
+++++ value |= LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH;
__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
/* Enable multiblock DMA transfers */
if (config->reg_general >= 0) {
value = __lpss_ssp_read_priv(drv_data,
config->reg_general);
----- value |= GENERAL_REG_RXTO_HOLDOFF_DISABLE;
+++++ value |= LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE;
__lpss_ssp_write_priv(drv_data,
config->reg_general, value);
}
/*
 * lpss_ssp_cs_control - assert or deassert the software-controlled chip select
 * @drv_data: driver state
 * @enable:   true to activate the current message's chip select, false to
 *            deassert it
 *
 * On enable, the chip-select output mux is reprogrammed first if the target
 * differs from the currently selected output; see the in-line comment for
 * why a delay is needed between selecting the output and driving it active.
 */
static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
{
	const struct lpss_config *config;
	u32 value, cs;

	config = lpss_get_config(drv_data);

	value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
	if (enable) {
		cs = drv_data->cur_msg->spi->chip_select;
		cs <<= LPSS_CS_CONTROL_CS_SEL_SHIFT;
		if (cs != (value & LPSS_CS_CONTROL_CS_SEL_MASK)) {
			/*
			 * When switching another chip select output active
			 * the output must be selected first and wait 2 ssp_clk
			 * cycles before changing state to active. Otherwise
			 * a short glitch will occur on the previous chip
			 * select since output select is latched but state
			 * control is not.
			 */
			value &= ~LPSS_CS_CONTROL_CS_SEL_MASK;
			value |= cs;
			__lpss_ssp_write_priv(drv_data,
					      config->reg_cs_ctrl, value);
			/* approx. two ssp_clk cycles at max_speed_hz */
			ndelay(1000000000 /
			       (drv_data->master->max_speed_hz / 2));
		}
		value &= ~LPSS_CS_CONTROL_CS_HIGH;
	} else {
		value |= LPSS_CS_CONTROL_CS_HIGH;
	}
	__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
}
if (!(sccr1_reg & SSCR1_TIE))
mask &= ~SSSR_TFS;
+++++ /* Ignore RX timeout interrupt if it is disabled */
+++++ if (!(sccr1_reg & SSCR1_TINTE))
+++++ mask &= ~SSSR_TINT;
+++++
if (!(status & mask))
return IRQ_NONE;
mul = (1 << 24) >> 1;
/* Calculate initial quot */
----- q1 = DIV_ROUND_CLOSEST(fref1, rate);
+++++ q1 = DIV_ROUND_UP(fref1, rate);
/* Scale q1 if it's too big */
if (q1 > 256) {
/* Case 2 */
----- q2 = DIV_ROUND_CLOSEST(fref2, rate);
+++++ q2 = DIV_ROUND_UP(fref2, rate);
r2 = abs(fref2 / q2 - rate);
/*
mul = (1 << 24) * 2 / 5;
}
----- /* Check case 3 only If the divisor is big enough */
+++++ /* Check case 3 only if the divisor is big enough */
if (fref / rate >= 80) {
u64 fssp;
u32 m;
/* Calculate initial quot */
----- q1 = DIV_ROUND_CLOSEST(fref, rate);
+++++ q1 = DIV_ROUND_UP(fref, rate);
m = (1 << 24) / q1;
/* Get the remainder */
static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
{
----- unsigned long ssp_clk = drv_data->max_clk_rate;
+++++ unsigned long ssp_clk = drv_data->master->max_speed_hz;
const struct ssp_device *ssp = drv_data->ssp;
rate = min_t(int, ssp_clk, rate);
}
static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
----- struct chip_data *chip, int rate)
+++++ int rate)
{
+++++ struct chip_data *chip = drv_data->cur_chip;
unsigned int clk_div;
switch (drv_data->ssp_type) {
drv_data->read = drv_data->rx ? chip->read : null_reader;
/* Change speed and bit per word on a per transfer */
----- cr0 = chip->cr0;
----- if (transfer->speed_hz || transfer->bits_per_word) {
-----
----- bits = chip->bits_per_word;
----- speed = chip->speed_hz;
-----
----- if (transfer->speed_hz)
----- speed = transfer->speed_hz;
-----
----- if (transfer->bits_per_word)
----- bits = transfer->bits_per_word;
-----
----- clk_div = pxa2xx_ssp_get_clk_div(drv_data, chip, speed);
-----
----- if (bits <= 8) {
----- drv_data->n_bytes = 1;
----- drv_data->read = drv_data->read != null_reader ?
----- u8_reader : null_reader;
----- drv_data->write = drv_data->write != null_writer ?
----- u8_writer : null_writer;
----- } else if (bits <= 16) {
----- drv_data->n_bytes = 2;
----- drv_data->read = drv_data->read != null_reader ?
----- u16_reader : null_reader;
----- drv_data->write = drv_data->write != null_writer ?
----- u16_writer : null_writer;
----- } else if (bits <= 32) {
----- drv_data->n_bytes = 4;
----- drv_data->read = drv_data->read != null_reader ?
----- u32_reader : null_reader;
----- drv_data->write = drv_data->write != null_writer ?
----- u32_writer : null_writer;
----- }
----- /* if bits/word is changed in dma mode, then must check the
----- * thresholds and burst also */
----- if (chip->enable_dma) {
----- if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
----- message->spi,
----- bits, &dma_burst,
----- &dma_thresh))
----- dev_warn_ratelimited(&message->spi->dev,
----- "pump_transfers: DMA burst size reduced to match bits_per_word\n");
----- }
-----
----- cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
+++++ bits = transfer->bits_per_word;
+++++ speed = transfer->speed_hz;
+++++
+++++ clk_div = pxa2xx_ssp_get_clk_div(drv_data, speed);
+++++
+++++ if (bits <= 8) {
+++++ drv_data->n_bytes = 1;
+++++ drv_data->read = drv_data->read != null_reader ?
+++++ u8_reader : null_reader;
+++++ drv_data->write = drv_data->write != null_writer ?
+++++ u8_writer : null_writer;
+++++ } else if (bits <= 16) {
+++++ drv_data->n_bytes = 2;
+++++ drv_data->read = drv_data->read != null_reader ?
+++++ u16_reader : null_reader;
+++++ drv_data->write = drv_data->write != null_writer ?
+++++ u16_writer : null_writer;
+++++ } else if (bits <= 32) {
+++++ drv_data->n_bytes = 4;
+++++ drv_data->read = drv_data->read != null_reader ?
+++++ u32_reader : null_reader;
+++++ drv_data->write = drv_data->write != null_writer ?
+++++ u32_writer : null_writer;
++++ }
+++++ /*
+++++ * if bits/word is changed in dma mode, then must check the
+++++ * thresholds and burst also
+++++ */
+++++ if (chip->enable_dma) {
+++++ if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
+++++ message->spi,
+++++ bits, &dma_burst,
+++++ &dma_thresh))
+++++ dev_warn_ratelimited(&message->spi->dev,
+++++ "pump_transfers: DMA burst size reduced to match bits_per_word\n");
+ }
+
+++++ /* NOTE: PXA25x_SSP _could_ use external clocking ... */
+++++ cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
+++++ if (!pxa25x_ssp_comp(drv_data))
+++++ dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
+++++ drv_data->master->max_speed_hz
+++++ / (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
+++++ chip->enable_dma ? "DMA" : "PIO");
+++++ else
+++++ dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
+++++ drv_data->master->max_speed_hz / 2
+++++ / (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
+++++ chip->enable_dma ? "DMA" : "PIO");
++++
message->state = RUNNING_STATE;
drv_data->dma_mapped = 0;
struct chip_data *chip;
const struct lpss_config *config;
struct driver_data *drv_data = spi_master_get_devdata(spi->master);
----- unsigned int clk_div;
uint tx_thres, tx_hi_thres, rx_thres;
switch (drv_data->ssp_type) {
case LPSS_LPT_SSP:
case LPSS_BYT_SSP:
case LPSS_SPT_SSP:
+++++ case LPSS_BXT_SSP:
config = lpss_get_config(drv_data);
tx_thres = config->tx_threshold_lo;
tx_hi_thres = config->tx_threshold_hi;
}
}
----- clk_div = pxa2xx_ssp_get_clk_div(drv_data, chip, spi->max_speed_hz);
----- chip->speed_hz = spi->max_speed_hz;
-----
----- chip->cr0 = pxa2xx_configure_sscr0(drv_data, clk_div,
----- spi->bits_per_word);
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
chip->threshold = (QUARK_X1000_SSCR1_RxTresh(rx_thres)
if (spi->mode & SPI_LOOP)
chip->cr1 |= SSCR1_LBM;
----- /* NOTE: PXA25x_SSP _could_ use external clocking ... */
----- if (!pxa25x_ssp_comp(drv_data))
----- dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
----- drv_data->max_clk_rate
----- / (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
----- chip->enable_dma ? "DMA" : "PIO");
----- else
----- dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
----- drv_data->max_clk_rate / 2
----- / (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)),
----- chip->enable_dma ? "DMA" : "PIO");
-----
if (spi->bits_per_word <= 8) {
chip->n_bytes = 1;
chip->read = u8_reader;
chip->read = u16_reader;
chip->write = u16_writer;
} else if (spi->bits_per_word <= 32) {
----- if (!is_quark_x1000_ssp(drv_data))
----- chip->cr0 |= SSCR0_EDSS;
chip->n_bytes = 4;
chip->read = u32_reader;
chip->write = u32_writer;
}
----- chip->bits_per_word = spi->bits_per_word;
spi_set_ctldata(spi, chip);
kfree(chip);
}
+++++ #ifdef CONFIG_PCI
#ifdef CONFIG_ACPI
static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
+++++ static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
+++++ {
+++++ unsigned int devid;
+++++ int port_id = -1;
+++++
+++++ if (adev && adev->pnp.unique_id &&
+++++ !kstrtouint(adev->pnp.unique_id, 0, &devid))
+++++ port_id = devid;
+++++ return port_id;
+++++ }
+++++ #else /* !CONFIG_ACPI */
+++++ static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
+++++ {
+++++ return -1;
+++++ }
+++++ #endif
+++++
/*
* PCI IDs of compound devices that integrate both host controller and private
* integrated DMA engine. Please note these are not used in module
/* SPT-H */
{ PCI_VDEVICE(INTEL, 0xa129), LPSS_SPT_SSP },
{ PCI_VDEVICE(INTEL, 0xa12a), LPSS_SPT_SSP },
+++++ /* BXT */
+++++ { PCI_VDEVICE(INTEL, 0x0ac2), LPSS_BXT_SSP },
+++++ { PCI_VDEVICE(INTEL, 0x0ac4), LPSS_BXT_SSP },
+++++ { PCI_VDEVICE(INTEL, 0x0ac6), LPSS_BXT_SSP },
+++++ /* APL */
+++++ { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
+++++ { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
+++++ { PCI_VDEVICE(INTEL, 0x5ac6), LPSS_BXT_SSP },
{ },
};
}
static struct pxa2xx_spi_master *
----- pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
+++++ pxa2xx_spi_init_pdata(struct platform_device *pdev)
{
struct pxa2xx_spi_master *pdata;
struct acpi_device *adev;
struct resource *res;
const struct acpi_device_id *adev_id = NULL;
const struct pci_device_id *pcidev_id = NULL;
----- int devid, type;
+++++ int type;
----- if (!ACPI_HANDLE(&pdev->dev) ||
----- acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
----- return NULL;
+++++ adev = ACPI_COMPANION(&pdev->dev);
if (dev_is_pci(pdev->dev.parent))
pcidev_id = pci_match_id(pxa2xx_spi_pci_compound_match,
to_pci_dev(pdev->dev.parent));
----- else
+++++ else if (adev)
adev_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
&pdev->dev);
+++++ else
+++++ return NULL;
if (adev_id)
type = (int)adev_id->driver_data;
ssp->irq = platform_get_irq(pdev, 0);
ssp->type = type;
ssp->pdev = pdev;
-----
----- ssp->port_id = -1;
----- if (adev->pnp.unique_id && !kstrtoint(adev->pnp.unique_id, 0, &devid))
----- ssp->port_id = devid;
+++++ ssp->port_id = pxa2xx_spi_get_port_id(adev);
pdata->num_chipselect = 1;
pdata->enable_dma = true;
return pdata;
}
----- #else
+++++ #else /* !CONFIG_PCI */
static inline struct pxa2xx_spi_master *
----- pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
+++++ pxa2xx_spi_init_pdata(struct platform_device *pdev)
{
return NULL;
}
struct spi_master *master;
struct driver_data *drv_data;
struct ssp_device *ssp;
+++++ const struct lpss_config *config;
int status;
u32 tmp;
platform_info = dev_get_platdata(dev);
if (!platform_info) {
----- platform_info = pxa2xx_spi_acpi_get_pdata(pdev);
+++++ platform_info = pxa2xx_spi_init_pdata(pdev);
if (!platform_info) {
dev_err(&pdev->dev, "missing platform data\n");
return -ENODEV;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
master->bus_num = ssp->port_id;
----- master->num_chipselect = platform_info->num_chipselect;
master->dma_alignment = DMA_ALIGNMENT;
master->cleanup = cleanup;
master->setup = setup;
/* Enable SOC clock */
clk_prepare_enable(ssp->clk);
----- drv_data->max_clk_rate = clk_get_rate(ssp->clk);
+++++ master->max_speed_hz = clk_get_rate(ssp->clk);
/* Load default SSP configuration */
pxa2xx_spi_write(drv_data, SSCR0, 0);
if (is_lpss_ssp(drv_data))
lpss_ssp_setup(drv_data);
+++++ if (is_lpss_ssp(drv_data)) {
+++++ lpss_ssp_setup(drv_data);
+++++ config = lpss_get_config(drv_data);
+++++ if (config->reg_capabilities >= 0) {
+++++ tmp = __lpss_ssp_read_priv(drv_data,
+++++ config->reg_capabilities);
+++++ tmp &= LPSS_CAPS_CS_EN_MASK;
+++++ tmp >>= LPSS_CAPS_CS_EN_SHIFT;
+++++ platform_info->num_chipselect = ffz(tmp);
+++++ }
+++++ }
+++++ master->num_chipselect = platform_info->num_chipselect;
+++++
tasklet_init(&drv_data->pump_transfers, pump_transfers,
(unsigned long)drv_data);
struct ssp_device *ssp = drv_data->ssp;
int status = 0;
----- pxa2xx_spi_dma_resume(drv_data);
-----
/* Enable the SSP clock */
if (!pm_runtime_suspended(dev))
clk_prepare_enable(ssp->clk);
u32 clear_sr;
u32 mask_sr;
----- /* Maximun clock rate */
----- unsigned long max_clk_rate;
-----
/* Message Transfer pump */
struct tasklet_struct pump_transfers;
};
struct chip_data {
----- u32 cr0;
u32 cr1;
u32 dds_rate;
---- - u32 psp;
u32 timeout;
u8 n_bytes;
u32 dma_burst_size;
u16 lpss_rx_threshold;
u16 lpss_tx_threshold;
u8 enable_dma;
----- u8 bits_per_word;
----- u32 speed_hz;
union {
int gpio_cs;
unsigned int frm;
extern void pxa2xx_spi_dma_start(struct driver_data *drv_data);
extern int pxa2xx_spi_dma_setup(struct driver_data *drv_data);
extern void pxa2xx_spi_dma_release(struct driver_data *drv_data);
----- extern void pxa2xx_spi_dma_resume(struct driver_data *drv_data);
extern int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
struct spi_device *spi,
u8 bits_per_word,
return 0;
}
static inline void pxa2xx_spi_dma_release(struct driver_data *drv_data) {}
----- static inline void pxa2xx_spi_dma_resume(struct driver_data *drv_data) {}
static inline int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
struct spi_device *spi,
u8 bits_per_word,
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");
+++++#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \
+++++ SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \
+++++ "transfer_bytes_histo_" number, \
+++++ transfer_bytes_histo[index], "%lu")
+++++SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
+++++SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
+++++SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
+++++SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
+++++SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
+++++SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
+++++SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
+++++SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
+++++SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
+++++SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
+++++SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
+++++SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
+++++SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
+++++SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
+++++SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
+++++SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
+++++SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
+++++
static struct attribute *spi_dev_attrs[] = {
&dev_attr_modalias.attr,
NULL,
&dev_attr_spi_device_bytes.attr,
&dev_attr_spi_device_bytes_rx.attr,
&dev_attr_spi_device_bytes_tx.attr,
+++++ &dev_attr_spi_device_transfer_bytes_histo0.attr,
+++++ &dev_attr_spi_device_transfer_bytes_histo1.attr,
+++++ &dev_attr_spi_device_transfer_bytes_histo2.attr,
+++++ &dev_attr_spi_device_transfer_bytes_histo3.attr,
+++++ &dev_attr_spi_device_transfer_bytes_histo4.attr,
+++++ &dev_attr_spi_device_transfer_bytes_histo5.attr,
+++++ &dev_attr_spi_device_transfer_bytes_histo6.attr,
+++++ &dev_attr_spi_device_transfer_bytes_histo7.attr,
+++++ &dev_attr_spi_device_transfer_bytes_histo8.attr,
+++++ &dev_attr_spi_device_transfer_bytes_histo9.attr,
+++++ &dev_attr_spi_device_transfer_bytes_histo10.attr,
+++++ &dev_attr_spi_device_transfer_bytes_histo11.attr,
+++++ &dev_attr_spi_device_transfer_bytes_histo12.attr,
+++++ &dev_attr_spi_device_transfer_bytes_histo13.attr,
+++++ &dev_attr_spi_device_transfer_bytes_histo14.attr,
+++++ &dev_attr_spi_device_transfer_bytes_histo15.attr,
+++++ &dev_attr_spi_device_transfer_bytes_histo16.attr,
NULL,
};
&dev_attr_spi_master_bytes.attr,
&dev_attr_spi_master_bytes_rx.attr,
&dev_attr_spi_master_bytes_tx.attr,
+++++ &dev_attr_spi_master_transfer_bytes_histo0.attr,
+++++ &dev_attr_spi_master_transfer_bytes_histo1.attr,
+++++ &dev_attr_spi_master_transfer_bytes_histo2.attr,
+++++ &dev_attr_spi_master_transfer_bytes_histo3.attr,
+++++ &dev_attr_spi_master_transfer_bytes_histo4.attr,
+++++ &dev_attr_spi_master_transfer_bytes_histo5.attr,
+++++ &dev_attr_spi_master_transfer_bytes_histo6.attr,
+++++ &dev_attr_spi_master_transfer_bytes_histo7.attr,
+++++ &dev_attr_spi_master_transfer_bytes_histo8.attr,
+++++ &dev_attr_spi_master_transfer_bytes_histo9.attr,
+++++ &dev_attr_spi_master_transfer_bytes_histo10.attr,
+++++ &dev_attr_spi_master_transfer_bytes_histo11.attr,
+++++ &dev_attr_spi_master_transfer_bytes_histo12.attr,
+++++ &dev_attr_spi_master_transfer_bytes_histo13.attr,
+++++ &dev_attr_spi_master_transfer_bytes_histo14.attr,
+++++ &dev_attr_spi_master_transfer_bytes_histo15.attr,
+++++ &dev_attr_spi_master_transfer_bytes_histo16.attr,
NULL,
};
struct spi_master *master)
{
unsigned long flags;
+++++ int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
+++++
+++++ if (l2len < 0)
+++++ l2len = 0;
spin_lock_irqsave(&stats->lock, flags);
stats->transfers++;
+++++ stats->transfer_bytes_histo[l2len]++;
stats->bytes += xfer->len;
if ((xfer->tx_buf) &&
static int spi_drv_probe(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
+++++ struct spi_device *spi = to_spi_device(dev);
int ret;
ret = of_clk_set_defaults(dev->of_node, false);
if (ret)
return ret;
+++++ if (dev->of_node) {
+++++ spi->irq = of_irq_get(dev->of_node, 0);
+++++ if (spi->irq == -EPROBE_DEFER)
+++++ return -EPROBE_DEFER;
+++++ if (spi->irq < 0)
+++++ spi->irq = 0;
+++++ }
+++++
ret = dev_pm_domain_attach(dev, true);
if (ret != -EPROBE_DEFER) {
----- ret = sdrv->probe(to_spi_device(dev));
+++++ ret = sdrv->probe(spi);
if (ret)
dev_pm_domain_detach(dev, true);
}
}
/**
--- -- * spi_register_driver - register a SPI driver
+++ ++ * __spi_register_driver - register a SPI driver
* @sdrv: the driver to register
* Context: can sleep
+++++ *
+++++ * Return: zero on success, else a negative error code.
*/
--- --int spi_register_driver(struct spi_driver *sdrv)
+++ ++int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
+++ ++ sdrv->driver.owner = owner;
sdrv->driver.bus = &spi_bus_type;
if (sdrv->probe)
sdrv->driver.probe = spi_drv_probe;
sdrv->driver.shutdown = spi_drv_shutdown;
return driver_register(&sdrv->driver);
}
--- --EXPORT_SYMBOL_GPL(spi_register_driver);
+++ ++EXPORT_SYMBOL_GPL(__spi_register_driver);
/*-------------------------------------------------------------------------*/
* needs to discard the spi_device without adding it, then it should
* call spi_dev_put() on it.
*
----- * Returns a pointer to the new device, or NULL.
+++++ * Return: a pointer to the new device, or NULL.
*/
struct spi_device *spi_alloc_device(struct spi_master *master)
{
* Companion function to spi_alloc_device. Devices allocated with
* spi_alloc_device can be added onto the spi bus with this function.
*
----- * Returns 0 on success; negative errno on failure
+++++ * Return: 0 on success; negative errno on failure
*/
int spi_add_device(struct spi_device *spi)
{
* this is exported so that for example a USB or parport based adapter
* driver could add devices (which it would learn about out-of-band).
*
----- * Returns the new device, or NULL.
+++++ * Return: the new device, or NULL.
*/
struct spi_device *spi_new_device(struct spi_master *master,
struct spi_board_info *chip)
*
* The board info passed can safely be __initdata ... but be careful of
* any embedded pointers (platform_data, etc), they're copied as-is.
+++++ *
+++++ * Return: zero on success, else a negative error code.
*/
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
----- if (spi->cs_gpio >= 0)
+++++ if (gpio_is_valid(spi->cs_gpio))
gpio_set_value(spi->cs_gpio, !enable);
else if (spi->master->set_cs)
spi->master->set_cs(spi, !enable);
*
* If there are more messages in the queue, the next message is returned from
* this call.
+++++ *
+++++ * Return: the next message in the queue, else NULL if the queue is empty.
*/
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
* spi_queued_transfer - transfer function for queued transfers
* @spi: spi device which is requesting transfer
* @msg: spi message which is to handled is queued to driver queue
+++++ *
+++++ * Return: zero on success, else a negative error code.
*/
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
}
spi->max_speed_hz = value;
----- /* IRQ */
----- spi->irq = irq_of_parse_and_map(nc, 0);
-----
/* Store a pointer to the node in the device structure */
of_node_get(nc);
spi->dev.of_node = nc;
* only ones directly touching chip registers. It's how they allocate
* an spi_master structure, prior to calling spi_register_master().
*
----- * This must be called from context that can sleep. It returns the SPI
----- * master structure on success, else NULL.
+++++ * This must be called from context that can sleep.
*
* The caller is responsible for assigning the bus number and initializing
* the master's methods before calling spi_register_master(); and (after errors
----- * adding the device) calling spi_master_put() and kfree() to prevent a memory
----- * leak.
+++++ * adding the device) calling spi_master_put() to prevent a memory leak.
+++++ *
+++++ * Return: the SPI master structure on success, else NULL.
*/
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
* success, else a negative error code (dropping the master's refcount).
* After a successful return, the caller is responsible for calling
* spi_unregister_master().
+++++ *
+++++ * Return: zero on success, else a negative error code.
*/
int spi_register_master(struct spi_master *master)
{
*
* Register a SPI device as with spi_register_master() which will
* automatically be unregister
+++++ *
+++++ * Return: zero on success, else a negative error code.
*/
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
* arch init time. It returns a refcounted pointer to the relevant
* spi_master (which the caller must release), or NULL if there is
* no such master registered.
+++++ *
+++++ * Return: the SPI master structure on success, else NULL.
*/
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
* that the underlying controller or its driver does not support. For
* example, not all hardware supports wire transfers using nine bit words,
* LSB-first wire encoding, or active-high chipselects.
+++++ *
+++++ * Return: zero on success, else a negative error code.
*/
int spi_setup(struct spi_device *spi)
{
unsigned bad_bits, ugly_bits;
----- int status = 0;
+++++ int status;
/* check mode to prevent that DUAL and QUAD set at the same time
*/
if (!spi->bits_per_word)
spi->bits_per_word = 8;
----- if (__spi_validate_bits_per_word(spi->master, spi->bits_per_word))
----- return -EINVAL;
+++++ status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
+++++ if (status)
+++++ return status;
if (!spi->max_speed_hz)
spi->max_speed_hz = spi->master->max_speed_hz;
----- spi_set_cs(spi, false);
-----
if (spi->master->setup)
status = spi->master->setup(spi);
+++++ spi_set_cs(spi, false);
+++++
dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
* no other spi_message queued to that device will be processed.
* (This rule applies equally to all the synchronous transfer calls,
* which are wrappers around this core asynchronous primitive.)
+++++ *
+++++ * Return: zero on success, else a negative error code.
*/
int spi_async(struct spi_device *spi, struct spi_message *message)
{
* no other spi_message queued to that device will be processed.
* (This rule applies equally to all the synchronous transfer calls,
* which are wrappers around this core asynchronous primitive.)
+++++ *
+++++ * Return: zero on success, else a negative error code.
*/
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
* Also, the caller is guaranteeing that the memory associated with the
* message will not be freed before this call returns.
*
----- * It returns zero on success, else a negative error code.
+++++ * Return: zero on success, else a negative error code.
*/
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
* SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
* be released by a spi_bus_unlock call when the exclusive access is over.
*
----- * It returns zero on success, else a negative error code.
+++++ * Return: zero on success, else a negative error code.
*/
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
* exclusive access is over. Data transfer must be done by spi_sync_locked
* and spi_async_locked calls when the SPI bus lock is held.
*
----- * It returns zero on success, else a negative error code.
+++++ * Return: always zero.
*/
int spi_bus_lock(struct spi_master *master)
{
* This call releases an SPI bus lock previously obtained by an spi_bus_lock
* call.
*
----- * It returns zero on success, else a negative error code.
+++++ * Return: always zero.
*/
int spi_bus_unlock(struct spi_master *master)
{
* portable code should never use this for more than 32 bytes.
* Performance-sensitive or bulk transfer code should instead use
* spi_{async,sync}() calls with dma-safe buffers.
+++++ *
+++++ * Return: zero on success, else a negative error code.
*/
int spi_write_then_read(struct spi_device *spi,
const void *txbuf, unsigned n_tx,
kfree(spidev->rx_buffer);
spidev->rx_buffer = NULL;
----- spidev->speed_hz = spidev->spi->max_speed_hz;
+++++ if (spidev->spi)
+++++ spidev->speed_hz = spidev->spi->max_speed_hz;
/* ... after we unbound from the underlying device? */
spin_lock_irq(&spidev->spi_lock);
static struct spi_driver spidev_spi_driver = {
.driver = {
.name = "spidev",
--- -- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(spidev_dt_ids),
},
.probe = spidev_probe,
}
par->fbtftops.write_register = fbtft_write_reg8_bus9;
par->fbtftops.write_vmem = fbtft_write_vmem16_bus9;
----- sdev->bits_per_word = 9;
----- ret = sdev->master->setup(sdev);
----- if (ret) {
+++++ if (par->spi->master->bits_per_word_mask
+++++ & SPI_BPW_MASK(9)) {
+++++ par->spi->bits_per_word = 9;
+++++ } else {
dev_warn(dev,
"9-bit SPI not available, emulating using 8-bit.\n");
----- sdev->bits_per_word = 8;
----- ret = sdev->master->setup(sdev);
----- if (ret)
----- goto out_release;
/* allocate buffer with room for dc bits */
par->extra = devm_kzalloc(par->info->device,
par->txbuf.len + (par->txbuf.len / 8) + 8,
static struct spi_driver flexfb_spi_driver = {
.driver = {
.name = DRVNAME,
--- -- .owner = THIS_MODULE,
},
.probe = flexfb_probe_spi,
.remove = flexfb_remove_spi,
{ .compatible = "omapdss,sony,acx565akm", },
{},
};
+++++MODULE_DEVICE_TABLE(of, acx565akm_of_match);
static struct spi_driver acx565akm_driver = {
.driver = {
.name = "acx565akm",
--- -- .owner = THIS_MODULE,
.of_match_table = acx565akm_of_match,
.suppress_bind_attrs = true,
},
/**
* struct spi_statistics - statistics for spi transfers
----- * @clock: lock protecting this structure
+++++ * @lock: lock protecting this structure
*
* @messages: number of spi-messages handled
* @transfers: number of spi_transfers handled
* @bytes_tx: number of bytes sent to device
* @bytes_rx: number of bytes received from device
*
+++++ * @transfer_bytes_histo:
+++++ * transfer bytes histogramm
*/
struct spi_statistics {
spinlock_t lock; /* lock for the whole structure */
unsigned long long bytes_rx;
unsigned long long bytes_tx;
+++++#define SPI_STATISTICS_HISTO_SIZE 17
+++++ unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE];
};
void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
return drv ? container_of(drv, struct spi_driver, driver) : NULL;
}
--- --extern int spi_register_driver(struct spi_driver *sdrv);
+++ ++extern int __spi_register_driver(struct module *owner, struct spi_driver *sdrv);
/**
* spi_unregister_driver - reverse effect of spi_register_driver
driver_unregister(&sdrv->driver);
}
+++ ++/* use a define to avoid include chaining to get THIS_MODULE */
+++ ++#define spi_register_driver(driver) \
+++ ++ __spi_register_driver(THIS_MODULE, driver)
+++ ++
/**
* module_spi_driver() - Helper macro for registering a SPI driver
* @__spi_driver: spi_driver struct
* @len: data buffer size
* Context: can sleep
*
----- * This writes the buffer and returns zero or a negative error code.
+++++ * This function writes the buffer @buf.
* Callable only from contexts that can sleep.
+++++ *
+++++ * Return: zero on success, else a negative error code.
*/
static inline int
spi_write(struct spi_device *spi, const void *buf, size_t len)
* @len: data buffer size
* Context: can sleep
*
----- * This reads the buffer and returns zero or a negative error code.
+++++ * This function reads the buffer @buf.
* Callable only from contexts that can sleep.
+++++ *
+++++ * Return: zero on success, else a negative error code.
*/
static inline int
spi_read(struct spi_device *spi, void *buf, size_t len)
*
* For more specific semantics see spi_sync().
*
----- * It returns zero on success, else a negative error code.
+++++ * Return: zero on success, else a negative error code.
*/
static inline int
spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers,
* @cmd: command to be written before data is read back
* Context: can sleep
*
----- * This returns the (unsigned) eight bit number returned by the
----- * device, or else a negative error code. Callable only from
----- * contexts that can sleep.
+++++ * Callable only from contexts that can sleep.
+++++ *
+++++ * Return: the (unsigned) eight bit number returned by the
+++++ * device, or else a negative error code.
*/
static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd)
{
* @cmd: command to be written before data is read back
* Context: can sleep
*
----- * This returns the (unsigned) sixteen bit number returned by the
----- * device, or else a negative error code. Callable only from
----- * contexts that can sleep.
----- *
* The number is returned in wire-order, which is at least sometimes
* big-endian.
+++++ *
+++++ * Callable only from contexts that can sleep.
+++++ *
+++++ * Return: the (unsigned) sixteen bit number returned by the
+++++ * device, or else a negative error code.
*/
static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
{
* @cmd: command to be written before data is read back
* Context: can sleep
*
----- * This returns the (unsigned) sixteen bit number returned by the device in cpu
----- * endianness, or else a negative error code. Callable only from contexts that
----- * can sleep.
----- *
* This function is similar to spi_w8r16, with the exception that it will
* convert the read 16 bit data word from big-endian to native endianness.
*
+++++ * Callable only from contexts that can sleep.
+++++ *
+++++ * Return: the (unsigned) sixteen bit number returned by the device in cpu
+++++ * endianness, or else a negative error code.
*/
static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
struct wm0010_priv *wm0010 = snd_soc_codec_get_drvdata(codec);
unsigned long flags;
int ret;
----- const struct firmware *fw;
struct spi_message m;
struct spi_transfer t;
struct dfw_pllrec pll_rec;
wm0010->state = WM0010_OUT_OF_RESET;
spin_unlock_irqrestore(&wm0010->irq_lock, flags);
----- /* First the bootloader */
----- ret = request_firmware(&fw, "wm0010_stage2.bin", codec->dev);
----- if (ret != 0) {
----- dev_err(codec->dev, "Failed to request stage2 loader: %d\n",
----- ret);
----- goto abort;
----- }
-----
if (!wait_for_completion_timeout(&wm0010->boot_completion,
msecs_to_jiffies(20)))
dev_err(codec->dev, "Failed to get interrupt from DSP\n");
img_swap = kzalloc(len, GFP_KERNEL | GFP_DMA);
if (!img_swap)
----- goto abort;
+++++ goto abort_out;
/* We need to re-order for 0010 */
byte_swap_64((u64 *)&pll_rec, img_swap, len);
spi_message_add_tail(&t, &m);
ret = spi_sync(spi, &m);
----- if (ret != 0) {
+++++ if (ret) {
dev_err(codec->dev, "First PLL write failed: %d\n", ret);
----- goto abort;
+++++ goto abort_swap;
}
/* Use a second send of the message to get the return status */
ret = spi_sync(spi, &m);
----- if (ret != 0) {
+++++ if (ret) {
dev_err(codec->dev, "Second PLL write failed: %d\n", ret);
----- goto abort;
+++++ goto abort_swap;
}
p = (u32 *)out;
return 0;
+++++abort_swap:
+++++ kfree(img_swap);
+++++abort_out:
+++++ kfree(out);
abort:
/* Put the chip back into reset */
wm0010_halt(codec);
static struct spi_driver wm0010_spi_driver = {
.driver = {
.name = "wm0010",
--- -- .owner = THIS_MODULE,
},
.probe = wm0010_spi_probe,
.remove = wm0010_spi_remove,