From 9ccdf5023202c85251b7405e0c4c6b7add2d6401 Mon Sep 17 00:00:00 2001
From: Marc Blassin
Date: Mon, 14 Nov 2011 18:15:42 +0100
Subject: [PATCH] Telephony port to Linux 3.0: HSI driver.

BZ: 14450

This is the HSI part of the telephony port from Linux 2.6 to Linux 3.0.

This patch contains:
- the HSI driver files (controller and client) written by
  Olivier Stoltz-Douchet, and the related include files.
- the required changes to the kernel configuration files to
  enable/disable HSI in the default mfld kernel configuration.
- the required changes to enable the HSI hardware in the platform
  data (the changes needed to keep the ITP working correctly have
  already been taken into account).

  modified:   arch/x86/platform/mrst/mrst.c
  modified:   drivers/Kconfig
  modified:   drivers/Makefile
  new file:   drivers/hsi/Kconfig
  new file:   drivers/hsi/Makefile
  new file:   drivers/hsi/clients/Kconfig
  new file:   drivers/hsi/clients/Makefile
  new file:   drivers/hsi/clients/cmt_speech.c
  new file:   drivers/hsi/clients/hsi_char.c
  new file:   drivers/hsi/clients/hsi_ffl_tty.c
  new file:   drivers/hsi/clients/ssi_protocol.c
  new file:   drivers/hsi/controllers/Kconfig
  new file:   drivers/hsi/controllers/Makefile
  new file:   drivers/hsi/controllers/hsi_arasan.h
  new file:   drivers/hsi/controllers/hsi_dwahb_dma.h
  new file:   drivers/hsi/controllers/intel_mid_hsi.c
  new file:   drivers/hsi/controllers/omap_ssi.c
  new file:   drivers/hsi/hsi.c
  new file:   include/linux/hsi/hsi.h
  new file:   include/linux/hsi/hsi_char.h
  new file:   include/linux/hsi/hsi_ffl_tty.h
  new file:   include/linux/hsi/intel_mid_hsi.h
  new file:   include/linux/hsi/omap_ssi_hack.h
  new file:   include/linux/hsi/ssip_slave.h

Change-Id: I4f89ac2b8482403301c84dd018bd285b812214f0
Signed-off-by: Marc Blassin
Reviewed-on: http://android.intel.com:8080/24611
Reviewed-by: Gross, Mark
Tested-by: Gross, Mark
---
 arch/x86/configs/i386_mfld_defconfig    |   24 +
 arch/x86/platform/mrst/mrst.c           |  160 +-
 drivers/Kconfig                         |    2 +
 drivers/Makefile                        |    1 +
 drivers/hsi/Kconfig                     |   16 +
 drivers/hsi/Makefile                    |    5 +
 drivers/hsi/clients/Kconfig             |  102 +
 drivers/hsi/clients/Makefile            |   11 +
 drivers/hsi/clients/hsi_char.c          | 1053 +++++++++
 drivers/hsi/clients/hsi_ffl_tty.c       | 3737 +++++++++++++++++++++++++++
 drivers/hsi/controllers/Kconfig         |   22 +
 drivers/hsi/controllers/Makefile        |    6 +
 drivers/hsi/controllers/hsi_arasan.h    |  132 ++
 drivers/hsi/controllers/hsi_dwahb_dma.h |  127 ++
 drivers/hsi/controllers/intel_mid_hsi.c | 3453 ++++++++++++++++++++++++++++
 drivers/hsi/hsi.c                       |  516 +++++
 include/linux/hsi/hsi.h                 |  376 ++++
 include/linux/hsi/hsi_char.h            |   66 +
 include/linux/hsi/hsi_ffl_tty.h         |  297 +++
 include/linux/hsi/intel_mid_hsi.h       |   60 +
 20 files changed, 10157 insertions(+), 9 deletions(-)
 create mode 100644 drivers/hsi/Kconfig
 create mode 100644 drivers/hsi/Makefile
 create mode 100644 drivers/hsi/clients/Kconfig
 create mode 100644 drivers/hsi/clients/Makefile
 create mode 100644 drivers/hsi/clients/hsi_char.c
 create mode 100644 drivers/hsi/clients/hsi_ffl_tty.c
 create mode 100644 drivers/hsi/controllers/Kconfig
 create mode 100644 drivers/hsi/controllers/Makefile
 create mode 100644 drivers/hsi/controllers/hsi_arasan.h
 create mode 100644 drivers/hsi/controllers/hsi_dwahb_dma.h
 create mode 100644 drivers/hsi/controllers/intel_mid_hsi.c
 create mode 100644 drivers/hsi/hsi.c
 create mode 100644 include/linux/hsi/hsi.h
 create mode 100644 include/linux/hsi/hsi_char.h
 create mode 100644 include/linux/hsi/hsi_ffl_tty.h
 create mode 100644 include/linux/hsi/intel_mid_hsi.h

diff --git a/arch/x86/configs/i386_mfld_defconfig b/arch/x86/configs/i386_mfld_defconfig
index
fe5ae9c..86f4e7d 100644 --- a/arch/x86/configs/i386_mfld_defconfig +++ b/arch/x86/configs/i386_mfld_defconfig @@ -1043,6 +1043,30 @@ CONFIG_SPI_DW_PCI=y # CONFIG_SPI_TLE62X0 is not set # +# HSI driver +# +#CONFIG_HSI=y + +# +# HSI controllers +# +#CONFIG_HSI_ARASAN=y +#CONFIG_HSI_ARASAN_CONFIG=y + +# +# HSI clients +# +# CONFIG_HSI_CHAR is not set +#CONFIG_HSI_FFL_TTY=y +#CONFIG_HSI_FFL_TTY_NAME="IFX" +#CONFIG_HSI_FFL_TTY_FRAME_LENGTH=4096 +#CONFIG_HSI_FFL_TTY_HEADER_LENGTH=4 +#CONFIG_HSI_FFL_ENSURE_LAST_WORD_NULL=y +#CONFIG_HSI_FFL_TTY_CHANNEL=1 +#CONFIG_HSI_FFL_TTY_STATS=y + + +# # PPS support # # CONFIG_PPS is not set diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c index a487362..4046bb2 100644 --- a/arch/x86/platform/mrst/mrst.c +++ b/arch/x86/platform/mrst/mrst.c @@ -37,7 +37,8 @@ #include #include #include - +#include +#include #include #include #include @@ -86,6 +87,16 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX]; EXPORT_SYMBOL_GPL(sfi_mrtc_array); int sfi_mrtc_num; +/* when ITP is needed we must avoid touching the configurations of these pins.*/ +/* see gpio part of this file */ +static int itp_connected; +static int __init parse_itp(char *arg) +{ + itp_connected = 1; + return 0; +} +early_param("itp", parse_itp); + static void mrst_power_off(void) { if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT) @@ -243,7 +254,7 @@ static unsigned long __init mrst_calibrate_tsc(void) fast_calibrate = apbt_quick_calibrate(); local_irq_restore(flags); } - + if (fast_calibrate) return fast_calibrate; @@ -413,6 +424,7 @@ struct devs_id { u8 type; u8 delay; void *(*get_platform_data)(void *info); + u8 trash_itp;/* true if this driver uses pin muxed with XDB connector */ }; /* the offset for the mapping of global gpio pin to irq */ @@ -799,6 +811,97 @@ static void *msic_adc_platform_data(void *info) return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_ADC); } + +static void *hsi_modem_platform_data(void *data) +{ + int rst_out = get_gpio_by_name("ifx_mdm_rst_out"); + int pwr_on = get_gpio_by_name("ifx_mdm_pwr_on"); + int rst_pmu = get_gpio_by_name("ifx_mdm_rst_pmu"); + int fcdp_rb = get_gpio_by_name("modem-gpio2"); + + static const char hsi_char_name[] = "hsi_char"; + static const char hsi_ffl_name[] = "hsi-ffl"; + + static struct hsi_board_info hsi_info[2] = { + [0] = { + .name = hsi_char_name, + .hsi_id = 0, + .port = 0, + .archdata = NULL, + .tx_cfg.speed = 200000, /* tx clock, kHz */ + .tx_cfg.channels = 8, + .tx_cfg.mode = HSI_MODE_FRAME, + .tx_cfg.arb_mode = HSI_ARB_RR, + .rx_cfg.flow = HSI_FLOW_SYNC, + .rx_cfg.mode = HSI_MODE_FRAME, + .rx_cfg.channels = 8 + }, + [1] = { + .name = hsi_ffl_name, + .hsi_id = 0, + .port = 0, + .archdata = NULL, + .tx_cfg.speed = 100000, /* tx clock, kHz */ + .tx_cfg.channels = 8, + .tx_cfg.mode = HSI_MODE_FRAME, + .tx_cfg.arb_mode = HSI_ARB_RR, + .rx_cfg.flow = HSI_FLOW_SYNC, + .rx_cfg.mode = HSI_MODE_FRAME, + .rx_cfg.channels = 8 + } + }; + + static struct hsi_mid_platform_data mid_info = { + .tx_dma_channels[0] = -1, + .tx_dma_channels[1] = 5, + .tx_dma_channels[2] = -1, + .tx_dma_channels[3] = -1, + .tx_dma_channels[4] = -1, + .tx_dma_channels[5] = -1, + .tx_dma_channels[6] = -1, + .tx_dma_channels[7] = -1, + .tx_fifo_sizes[0] = -1, + .tx_fifo_sizes[1] = 1024, + .tx_fifo_sizes[2] = -1, + .tx_fifo_sizes[3] = -1, + .tx_fifo_sizes[4] = -1, + .tx_fifo_sizes[5] = -1, + .tx_fifo_sizes[6] = -1, + .tx_fifo_sizes[7] = -1, + .rx_dma_channels[0] = -1, + .rx_dma_channels[1] = 1, + .rx_dma_channels[2] = -1, + .rx_dma_channels[3] = 
-1, + .rx_dma_channels[4] = -1, + .rx_dma_channels[5] = -1, + .rx_dma_channels[6] = -1, + .rx_dma_channels[7] = -1, + .rx_fifo_sizes[0] = -1, + .rx_fifo_sizes[1] = 1024, + .rx_fifo_sizes[2] = -1, + .rx_fifo_sizes[3] = -1, + .rx_fifo_sizes[4] = -1, + .rx_fifo_sizes[5] = -1, + .rx_fifo_sizes[6] = -1, + .rx_fifo_sizes[7] = -1, + }; + + printk(KERN_INFO "HSI platform data setup\n"); + + printk(KERN_INFO "HSI mdm GPIOs %d, %d, %d, %d\n", + rst_out, pwr_on, rst_pmu, fcdp_rb); + + mid_info.gpio_mdm_rst_out = rst_out; + mid_info.gpio_mdm_pwr_on = pwr_on; + mid_info.gpio_mdm_rst_bbn = rst_pmu; + mid_info.gpio_fcdp_rb = fcdp_rb; + + hsi_info[0].platform_data = (void *)&mid_info; + hsi_info[1].platform_data = (void *)&mid_info; + + return &hsi_info[0]; +} + static void *msic_battery_platform_data(void *info) { return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_BATTERY); @@ -900,7 +1003,7 @@ static const struct devs_id __initconst device_ids[] = { {"ektf2136_spi", SFI_DEV_TYPE_SPI, 0, &ektf2136_spi_platform_data}, {"msic_adc", SFI_DEV_TYPE_IPC, 1, &msic_adc_platform_data}, {"max17042", SFI_DEV_TYPE_I2C, 1, &max17042_platform_data}, - + {"hsi_ifx_modem", SFI_DEV_TYPE_HSI, 0, &hsi_modem_platform_data}, /* MSIC subdevices */ {"msic_battery", SFI_DEV_TYPE_IPC, 1, &msic_battery_platform_data}, {"msic_gpio", SFI_DEV_TYPE_IPC, 1, &msic_gpio_platform_data}, @@ -926,7 +1029,7 @@ static int i2c_next_dev; static void __init intel_scu_device_register(struct platform_device *pdev) { - if(ipc_next_dev == MAX_IPCDEVS) + if (ipc_next_dev == MAX_IPCDEVS) pr_err("too many SCU IPC devices"); else ipc_devs[ipc_next_dev++] = pdev; @@ -1101,8 +1204,30 @@ static void __init sfi_handle_i2c_dev(int bus, struct i2c_board_info *i2c_info) intel_scu_i2c_device_register(bus, i2c_info); else i2c_register_board_info(bus, i2c_info, 1); - } +} + +static void sfi_handle_hsi_dev(struct hsi_board_info *hsi_info) +{ + const struct devs_id *dev = device_ids; + void *pdata = NULL; + while (dev->name[0]) { + if (dev->type == SFI_DEV_TYPE_HSI && + !strncmp(dev->name, hsi_info->name, 16)) { + pdata = dev->get_platform_data(hsi_info); + if (itp_connected && dev->trash_itp) + return; + break; + } + dev++; + } + + if (pdata) { + pr_info("SFI register platform data for HSI device %s\n", + dev->name); + hsi_register_board_info(pdata, 2); + } +} static int __init sfi_parse_devs(struct sfi_table_header *table) { @@ -1110,6 +1235,7 @@ static int __init sfi_parse_devs(struct sfi_table_header *table) struct sfi_device_table_entry *pentry; struct spi_board_info spi_info; struct i2c_board_info i2c_info; + struct hsi_board_info hsi_info; int num, i, bus; int ioapic; struct io_apic_irq_attr irq_attr; @@ -1168,9 +1294,8 @@ static int __init sfi_parse_devs(struct sfi_table_header *table) i2c_info.type, i2c_info.irq, i2c_info.addr); - if (!strcmp(i2c_info.type, "mxt224")){ + if (!strcmp(i2c_info.type, "mxt224")) break; - } /* Ignore all sensors info for PR2 and PR3 */ if (mfld_board_id() == MFLD_BID_PR2_PROTO || mfld_board_id() == MFLD_BID_PR2_PNP || @@ -1182,8 +1307,25 @@ static int __init sfi_parse_devs(struct sfi_table_header *table) sfi_handle_i2c_dev(bus, &i2c_info); break; - case SFI_DEV_TYPE_UART: case SFI_DEV_TYPE_HSI: + memset(&hsi_info, 0, sizeof(hsi_info)); + hsi_info.name = kzalloc(16, GFP_KERNEL); + if (hsi_info.name == NULL) { + pr_err("out of memory for HSI device '%s'.\n", + pentry->name); + continue; + } + strncpy((char *)hsi_info.name, pentry->name, 16); + hsi_info.hsi_id = pentry->host_num; + hsi_info.port = pentry->addr; + 
pr_info("info[%2d]: HSI bus = %d, name = %16.16s, " + "port = %d\n", i, + hsi_info.hsi_id, + hsi_info.name, + hsi_info.port); + sfi_handle_hsi_dev(&hsi_info); + break; + case SFI_DEV_TYPE_UART: default: ; } @@ -1194,7 +1336,7 @@ static int __init sfi_parse_devs(struct sfi_table_header *table) #include #include #include -static u32 board_id = 0; +static u32 board_id; static int board_id_proc_show(struct seq_file *m, void *v) { char *bid; diff --git a/drivers/Kconfig b/drivers/Kconfig index d0258eb..56d45f6 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -52,6 +52,8 @@ source "drivers/i2c/Kconfig" source "drivers/spi/Kconfig" +source "drivers/hsi/Kconfig" + source "drivers/pps/Kconfig" source "drivers/ptp/Kconfig" diff --git a/drivers/Makefile b/drivers/Makefile index 4ea4ac9..e0139786e 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -51,6 +51,7 @@ obj-$(CONFIG_ATA) += ata/ obj-$(CONFIG_TARGET_CORE) += target/ obj-$(CONFIG_MTD) += mtd/ obj-$(CONFIG_SPI) += spi/ +obj-$(CONFIG_HSI) += hsi/ obj-y += net/ obj-$(CONFIG_ATM) += atm/ obj-$(CONFIG_FUSION) += message/ diff --git a/drivers/hsi/Kconfig b/drivers/hsi/Kconfig new file mode 100644 index 0000000..94fc793 --- /dev/null +++ b/drivers/hsi/Kconfig @@ -0,0 +1,16 @@ +# +# HSI driver configuration +# +menuconfig HSI + bool "HSI support" + ---help--- + The "High speed synchronous Serial Interface" is + synchronous serial interface used mainly to connect + application engines and cellular modems. + +if HSI + +source "drivers/hsi/controllers/Kconfig" +source "drivers/hsi/clients/Kconfig" + +endif # HSI diff --git a/drivers/hsi/Makefile b/drivers/hsi/Makefile new file mode 100644 index 0000000..ebc91b3 --- /dev/null +++ b/drivers/hsi/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for HSI +# +obj-$(CONFIG_HSI) += hsi.o +obj-y += controllers/ clients/ diff --git a/drivers/hsi/clients/Kconfig b/drivers/hsi/clients/Kconfig new file mode 100644 index 0000000..95de9ed --- /dev/null +++ b/drivers/hsi/clients/Kconfig @@ -0,0 +1,102 @@ +# +# HSI clients configuration +# + +comment "HSI clients" + +config HSI_CHAR + tristate "HSI/SSI character driver" + depends on HSI + help + If you say Y here, you will enable the HSI/SSI character driver. + This driver provides a simple character device interface for + serial communication with the cellular modem over HSI/SSI bus. + + If unsure, say N. + +config HSI_FFL_TTY + tristate "Fixed frame length protocol on HSI" + default n + depends on HSI + help + If you say Y here, you will enable the fixed frame length protocol + driver over an HSI physical link. + This driver implements a TTY interface for transferring data over + HSI between two devices using a fixed frame length protocol. + + If unsure, say N. + +if HSI_FFL_TTY + +config HSI_FFL_TTY_NAME + string "Base name for the TTY" + default "IFX" + help + Sets the base name for the TTY associated to this fixed frame length + protocol. + The base name will be appended to the tty interface used by the HSI + fixed frame length protocol. + + If unsure, use the default value. + + +config HSI_FFL_TTY_FRAME_LENGTH + int "Fixed frame length" + default "4096" + range 4 131072 + help + Sets the fixed frame length in bytes to be used in this protocol + driver. This frame length must be a multiple of 4 bytes, set between + 4 bytes and 128 kiB (131072 bytes). + + Set to 4096 bytes by default. + +config HSI_FFL_TTY_HEADER_LENGTH + int "Fixed frame header length" + default "4" + range 0 4 + help + Sets the fixed frame header length in bytes to be used in this + protocol driver. 
This header length must be set to 4 in normal usage + or to 0 in raw protocol debug mode. + + Set to 4 bytes by default. + +config HSI_FFL_ENSURE_LAST_WORD_NULL + bool "Ensuring that all TX FFL frames end with zeros" + default y + help + If you say Y here, the FFL maximal payload will be reduced by 4 bytes + to ensure that the last HSI word is filled with zeros. This feature + is ensuring that both the CADATA and CAFLAG signals are low at the + end of a FFL frame so that no simultaneous DATA and FLAG transitions + can be spotted when the IP is being power-cut. + + If the attached modem is robust with respect to simultanous DATA and + FLAG transition, say N. Otherwise, it is always safer to say Y. + + +config HSI_FFL_TTY_CHANNEL + int "HSI channel" + default "0" + range 0 15 + help + Sets the default single channel index to be used for the FFL protocol. + You may need to change this value depending of the implementation of the + fixed length frame (FFL) protocol. + Set to 0 by default. + If unsure, use the default value. + + +config HSI_FFL_TTY_STATS + bool "Statistics to assess the performance of the protocol" + default n + help + If you say Y here, you will instanciate performance related counters + for measuring the number of sent and received frames as well as their + total actual length in bytes. + + If not fine-tuning the HSI FFL driver, say N. + +endif + diff --git a/drivers/hsi/clients/Makefile b/drivers/hsi/clients/Makefile new file mode 100644 index 0000000..6919101 --- /dev/null +++ b/drivers/hsi/clients/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for HSI clients +# + +CFLAGS_hsi_ffl_tty.o := -DDEBUG + +obj-$(CONFIG_SSI_PROTOCOL) += ssi_protocol.o +obj-$(CONFIG_HSI_CHAR) += hsi_char.o +obj-$(CONFIG_HSI_CMT_SPEECH) += cmt_speech.o +obj-$(CONFIG_HSI_FFL_TTY) += hsi_ffl_tty.o + diff --git a/drivers/hsi/clients/hsi_char.c b/drivers/hsi/clients/hsi_char.c new file mode 100644 index 0000000..7c497a8 --- /dev/null +++ b/drivers/hsi/clients/hsi_char.c @@ -0,0 +1,1053 @@ +/* + * hsi-char.c + * + * HSI character device driver, implements the character device + * interface. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * + * Contact: Andras Domokos + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define HSI_CHAR_CHANNELS 8 +#define HSI_CHAR_DEVS 8 +#define HSI_CHAR_MSGS 4 + +#define HSI_CHST_UNAVAIL 0 /* SBZ! 
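   (SBZ: "Should Be Zero"; presumably the unavailable state is kept as the
    all-zero encoding so that a freshly zeroed channel structure reads as
    unavailable)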
*/ +#define HSI_CHST_AVAIL 1 + +#define HSI_CHST_CLOSED (0 << 4) +#define HSI_CHST_CLOSING (1 << 4) +#define HSI_CHST_OPENING (2 << 4) +#define HSI_CHST_OPENED (3 << 4) + +#define HSI_CHST_READOFF (0 << 8) +#define HSI_CHST_READON (1 << 8) +#define HSI_CHST_READING (2 << 8) + +#define HSI_CHST_WRITEOFF (0 << 12) +#define HSI_CHST_WRITEON (1 << 12) +#define HSI_CHST_WRITING (2 << 12) + +#define HSI_CHST_OC_MASK 0xf0 +#define HSI_CHST_RD_MASK 0xf00 +#define HSI_CHST_WR_MASK 0xf000 + +#define HSI_CHST_OC(c) ((c)->state & HSI_CHST_OC_MASK) +#define HSI_CHST_RD(c) ((c)->state & HSI_CHST_RD_MASK) +#define HSI_CHST_WR(c) ((c)->state & HSI_CHST_WR_MASK) + +#define HSI_CHST_OC_SET(c, v) \ + do { \ + (c)->state &= ~HSI_CHST_OC_MASK; \ + (c)->state |= v; \ + } while (0); + +#define HSI_CHST_RD_SET(c, v) \ + do { \ + (c)->state &= ~HSI_CHST_RD_MASK; \ + (c)->state |= v; \ + } while (0); + +#define HSI_CHST_WR_SET(c, v) \ + do { \ + (c)->state &= ~HSI_CHST_WR_MASK; \ + (c)->state |= v; \ + } while (0); + +#define HSI_CHAR_POLL_RST (-1) +#define HSI_CHAR_POLL_OFF 0 +#define HSI_CHAR_POLL_ON 1 + +#define HSI_CHAR_RX 0 +#define HSI_CHAR_TX 1 + +struct hsi_char_channel { + int ch; + unsigned int state; + int wlrefcnt; + int rxpoll; + struct hsi_client *cl; + struct list_head free_msgs_list; + struct list_head rx_msgs_queue; + struct list_head tx_msgs_queue; + int poll_event; + spinlock_t lock; + struct fasync_struct *async_queue; + wait_queue_head_t rx_wait; + wait_queue_head_t tx_wait; +}; + +struct hsi_char_client_data { + atomic_t refcnt; + int attached; + atomic_t breq; + struct hsi_char_channel channels[HSI_CHAR_DEVS]; +}; + +static unsigned int max_data_size = 0x1000; +module_param(max_data_size, uint, 1); +MODULE_PARM_DESC(max_data_size, "max read/write data size [4,8..65536] (^2)"); + +static int channels_map[HSI_CHAR_DEVS] = {0, -1, -1 , -1, -1, -1, -1, -1}; +module_param_array(channels_map, int, NULL, 0); +MODULE_PARM_DESC(channels_map, "Array of HSI channels ([0...7]) to be probed"); + +static dev_t hsi_char_dev; +static struct hsi_char_client_data hsi_char_cl_data; + +static int hsi_char_rx_poll(struct hsi_char_channel *channel); + +static int __devinit hsi_char_probe(struct device *dev) +{ + struct hsi_char_client_data *cl_data = &hsi_char_cl_data; + struct hsi_char_channel *channel = cl_data->channels; + struct hsi_client *cl = to_hsi_client(dev); + int i; + + for (i = 0; i < HSI_CHAR_DEVS; i++) { + if (channel->state == HSI_CHST_AVAIL) + channel->cl = cl; + channel++; + } + cl->hsi_start_rx = NULL; + cl->hsi_stop_rx = NULL; + atomic_set(&cl_data->refcnt, 0); + atomic_set(&cl_data->breq, 1); + cl_data->attached = 0; + hsi_client_set_drvdata(cl, cl_data); + + return 0; +} + +static int __devexit hsi_char_remove(struct device *dev) +{ + struct hsi_client *cl = to_hsi_client(dev); + struct hsi_char_client_data *cl_data = hsi_client_drvdata(cl); + struct hsi_char_channel *channel = cl_data->channels; + int i; + + for (i = 0; i < HSI_CHAR_DEVS; i++) { + if (!(channel->state & HSI_CHST_AVAIL)) + continue; + if (cl_data->attached) { + hsi_release_port(channel->cl); + cl_data->attached = 0; + } + channel->state = HSI_CHST_UNAVAIL; + channel->cl = NULL; + channel++; + } + + return 0; +} + +static int hsi_char_fasync(int fd, struct file *file, int on) +{ + struct hsi_char_channel *channel = file->private_data; + + if (fasync_helper(fd, file, on, &channel->async_queue) < 0) + return -EIO; + + return 0; +} + +static unsigned int hsi_char_poll(struct file *file, poll_table *wait) +{ + struct 
hsi_char_channel *channel = file->private_data; + unsigned int ret; + + spin_lock_bh(&channel->lock); + poll_wait(file, &channel->rx_wait, wait); + poll_wait(file, &channel->tx_wait, wait); + ret = channel->poll_event; + spin_unlock_bh(&channel->lock); + hsi_char_rx_poll(channel); + + return ret; +} + +static inline void hsi_char_msg_len_set(struct hsi_msg *msg, unsigned int len) +{ + msg->sgt.sgl->length = len; +} + +static inline unsigned int hsi_char_msg_len_get(struct hsi_msg *msg) +{ + return msg->sgt.sgl->length; +} + +static void hsi_char_data_available(struct hsi_msg *msg) +{ + struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl); + struct hsi_char_channel *channel = cl_data->channels + msg->channel; + int ret; + + if (msg->status == HSI_STATUS_ERROR) { + ret = hsi_async_read(channel->cl, msg); + if (ret < 0) { + list_add_tail(&msg->link, &channel->free_msgs_list); + spin_lock_bh(&channel->lock); + list_add_tail(&msg->link, &channel->free_msgs_list); + channel->rxpoll = HSI_CHAR_POLL_OFF; + spin_unlock_bh(&channel->lock); + } + } else { + spin_lock_bh(&channel->lock); + channel->rxpoll = HSI_CHAR_POLL_OFF; + channel->poll_event |= (POLLIN | POLLRDNORM); + spin_unlock_bh(&channel->lock); + spin_lock_bh(&channel->lock); + list_add_tail(&msg->link, &channel->free_msgs_list); + spin_unlock_bh(&channel->lock); + wake_up_interruptible(&channel->rx_wait); + } +} + +static void hsi_char_rx_poll_destructor(struct hsi_msg *msg) +{ + struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl); + struct hsi_char_channel *channel = cl_data->channels + msg->channel; + + spin_lock_bh(&channel->lock); + list_add_tail(&msg->link, &channel->free_msgs_list); + channel->rxpoll = HSI_CHAR_POLL_RST; + spin_unlock_bh(&channel->lock); +} + +static int hsi_char_rx_poll(struct hsi_char_channel *channel) +{ + struct hsi_msg *msg; + int ret = 0; + + spin_lock_bh(&channel->lock); + if (list_empty(&channel->free_msgs_list)) { + ret = -ENOMEM; + goto out; + } + if (channel->rxpoll == HSI_CHAR_POLL_ON) + goto out; + msg = list_first_entry(&channel->free_msgs_list, struct hsi_msg, link); + list_del(&msg->link); + channel->rxpoll = HSI_CHAR_POLL_ON; + spin_unlock_bh(&channel->lock); + hsi_char_msg_len_set(msg, 0); + msg->complete = hsi_char_data_available; + msg->destructor = hsi_char_rx_poll_destructor; + /* don't touch msg->context! 
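    (msg->context holds the kmalloc'ed buffer address set up in
     hsi_char_msg_alloc(); hsi_char_read() later copies received data to user
     space from that pointer, so it must be left untouched when the message is
     re-queued for polling here)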
*/ + ret = hsi_async_read(channel->cl, msg); + spin_lock_bh(&channel->lock); + if (ret < 0) { + list_add_tail(&msg->link, &channel->free_msgs_list); + channel->rxpoll = HSI_CHAR_POLL_OFF; + goto out; + } +out: + spin_unlock_bh(&channel->lock); + + return ret; +} + +static void hsi_char_rx_poll_rst(struct hsi_client *cl) +{ + struct hsi_char_client_data *cl_data = hsi_client_drvdata(cl); + struct hsi_char_channel *channel = cl_data->channels; + int i; + + for (i = 0; i < HSI_CHAR_DEVS; i++) { + if ((HSI_CHST_OC(channel) == HSI_CHST_OPENED) && + (channel->rxpoll == HSI_CHAR_POLL_RST)) + hsi_char_rx_poll(channel); + channel++; + } +} + +static void hsi_char_rx_completed(struct hsi_msg *msg) +{ + struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl); + struct hsi_char_channel *channel = cl_data->channels + msg->channel; + + spin_lock_bh(&channel->lock); + list_add_tail(&msg->link, &channel->rx_msgs_queue); + spin_unlock_bh(&channel->lock); + wake_up_interruptible(&channel->rx_wait); +} + +static void hsi_char_rx_msg_destructor(struct hsi_msg *msg) +{ + struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl); + struct hsi_char_channel *channel = cl_data->channels + msg->channel; + + spin_lock_bh(&channel->lock); + list_add_tail(&msg->link, &channel->free_msgs_list); + HSI_CHST_RD_SET(channel, HSI_CHST_READOFF); + spin_unlock_bh(&channel->lock); +} + +static void hsi_char_rx_cancel(struct hsi_char_channel *channel) +{ + hsi_flush(channel->cl); + hsi_char_rx_poll_rst(channel->cl); +} + +static void hsi_char_tx_completed(struct hsi_msg *msg) +{ + struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl); + struct hsi_char_channel *channel = cl_data->channels + msg->channel; + + spin_lock_bh(&channel->lock); + list_add_tail(&msg->link, &channel->tx_msgs_queue); + channel->poll_event |= (POLLOUT | POLLWRNORM); + spin_unlock_bh(&channel->lock); + wake_up_interruptible(&channel->tx_wait); +} + +static void hsi_char_tx_msg_destructor(struct hsi_msg *msg) +{ + struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl); + struct hsi_char_channel *channel = cl_data->channels + msg->channel; + + spin_lock_bh(&channel->lock); + list_add_tail(&msg->link, &channel->free_msgs_list); + HSI_CHST_WR_SET(channel, HSI_CHST_WRITEOFF); + spin_unlock_bh(&channel->lock); +} + +static void hsi_char_tx_cancel(struct hsi_char_channel *channel) +{ + hsi_flush(channel->cl); + hsi_char_rx_poll_rst(channel->cl); +} + +static ssize_t hsi_char_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos) +{ + struct hsi_char_channel *channel = file->private_data; + struct hsi_msg *msg = NULL; + ssize_t ret; + + if (len == 0) { + channel->poll_event &= ~POLLPRI; + return 0; + } + channel->poll_event &= ~POLLPRI; + + if (!IS_ALIGNED(len, sizeof(u32))) + return -EINVAL; + + if (len > max_data_size) + len = max_data_size; + + spin_lock_bh(&channel->lock); + if (HSI_CHST_RD(channel) != HSI_CHST_READOFF) { + ret = -EBUSY; + goto out; + } + if (list_empty(&channel->free_msgs_list)) { + ret = -ENOMEM; + goto out; + } + msg = list_first_entry(&channel->free_msgs_list, struct hsi_msg, link); + list_del(&msg->link); + spin_unlock_bh(&channel->lock); + hsi_char_msg_len_set(msg, len); + msg->complete = hsi_char_rx_completed; + msg->destructor = hsi_char_rx_msg_destructor; + ret = hsi_async_read(channel->cl, msg); + spin_lock_bh(&channel->lock); + if (ret < 0) + goto out; + HSI_CHST_RD_SET(channel, HSI_CHST_READING); + msg = NULL; + + for ( ; ; ) { + DEFINE_WAIT(wait); + + if 
(!list_empty(&channel->rx_msgs_queue)) { + msg = list_first_entry(&channel->rx_msgs_queue, + struct hsi_msg, link); + HSI_CHST_RD_SET(channel, HSI_CHST_READOFF); + channel->poll_event &= ~(POLLIN | POLLRDNORM); + list_del(&msg->link); + spin_unlock_bh(&channel->lock); + if (msg->status == HSI_STATUS_ERROR) { + ret = -EIO; + } else { + ret = copy_to_user((void __user *)buf, + msg->context, + hsi_char_msg_len_get(msg)); + if (ret) + ret = -EFAULT; + else + ret = hsi_char_msg_len_get(msg); + } + spin_lock_bh(&channel->lock); + break; + } else if (signal_pending(current)) { + spin_unlock_bh(&channel->lock); + hsi_char_rx_cancel(channel); + spin_lock_bh(&channel->lock); + HSI_CHST_RD_SET(channel, HSI_CHST_READOFF); + ret = -EINTR; + break; + } + + prepare_to_wait(&channel->rx_wait, &wait, TASK_INTERRUPTIBLE); + spin_unlock_bh(&channel->lock); + + schedule(); + + spin_lock_bh(&channel->lock); + finish_wait(&channel->rx_wait, &wait); + } +out: + if (msg) + list_add_tail(&msg->link, &channel->free_msgs_list); + spin_unlock_bh(&channel->lock); + + return ret; +} + +static ssize_t hsi_char_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + struct hsi_char_channel *channel = file->private_data; + struct hsi_msg *msg = NULL; + ssize_t ret; + + if ((len == 0) || !IS_ALIGNED(len, sizeof(u32))) + return -EINVAL; + + if (len > max_data_size) + len = max_data_size; + + spin_lock_bh(&channel->lock); + if (HSI_CHST_WR(channel) != HSI_CHST_WRITEOFF) { + ret = -EBUSY; + goto out; + } + if (list_empty(&channel->free_msgs_list)) { + ret = -ENOMEM; + goto out; + } + msg = list_first_entry(&channel->free_msgs_list, struct hsi_msg, link); + list_del(&msg->link); + HSI_CHST_WR_SET(channel, HSI_CHST_WRITEON); + spin_unlock_bh(&channel->lock); + + if (copy_from_user(msg->context, (void __user *)buf, len)) { + spin_lock_bh(&channel->lock); + HSI_CHST_WR_SET(channel, HSI_CHST_WRITEOFF); + ret = -EFAULT; + goto out; + } + + hsi_char_msg_len_set(msg, len); + msg->complete = hsi_char_tx_completed; + msg->destructor = hsi_char_tx_msg_destructor; + channel->poll_event &= ~(POLLOUT | POLLWRNORM); + ret = hsi_async_write(channel->cl, msg); + spin_lock_bh(&channel->lock); + if (ret < 0) { + channel->poll_event |= (POLLOUT | POLLWRNORM); + HSI_CHST_WR_SET(channel, HSI_CHST_WRITEOFF); + goto out; + } + HSI_CHST_WR_SET(channel, HSI_CHST_WRITING); + msg = NULL; + + for ( ; ; ) { + DEFINE_WAIT(wait); + + if (!list_empty(&channel->tx_msgs_queue)) { + msg = list_first_entry(&channel->tx_msgs_queue, + struct hsi_msg, link); + list_del(&msg->link); + HSI_CHST_WR_SET(channel, HSI_CHST_WRITEOFF); + if (msg->status == HSI_STATUS_ERROR) + ret = -EIO; + else + ret = hsi_char_msg_len_get(msg); + break; + } else if (signal_pending(current)) { + spin_unlock_bh(&channel->lock); + hsi_char_tx_cancel(channel); + spin_lock_bh(&channel->lock); + HSI_CHST_WR_SET(channel, HSI_CHST_WRITEOFF); + ret = -EINTR; + break; + } + prepare_to_wait(&channel->tx_wait, &wait, TASK_INTERRUPTIBLE); + spin_unlock_bh(&channel->lock); + + schedule(); + + spin_lock_bh(&channel->lock); + finish_wait(&channel->tx_wait, &wait); + } +out: + if (msg) + list_add_tail(&msg->link, &channel->free_msgs_list); + + spin_unlock_bh(&channel->lock); + + return ret; +} + +static void hsi_char_bcast_break(struct hsi_client *cl) +{ + struct hsi_char_client_data *cl_data = hsi_client_drvdata(cl); + struct hsi_char_channel *channel = cl_data->channels; + int i; + + for (i = 0; i < HSI_CHAR_DEVS; i++) { + if (HSI_CHST_OC(channel) != HSI_CHST_OPENED) + 
continue; + channel->poll_event |= POLLPRI; + wake_up_interruptible(&channel->rx_wait); + wake_up_interruptible(&channel->tx_wait); + channel++; + } +} + +static void hsi_char_break_received(struct hsi_msg *msg) +{ + struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl); + int ret; + + hsi_char_bcast_break(msg->cl); + ret = hsi_async_read(msg->cl, msg); + if (ret < 0) { + hsi_free_msg(msg); + atomic_inc(&cl_data->breq); + } +} + +static void hsi_char_break_req_destructor(struct hsi_msg *msg) +{ + struct hsi_char_client_data *cl_data = hsi_client_drvdata(msg->cl); + + hsi_free_msg(msg); + atomic_inc(&cl_data->breq); +} + +static int hsi_char_break_request(struct hsi_client *cl) +{ + struct hsi_char_client_data *cl_data = hsi_client_drvdata(cl); + struct hsi_msg *msg; + int ret = 0; + + if (!atomic_dec_and_test(&cl_data->breq)) { + atomic_inc(&cl_data->breq); + return -EBUSY; + } + msg = hsi_alloc_msg(0, GFP_KERNEL); + if (!msg) + return -ENOMEM; + msg->break_frame = 1; + msg->complete = hsi_char_break_received; + msg->destructor = hsi_char_break_req_destructor; + ret = hsi_async_read(cl, msg); + if (ret < 0) + hsi_free_msg(msg); + + return ret; +} + +static int hsi_char_break_send(struct hsi_client *cl) +{ + struct hsi_msg *msg; + int ret = 0; + + msg = hsi_alloc_msg(0, GFP_ATOMIC); + if (!msg) + return -ENOMEM; + msg->break_frame = 1; + msg->complete = hsi_free_msg; + msg->destructor = hsi_free_msg; + ret = hsi_async_write(cl, msg); + if (ret < 0) + hsi_free_msg(msg); + + return ret; +} + +static void hsi_char_reset(struct hsi_client *cl) +{ + hsi_flush(cl); + hsi_char_rx_poll_rst(cl); +} + +static inline int ssi_check_common_cfg(struct hsi_config *cfg) +{ + if ((cfg->mode != HSI_MODE_STREAM) && (cfg->mode != HSI_MODE_FRAME)) + return -EINVAL; + if ((cfg->channels == 0) || (cfg->channels > HSI_CHAR_CHANNELS)) + return -EINVAL; + if (cfg->channels & (cfg->channels - 1)) + return -EINVAL; + if ((cfg->flow != HSI_FLOW_SYNC) && (cfg->flow != HSI_FLOW_PIPE)) + return -EINVAL; + + return 0; +} + +static inline int ssi_check_rx_cfg(struct hsi_config *cfg) +{ + return ssi_check_common_cfg(cfg); +} + +static inline int ssi_check_tx_cfg(struct hsi_config *cfg) +{ + int ret = ssi_check_common_cfg(cfg); + + if (ret < 0) + return ret; + if ((cfg->arb_mode != HSI_ARB_RR) && (cfg->arb_mode != HSI_ARB_PRIO)) + return -EINVAL; + + return 0; +} + +static inline int hsi_char_cfg_set(struct hsi_client *cl, + struct hsi_config *cfg, int dir) +{ + struct hsi_config *rxtx_cfg; + int ret = 0; + + if (dir == HSI_CHAR_RX) { + rxtx_cfg = &cl->rx_cfg; + ret = ssi_check_rx_cfg(cfg); + } else { + rxtx_cfg = &cl->tx_cfg; + ret = ssi_check_tx_cfg(cfg); + } + if (ret < 0) + return ret; + + *rxtx_cfg = *cfg; + ret = hsi_setup(cl); + if (ret < 0) + return ret; + + if ((dir == HSI_CHAR_RX) && (cfg->mode == HSI_MODE_FRAME)) + hsi_char_break_request(cl); + + return ret; +} + +static inline void hsi_char_cfg_get(struct hsi_client *cl, + struct hsi_config *cfg, int dir) +{ + struct hsi_config *rxtx_cfg; + + if (dir == HSI_CHAR_RX) + rxtx_cfg = &cl->rx_cfg; + else + rxtx_cfg = &cl->tx_cfg; + *cfg = *rxtx_cfg; +} + +static inline void hsi_char_rx2icfg(struct hsi_config *cfg, + struct hsc_rx_config *rx_cfg) +{ + cfg->mode = rx_cfg->mode; + cfg->flow = rx_cfg->flow; + cfg->channels = rx_cfg->channels; + cfg->speed = 0; + cfg->arb_mode = 0; +} + +static inline void hsi_char_tx2icfg(struct hsi_config *cfg, + struct hsc_tx_config *tx_cfg) +{ + cfg->mode = tx_cfg->mode; + cfg->flow = tx_cfg->flow; + cfg->channels = 
tx_cfg->channels; + cfg->speed = tx_cfg->speed; + cfg->arb_mode = tx_cfg->arb_mode; +} + +static inline void hsi_char_rx2ecfg(struct hsc_rx_config *rx_cfg, + struct hsi_config *cfg) +{ + rx_cfg->mode = cfg->mode; + rx_cfg->flow = cfg->flow; + rx_cfg->channels = cfg->channels; +} + +static inline void hsi_char_tx2ecfg(struct hsc_tx_config *tx_cfg, + struct hsi_config *cfg) +{ + tx_cfg->mode = cfg->mode; + tx_cfg->flow = cfg->flow; + tx_cfg->channels = cfg->channels; + tx_cfg->speed = cfg->speed; + tx_cfg->arb_mode = cfg->arb_mode; +} + +static int hsi_char_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct hsi_char_channel *channel = file->private_data; + unsigned int state; + struct hsi_config cfg; + struct hsc_rx_config rx_cfg; + struct hsc_tx_config tx_cfg; + int ret = 0; + + if (HSI_CHST_OC(channel) != HSI_CHST_OPENED) + return -EINVAL; + + switch (cmd) { + case HSC_RESET: + hsi_char_reset(channel->cl); + break; + case HSC_SET_PM: + if (copy_from_user(&state, (void __user *)arg, sizeof(state))) + return -EFAULT; + if (state == HSC_PM_DISABLE) { + ret = hsi_start_tx(channel->cl); + if (!ret) + channel->wlrefcnt++; + } else if ((state == HSC_PM_ENABLE) + && (channel->wlrefcnt > 0)) { + ret = hsi_stop_tx(channel->cl); + if (!ret) + channel->wlrefcnt--; + } else { + ret = -EINVAL; + } + break; + case HSC_SEND_BREAK: + return hsi_char_break_send(channel->cl); + case HSC_SET_RX: + if (copy_from_user(&rx_cfg, (void __user *)arg, sizeof(rx_cfg))) + return -EFAULT; + hsi_char_rx2icfg(&cfg, &rx_cfg); + return hsi_char_cfg_set(channel->cl, &cfg, HSI_CHAR_RX); + case HSC_GET_RX: + hsi_char_cfg_get(channel->cl, &cfg, HSI_CHAR_RX); + hsi_char_rx2ecfg(&rx_cfg, &cfg); + if (copy_to_user((void __user *)arg, &rx_cfg, sizeof(rx_cfg))) + return -EFAULT; + break; + case HSC_SET_TX: + if (copy_from_user(&tx_cfg, (void __user *)arg, sizeof(tx_cfg))) + return -EFAULT; + hsi_char_tx2icfg(&cfg, &tx_cfg); + return hsi_char_cfg_set(channel->cl, &cfg, HSI_CHAR_TX); + case HSC_GET_TX: + hsi_char_cfg_get(channel->cl, &cfg, HSI_CHAR_TX); + hsi_char_tx2ecfg(&tx_cfg, &cfg); + if (copy_to_user((void __user *)arg, &tx_cfg, sizeof(tx_cfg))) + return -EFAULT; + default: + return -ENOIOCTLCMD; + } + + return ret; +} + +static inline struct hsi_msg *hsi_char_msg_alloc(unsigned int alloc_size) +{ + struct hsi_msg *msg; + void *buf; + + msg = hsi_alloc_msg(1, GFP_KERNEL); + if (!msg) + goto out; + buf = kmalloc(alloc_size, GFP_KERNEL); + if (!buf) { + hsi_free_msg(msg); + goto out; + } + sg_init_one(msg->sgt.sgl, buf, alloc_size); + msg->context = buf; + return msg; +out: + return NULL; +} + +static inline void hsi_char_msg_free(struct hsi_msg *msg) +{ + msg->complete = NULL; + msg->destructor = NULL; + kfree(sg_virt(msg->sgt.sgl)); + hsi_free_msg(msg); +} + +static inline void hsi_char_msgs_free(struct hsi_char_channel *channel) +{ + struct hsi_msg *msg, *tmp; + + list_for_each_entry_safe(msg, tmp, &channel->free_msgs_list, link) { + list_del(&msg->link); + hsi_char_msg_free(msg); + } + list_for_each_entry_safe(msg, tmp, &channel->rx_msgs_queue, link) { + list_del(&msg->link); + hsi_char_msg_free(msg); + } + list_for_each_entry_safe(msg, tmp, &channel->tx_msgs_queue, link) { + list_del(&msg->link); + hsi_char_msg_free(msg); + } +} + +static inline int hsi_char_msgs_alloc(struct hsi_char_channel *channel) +{ + struct hsi_msg *msg; + int i; + + for (i = 0; i < HSI_CHAR_MSGS; i++) { + msg = hsi_char_msg_alloc(max_data_size); + if (!msg) + goto out; + msg->channel = channel->ch; + 
list_add_tail(&msg->link, &channel->free_msgs_list); + } + return 0; +out: + hsi_char_msgs_free(channel); + + return -ENOMEM; +} + +static int hsi_char_open(struct inode *inode, struct file *file) +{ + struct hsi_char_client_data *cl_data = &hsi_char_cl_data; + struct hsi_char_channel *channel = cl_data->channels + iminor(inode); + int ret = 0, refcnt; + + if (channel->state == HSI_CHST_UNAVAIL) + return -ENODEV; + + spin_lock_bh(&channel->lock); + if (HSI_CHST_OC(channel) != HSI_CHST_CLOSED) { + ret = -EBUSY; + goto out; + } + HSI_CHST_OC_SET(channel, HSI_CHST_OPENING); + spin_unlock_bh(&channel->lock); + + refcnt = atomic_inc_return(&cl_data->refcnt); + if (refcnt == 1) { + if (cl_data->attached) { + atomic_dec(&cl_data->refcnt); + spin_lock_bh(&channel->lock); + HSI_CHST_OC_SET(channel, HSI_CHST_CLOSED); + ret = -EBUSY; + goto out; + } + ret = hsi_claim_port(channel->cl, 0); + if (ret < 0) { + atomic_dec(&cl_data->refcnt); + spin_lock_bh(&channel->lock); + HSI_CHST_OC_SET(channel, HSI_CHST_CLOSED); + goto out; + } + hsi_setup(channel->cl); + } else if (!cl_data->attached) { + atomic_dec(&cl_data->refcnt); + spin_lock_bh(&channel->lock); + HSI_CHST_OC_SET(channel, HSI_CHST_CLOSED); + ret = -ENODEV; + goto out; + } + ret = hsi_char_msgs_alloc(channel); + + if (ret < 0) { + refcnt = atomic_dec_return(&cl_data->refcnt); + if (!refcnt) + hsi_release_port(channel->cl); + spin_lock_bh(&channel->lock); + HSI_CHST_OC_SET(channel, HSI_CHST_CLOSED); + goto out; + } + if (refcnt == 1) + cl_data->attached = 1; + channel->wlrefcnt = 0; + channel->rxpoll = HSI_CHAR_POLL_OFF; + channel->poll_event = (POLLOUT | POLLWRNORM); + file->private_data = channel; + spin_lock_bh(&channel->lock); + HSI_CHST_OC_SET(channel, HSI_CHST_OPENED); +out: + spin_unlock_bh(&channel->lock); + + return ret; +} + +static int hsi_char_release(struct inode *inode, struct file *file) +{ + struct hsi_char_channel *channel = file->private_data; + struct hsi_char_client_data *cl_data = hsi_client_drvdata(channel->cl); + int ret = 0, refcnt; + + spin_lock_bh(&channel->lock); + if (HSI_CHST_OC(channel) != HSI_CHST_OPENED) + goto out; + HSI_CHST_OC_SET(channel, HSI_CHST_CLOSING); + spin_unlock_bh(&channel->lock); + + hsi_flush(channel->cl); + while (channel->wlrefcnt > 0) { + hsi_stop_tx(channel->cl); + channel->wlrefcnt--; + } + + refcnt = atomic_dec_return(&cl_data->refcnt); + if (!refcnt) { + hsi_release_port(channel->cl); + cl_data->attached = 0; + } + + hsi_char_msgs_free(channel); + + spin_lock_bh(&channel->lock); + HSI_CHST_OC_SET(channel, HSI_CHST_CLOSED); + HSI_CHST_RD_SET(channel, HSI_CHST_READOFF); + HSI_CHST_WR_SET(channel, HSI_CHST_WRITEOFF); +out: + spin_unlock_bh(&channel->lock); + + return ret; +} + +static const struct file_operations hsi_char_fops = { + .owner = THIS_MODULE, + .read = hsi_char_read, + .write = hsi_char_write, + .poll = hsi_char_poll, + .ioctl = hsi_char_ioctl, + .open = hsi_char_open, + .release = hsi_char_release, + .fasync = hsi_char_fasync, +}; + +static struct hsi_client_driver hsi_char_driver = { + .driver = { + .name = "hsi_char", + .owner = THIS_MODULE, + .probe = hsi_char_probe, + .remove = hsi_char_remove, + }, +}; + +static inline void hsi_char_channel_init(struct hsi_char_channel *channel) +{ + channel->state = HSI_CHST_AVAIL; + INIT_LIST_HEAD(&channel->free_msgs_list); + init_waitqueue_head(&channel->rx_wait); + init_waitqueue_head(&channel->tx_wait); + spin_lock_init(&channel->lock); + INIT_LIST_HEAD(&channel->rx_msgs_queue); + INIT_LIST_HEAD(&channel->tx_msgs_queue); +} + +static struct 
cdev hsi_char_cdev; + +static int __init hsi_char_init(void) +{ + char devname[] = "hsi_char"; + struct hsi_char_client_data *cl_data = &hsi_char_cl_data; + struct hsi_char_channel *channel = cl_data->channels; + unsigned long ch_mask = 0; + int ret, i; + + if ((max_data_size < 4) || (max_data_size > 0x10000) || + (max_data_size & (max_data_size - 1))) { + pr_err("Invalid max read/write data size"); + return -EINVAL; + } + + for (i = 0; i < HSI_CHAR_DEVS && channels_map[i] >= 0; i++) { + if (channels_map[i] >= HSI_CHAR_DEVS) { + pr_err("Invalid HSI/SSI channel specified"); + return -EINVAL; + } + set_bit(channels_map[i], &ch_mask); + } + + if (i == 0) { + pr_err("No HSI channels available"); + return -EINVAL; + } + + memset(cl_data->channels, 0, sizeof(cl_data->channels)); + for (i = 0; i < HSI_CHAR_DEVS; i++) { + channel->ch = i; + channel->state = HSI_CHST_UNAVAIL; + if (test_bit(i, &ch_mask)) + hsi_char_channel_init(channel); + channel++; + } + + ret = hsi_register_client_driver(&hsi_char_driver); + if (ret) { + pr_err("Error while registering HSI/SSI driver %d", ret); + return ret; + } + + ret = alloc_chrdev_region(&hsi_char_dev, 0, HSI_CHAR_DEVS, devname); + if (ret < 0) { + hsi_unregister_client_driver(&hsi_char_driver); + return ret; + } + + cdev_init(&hsi_char_cdev, &hsi_char_fops); + cdev_add(&hsi_char_cdev, hsi_char_dev, HSI_CHAR_DEVS); + pr_info("HSI/SSI char device loaded\n"); + + return 0; +} +module_init(hsi_char_init); + +static void __exit hsi_char_exit(void) +{ + cdev_del(&hsi_char_cdev); + unregister_chrdev_region(hsi_char_dev, HSI_CHAR_DEVS); + hsi_unregister_client_driver(&hsi_char_driver); + pr_info("HSI char device removed\n"); +} +module_exit(hsi_char_exit); + +MODULE_AUTHOR("Andras Domokos "); +MODULE_ALIAS("hsi:hsi_char"); +MODULE_DESCRIPTION("HSI character device"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hsi/clients/hsi_ffl_tty.c b/drivers/hsi/clients/hsi_ffl_tty.c new file mode 100644 index 0000000..be37a65 --- /dev/null +++ b/drivers/hsi/clients/hsi_ffl_tty.c @@ -0,0 +1,3737 @@ +/* + * hsi_ffl_tty.c + * + * Fixed frame length protocol over HSI, implements a TTY interface for + * this protocol. + * + * Copyright (C) 2010-2011 Intel Corporation. All rights reserved. + * + * Contact: Olivier Stoltz Douchet + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +/* Set the following to use the IPC error recovery mechanism */ +#undef USE_IPC_ERROR_RECOVERY + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRVNAME "hsi-ffl" +#define TTYNAME "tty"CONFIG_HSI_FFL_TTY_NAME + +/* Maximal number of TTY lines supported by this driver */ +#define FFL_TTY_MAX_LINES 8 + +/* Maximal number of frame allocation failure prior firing an error message */ +#define FFL_FRAME_ALLOC_RETRY_MAX_CNT 10 + +/* Defaut TX delay expressed in microseconds */ +#define FFL_TX_DELAY 10000 + +/* Defaut RX delay expressed in microseconds */ +#define FFL_RX_DELAY 100000 + +/* Defaut TX timeout delay expressed in microseconds */ +#define TTY_HANGUP_DELAY 2000000 + +/* Delays for powering up/resetting the modem, ms */ +#define PO_INTERLINE_DELAY 1 +#define PO_POST_DELAY 200 + +/* ACWAKE minimal pulse udelay in us (set to 0 if none is necessary) */ +#define ACWAKE_MINIMAL_PULSE_UDELAY 800 + +/* Initial minimal buffering size (in bytes) */ +#define FFL_MIN_TX_BUFFERING 65536 +#define FFL_MIN_RX_BUFFERING 65536 + +/* Error recovery related timings */ +#define RECOVERY_TO_NORMAL_DELAY_JIFFIES (usecs_to_jiffies(100000)) +#define RECOVERY_TX_DRAIN_TIMEOUT_JIFFIES (usecs_to_jiffies(100000)) +#define RECOVERY_BREAK_RESPONSE_TIMEOUT_JIFFIES (usecs_to_jiffies(100000)) + +/* Round-up the frame and header length to a multiple of 4-bytes to align + * on the HSI 4-byte granularity*/ +#define FFL_FRAME_LENGTH (((CONFIG_HSI_FFL_TTY_FRAME_LENGTH+3)/4)*4) +#define FFL_HEADER_LENGTH (((CONFIG_HSI_FFL_TTY_HEADER_LENGTH+3)/4)*4) +#define FFL_DATA_LENGTH (FFL_FRAME_LENGTH-FFL_HEADER_LENGTH) +#define FFL_LENGTH_MASK (roundup_pow_of_two(FFL_DATA_LENGTH)-1) + +/* Find the best allocation method */ +#if ((FFL_FRAME_LENGTH >= PAGE_SIZE) && \ + (((FFL_FRAME_LENGTH) & (FFL_FRAME_LENGTH-1)) == 0)) +#define FFL_FRAME_ALLOC_PAGES +#define FFL_FRAME_ALLOC_ORDER (ilog2(FFL_FRAME_LENGTH/PAGE_SIZE)) +#endif + +/* Compute the TX and RX, FIFO depth from the buffering requirements */ +/* For optimal performances the FFL_TX_CTRL_FIFO size shall be set to 2 at + * least to allow back-to-back transfers. 
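 *
 * As a worked example (assuming the default configuration of this patch,
 * CONFIG_HSI_FFL_TTY_FRAME_LENGTH=4096 and CONFIG_HSI_FFL_TTY_HEADER_LENGTH=4,
 * on 4 KiB pages), the sizing macros work out to:
 *   FFL_FRAME_LENGTH  = 4096 (already 4-byte aligned), FFL_HEADER_LENGTH = 4,
 *   FFL_DATA_LENGTH   = 4092 bytes of payload per frame;
 *   FFL_TX_ALL_FIFO   = ceil(65536 / 4092) = 17 TX frames, split into
 *                       FFL_TX_CTRL_FIFO = 2 and FFL_TX_WAIT_FIFO = 15;
 *   FFL_RX_ALL_FIFO   = 17 RX frames, split into FFL_RX_WAIT_FIFO = 8 and
 *                       FFL_RX_CTRL_FIFO = 9.
 * Each frame then fits in exactly one page (FFL_FRAME_ALLOC_ORDER = 0), and
 * with CONFIG_HSI_FFL_ENSURE_LAST_WORD_NULL=y the usable payload per frame is
 * further reduced by 4 bytes (see ffl_frame_room() below).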
*/ +#define FFL_TX_ALL_FIFO \ + ((FFL_MIN_TX_BUFFERING+FFL_DATA_LENGTH-1)/FFL_DATA_LENGTH) +#define FFL_TX_CTRL_FIFO 2 +#define FFL_TX_WAIT_FIFO max(FFL_TX_ALL_FIFO-FFL_TX_CTRL_FIFO, 1) +#define FFL_RX_ALL_FIFO \ + ((FFL_MIN_RX_BUFFERING+FFL_DATA_LENGTH-1)/FFL_DATA_LENGTH) +#define FFL_RX_WAIT_FIFO max(FFL_RX_ALL_FIFO/2, 1) +#define FFL_RX_CTRL_FIFO max(FFL_RX_ALL_FIFO-FFL_RX_WAIT_FIFO, 1) + +/* Tag for detecting buggy frame sizes (must be greater than the maximum frame + * size */ +#define FFL_BUGGY_FRAME_SIZE 0xFFFFFFFFUL + +/* RX and TX state machine definitions */ +enum { + IDLE, + ACTIVE, + TTY, +}; + +#define FFL_GLOBAL_STATE_SZ 2 +#define FFL_GLOBAL_STATE_MASK ((1<main_ctx) +#define xfer_ctx_is_tx_ctx(ctx) ((ctx) == &main_ctx(ctx)->tx) +#define xfer_ctx_is_rx_ctx(ctx) ((ctx) == &main_ctx(ctx)->rx) + +/** + * struct ffl_hangup_ctx - hangup context for the fixed frame length protocol + * @cause: the current root cause of the hangup + * @last_cause: the previous root cause of the hangup + * @timer: the timer for the TX timeout + * @work: the context of for the TX timeout work queue + */ +struct ffl_hangup_ctx { + int cause; + int last_cause; + struct timer_list timer; + struct work_struct work; +}; + +/** + * struct ffl_reset_ctx - reset context for the fixed frame length protocol + * @cd_irq: the modem core dump interrupt line + * @irq: the modem reset interrupt line + * @ongoing: a flag stating that a reset is ongoing + */ +struct ffl_reset_ctx { + int cd_irq; + int irq; + int ongoing; +}; + +#ifdef USE_IPC_ERROR_RECOVERY +/** + * struct ffl_recovery_ctx - error recovery context for the FFL protocol + * @tx_drained_event: TX drain complete event + * @do_tx_drain: the context of the TX drain work queue + * @tx_break: the HSI message used for TX break emission + * @rx_drain_timer: the timer for scheduling a RX drain work + * @do_rx_drain: the context of the RX drain work queue + * @rx_break: the HSI message used for RX break reception + */ +struct ffl_recovery_ctx { + wait_queue_head_t tx_drained_event; + struct work_struct do_tx_drain; + struct hsi_msg tx_break; + struct timer_list rx_drain_timer; + struct work_struct do_rx_drain; + struct hsi_msg rx_break; +}; +#endif + +/** + * struct ffl_ctx - fixed frame length protocol on HSI context + * @client: reference to this HSI client + * @controller: reference to the controller bound to this context + * @index: the TTY index of this context + * @tty_prt: TTY port structure + * @tx_full_pipe_clean_event: event signalling that the full TX pipeline is + * clean or about to be clean + * @tx_write_pipe_clean_event: event signalling that the write part of the TX + * pipeline is clean (no more write can occur) + * @tx: current TX context + * @rx: current RX context + * @reset: modem reset context + * @hangup: hangup context + * @recovery: error recovery context + */ +struct ffl_ctx { + struct hsi_client *client; + struct device *controller; + int index; + struct tty_port tty_prt; + wait_queue_head_t tx_full_pipe_clean_event; + wait_queue_head_t tx_write_pipe_clean_event; + struct ffl_xfer_ctx tx; + struct ffl_xfer_ctx rx; + struct ffl_hangup_ctx hangup; + struct ffl_reset_ctx reset; +#ifdef USE_IPC_ERROR_RECOVERY + struct ffl_recovery_ctx recovery; +#endif +}; + +/** + * struct ffl_driver - fixed frame length protocol on HSI driver data + * @tty_drv: TTY driver reference + * @ctx: array of FFL contex references + */ +struct ffl_driver { + struct tty_driver *tty_drv; + struct ffl_ctx *ctx[FFL_TTY_MAX_LINES]; +}; + +/* + * Static protocol driver 
global variables + */ + +/* Protocol driver instance */ +static struct ffl_driver ffl_drv; + +/* Workqueue for submitting frame-recycling background tasks */ +static struct workqueue_struct *ffl_recycle_wq; + +/* Workqueue for submitting tx timeout hangup background tasks */ +static struct workqueue_struct *ffl_hangup_wq; + +#ifdef USE_IPC_ERROR_RECOVERY +/* Workqueue for submitting TX draining upon recovery background tasks */ +static struct workqueue_struct *ffl_tx_drain_wq; + +/* Workqueue for submitting RX draining upon recovery background tasks */ +static struct workqueue_struct *ffl_rx_drain_wq; +#endif + +/* + * Modem power / reset managers + */ + +/** + * modem_power - activity required to bring up modem + * @hsi: HSI controller + * + * Toggle gpios required to bring up modem power and start modem. + */ +static void modem_power(struct ffl_ctx *ctx) +{ + struct hsi_client *cl = ctx->client; + struct hsi_mid_platform_data *pd = cl->device.platform_data; + + ctx->reset.ongoing = 1; + + gpio_set_value(pd->gpio_mdm_pwr_on, 1); + mdelay(PO_INTERLINE_DELAY); + gpio_set_value(pd->gpio_mdm_pwr_on, 0); + msleep(PO_POST_DELAY); +} + +/** + * modem_reset - activity required to reset modem + * @hsi: HSI controller + * + * Toggle gpios required to reset modem. + */ +static void modem_reset(struct ffl_ctx *ctx) +{ + struct hsi_client *cl = ctx->client; + struct hsi_mid_platform_data *pd = cl->device.platform_data; + + ctx->reset.ongoing = 1; + + gpio_set_value(pd->gpio_mdm_rst_bbn, 0); + mdelay(PO_INTERLINE_DELAY); + gpio_set_value(pd->gpio_mdm_rst_bbn, 1); + msleep(PO_POST_DELAY); +} + +/* + * State handling routines + */ + +/** + * _ffl_ctx_get_state - get the global state of a state machine + * @ctx: a reference to the state machine context + * + * Returns the current state of the requested TX or RX context. + */ +static inline __must_check +unsigned int _ffl_ctx_get_state(struct ffl_xfer_ctx *ctx) +{ + return ctx->state & FFL_GLOBAL_STATE_MASK; +} + +/** + * ffl_ctx_get_state - get the global state of a state machine + * @ctx: a reference to the state machine context + * + * Returns the current state of the requested TX or RX context. + * + * This version adds the spinlock guarding + */ +static inline __must_check +unsigned int ffl_ctx_get_state(struct ffl_xfer_ctx *ctx) +{ + unsigned int state; + unsigned long flags; + + spin_lock_irqsave(&ctx->lock, flags); + state = _ffl_ctx_get_state(ctx); + spin_unlock_irqrestore(&ctx->lock, flags); + + return state; +} + +/** + * _ffl_ctx_is_state - checks the global state of a state machine + * @ctx: a reference to the state machine context + * @state: the state to consider + * + * Returns a non-zero value if in the requested state. + */ +static inline __must_check int _ffl_ctx_is_state(struct ffl_xfer_ctx *ctx, + unsigned int state) +{ +#ifdef DEBUG + BUG_ON(state & ~FFL_GLOBAL_STATE_MASK); +#endif + + return (_ffl_ctx_get_state(ctx) == state); +} + +/** + * _ffl_ctx_set_state - sets the global state of a state machine + * @ctx: a reference to the state machine context + * @state: the state to set + */ +static inline void _ffl_ctx_set_state(struct ffl_xfer_ctx *ctx, + unsigned int state) +{ +#ifdef DEBUG + BUG_ON(state & ~FFL_GLOBAL_STATE_MASK); +#endif + + ctx->state = (ctx->state & ~FFL_GLOBAL_STATE_MASK) | state; +} + +/** + * _ffl_ctx_has_flag - checks if a flag is present in the state + * @ctx: a reference to the state machine context + * @flag: the flag(s) to consider + * + * Returns a non-zero value if all requested flags are present. 
+ */ +static inline __must_check int _ffl_ctx_has_flag(struct ffl_xfer_ctx *ctx, + unsigned int flag) +{ +#ifdef DEBUG + BUG_ON(flag & FFL_GLOBAL_STATE_MASK); +#endif + + return ((ctx->state & flag) == flag); +} + +/** + * _ffl_ctx_has_any_flag - checks if any flag is present in the state + * @ctx: a reference to the state machine context + * @flag: the flag(s) to consider + * + * Returns a non-zero value if all requested flags are present. + */ +static inline __must_check int _ffl_ctx_has_any_flag(struct ffl_xfer_ctx *ctx, + unsigned int flag) +{ +#ifdef DEBUG + BUG_ON(flag & FFL_GLOBAL_STATE_MASK); +#endif + + return ctx->state & flag; +} + +/** + * _ffl_ctx_set_flag - flags some extra information in the state + * @ctx: a reference to the state machine context + * @flag: the flag(s) to set + */ +static inline void _ffl_ctx_set_flag(struct ffl_xfer_ctx *ctx, + unsigned int flag) +{ +#ifdef DEBUG + BUG_ON(flag & FFL_GLOBAL_STATE_MASK); +#endif + + ctx->state |= flag; +} + +/** + * _ffl_ctx_clear_flag - unflags some extra information in the state + * @ctx: a reference to the state machine context + * @flag: the flag(s) to clear + */ +static inline void _ffl_ctx_clear_flag(struct ffl_xfer_ctx *ctx, + unsigned int flag) +{ +#ifdef DEBUG + BUG_ON(flag & FFL_GLOBAL_STATE_MASK); +#endif + + ctx->state &= ~flag; +} + +/* + * Low-level fixed frame length management helper functions + */ + +/** + * ffl_virt - helper function for getting the virtual base address of a frame + * @frame: a reference to the considered frame + * + * Returns the virtual base address of the frame + */ +static inline unsigned char *ffl_virt(struct hsi_msg *frame) +{ + return (unsigned char *) (sg_virt(frame->sgt.sgl)); +} + +/** + * ffl_frame_room - helper function for evaluating room in a frame + * @frame: a reference to the considered frame + * @used_len: the used length of the frame in bytes + * + * Returns the room in byte in the current frame + */ +static inline unsigned int ffl_frame_room(struct hsi_msg *frame, + unsigned int used_len) +{ +#ifdef CONFIG_HSI_FFL_ENSURE_LAST_WORD_NULL + return min(frame->sgt.sgl->length, + (unsigned int)(FFL_FRAME_LENGTH-4)) - used_len + - FFL_HEADER_LENGTH; +#else + return frame->sgt.sgl->length - used_len - FFL_HEADER_LENGTH; +#endif +} + +/** + * room_in - helper function for getting current room in a frame + * @frame: a reference to the considered frame + * + * Returns the room in byte in the current frame + */ +static inline unsigned int room_in(struct hsi_msg *frame) +{ + return ffl_frame_room(frame, frame->actual_len); +} + +/** + * ffl_data_ptr - helper function for getting the actual virtual address of a + * frame data, taking into account the header offset + * @frame_ptr: a pointer to the virtual base address of a frame + * @offset: an offset to add to the current virtual address of the frame data + * + * Returns the virtual base address of the actual frame data + */ +static inline __attribute_const__ +unsigned char *ffl_data_ptr(unsigned char *frame_ptr, unsigned int offset) +{ + return &(frame_ptr[FFL_HEADER_LENGTH+offset]); +} + +/** + * ffl_set_length - write down the length information to the frame header + * @frame_ptr: a pointer to the virtual base address of a frame + * @sz: the length information to encode in the header + */ +static inline void ffl_set_length(unsigned char *frame_ptr, u32 sz) +{ +#if FFL_HEADER_LENGTH > 0 + u32 *header = (u32 *) frame_ptr; + *header = sz; +#endif +} + +/** + * ffl_get_length - read the length information from the frame header + * 
@frame_ptr: a pointer to the virtual base address of a frame + * + * Returns the length information to encode in the header + */ +static inline unsigned int ffl_get_length(unsigned char *frame_ptr) +{ +#if FFL_HEADER_LENGTH > 0 + return (unsigned int) *((u32 *) frame_ptr); +#else + return FFL_DATA_LENGTH; +#endif +} + +/** + * ffl_rx_frame_init - initialise a frame for entering the RX wait FIFO + * @frame: a reference to the considered frame + * + * This helper function is simply updating the scatterlist information and + * detecting errors. + */ +static void ffl_rx_frame_init(struct hsi_msg *frame) +{ + struct scatterlist *sg = frame->sgt.sgl; + + /* Use a non null frame length when an error occur to forward it to + * the upper layers. + * Do not use the in-frame length which can be broken */ + if (likely((!frame->break_frame) && + (frame->status == HSI_STATUS_COMPLETED))) { + frame->actual_len = ffl_get_length(ffl_virt(frame)); + } else { + pr_debug(DRVNAME ": Invalid FFL RX frame status (%d)", + frame->status); + frame->actual_len = 1; + } + sg->length = 0; + + /* If the decoded frame size is invalid, we are in big trouble */ + if (unlikely((frame->actual_len > FFL_DATA_LENGTH) || + (!frame->actual_len))) { + pr_debug(DRVNAME ": Invalid FFL frame size (%u bytes)\n", + frame->actual_len); + frame->status = HSI_STATUS_ERROR; + frame->actual_len = 1; + } +}; + +/** + * ffl_rx_frame_skip - skip a chunk of data at the beginning of a frame + * @frame: a reference to the considered frame + * @copied: the length of the chunk to skip + * + * This helper function is simply updating the scatterlist information. + */ +static inline void ffl_rx_frame_skip(struct hsi_msg *frame, + unsigned int copied) +{ + struct scatterlist *sg = frame->sgt.sgl; + + sg->offset += copied; + sg->length += copied; + frame->actual_len -= copied; +}; + +/** + * ffl_rx_frame_reset - revert a frame to a working order + * @ctx: a reference to the FFL context (RX or TX) to consider + * @frame: a reference to the considered frame + * + * This helper function is simply updating the scatterlist information. + */ +static inline void ffl_rx_frame_reset(struct ffl_xfer_ctx *ctx, + struct hsi_msg *frame) +{ + struct scatterlist *sg = frame->sgt.sgl; + + sg->offset -= sg->length; + sg->length = (ctx->data_len + FFL_HEADER_LENGTH); + frame->actual_len = 0; +}; + +/** + * ffl_frame_of - get the frame virtual address from its link address + * @link: the virtual address of the linked list structure of a frame + * + * Returns the corresponding frame virtual address. + */ +static inline __attribute_const__ +struct hsi_msg *ffl_frame_of(struct list_head *link) +{ + return list_entry(link, struct hsi_msg, link); +} + +/* + * Low-level FIFO managing + */ + +/** + * _ffl_frame_pop - pop the frame from its containing FIFO + * @frame: a reference to the frame being popped + */ +static inline void _ffl_frame_pop(struct hsi_msg *frame) +{ + list_del_init(&frame->link); +} + +/** + * _ffl_fifo_head - get a reference to the top frame of a FIFO + * @fifo: a reference of the FIFO to consider + * + * Returns a reference to the first frame of this FIFO. + * + * BEWARE: calling this with an empty FIFO gives unexpected results! 
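 * (When the FIFO is empty, fifo->next points back to the list head itself, so
 * the returned pointer is not a real frame. Callers are expected to check for
 * emptiness beforehand or to use _ffl_fifo_head_safe() instead.)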
+ */ +static inline struct hsi_msg *_ffl_fifo_head(struct list_head *fifo) +{ + return ffl_frame_of(fifo->next); +} + +/** + * _ffl_fifo_head_safe - get a reference to the top frame of a FIFO or NULL + * if the FIFO is empty + * @fifo: a reference of the FIFO to consider + * + * Returns a reference to the first frame of this FIFO or NULL if the FIFO is + * empty. + */ +static inline __must_check +struct hsi_msg *_ffl_fifo_head_safe(struct list_head *fifo) +{ + struct list_head *first = fifo->next; + + if (first == fifo) + return NULL; + return ffl_frame_of(first); +} + +/** + * _ffl_fifo_head_pop - get a reference to the top frame of a FIFO and pop it + * from the FIFO + * @fifo: a reference of the FIFO to consider + * + * Returns a reference to the first frame of this FIFO, which has been popped + * + * BEWARE: calling this with an empty FIFO gives unexpected results! + */ +static inline struct hsi_msg *_ffl_fifo_head_pop(struct list_head *fifo) +{ + struct hsi_msg *frame = _ffl_fifo_head(fifo); + _ffl_frame_pop(frame); + return frame; +} + +/** + * _ffl_fifo_head_safe_pop - get a reference to the top frame of a FIFO or NULL + * if the FIFO is empty, and then pop it if it exists + * @fifo: a reference of the FIFO to consider + * + * Returns a reference to the first frame of this FIFO, which has been popped + * or NULL if the FIFO is empty. + */ +static inline __must_check +struct hsi_msg *_ffl_fifo_head_safe_pop(struct list_head *fifo) +{ + struct hsi_msg *frame = _ffl_fifo_head_safe(fifo); + if (frame) + _ffl_frame_pop(frame); + return frame; +} + +/** + * _ffl_fifo_tail_safe - get a reference to the bottom frame of a FIFO or NULL + * if the FIFO is empty + * @fifo: a reference of the FIFO to consider + * + * Returns a reference to the last frame of this FIFO or NULL if the FIFO is + * empty. + */ +static inline __must_check +struct hsi_msg *_ffl_fifo_tail_safe(struct list_head *fifo) +{ + struct list_head *last = fifo->prev; + + if (last == fifo) + return NULL; + return ffl_frame_of(last); +} + +/** + * _ffl_fifo_frame_push - push a frame at the bottom of a FIFO + * @frame: a reference to the frame to push + * @fifo: a reference to the FIFO + */ +static inline void _ffl_fifo_frame_push(struct hsi_msg *frame, + struct list_head *fifo) +{ + list_add_tail(&frame->link, fifo); +} + +/** + * _ffl_fifo_frame_push_back - push back a frame at the top of a FIFO + * @frame: a reference to the frame to push back + * @fifo: a reference to the FIFO + */ +static inline void _ffl_fifo_frame_push_back(struct hsi_msg *frame, + struct list_head *fifo) +{ + list_add(&frame->link, fifo); +} + +/* + * Specialised FIFO handling methods + */ + +/** + * _ffl_fifo_wait_pop - pop a frame from the FIFO of waiting frames + * @ctx: a reference to the FFL context (RX or TX) to consider + * @frame: a reference to the frame to pop + * + * This function is not only popping the frame, but also updating the counters + * related to the FIFO of waiting frames in the considered context. + */ +static inline void _ffl_fifo_wait_pop(struct ffl_xfer_ctx *ctx, + struct hsi_msg *frame) +{ + _ffl_frame_pop(frame); + --ctx->wait_len; + ctx->buffered -= frame->actual_len; +} + +/** + * _ffl_fifo_wait_push - push a frame to the FIFO of waiting frames + * @ctx: a reference to the FFL context (RX or TX) to consider + * @frame: a reference to the frame to push + * + * This function is not only pushing the frame, but also updating the counters + * related to the FIFO of waiting frames in the considered context. 
+ */ +static inline void _ffl_fifo_wait_push(struct ffl_xfer_ctx *ctx, + struct hsi_msg *frame) +{ + ++ctx->wait_len; + ctx->buffered += frame->actual_len; + _ffl_fifo_frame_push(frame, &ctx->wait_frames); +} + +/** + * _ffl_fifo_wait_push_back - push back a frame in the FIFO of waiting frames + * @ctx: a reference to the FFL context (RX or TX) to consider + * @frame: a reference to the frame to push back + * + * This function is not only pushing back the frame, but also updating the + * counters related to the FIFO of waiting frames in the considered context. + */ +static inline void _ffl_fifo_wait_push_back(struct ffl_xfer_ctx *ctx, + struct hsi_msg *frame) +{ + ++ctx->wait_len; + ctx->buffered += frame->actual_len; + _ffl_fifo_frame_push_back(frame, &ctx->wait_frames); +} + +/** + * _ffl_fifo_ctrl_pop - pop a frame from the HSI controller FIFO + * @ctx: a reference to the FFL context (TX or RX) to consider + * + * This function is only updating the counters related to the FIFO of + * outstanding frames in the considered context. + */ +static inline void _ffl_fifo_ctrl_pop(struct ffl_xfer_ctx *ctx) +{ + --ctx->ctrl_len; +} + +/** + * _ffl_fifo_ctrl_push - push a frame to the HSI controller FIFO + * @ctx: a reference to the FFL context (TX or RX) to consider + * @frame: a reference to the frame to push + * @flags: a reference to the flag used by the external spinlock, passed in to + * unlock it and end the atomic context temporarily. + * + * Returns 0 on success or an error code on failure. + * + * This function is not only pushing the frame, but also updating the counters + * related to the FIFO of outstanding frames in the considered context. + */ +static inline __must_check int _ffl_fifo_ctrl_push(struct ffl_xfer_ctx *ctx, + struct hsi_msg *frame, unsigned long *flags) +{ + unsigned int lost_room = room_in(frame); + int err; + +#ifdef USE_IPC_ERROR_RECOVERY + /* Prevent sending messages to the controller when shutting down or in + * error recovery */ + if (unlikely(_ffl_ctx_has_any_flag(ctx, ERROR_RECOVERY_ONGOING_BIT | + TTY_OFF_BIT)) && + (!frame->break_frame)) +#else + /* Prevent sending messages to the controller when shutting down */ + if (unlikely(_ffl_ctx_has_flag(ctx, TTY_OFF_BIT)) && + (!frame->break_frame)) +#endif + return -EBUSY; + + /* Update the context room prior to removing the spinlock */ + ctx->room -= lost_room; + spin_unlock_irqrestore(&ctx->lock, *flags); + err = hsi_async(frame->cl, frame); + spin_lock_irqsave(&ctx->lock, *flags); + if (likely(!err)) + ++ctx->ctrl_len; + else + ctx->room += lost_room; + + return err; +} + +/* + * FIFO transfer functions + */ + +/* Forward declaration for _ffl_from_wait_to_ctrl() */ +static void _ffl_pop_wait_push_ctrl_safe(struct ffl_xfer_ctx *ctx, + unsigned long *flags); + +/** + * _ffl_from_wait_to_ctrl - transfer a TX frame from the wait FIFO to the + * controller FIFO + * @ctx: a reference to the FFL TX context to consider + * @frame: a reference to the frame to transfer + * @flags: a reference to the flag used by the external spinlock, passed in to + * unlock it and end the atomic context temporarily. + * + * Note that no error is returned upon controller failure such as -EBUSY, in + * such cases, the frame is simply returned back to the wait FIFO, as nothing + * else can be done. 
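The wait/controller FIFO helpers above all revolve around four counters: wait_len, ctrl_len, buffered and room. As a minimal user-space sketch of that bookkeeping (all names and sizes below are made up for the demo, this is not the driver's code): buffered tracks payload bytes held in the wait FIFO, room tracks how much more a writer can queue, and pushing a frame to the controller gives up the frame's remaining free space.

#include <assert.h>

struct demo_ctx {
        unsigned int wait_len;  /* frames queued in the wait FIFO */
        unsigned int ctrl_len;  /* frames handed to the HSI controller */
        unsigned int buffered;  /* payload bytes held in the wait FIFO */
        unsigned int room;      /* free payload bytes left for writers */
};

static void demo_wait_push(struct demo_ctx *c, unsigned int len)
{
        ++c->wait_len;
        c->buffered += len;
}

/* Models _ffl_fifo_wait_pop() followed by _ffl_fifo_ctrl_push(): the frame
 * leaves the wait FIFO and its remaining room is no longer writable. */
static void demo_wait_to_ctrl(struct demo_ctx *c, unsigned int len,
                              unsigned int frame_room)
{
        --c->wait_len;
        c->buffered -= len;
        c->room -= frame_room;
        ++c->ctrl_len;
}

int main(void)
{
        struct demo_ctx c = { 0, 0, 0, 4096 };

        demo_wait_push(&c, 512);      /* a 512-byte write lands in a frame */
        c.room -= 512;                /* the write consumed that much room */
        assert(c.wait_len == 1 && c.buffered == 512 && c.room == 3584);

        demo_wait_to_ctrl(&c, 512, 1024); /* frame still had 1024 bytes free */
        assert(c.ctrl_len == 1 && c.wait_len == 0 && c.room == 2560);
        return 0;
}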
+ */ +static void _ffl_from_wait_to_ctrl(struct ffl_xfer_ctx *ctx, + struct hsi_msg *frame, unsigned long *flags) +{ + unsigned int actual_len = frame->actual_len; + int err; + + if (unlikely(_ffl_ctx_has_flag(ctx, TX_TTY_WRITE_FORWARDING_BIT))) + return; + + _ffl_ctx_set_flag(ctx, TX_TTY_WRITE_FORWARDING_BIT); + _ffl_fifo_wait_pop(ctx, frame); + err = _ffl_fifo_ctrl_push(ctx, frame, flags); + _ffl_ctx_clear_flag(ctx, TX_TTY_WRITE_FORWARDING_BIT); + + if (unlikely(err)) { + /* Keep the data for the future */ + _ffl_fifo_wait_push_back(ctx, frame); + } else { + if (ctx->ctrl_len > 0) + mod_timer(&ctx->main_ctx->hangup.timer, + jiffies + usecs_to_jiffies(TTY_HANGUP_DELAY)); +#ifdef CONFIG_HSI_FFL_TTY_STATS + ctx->data_sz += actual_len; + ctx->frame_cnt++; +#endif + if (ctx->ctrl_len < ctx->ctrl_max) + _ffl_pop_wait_push_ctrl_safe(ctx, flags); + } +} + +/** + * _ffl_pop_wait_push_ctrl - transfer the first TX frame from the wait FIFO to + * the controller FIFO + * @ctx: a reference to the FFL TX context to consider + * @flags: a reference to the flag used by the external spinlock, passed in to + * unlock it and end the atomic context temporarily. + * + * This wrapper function is simply transferring the first frame of the wait + * FIFO. + * + * BEWARE: calling this with an empty FIFO gives unexpected results! + */ +static inline void _ffl_pop_wait_push_ctrl(struct ffl_xfer_ctx *ctx, + unsigned long *flags) +{ + _ffl_from_wait_to_ctrl(ctx, _ffl_fifo_head(&ctx->wait_frames), flags); +} + +/** + * _ffl_pop_wait_push_ctrl_safe - transfer the first TX frame from the wait FIFO + * to the controller FIFO, unless this frame is + * being updated (marked as break frame) + * @ctx: a reference to the FFL TX context to consider + * @flags: a reference to the flag used by the external spinlock, passed in to + * unlock it and end the atomic context temporarily. + * + * BEWARE: calling this with an empty FIFO gives unexpected results! + */ +static void _ffl_pop_wait_push_ctrl_safe(struct ffl_xfer_ctx *ctx, + unsigned long *flags) +{ + struct hsi_msg *frame; + + frame = _ffl_fifo_head(&ctx->wait_frames); + + if ((frame->status == HSI_STATUS_COMPLETED) && + (likely(!_ffl_ctx_has_flag(ctx, TTY_OFF_BIT)))) + _ffl_from_wait_to_ctrl(ctx, frame, flags); +} + +/* + * Frame (hsi_msg) creation and deletion + */ + +/** + * ffl_delete_frame - helper function to delete and free an existing frame + * @frame: a reference to the frame to delete + * @ctx: a reference to the related FFL context + * + * This function shall only be called by the pool of frame management routines. 
+ */ +static void ffl_delete_frame(struct hsi_msg *frame, struct ffl_ctx *ctx) +{ + /* Revert to the actual allocated size */ + frame->sgt.sgl->length = FFL_FRAME_LENGTH; + + if ((ctx->controller) && + (is_device_dma_capable(ctx->controller))) { + dma_free_coherent(ctx->controller, FFL_FRAME_LENGTH, + ffl_virt(frame), + sg_dma_address(frame->sgt.sgl)); + } else { +#ifdef FFL_FRAME_ALLOC_PAGES + __free_pages(sg_page(frame->sgt.sgl), FFL_FRAME_ALLOC_ORDER); +#else + kfree(ffl_virt(frame)); +#endif + } + + sg_free_table(&frame->sgt); + kfree(frame); +} + +/* Forward declarations for ffl_create_frame() */ +static void ffl_complete_tx(struct hsi_msg *frame); +static void ffl_complete_rx(struct hsi_msg *frame); +static void ffl_destruct_frame(struct hsi_msg *frame); + +/** + * ffl_create_frame - helper function to allocate and initialise a new frame + * @ctx: a reference to the FFL context (RX or TX) to consider + * + * Returns a reference to the newly created frame or NULL if an error occured. + * + * This function shall only be called by the pool of frame management routines. + */ +static __must_check struct hsi_msg *ffl_create_frame(struct ffl_xfer_ctx *ctx) +{ + struct ffl_ctx *main_ctx = main_ctx(ctx); + struct hsi_msg *new; + void *buffer; + + /* Be careful: might sleep ! */ + new = kzalloc(sizeof(struct hsi_msg), GFP_KERNEL); + if (unlikely(!new)) + goto fail0; + + if (unlikely(sg_alloc_table(&new->sgt, 1, GFP_KERNEL))) + goto fail1; + + if ((main_ctx->controller) && + (is_device_dma_capable(main_ctx->controller))) { + buffer = dma_alloc_coherent(main_ctx->controller, + FFL_FRAME_LENGTH, + &sg_dma_address(new->sgt.sgl), + GFP_KERNEL); + } else { +#ifdef FFL_FRAME_ALLOC_PAGES + buffer = (void *) __get_free_pages(GFP_KERNEL, + FFL_FRAME_ALLOC_ORDER); +#else + buffer = kmalloc(FFL_FRAME_LENGTH, GFP_KERNEL); +#endif +#ifdef CONFIG_HSI_FFL_ENSURE_LAST_WORD_NULL + ((u32 *)buffer)[FFL_FRAME_LENGTH/4-1] = 0; +#endif + } + + if (unlikely(!buffer)) + goto fail2; + + sg_set_buf(new->sgt.sgl, buffer, FFL_FRAME_LENGTH); + + ffl_set_length(buffer, 0); + + new->cl = main_ctx(ctx)->client; + new->context = ctx; + + if (xfer_ctx_is_tx_ctx(ctx)) { + new->complete = &ffl_complete_tx; + new->destructor = &ffl_destruct_frame; + new->ttype = HSI_MSG_WRITE; + } else { + new->complete = &ffl_complete_rx; + new->destructor = &ffl_destruct_frame; + new->ttype = HSI_MSG_READ; + } + + return new; + +fail2: + sg_free_table(&new->sgt); + +fail1: + kfree(new); + +fail0: + return NULL; +} + +/* + * FIFO length management + */ + +/** + * _ffl_ctx_is_empty - checks if a context is empty (all FIFO are empty) + * @ctx: a reference to the FFL context (RX or TX) to consider + * + * This helper function is returning a non-zero value if both the wait FIFO and + * the controller FIFO are empty. Note that this does not mean that there are + * no data pending in the controller hardware. 
+ */ +static __must_check inline int _ffl_ctx_is_empty(struct ffl_xfer_ctx *ctx) +{ + return ((ctx->wait_len == 0) && (ctx->ctrl_len <= 0)); +} + +/** + * ffl_tx_full_pipe_is_clean - checks if the TX pipe is clean or about to be + * @ctx: a reference to the main FFL context to consider + * + * This helper function is returning a non-zero value if both the wait FIFO and + * the controller FIFO are empty or if a hangup of any kind is currently + * outstanding + */ +static __must_check int ffl_tx_full_pipe_is_clean(struct ffl_ctx *ctx) +{ + struct ffl_xfer_ctx *tx_ctx = &ctx->tx; + int ret; + unsigned long flags; + + spin_lock_irqsave(&tx_ctx->lock, flags); + ret = _ffl_ctx_is_empty(tx_ctx) || (ctx->hangup.cause); + spin_unlock_irqrestore(&tx_ctx->lock, flags); + + return ret; +} + +/** + * ffl_tx_write_pipe_is_clean - checks if there are still outstanding writes + * @ctx: a reference to the TX context to consider + * + * This helper function is returning a non-zero value if there is no + * outstanding writes in the TX pipeline. + */ +static __must_check int ffl_tx_write_pipe_is_clean(struct ffl_xfer_ctx *ctx) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(&ctx->lock, flags); + ret = !_ffl_ctx_has_flag(ctx, TX_TTY_WRITE_ONGOING_BIT); + spin_unlock_irqrestore(&ctx->lock, flags); + + return ret; +} + +/* + * State machines + */ + +/** + * _ffl_start_tx - update the TX state machine on every new transfer + * @ctx: a reference to the FFL TX context to consider + * @flags: a reference to the flag used by the external spinlock, passed in to + * unlock it and end the atomic context temporarily. + * + * This helper function updates the TX state if it is currently idle and + * inform the HSI framework and attached controller. + */ +static void _ffl_start_tx(struct ffl_xfer_ctx *ctx, unsigned long *flags) +{ + struct ffl_ctx *main_ctx = container_of(ctx, struct ffl_ctx, tx); + int err; + + if (_ffl_ctx_is_state(ctx, IDLE)) { + _ffl_ctx_set_state(ctx, ACTIVE); + spin_unlock_irqrestore(&ctx->lock, *flags); +#if (ACWAKE_MINIMAL_PULSE_UDELAY > 0) + udelay(ACWAKE_MINIMAL_PULSE_UDELAY); +#endif + err = hsi_start_tx(main_ctx->client); + spin_lock_irqsave(&ctx->lock, *flags); + if (unlikely(err)) + _ffl_ctx_set_state(ctx, IDLE); + } else { + del_timer(&ctx->timer); + } +} + +/** + * _ffl_stop_tx - update the TX state machine after expiration of the TX active + * timeout further to a no outstanding TX transaction status + * @ctx: a reference to the FFL TX context to consider + * @flags: a reference to the flag used by the external spinlock, passed in to + * unlock it and end the atomic context temporarily. + * + * This helper function updates the TX state if it is currently active and + * inform the HSI framework and attached controller. + */ +static void _ffl_stop_tx(struct ffl_xfer_ctx *ctx, unsigned long *flags) +{ + struct ffl_ctx *main_ctx; + + if (_ffl_ctx_is_state(ctx, ACTIVE)) { + _ffl_ctx_set_state(ctx, IDLE); + main_ctx = container_of(ctx, struct ffl_ctx, tx); + spin_unlock_irqrestore(&ctx->lock, *flags); + hsi_stop_tx(main_ctx->client); + spin_lock_irqsave(&ctx->lock, *flags); + } +} + +/** + * ffl_stop_tx - update the TX state machine after expiration of the TX active + * timeout further to a no outstanding TX transaction status + * @param: a hidden reference to the FFL TX context to consider + * + * This helper function updates the TX state if it is currently active and + * inform the HSI framework and attached controller. 
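The _ffl_start_tx()/_ffl_stop_tx() pair above implements a small ACWAKE policy: a new transfer raises ACWAKE when the TX side is idle, back-to-back transfers keep cancelling the stop timer, and ACWAKE is only dropped when that timer finally fires with nothing left to send. A reduced, stand-alone model of that policy follows; the demo_* names and the boolean "acwake" flag standing in for hsi_start_tx()/hsi_stop_tx() are purely illustrative.

#include <assert.h>
#include <stdbool.h>

enum demo_state { DEMO_IDLE, DEMO_ACTIVE };

struct demo_tx {
        enum demo_state state;
        bool acwake;            /* models hsi_start_tx()/hsi_stop_tx() */
        bool stop_timer_armed;
};

static void demo_start_tx(struct demo_tx *tx)
{
        if (tx->state == DEMO_IDLE) {
                tx->state = DEMO_ACTIVE;
                tx->acwake = true;            /* hsi_start_tx() */
        } else {
                tx->stop_timer_armed = false; /* del_timer(): keep ACWAKE up */
        }
}

static void demo_tx_complete(struct demo_tx *tx, bool fifo_empty)
{
        if (fifo_empty)
                tx->stop_timer_armed = true;  /* mod_timer(timer, delay) */
}

static void demo_stop_tx_timer(struct demo_tx *tx)
{
        if (tx->stop_timer_armed && tx->state == DEMO_ACTIVE) {
                tx->state = DEMO_IDLE;
                tx->acwake = false;           /* hsi_stop_tx() */
        }
}

int main(void)
{
        struct demo_tx tx = { DEMO_IDLE, false, false };

        demo_start_tx(&tx);          /* first write: ACWAKE goes high */
        demo_tx_complete(&tx, true); /* TX done, nothing pending */
        demo_start_tx(&tx);          /* new write before the timer fires */
        demo_stop_tx_timer(&tx);     /* timer was cancelled: still high */
        assert(tx.acwake);

        demo_tx_complete(&tx, true);
        demo_stop_tx_timer(&tx);     /* idle delay elapsed: ACWAKE drops */
        assert(!tx.acwake);
        return 0;
}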
+ */ +static void ffl_stop_tx(unsigned long param) +{ + struct ffl_xfer_ctx *ctx = (struct ffl_xfer_ctx *) param; + unsigned long flags; + + spin_lock_irqsave(&ctx->lock, flags); + _ffl_stop_tx(ctx, &flags); + spin_unlock_irqrestore(&ctx->lock, flags); +} + +/** + * ffl_start_rx - update the internal RX state machine + * @cl: a reference to HSI client to consider + * + * This helper function updates the RX state and wakes the device. + */ +static void ffl_start_rx(struct hsi_client *cl) +{ + struct ffl_ctx *main_ctx = + (struct ffl_ctx *) hsi_client_drvdata(cl); + struct ffl_xfer_ctx *ctx = &main_ctx->rx; + unsigned long flags; + + spin_lock_irqsave(&ctx->lock, flags); + _ffl_ctx_set_state(ctx, ACTIVE); + spin_unlock_irqrestore(&ctx->lock, flags); +} + +/** + * _ffl_state_rx_not_active - update the RX state machine upon reception of an + * @ctx: a reference to the FFL RX context to consider + * + * This helper function updates the RX state in accordance with the status of + * the RX FIFO. + */ +static inline void _ffl_state_rx_not_active(struct ffl_xfer_ctx *ctx) +{ + if (!_ffl_ctx_is_empty(ctx)) + _ffl_ctx_set_state(ctx, TTY); + else + _ffl_ctx_set_state(ctx, IDLE); +} + +/** + * _ffl_update_state_rx - update the RX state machine upon recycling of a + * RX frame + * @ctx: a reference to the FFL RX context to consider + * + * This helper function updates the RX state in accordance with the status of + * the RX FIFO, unless the RX is required active. + */ +static inline void _ffl_update_state_rx(struct ffl_xfer_ctx *ctx) +{ + if (!_ffl_ctx_is_state(ctx, ACTIVE)) + _ffl_state_rx_not_active(ctx); +} + +/** + * _ffl_stop_rx - update the internal RX state machine + * @ctx: a reference to the FFL RX context to consider + * @main_ctx: a reference to related main FFL context + * + * This helper function updates the RX state and allows the HSI device to + * sleep. + */ +static inline void _ffl_stop_rx(struct ffl_xfer_ctx *ctx, + struct ffl_ctx *main_ctx) +{ + _ffl_state_rx_not_active(ctx); +} + +/** + * ffl_stop_rx - update the internal RX state machine + * @cl: a reference to HSI client to consider + * + * This helper function updates the RX state and allows the HSI device to + * sleep. + */ +static void ffl_stop_rx(struct hsi_client *cl) +{ + struct ffl_ctx *main_ctx = + (struct ffl_ctx *) hsi_client_drvdata(cl); + struct ffl_xfer_ctx *ctx = &main_ctx->rx; + unsigned long flags; + + spin_lock_irqsave(&ctx->lock, flags); + _ffl_stop_rx(ctx, main_ctx); + spin_unlock_irqrestore(&ctx->lock, flags); +} + +/* + * Frame recycling helper functions + */ + +/** + * _ffl_new_frame - creating a new empty file from the recycling FIFO + * @ctx: a reference to the FFL context (RX or TX) to consider + * + * Returns a reference to the new empty frame or NULL if there are no recycled + * frames left. 
+ */ +static inline __must_check +struct hsi_msg *_ffl_new_frame(struct ffl_xfer_ctx *ctx) +{ + /* If there are recycled items, then we are sure that there is room + * in either the waiting FIFO or controller FIFO */ + return _ffl_fifo_head_safe_pop(&ctx->recycled_frames); +} + +/** + * _ffl_update_ctx_status - updating the channel status further to config + * changes (channel, frame length) + * @ctx: a reference to the FFL context (RX or TX) to consider + */ +static void _ffl_update_ctx_status(struct ffl_xfer_ctx *ctx) +{ + struct list_head *index; + struct hsi_msg *frame; + struct scatterlist *sg; + + list_for_each(index, &ctx->recycled_frames) { + frame = ffl_frame_of(index); + sg = frame->sgt.sgl; + + frame->channel = ctx->channel; + + ctx->room += ctx->data_len; + ctx->room -= (sg->length - FFL_HEADER_LENGTH); + sg->length = ctx->data_len + FFL_HEADER_LENGTH; + } + + if (xfer_ctx_is_tx_ctx(ctx)) + main_ctx(ctx)->client->tx_cfg = ctx->config; + else + main_ctx(ctx)->client->rx_cfg = ctx->config; +} + +/** + * _ffl_recycle_frame - recycling a frame in the recycling FIFO + * @ctx: a reference to the FFL context (RX or TX) to consider + * @frame: a reference to the frame to recycle + */ +static inline void _ffl_recycle_frame(struct ffl_xfer_ctx *ctx, + struct hsi_msg *frame) +/* Needs locking ! */ +{ + _ffl_fifo_frame_push(frame, &ctx->recycled_frames); +} + +/** + * _ffl_free_frame - deleting a frame created by a call to ffl_create_frame + * @ctx: a reference to the FFL context (RX or TX) to consider + * @frame: a reference to the frame to delete + * + * This function is either recycling the frame if there are not too many frames + * in the system, otherwise destroy it and free its resource. + */ +static void _ffl_free_frame(struct ffl_xfer_ctx *ctx, + struct hsi_msg *frame) +{ + if (unlikely(ctx->all_len > (ctx->wait_max+ctx->ctrl_max))) { + ffl_delete_frame(frame, main_ctx(ctx)); + --ctx->all_len; + } else { + frame->status = HSI_STATUS_COMPLETED; + frame->actual_len = 0; + frame->break_frame = 0; +#ifdef DEBUG + /* Initialise the frame with a buggy length to ensure that it + * is correctly updated prior TX and after RX */ + ffl_set_length(ffl_virt(frame), FFL_BUGGY_FRAME_SIZE); +#endif + ctx->room += ffl_frame_room(frame, 0); + _ffl_recycle_frame(ctx, frame); + } +} + +/** + * ffl_destruct_frame - delete or recycle an existing frame + * @frame: a reference to the frame to delete + * + * This function shall only be called as an HSI destruct callback. + */ +static void ffl_destruct_frame(struct hsi_msg *frame) +{ + struct ffl_xfer_ctx *ctx = frame->context; + unsigned long flags; + + spin_lock_irqsave(&ctx->lock, flags); + _ffl_fifo_ctrl_pop(ctx); + _ffl_free_frame(ctx, frame); + if (xfer_ctx_is_rx_ctx(ctx)) { + _ffl_update_state_rx(ctx); + } else { + if (ctx->ctrl_len <= 0) + del_timer(&ctx->main_ctx->hangup.timer); + if (_ffl_ctx_is_empty(ctx)) { + wake_up(&ctx->main_ctx->tx_full_pipe_clean_event); + mod_timer(&ctx->timer, jiffies + ctx->delay); + } + } + spin_unlock_irqrestore(&ctx->lock, flags); +} + +/** + * _ffl_fifo_frames_delete - deletes the whole content of a FIFO + * @fifo: a reference to the FIFO to empty + * @ctx: a reference to the main context related to the FIFO + * + * This helper function is emptying a FIFO and deleting all its frames. 
+ */ +static void _ffl_fifo_frames_delete(struct list_head *fifo, + struct ffl_ctx *ctx) +{ + struct hsi_msg *frame; + + while ((frame = _ffl_fifo_head_safe(fifo))) { + _ffl_frame_pop(frame); + ffl_delete_frame(frame, ctx); + } +} + +/** + * _ffl_tx_fifo_wait_recycle - recycle the whole content of the TX waiting FIFO + * @ctx: a reference to the TX FFL context to consider + * + * This helper function is emptying a waiting TX FIFO and recycling all its + * frames. + */ +static void _ffl_tx_fifo_wait_recycle(struct ffl_xfer_ctx *ctx) +{ + struct hsi_msg *frame; + + _ffl_ctx_clear_flag(ctx, TX_TTY_WRITE_PENDING_BIT); + + while ((frame = _ffl_fifo_head_safe(&ctx->wait_frames))) { + _ffl_fifo_wait_pop(ctx, frame); + ctx->room -= room_in(frame); + if (frame->status == HSI_STATUS_COMPLETED) + _ffl_free_frame(ctx, frame); + else + frame->status = HSI_STATUS_ERROR; + } +} + +/** + * _ffl_rx_fifo_wait_recycle - recycle the whole content of the RX waiting FIFO + * @ctx: a reference to the RX FFL context to consider + * + * This helper function is emptying a waiting RX FIFO and recycling all its + * frames. + */ +static void _ffl_rx_fifo_wait_recycle(struct ffl_xfer_ctx *ctx) +{ + struct hsi_msg *frame; + + _ffl_ctx_clear_flag(ctx, RX_TTY_FORWARDING_BIT|RX_TTY_REFORWARD_BIT); + + while ((frame = _ffl_fifo_head_safe(&ctx->wait_frames))) { + _ffl_fifo_wait_pop(ctx, frame); + ffl_rx_frame_reset(ctx, frame); + _ffl_free_frame(ctx, frame); + } +} + +/** + * ffl_increase_pool_of_frames - background work aimed at creating new frames + * @work: a reference to the work context + * + * This function is called as a background job (in the ffl_recycle_wq work + * queue) for performing the frame resource allocation (which can then sleep). + * + * An error message is sent upon the failure of FFL_FRAME_ALLOC_RETRY_MAX_CNT + * allocation requests. + */ +static void ffl_increase_pool_of_frames(struct work_struct *work) +{ + struct ffl_xfer_ctx *ctx = container_of(work, struct ffl_xfer_ctx, + increase_pool); + struct hsi_msg *new; + int retry; + unsigned long flags; + + spin_lock_irqsave(&ctx->lock, flags); + while (ctx->all_len < (ctx->wait_max+ctx->ctrl_max)) { + spin_unlock_irqrestore(&ctx->lock, flags); + + retry = 0; + new = ffl_create_frame(ctx); + while (!new) { + ++retry; + if (retry == FFL_FRAME_ALLOC_RETRY_MAX_CNT) { + pr_err(DRVNAME + ": cannot allocate a frame after %d" + " retries...", retry); + retry = 0; + } + + /* No memory available: do something more urgent ! */ + schedule(); + new = ffl_create_frame(ctx); + } + + spin_lock_irqsave(&ctx->lock, flags); + new->channel = ctx->channel; +#ifdef CONFIG_HSI_FFL_ENSURE_LAST_WORD_NULL + ctx->room += min(ctx->data_len, + (unsigned int)(FFL_DATA_LENGTH - 4)); +#else + ctx->room += ctx->data_len; +#endif + new->sgt.sgl->length = ctx->data_len + FFL_HEADER_LENGTH; + _ffl_recycle_frame(ctx, new); + ++ctx->all_len; + } + spin_unlock_irqrestore(&ctx->lock, flags); + + pr_debug(DRVNAME ": done creating pool of frames."); +} + +/* + * TX data flow functions + */ + +/* The top-down flow is made in the ffl_tty_write() TTY function */ + +/** + * ffl_tty_wakeup - wakeup an asleep TTY write function call + * @ctx: a reference to the context related to this TTY + * + * This helper function awakes any asleep TTY write callback function. 
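The pool policy shared by _ffl_free_frame() and ffl_increase_pool_of_frames() above can be summarised in a few lines: the context owns at most wait_max + ctrl_max frames, a completed frame is recycled while the pool is at or below that limit and deleted once it is above it, and a background work item refills the pool when it shrinks. Here is a rough counter-only model; the numbers and demo_* names are made up for illustration.

#include <assert.h>

struct demo_pool {
        unsigned int all_len;   /* frames owned by this context */
        unsigned int recycled;  /* frames sitting in the recycling FIFO */
        unsigned int wait_max;
        unsigned int ctrl_max;
};

static void demo_free_frame(struct demo_pool *p)
{
        if (p->all_len > p->wait_max + p->ctrl_max)
                --p->all_len;   /* pool is oversized: really delete it */
        else
                ++p->recycled;  /* keep it around for the next transfer */
}

static void demo_increase_pool(struct demo_pool *p)
{
        while (p->all_len < p->wait_max + p->ctrl_max) {
                ++p->all_len;   /* ffl_create_frame(), may sleep and retry */
                ++p->recycled;
        }
}

int main(void)
{
        struct demo_pool p = { .all_len = 0, .recycled = 0,
                               .wait_max = 8, .ctrl_max = 4 };

        demo_increase_pool(&p);
        assert(p.all_len == 12 && p.recycled == 12);

        --p.recycled;           /* one frame leaves the pool for a transfer */
        p.wait_max = 4;         /* e.g. FFL_TTY_SET_TX_WAIT_MAX shrinks it */
        demo_free_frame(&p);    /* frame completes: pool oversized, drop it */
        assert(p.all_len == 11 && p.recycled == 11);
        return 0;
}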
+ */ +static void ffl_tty_wakeup(struct ffl_ctx *ctx) +{ + struct tty_struct *tty; + + tty = tty_port_tty_get(&ctx->tty_prt); + if (likely(tty)) { + tty_wakeup(tty); + tty_kref_put(tty); + } +} + +/** + * ffl_complete_tx - bottom-up flow for the TX side + * @frame: a reference to the completed frame + * + * A TX transfer has completed: recycle the completed frame and kick a new + * delayed request to enter the IDLE state if nothing else is expected. + */ +static void ffl_complete_tx(struct hsi_msg *frame) +{ + struct ffl_xfer_ctx *ctx = frame->context; + struct ffl_ctx *main_ctx = container_of(ctx, + struct ffl_ctx, tx); + int wakeup; + unsigned long flags; + + main_ctx->reset.ongoing = 0; + + spin_lock_irqsave(&ctx->lock, flags); + _ffl_free_frame(ctx, frame); + _ffl_fifo_ctrl_pop(ctx); + if (ctx->ctrl_len > 0) + /* we can not del hangup timer, because another TX was queued */ + mod_timer(&main_ctx->hangup.timer, + jiffies + usecs_to_jiffies(TTY_HANGUP_DELAY)); + else + del_timer(&main_ctx->hangup.timer); + if (_ffl_ctx_is_empty(ctx)) { + wake_up(&main_ctx->tx_full_pipe_clean_event); + mod_timer(&ctx->timer, jiffies + ctx->delay); + } else { + del_timer(&ctx->timer); + } + if (ctx->wait_len > 0) + _ffl_pop_wait_push_ctrl_safe(ctx, &flags); + + /* Wake-up the TTY write whenever the TX wait FIFO is half empty, and + * not before, to prevent too many wakeups */ + wakeup = ((_ffl_ctx_has_flag(ctx, TX_TTY_WRITE_PENDING_BIT)) && + (ctx->wait_len <= ctx->wait_max/2)); + if (wakeup) + _ffl_ctx_clear_flag(ctx, TX_TTY_WRITE_PENDING_BIT); + spin_unlock_irqrestore(&ctx->lock, flags); + + if (wakeup) + ffl_tty_wakeup(main_ctx); +} + +/* + * RX data flow functions + */ + +/** + * _ffl_rx_push_controller - Push as many recycled frames as possible to the + * controller FIFO + * @ctx: a reference to the RX context where the FIFO of recycled frames sits + * @flags: a reference to the flag used by the external spinlock, passed in to + * unlock it and end the atomic context temporarily. + * + * Returns 0 upon success or an error code. + * + * This helper method is returning 0 on success, or an error code. + */ +static __must_check int _ffl_rx_push_controller(struct ffl_xfer_ctx *ctx, + unsigned long *flags) +{ + struct hsi_msg *new; + + while (ctx->ctrl_len < ctx->ctrl_max) { + new = _ffl_new_frame(ctx); + if (!new) + return -ENOMEM; + if (unlikely(_ffl_fifo_ctrl_push(ctx, new, flags))) { + _ffl_recycle_frame(ctx, new); + return -EAGAIN; + } + } + + return 0; +} + +/** + * _ffl_rx_free_frame - frame recycling helper function for the RX side + * @ctx: a reference to the RX context where the FIFO of recycled frames sits + * @frame: a reference to the frame that shall be recycled + * @flags: a reference to the flag used by the external spinlock, passed in to + * unlock it and end the atomic context temporarily. + * + * This helper method is recycling the frame and pushing a new frame to the + * controller if there is room available in the controller FIFO, and finally + * updating the state of the RX state machine. 
+ */ +static void _ffl_rx_free_frame(struct ffl_xfer_ctx *ctx, + struct hsi_msg *frame, unsigned long *flags) +{ + struct hsi_msg *new; + + ffl_rx_frame_reset(ctx, frame); + _ffl_free_frame(ctx, frame); + if (ctx->ctrl_len < ctx->ctrl_max) { + new = _ffl_new_frame(ctx); + if (unlikely(_ffl_fifo_ctrl_push(ctx, new, flags))) + _ffl_recycle_frame(ctx, new); + } + _ffl_update_state_rx(ctx); +} + +/** + * _ffl_forward_tty - RX data TTY forwarding helper function + * @tty: a reference to the TTY where the data shall be forwarded + * @ctx: a reference to the RX context where the FIFO of waiting frames sits + * @flags: a reference to the flag used by the external spinlock, passed in to + * unlock it and end the atomic context temporarily. + * + * Data contained in the waiting frame FIFO shall be forwarded to the TTY. + * This function is pushing as much data as possible to the TTY interface, is + * recycling frames that have been fully forwarded and is kicking a TTY insert + * restart delayed job if some data is remaining in the waiting FIFO or if the + * controller FIFO is not full yet. + */ +static void _ffl_forward_tty(struct tty_struct *tty, + struct ffl_xfer_ctx *ctx, + unsigned long *flags) +{ + struct hsi_msg *frame; + unsigned char *data_ptr; + unsigned int copied; + int do_push; + int err; + char tty_flag; + + if (_ffl_ctx_has_flag(ctx, RX_TTY_FORWARDING_BIT)) { + _ffl_ctx_set_flag(ctx, RX_TTY_REFORWARD_BIT); + return; + } + + /* Initialised to 1 to prevent unexpected TTY forwarding resume + * function when there is no TTY or when it is throttled */ + copied = 1; + do_push = 0; + err = 0; + + _ffl_ctx_set_flag(ctx, RX_TTY_FORWARDING_BIT); + + del_timer(&ctx->timer); + +shoot_again_now: + while (ctx->wait_len > 0) { + frame = _ffl_fifo_head(&ctx->wait_frames); + if (likely(frame->status == HSI_STATUS_COMPLETED)) + tty_flag = (likely(!frame->break_frame)) ? 
+ TTY_NORMAL : TTY_BREAK; + else + tty_flag = TTY_FRAME; + + _ffl_fifo_wait_pop(ctx, frame); + + if (unlikely(!tty)) + goto free_frame; + + while (frame->actual_len > 0) { + + if (test_bit(TTY_THROTTLED, &tty->flags)) { + /* Initialised to 1 to prevent unexpected TTY + * forwarding resume function schedule */ + copied = 1; + _ffl_fifo_wait_push_back(ctx, frame); + goto no_more_tty_insert; + } + + spin_unlock_irqrestore(&ctx->lock, *flags); + + /* Copy the data to the flip buffers */ + data_ptr = ffl_data_ptr(ffl_virt(frame), 0); + copied = (unsigned int) + tty_insert_flip_string_fixed_flag(tty, + data_ptr, + tty_flag, + frame->actual_len); + ffl_rx_frame_skip(frame, copied); + + /* We'll push the flip buffers each time something has + * been written to them to allow low latency */ + do_push |= (copied > 0); + + spin_lock_irqsave(&ctx->lock, *flags); + + if (copied == 0) { + _ffl_fifo_wait_push_back(ctx, frame); + goto no_more_tty_insert; + } + } + +free_frame: + _ffl_rx_free_frame(ctx, frame, flags); + } + +no_more_tty_insert: + /* Schedule a flip since called from complete_rx() in an interrupt + * context instead of tty_flip_buffer_push() */ + if (do_push) + tty_schedule_flip(tty); + + /* If some reforwarding request occur in the meantime, do this now */ + if (_ffl_ctx_has_flag(ctx, RX_TTY_REFORWARD_BIT)) { + _ffl_ctx_clear_flag(ctx, RX_TTY_REFORWARD_BIT); + goto shoot_again_now; + } + + _ffl_ctx_clear_flag(ctx, RX_TTY_FORWARDING_BIT); + + if (unlikely(ctx->ctrl_len < ctx->ctrl_max)) + err = _ffl_rx_push_controller(ctx, flags); + + /* Shoot again later if there is still pending data to serve or if + * the RX controller FIFO is not ready yet */ + if ((!copied) || (unlikely(err == -EAGAIN))) + mod_timer(&ctx->timer, jiffies + ctx->delay); +} + +#ifdef USE_IPC_ERROR_RECOVERY +/* Forward declarations for ffl_complete_rx() */ +static void do_recovery_drain_unless(struct ffl_xfer_ctx *xfer_ctx, + unsigned int condition); +#endif + +/** + * ffl_complete_rx - bottom-up flow for the RX side + * @frame: a reference to the completed frame + * + * A RX transfer has completed: push the data conveyed in the frame to the TTY + * interface and signal any existing error. 
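The forwarding loop above drains the RX wait FIFO into the TTY flip buffer, and when the TTY accepts only part of a frame the remainder is skipped over and the frame is pushed back for a later retry (from the timer or an unthrottle). A simplified, stand-alone model of that partial-consumption behaviour follows; the tiny buffer sizes and demo_* names are invented for the demo and do not reflect real flip-buffer sizing.

#include <assert.h>
#include <string.h>

struct demo_frame { const char *data; unsigned int len, skip; };

/* Returns bytes copied, limited by what the "TTY" can still accept. */
static unsigned int demo_tty_insert(char *flip, unsigned int *fill,
                                    unsigned int cap,
                                    const char *src, unsigned int len)
{
        unsigned int copied = (cap - *fill < len) ? cap - *fill : len;

        memcpy(flip + *fill, src, copied);
        *fill += copied;
        return copied;
}

static void demo_forward(struct demo_frame *f, char *flip,
                         unsigned int *fill, unsigned int cap)
{
        while (f->len - f->skip > 0) {
                unsigned int copied = demo_tty_insert(flip, fill, cap,
                                                      f->data + f->skip,
                                                      f->len - f->skip);
                if (!copied)
                        return;         /* flip buffer full: retry later */
                f->skip += copied;      /* ffl_rx_frame_skip() */
        }
}

int main(void)
{
        char flip[8];
        unsigned int fill = 0;
        struct demo_frame f = { "0123456789", 10, 0 };

        demo_forward(&f, flip, &fill, sizeof(flip));
        assert(f.skip == 8);            /* partially forwarded, pushed back */

        fill = 0;                       /* TTY drained / unthrottled */
        demo_forward(&f, flip, &fill, sizeof(flip));
        assert(f.skip == 10);           /* remainder delivered on retry */
        return 0;
}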
+ */ +static void ffl_complete_rx(struct hsi_msg *frame) +{ + struct ffl_xfer_ctx *ctx = frame->context; + struct ffl_ctx *main_ctx = container_of(ctx, + struct ffl_ctx, rx); + struct tty_struct *tty; + unsigned long flags; + +#ifdef DEBUG + if (unlikely(frame->actual_len != (ctx->data_len + FFL_HEADER_LENGTH))) + pr_err(DRVNAME ": [%08x] Invalid FFL frame length %d bytes\n", + (u32) frame, frame->actual_len); +#endif + + tty = tty_port_tty_get(&main_ctx->tty_prt); + + ffl_rx_frame_init(frame); + + spin_lock_irqsave(&ctx->lock, flags); + +#ifdef USE_IPC_ERROR_RECOVERY + /* Tag frames as being error frames when in error recovery mode */ + if (unlikely(_ffl_ctx_has_flag(ctx, ERROR_RECOVERY_RX_ERROR_BIT))) { + frame->actual_len = 1; + frame->status = HSI_STATUS_ERROR; + } else if (unlikely(frame->status != HSI_STATUS_COMPLETED)) { + _ffl_ctx_set_flag(ctx, ERROR_RECOVERY_RX_ERROR_BIT); + spin_unlock_irqrestore(&ctx->lock, flags); + do_recovery_drain_unless(&main_ctx->tx, + ERROR_RECOVERY_ONGOING_BIT | + TTY_OFF_BIT); + spin_lock_irqsave(&ctx->lock, flags); + } +#endif + + _ffl_fifo_ctrl_pop(ctx); +#ifdef CONFIG_HSI_FFL_TTY_STATS + ctx->data_sz += frame->actual_len; + ctx->frame_cnt++; + if (ctx->ctrl_len <= 0) + ctx->overflow_cnt++; +#endif + _ffl_fifo_wait_push(ctx, frame); + _ffl_forward_tty(tty, ctx, &flags); + spin_unlock_irqrestore(&ctx->lock, flags); + + if (tty) + tty_kref_put(tty); +} + +/** + * ffl_rx_forward_retry - TTY forwarding retry job + * @param: a casted reference to the to the RX context where the FIFO of + * waiting frames sits + * + * This simply calls the TTY forwarding function in a tasklet shell. + */ +static void ffl_rx_forward_retry(unsigned long param) +{ + struct ffl_xfer_ctx *ctx = (struct ffl_xfer_ctx *) param; + struct ffl_ctx *main_ctx = container_of(ctx, + struct ffl_ctx, rx); + struct tty_struct *tty; + unsigned long flags; + + tty = tty_port_tty_get(&main_ctx->tty_prt); + + spin_lock_irqsave(&ctx->lock, flags); + _ffl_forward_tty(tty, ctx, &flags); + spin_unlock_irqrestore(&ctx->lock, flags); + + if (tty) + tty_kref_put(tty); +} + +/** + * ffl_rx_forward_resume - TTY forwarding resume callback + * @tty: a reference to the TTY requesting the resume + * + * This simply calls the TTY forwarding function as a response to a TTY + * unthrottle event. + */ +static void ffl_rx_forward_resume(struct tty_struct *tty) +{ + struct ffl_ctx *main_ctx; + struct ffl_xfer_ctx *ctx; + unsigned long flags; + + /* Get the context reference from the driver data if already opened */ + main_ctx = (struct ffl_ctx *) tty->driver_data; + + if (!main_ctx) + return; + + ctx = &main_ctx->rx; + + spin_lock_irqsave(&ctx->lock, flags); + _ffl_forward_tty(tty, ctx, &flags); + spin_unlock_irqrestore(&ctx->lock, flags); +} + +/* + * Time handling methods + */ + +/** + * from_usecs - translating usecs to jiffies + * @delay: the dealy in usecs + * + * Returns the delay rounded up to the next jiffy and prevent it to be set + * to zero, as all delayed function calls shall occur to the next jiffy (at + * least). 
+ */ +static inline unsigned long from_usecs(const unsigned long delay) +{ + unsigned long j = usecs_to_jiffies(delay); + + if (j == 0) + j = 1; + + return j; +} + +/* + * TTY handling methods + */ + +/** + * ffl_wait_until_ctx_sent - waits for all the TX FIFO to be empty + * @ctx: a reference to the considered context + * @timeout: a timeout value expressed in jiffies + */ +static inline void ffl_wait_until_ctx_sent(struct ffl_ctx *ctx, int timeout) +{ + wait_event_interruptible_timeout(ctx->tx_full_pipe_clean_event, + ffl_tx_full_pipe_is_clean(ctx), + timeout); +} + +/** + * ffl_tty_port_activate - callback to the TTY port activate function + * @port: a reference to the calling TTY port + * @tty: a reference to the calling TTY + * + * Return 0 on success or a negative error code on error. + * + * The TTY port activate is only called on the first port open. + */ +static int ffl_tty_port_activate(struct tty_port *port, struct tty_struct *tty) +{ + struct ffl_ctx *ctx; + struct ffl_xfer_ctx *tx_ctx; + struct ffl_xfer_ctx *rx_ctx; + int err; + unsigned long flags; + + /* Get the context reference stored in the TTY open() */ + ctx = (struct ffl_ctx *) tty->driver_data; + tx_ctx = &ctx->tx; + rx_ctx = &ctx->rx; + + /* Update the TX and RX HSI configuration */ + _ffl_update_ctx_status(tx_ctx); + _ffl_update_ctx_status(rx_ctx); + + /* Claim the HSI port */ + err = hsi_claim_port(ctx->client, 0); + if (unlikely(err)) { + pr_err(DRVNAME ": HSI port claim failed (%d)", err); + return err; + } + + /* Setup the HSI controller */ + err = hsi_setup(ctx->client); + if (unlikely(err)) { + pr_err(DRVNAME ": HSI setup failed (%d)", err); + hsi_release_port(ctx->client); + return err; + } + + spin_lock_irqsave(&rx_ctx->lock, flags); + _ffl_ctx_clear_flag(rx_ctx, TTY_OFF_BIT|ERROR_RECOVERY_RX_ERROR_BIT); + err = _ffl_rx_push_controller(rx_ctx, &flags); + spin_unlock_irqrestore(&rx_ctx->lock, flags); + +#ifdef USE_IPC_ERROR_RECOVERY + hsi_async(ctx->client, &ctx->recovery.rx_break); +#endif + + if (unlikely(err == -EAGAIN)) + mod_timer(&rx_ctx->timer, jiffies + rx_ctx->delay); + + /* Broadcast the port ready information */ + spin_lock_irqsave(&tx_ctx->lock, flags); + ctx->hangup.last_cause |= ctx->hangup.cause; + ctx->hangup.cause = 0; + _ffl_ctx_clear_flag(tx_ctx, TTY_OFF_BIT|ERROR_RECOVERY_TX_DRAINED_BIT); + spin_unlock_irqrestore(&tx_ctx->lock, flags); + + return 0; +} + +/* Forward declarations for ffl_tty_port_shutdown() */ +static void ffl_hangup_ctx_clear(struct ffl_hangup_ctx *ctx_hangup); +#ifdef USE_IPC_ERROR_RECOVERY +static void ffl_recovery_ctx_clear(struct ffl_recovery_ctx *ctx_recovery); +#endif + +/** + * ffl_tty_port_shutdown - callback to the TTY port shutdown function + * @port: a reference to the calling TTY port + * + * The TTY port shutdown is only called on the last port close. 
+ */ +static void ffl_tty_port_shutdown(struct tty_port *port) +{ + struct ffl_ctx *ctx; + struct ffl_xfer_ctx *tx_ctx; + struct ffl_xfer_ctx *rx_ctx; + unsigned long flags; + + ctx = container_of(port, struct ffl_ctx, tty_prt); + tx_ctx = &ctx->tx; + rx_ctx = &ctx->rx; + + /* Broadcast the shutdown information */ + spin_lock_irqsave(&tx_ctx->lock, flags); + _ffl_ctx_set_flag(tx_ctx, TTY_OFF_BIT); + spin_unlock_irqrestore(&tx_ctx->lock, flags); + spin_lock_irqsave(&rx_ctx->lock, flags); + _ffl_ctx_set_flag(rx_ctx, TTY_OFF_BIT); + spin_unlock_irqrestore(&rx_ctx->lock, flags); + + /* Wait for TX write pipeline to be at least partially cleaned */ + wait_event_interruptible(ctx->tx_write_pipe_clean_event, + ffl_tx_write_pipe_is_clean(&ctx->tx)); + ffl_wait_until_ctx_sent(ctx, 0); + +#ifdef USE_IPC_ERROR_RECOVERY + /* Wait for error recovery completion */ + ffl_recovery_ctx_clear(&ctx->recovery); +#endif + + /* Wait for hangup completion */ + ffl_hangup_ctx_clear(&ctx->hangup); + + hsi_flush(ctx->client); + + del_timer_sync(&rx_ctx->timer); + spin_lock_irqsave(&rx_ctx->lock, flags); + _ffl_rx_fifo_wait_recycle(rx_ctx); + _ffl_stop_rx(rx_ctx, ctx); + spin_unlock_irqrestore(&rx_ctx->lock, flags); + + del_timer_sync(&tx_ctx->timer); + spin_lock_irqsave(&tx_ctx->lock, flags); + _ffl_tx_fifo_wait_recycle(tx_ctx); + _ffl_stop_tx(tx_ctx, &flags); + spin_unlock_irqrestore(&tx_ctx->lock, flags); + + hsi_release_port(ctx->client); +} + +/** + * ffl_tty_open - callback to the TTY open function + * @tty: a reference to the calling TTY + * @filp: a reference to the calling file + * + * Return 0 on success or a negative error code on error. + * + * The HSI layer is only initialised during the first opening. + */ +static int ffl_tty_open(struct tty_struct *tty, struct file *filp) +{ + struct ffl_ctx *ctx; + int err; + + /* Get the context reference from the driver data if already opened */ + ctx = (struct ffl_ctx *) tty->driver_data; + + /* Otherwise parse the context list to find the correct one */ + if (!ctx) { + ctx = ffl_drv.ctx[tty->index]; + tty->driver_data = ctx; + } + + if (unlikely(!ctx)) { + err = -ENODEV; + pr_err(DRVNAME ": Cannot find TTY context (%d)", err); + return err; + } + + /* Open the TTY port (calls port->activate on first opening) */ + err = tty_port_open(&ctx->tty_prt, tty, filp); + if (unlikely(err)) + pr_err(DRVNAME ": TTY open failed (%d)", err); + + /* Set the TTY_NO_WRITE_SPLIT to transfer as much data as possible on + * the first write request. This shall not introduce denial of service + * as this flag will later adapt to the available TX buffer size. 
*/ + set_bit(TTY_NO_WRITE_SPLIT, &tty->flags); + + return err; +} + +/** + * ffl_flush_tx_buffer - flushes the TX waiting FIFO + * @tty: a reference to the requesting TTY + */ +static void ffl_flush_tx_buffer(struct tty_struct *tty) +{ + struct ffl_ctx *main_ctx = (struct ffl_ctx *) tty->driver_data; + struct ffl_xfer_ctx *ctx = &main_ctx->tx; + unsigned long flags; + + spin_lock_irqsave(&ctx->lock, flags); + _ffl_tx_fifo_wait_recycle(ctx); + spin_unlock_irqrestore(&ctx->lock, flags); +} + +/** + * ffl_throw_tty_hangup - throwing a work for TTY hangup request + * @ctx: a reference to the main FFL context + * @cause: the cause for the TTY hangup request + */ +static void ffl_throw_tty_hangup(struct ffl_ctx *ctx, int cause) +{ + struct ffl_xfer_ctx *tx_ctx = &ctx->tx; + unsigned long flags; + int do_hangup; + + spin_lock_irqsave(&tx_ctx->lock, flags); + do_hangup = ((!_ffl_ctx_has_flag(tx_ctx, TTY_OFF_BIT)) && + (!ctx->hangup.cause)); + ctx->hangup.cause |= cause; + wake_up(&ctx->tx_full_pipe_clean_event); + spin_unlock_irqrestore(&tx_ctx->lock, flags); + + if (do_hangup) + queue_work(ffl_hangup_wq, &ctx->hangup.work); + +} + +/** + * ffl_tty_tx_timeout - timer function for tx timeout hangup request + * @param: a hidden reference to the main FFL context + */ +static void ffl_tty_tx_timeout(unsigned long int param) +{ + struct ffl_ctx *ctx = (struct ffl_ctx *)param; + + ffl_throw_tty_hangup(ctx, HU_TIMEOUT); +} + +/** + * ffl_do_tx_hangup - initiate a hangup due to TX timeout + * @work: a reference to work queue element + * + * Required since port shutdown calls a mutex that might sleep + */ +static void ffl_do_tx_hangup(struct work_struct *work) +{ + struct ffl_hangup_ctx *hangup_ctx; + struct ffl_ctx *ctx; + struct tty_struct *tty; + unsigned long flags; + int exit; + + hangup_ctx = container_of(work, struct ffl_hangup_ctx, work); + ctx = container_of(hangup_ctx, struct ffl_ctx, hangup); + + spin_lock_irqsave(&ctx->tx.lock, flags); + exit = _ffl_ctx_has_flag(&ctx->tx, TTY_OFF_BIT); + spin_unlock_irqrestore(&ctx->tx.lock, flags); + if (unlikely(exit)) + return; + + tty = tty_port_tty_get(&ctx->tty_prt); + if (tty) { + tty_vhangup(tty); + tty_kref_put(tty); + } +} + +/** + * ffl_tty_hangup - callback to a TTY hangup request + * @tty: a reference to the requesting TTY + */ +static void ffl_tty_hangup(struct tty_struct *tty) +{ + struct ffl_ctx *ctx = (struct ffl_ctx *) tty->driver_data; + + tty_port_hangup(&ctx->tty_prt); +} + +/** + * ffl_wait_until_sent - callback to a TTY wait until sent request + * @tty: a reference to the requesting TTY + * @timeout: a timeout value expressed in jiffies + */ +static void ffl_wait_until_sent(struct tty_struct *tty, int timeout) +{ + struct ffl_ctx *ctx = (struct ffl_ctx *) tty->driver_data; + + ffl_wait_until_ctx_sent(ctx, timeout); +} + +/** + * ffl_tty_close - callback to the TTY close function + * @tty: a reference to the calling TTY + * @filp: a reference to the calling file + * + * The HSI layer is only released during the last closing. 
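The do_ffl_tty_write() helper below coalesces writes: new data is appended to the last waiting frame while it still has room, and only then does a fresh frame get pulled from the recycling FIFO. A stand-alone sketch of that packing policy follows; the 16-byte payload capacity, the fixed two-frame "FIFO" and the demo_* names are illustrative assumptions, not the driver's values.

#include <assert.h>
#include <string.h>

#define DEMO_DATA_LENGTH 16     /* assumed per-frame payload capacity */

struct demo_frame { unsigned char data[DEMO_DATA_LENGTH]; unsigned int len; };

static int demo_write(struct demo_frame *frames, unsigned int nframes,
                      unsigned int *used, const unsigned char *buf, int len)
{
        int copied = 0;

        while (len > 0) {
                struct demo_frame *f;
                unsigned int room, chunk;

                /* Reuse the tail frame if it has room, else open a new one */
                if (*used && frames[*used - 1].len < DEMO_DATA_LENGTH)
                        f = &frames[*used - 1];
                else if (*used < nframes)
                        f = &frames[(*used)++];
                else
                        break;          /* wait FIFO is full */

                room = DEMO_DATA_LENGTH - f->len;
                chunk = (len < (int)room) ? (unsigned int)len : room;
                memcpy(f->data + f->len, buf + copied, chunk);
                f->len += chunk;
                copied += chunk;
                len -= chunk;
        }
        return copied;
}

int main(void)
{
        struct demo_frame frames[2] = { { {0}, 0 }, { {0}, 0 } };
        unsigned int used = 0;
        unsigned char msg[24];

        memset(msg, 'x', sizeof(msg));
        assert(demo_write(frames, 2, &used, msg, 10) == 10);
        assert(used == 1 && frames[0].len == 10);
        /* Second write first tops up frame 0, then spills into frame 1 */
        assert(demo_write(frames, 2, &used, msg, 14) == 14);
        assert(used == 2 && frames[0].len == 16 && frames[1].len == 8);
        return 0;
}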
+ */ +static void ffl_tty_close(struct tty_struct *tty, struct file *filp) +{ + struct ffl_ctx *main_ctx = (struct ffl_ctx *) tty->driver_data; + + if ((filp != NULL) && (likely(main_ctx != NULL))) + tty_port_close(&main_ctx->tty_prt, tty, filp); +} + +/** + * do_ffl_tty_write - writes data coming from the TTY to the TX FIFO + * @ctx: a reference to the considered TX context + * @buf: the virtual address of the current input buffer (from TTY) + * @len: the remaining buffer size + * + * Returns the total size of what has been transferred. + * + * This is a recursive function, the core of the TTY write callback function. + */ +static int do_ffl_tty_write(struct ffl_xfer_ctx *ctx, unsigned char *buf, + int len) +{ + struct hsi_msg *frame; + unsigned char *frame_ptr; + int offset, avail, copied; + unsigned int updated_actual_len; + unsigned long flags; + + offset = 0; + avail = 0; + + spin_lock_irqsave(&ctx->lock, flags); + + if (unlikely(_ffl_ctx_has_flag(ctx, TTY_OFF_BIT))) { + spin_unlock_irqrestore(&ctx->lock, flags); + return 0; + } + + frame = _ffl_fifo_tail_safe(&ctx->wait_frames); + if (frame) { + offset = frame->actual_len; +#ifdef CONFIG_HSI_FFL_ENSURE_LAST_WORD_NULL + avail = min(ctx->data_len, + (unsigned int)(FFL_DATA_LENGTH - 4)) - offset; +#else + avail = ctx->data_len - offset; +#endif + } + + if (avail == 0) { + frame = _ffl_new_frame(ctx); + if (frame) { + offset = 0; +#ifdef CONFIG_HSI_FFL_ENSURE_LAST_WORD_NULL + avail = min(ctx->data_len, + (unsigned int) (FFL_DATA_LENGTH - 4)); +#else + avail = ctx->data_len; +#endif + _ffl_fifo_wait_push(ctx, frame); + } + } + + if (frame) { + frame->status = HSI_STATUS_PENDING; + /* Do a start TX on new frames only and after having marked + * the current frame as pending, e.g. don't touch ! */ + if (offset == 0) + _ffl_start_tx(ctx, &flags); + } else { + _ffl_ctx_set_flag(ctx, TX_TTY_WRITE_PENDING_BIT); +#ifdef CONFIG_HSI_FFL_TTY_STATS + ctx->overflow_cnt++; +#endif + } + spin_unlock_irqrestore(&ctx->lock, flags); + + if (!frame) + return 0; + + copied = min(avail, len); + frame_ptr = ffl_virt(frame); + updated_actual_len = frame->actual_len + copied; + ffl_set_length(frame_ptr, updated_actual_len); + (void) memcpy(ffl_data_ptr(frame_ptr, offset), buf, copied); + + spin_lock_irqsave(&ctx->lock, flags); + if (likely(frame->status != HSI_STATUS_ERROR)) { + frame->actual_len = updated_actual_len; + ctx->buffered += copied; + ctx->room -= copied; + frame->status = HSI_STATUS_COMPLETED; + if (ctx->ctrl_len < ctx->ctrl_max) + _ffl_pop_wait_push_ctrl(ctx, &flags); + } else { + /* ERROR frames have already been popped from the wait FIFO */ + _ffl_free_frame(ctx, frame); + } + spin_unlock_irqrestore(&ctx->lock, flags); + + return copied; +} + +/** + * ffl_tty_write - writes data coming from the TTY to the TX FIFO + * @tty: a reference to the calling TTY + * @buf: the virtual address of the current input buffer (from TTY) + * @len: the TTY buffer size + * + * Returns the total size of what has been transferred in the TX FIFO + * + * This is the TTY write callback function. + */ +static int ffl_tty_write(struct tty_struct *tty, const unsigned char *buf, + int len) +{ + struct ffl_ctx *main_ctx = + (struct ffl_ctx *) tty->driver_data; + struct ffl_xfer_ctx *ctx = &main_ctx->tx; + int enough_room; + int already_copied, copied; + unsigned char *ptr; + unsigned long flags; + + spin_lock_irqsave(&ctx->lock, flags); + enough_room = + (likely(!_ffl_ctx_has_flag(ctx, TX_TTY_WRITE_ONGOING_BIT))) ? 
+ (!!(ctx->room >= len)) : -1; + _ffl_ctx_set_flag(ctx, TX_TTY_WRITE_ONGOING_BIT); + spin_unlock_irqrestore(&ctx->lock, flags); + + /* Prevent a new write if there is one currently ongoing */ + if (unlikely(enough_room < 0)) { + pr_err("%s: write ongoing !\n", __func__); + return 0; + } + if (enough_room) + set_bit(TTY_NO_WRITE_SPLIT, &tty->flags); + else + clear_bit(TTY_NO_WRITE_SPLIT, &tty->flags); + + already_copied = 0; + while (len > 0) { + ptr = (unsigned char *) &buf[already_copied]; + copied = do_ffl_tty_write(ctx, ptr, len); + if (copied == 0) + break; + already_copied += copied; + len -= copied; + } + + spin_lock_irqsave(&ctx->lock, flags); + _ffl_ctx_clear_flag(ctx, TX_TTY_WRITE_ONGOING_BIT); + wake_up(&main_ctx->tx_write_pipe_clean_event); + spin_unlock_irqrestore(&ctx->lock, flags); + + return already_copied; +} + +/** + * ffl_tty_write_room - returns the available buffer size on the TX FIFO + * @tty: a reference to the calling TTY + * + * Returns the total available size in the TX wait FIFO. + */ +static int ffl_tty_write_room(struct tty_struct *tty) +{ + struct ffl_xfer_ctx *ctx = &((struct ffl_ctx *) tty->driver_data)->tx; + unsigned int room; + unsigned long flags; + + spin_lock_irqsave(&ctx->lock, flags); + room = (likely(!_ffl_ctx_has_flag(ctx, TX_TTY_WRITE_ONGOING_BIT))) ? + ctx->room : 0; + spin_unlock_irqrestore(&ctx->lock, flags); + + return room; +} + +/** + * ffl_tty_chars_in_buffer - returns the size of the data hold in the TX FIFO + * @tty: a reference to the calling TTY + * + * Returns the total size of data hold in the TX wait FIFO. It does not take + * into account the data which has already been passed to the HSI controller + * in both in software and hardware FIFO. + */ +static int ffl_tty_chars_in_buffer(struct tty_struct *tty) +{ + struct ffl_xfer_ctx *ctx = &((struct ffl_ctx *) tty->driver_data)->tx; + unsigned int buffered; + unsigned long flags; + + spin_lock_irqsave(&ctx->lock, flags); + buffered = ctx->buffered; + spin_unlock_irqrestore(&ctx->lock, flags); + + return buffered; +} + +/** + * ffl_tty_ioctl - manages the IOCTL read and write requests + * @tty: a reference to the calling TTY + * @filp: a reference to the calling file + * @cmd: the IOCTL command + * @arg: the I/O argument to pass or retrieve data + * + * Returns 0 upon normal completion or the error code in case of an error. 
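For reviewers wanting to exercise the ioctl handler below from user space, here is a rough sketch of how a test might call it. The FFL_TTY_* request codes are the ones handled below and are assumed to be exported by the driver's public header (hsi_ffl_tty.h in this patch); the /dev/ttyIFX0 node name is a placeholder, since the actual name depends on how the TTY driver is registered.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/hsi/hsi_ffl_tty.h>      /* assumed location of FFL_TTY_* */

int main(void)
{
        unsigned int state, frame_len;
        int fd = open("/dev/ttyIFX0", O_RDWR | O_NOCTTY); /* placeholder */

        if (fd < 0)
                return 1;

        /* GET requests pass a pointer, as the handler uses put_user() */
        if (!ioctl(fd, FFL_TTY_GET_TX_STATE, &state))
                printf("TX state: %u\n", state);

        if (!ioctl(fd, FFL_TTY_GET_TX_FRAME_LEN, &frame_len))
                printf("TX frame length: %u bytes\n", frame_len);

        /* SET requests pass the value itself; zero returns -EINVAL */
        ioctl(fd, FFL_TTY_SET_TX_WAIT_MAX, 16);

        close(fd);
        return 0;
}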
+ */ +static int ffl_tty_ioctl(struct tty_struct *tty, + unsigned int cmd, unsigned long arg) +{ + struct ffl_ctx *ctx = (struct ffl_ctx *) tty->driver_data; + struct work_struct *increase_pool = NULL; + unsigned int data; +#ifdef CONFIG_HSI_FFL_TTY_STATS + struct hsi_ffl_stats stats; +#endif + unsigned long flags; + + switch (cmd) { + case FFL_TTY_RESET_TX: + spin_lock_irqsave(&ctx->tx.lock, flags); + _ffl_tx_fifo_wait_recycle(&ctx->tx); + spin_unlock_irqrestore(&ctx->tx.lock, flags); + break; + + case FFL_TTY_RESET_RX: + spin_lock_irqsave(&ctx->rx.lock, flags); + _ffl_rx_fifo_wait_recycle(&ctx->rx); + spin_unlock_irqrestore(&ctx->rx.lock, flags); + break; + + case FFL_TTY_GET_TX_STATE: + data = ffl_ctx_get_state(&ctx->tx); + return put_user(data, (unsigned int __user *) arg); + break; + + case FFL_TTY_GET_RX_STATE: + data = ffl_ctx_get_state(&ctx->rx); + return put_user(data, (unsigned int __user *) arg); + break; + + case FFL_TTY_SET_TX_WAIT_MAX: + if (arg > 0) { + spin_lock_irqsave(&ctx->tx.lock, flags); + if (arg > ctx->tx.wait_max) + increase_pool = &ctx->tx.increase_pool; + ctx->tx.wait_max = arg; + spin_unlock_irqrestore(&ctx->tx.lock, flags); + } else { + dev_dbg(&ctx->client->device, + "Invalid TX wait FIFO size %li\n", + arg); + return -EINVAL; + } + break; + + case FFL_TTY_GET_TX_WAIT_MAX: + spin_lock_irqsave(&ctx->tx.lock, flags); + data = ctx->tx.wait_max; + spin_unlock_irqrestore(&ctx->tx.lock, flags); + return put_user(data, (unsigned int __user *) arg); + break; + + case FFL_TTY_SET_RX_WAIT_MAX: + if (arg > 0) { + spin_lock_irqsave(&ctx->rx.lock, flags); + if (arg > ctx->rx.ctrl_max) + increase_pool = &ctx->rx.increase_pool; + ctx->rx.wait_max = arg; + spin_unlock_irqrestore(&ctx->rx.lock, flags); + } else { + dev_dbg(&ctx->client->device, + "Invalid RX wait FIFO size %li\n", + arg); + return -EINVAL; + } + break; + + case FFL_TTY_GET_RX_WAIT_MAX: + spin_lock_irqsave(&ctx->rx.lock, flags); + data = ctx->rx.wait_max; + spin_unlock_irqrestore(&ctx->rx.lock, flags); + return put_user(data, (unsigned int __user *) arg); + break; + + case FFL_TTY_SET_TX_CTRL_MAX: + if (arg > 0) { + spin_lock_irqsave(&ctx->tx.lock, flags); + if (arg > ctx->tx.ctrl_max) + increase_pool = &ctx->tx.increase_pool; + ctx->tx.ctrl_max = arg; + spin_unlock_irqrestore(&ctx->tx.lock, flags); + } else { + dev_dbg(&ctx->client->device, + "Invalid TX controller FIFO size %li\n", + arg); + return -EINVAL; + } + break; + + case FFL_TTY_GET_TX_CTRL_MAX: + spin_lock_irqsave(&ctx->tx.lock, flags); + data = ctx->tx.ctrl_max; + spin_unlock_irqrestore(&ctx->tx.lock, flags); + return put_user(data, (unsigned int __user *) arg); + break; + + case FFL_TTY_SET_RX_CTRL_MAX: + if (arg > 0) { + spin_lock_irqsave(&ctx->rx.lock, flags); + if (arg > ctx->rx.ctrl_max) + increase_pool = &ctx->rx.increase_pool; + ctx->rx.ctrl_max = arg; + spin_unlock_irqrestore(&ctx->rx.lock, flags); + } else { + dev_dbg(&ctx->client->device, + "Invalid RX controller FIFO size %li\n", + arg); + return -EINVAL; + } + break; + + case FFL_TTY_GET_RX_CTRL_MAX: + spin_lock_irqsave(&ctx->rx.lock, flags); + data = ctx->rx.ctrl_max; + spin_unlock_irqrestore(&ctx->rx.lock, flags); + return put_user(data, (unsigned int __user *) arg); + break; + + case FFL_TTY_SET_TX_DELAY: + spin_lock_irqsave(&ctx->tx.lock, flags); + ctx->tx.delay = from_usecs(arg); + spin_unlock_irqrestore(&ctx->tx.lock, flags); + break; + + case FFL_TTY_GET_TX_DELAY: + spin_lock_irqsave(&ctx->tx.lock, flags); + data = jiffies_to_usecs(ctx->tx.delay); + 
spin_unlock_irqrestore(&ctx->tx.lock, flags); + return put_user(data, (unsigned int __user *) arg); + break; + + case FFL_TTY_SET_RX_DELAY: + spin_lock_irqsave(&ctx->rx.lock, flags); + ctx->rx.delay = from_usecs(arg); + spin_unlock_irqrestore(&ctx->rx.lock, flags); + break; + + case FFL_TTY_GET_RX_DELAY: + spin_lock_irqsave(&ctx->rx.lock, flags); + data = jiffies_to_usecs(ctx->rx.delay); + spin_unlock_irqrestore(&ctx->rx.lock, flags); + return put_user(data, (unsigned int __user *) arg); + break; + + case FFL_TTY_SET_TX_FLOW: + switch (arg) { + case HSI_FLOW_SYNC: + case HSI_FLOW_PIPE: + spin_lock_irqsave(&ctx->tx.lock, flags); + ctx->tx.config.flow = arg; + spin_unlock_irqrestore(&ctx->tx.lock, flags); + break; + default: + return -EINVAL; + } + break; + + case FFL_TTY_GET_TX_FLOW: + spin_lock_irqsave(&ctx->tx.lock, flags); + data = ctx->tx.config.flow; + spin_unlock_irqrestore(&ctx->tx.lock, flags); + return put_user(data, (unsigned int __user *) arg); + break; + + case FFL_TTY_SET_RX_FLOW: + switch (arg) { + case HSI_FLOW_SYNC: + case HSI_FLOW_PIPE: + spin_lock_irqsave(&ctx->rx.lock, flags); + ctx->client->rx_cfg.flow = arg; + spin_unlock_irqrestore(&ctx->rx.lock, flags); + break; + default: + return -EINVAL; + } + break; + + case FFL_TTY_GET_RX_FLOW: + spin_lock_irqsave(&ctx->rx.lock, flags); + data = ctx->rx.config.flow; + spin_unlock_irqrestore(&ctx->rx.lock, flags); + return put_user(data, (unsigned int __user *) arg); + break; + + case FFL_TTY_SET_TX_MODE: + switch (arg) { + case HSI_MODE_STREAM: + case HSI_MODE_FRAME: + spin_lock_irqsave(&ctx->tx.lock, flags); + ctx->tx.config.mode = arg; + spin_unlock_irqrestore(&ctx->tx.lock, flags); + break; + default: + return -EINVAL; + } + break; + + case FFL_TTY_GET_TX_MODE: + spin_lock_irqsave(&ctx->tx.lock, flags); + data = ctx->tx.config.mode; + spin_unlock_irqrestore(&ctx->tx.lock, flags); + return put_user(data, (unsigned int __user *) arg); + break; + + case FFL_TTY_SET_RX_MODE: + switch (arg) { + case HSI_MODE_STREAM: + case HSI_MODE_FRAME: + spin_lock_irqsave(&ctx->rx.lock, flags); + ctx->rx.config.mode = arg; + spin_unlock_irqrestore(&ctx->rx.lock, flags); + break; + default: + return -EINVAL; + } + break; + + case FFL_TTY_GET_RX_MODE: + spin_lock_irqsave(&ctx->rx.lock, flags); + data = ctx->rx.config.mode; + spin_unlock_irqrestore(&ctx->rx.lock, flags); + return put_user(data, (unsigned int __user *) arg); + break; + + case FFL_TTY_SET_TX_CHANNELS: + if ((arg > 16) || (arg <= ctx->tx.channel)) + return -EINVAL; + spin_lock_irqsave(&ctx->tx.lock, flags); + ctx->tx.config.channels = arg; + spin_unlock_irqrestore(&ctx->tx.lock, flags); + break; + + case FFL_TTY_GET_TX_CHANNELS: + spin_lock_irqsave(&ctx->tx.lock, flags); + data = ctx->tx.config.channels; + spin_unlock_irqrestore(&ctx->tx.lock, flags); + return put_user(data, (unsigned int __user *) arg); + break; + + case FFL_TTY_SET_RX_CHANNELS: + if ((arg > 16) || (arg <= ctx->rx.channel)) + return -EINVAL; + spin_lock_irqsave(&ctx->rx.lock, flags); + ctx->rx.config.channels = arg; + spin_unlock_irqrestore(&ctx->rx.lock, flags); + break; + + case FFL_TTY_GET_RX_CHANNELS: + spin_lock_irqsave(&ctx->rx.lock, flags); + data = ctx->rx.config.channels; + spin_unlock_irqrestore(&ctx->rx.lock, flags); + return put_user(data, (unsigned int __user *) arg); + break; + + case FFL_TTY_SET_TX_CHANNEL: + if (arg >= ctx->tx.config.channels) + return -EINVAL; + spin_lock_irqsave(&ctx->tx.lock, flags); + ctx->tx.channel = arg; + spin_unlock_irqrestore(&ctx->tx.lock, flags); + break; + + case 
FFL_TTY_GET_TX_CHANNEL: + spin_lock_irqsave(&ctx->tx.lock, flags); + data = ctx->tx.channel; + spin_unlock_irqrestore(&ctx->tx.lock, flags); + return put_user(data, (unsigned int __user *) arg); + break; + + case FFL_TTY_SET_RX_CHANNEL: + if (arg >= ctx->rx.config.channels) + return -EINVAL; + spin_lock_irqsave(&ctx->rx.lock, flags); + ctx->rx.channel = arg; + spin_unlock_irqrestore(&ctx->rx.lock, flags); + break; + + case FFL_TTY_GET_RX_CHANNEL: + spin_lock_irqsave(&ctx->rx.lock, flags); + data = ctx->rx.channel; + spin_unlock_irqrestore(&ctx->rx.lock, flags); + return put_user(data, (unsigned int __user *) arg); + break; + + case FFL_TTY_SET_TX_FRAME_LEN: + if ((arg <= FFL_HEADER_LENGTH) || (arg > FFL_FRAME_LENGTH)) + return -EINVAL; + spin_lock_irqsave(&ctx->tx.lock, flags); + ctx->tx.data_len = ((arg+3)/4)*4 - FFL_HEADER_LENGTH; + spin_unlock_irqrestore(&ctx->tx.lock, flags); + break; + + case FFL_TTY_GET_TX_FRAME_LEN: + spin_lock_irqsave(&ctx->tx.lock, flags); + data = ctx->tx.data_len + FFL_HEADER_LENGTH; + spin_unlock_irqrestore(&ctx->tx.lock, flags); + return put_user(data, (unsigned int __user *) arg); + break; + + case FFL_TTY_SET_RX_FRAME_LEN: + if ((arg <= FFL_HEADER_LENGTH) || (arg > FFL_FRAME_LENGTH)) + return -EINVAL; + spin_lock_irqsave(&ctx->rx.lock, flags); + ctx->rx.data_len = ((arg+3)/4)*4 - FFL_HEADER_LENGTH; + spin_unlock_irqrestore(&ctx->rx.lock, flags); + break; + + case FFL_TTY_GET_RX_FRAME_LEN: + spin_lock_irqsave(&ctx->rx.lock, flags); + data = ctx->rx.data_len + FFL_HEADER_LENGTH; + spin_unlock_irqrestore(&ctx->rx.lock, flags); + return put_user(data, (unsigned int __user *) arg); + break; + + case FFL_TTY_SET_TX_ARB_MODE: + switch (arg) { + case HSI_ARB_RR: + case HSI_ARB_PRIO: + spin_lock_irqsave(&ctx->tx.lock, flags); + ctx->tx.config.arb_mode = arg; + spin_unlock_irqrestore(&ctx->tx.lock, flags); + break; + default: + return -EINVAL; + } + break; + + case FFL_TTY_GET_TX_ARB_MODE: + spin_lock_irqsave(&ctx->tx.lock, flags); + data = ctx->tx.config.arb_mode; + spin_unlock_irqrestore(&ctx->tx.lock, flags); + return put_user(data, (unsigned int __user *) arg); + break; + + case FFL_TTY_SET_TX_FREQUENCY: + if (arg == 0) + return -EINVAL; + spin_lock_irqsave(&ctx->tx.lock, flags); + ctx->tx.config.speed = arg; + spin_unlock_irqrestore(&ctx->tx.lock, flags); + break; + + case FFL_TTY_GET_TX_FREQUENCY: + spin_lock_irqsave(&ctx->tx.lock, flags); + data = ctx->tx.config.speed; + spin_unlock_irqrestore(&ctx->tx.lock, flags); + return put_user(data, (unsigned int __user *) arg); + break; + +#ifdef CONFIG_HSI_FFL_TTY_STATS + case FFL_TTY_RESET_TX_STATS: + spin_lock_irqsave(&ctx->tx.lock, flags); + ctx->tx.data_sz = 0; + ctx->tx.frame_cnt = 0; + ctx->tx.overflow_cnt = 0; + spin_unlock_irqrestore(&ctx->tx.lock, flags); + break; + + case FFL_TTY_GET_TX_STATS: + spin_lock_irqsave(&ctx->tx.lock, flags); + stats.data_sz = ctx->tx.data_sz; + stats.frame_cnt = ctx->tx.frame_cnt; + stats.overflow_cnt = ctx->tx.overflow_cnt; + spin_unlock_irqrestore(&ctx->tx.lock, flags); + return copy_to_user((void __user *) arg, &stats, + sizeof(stats)); + break; + + case FFL_TTY_RESET_RX_STATS: + spin_lock_irqsave(&ctx->rx.lock, flags); + ctx->rx.data_sz = 0; + ctx->rx.frame_cnt = 0; + ctx->rx.overflow_cnt = 0; + spin_unlock_irqrestore(&ctx->rx.lock, flags); + break; + + case FFL_TTY_GET_RX_STATS: + spin_lock_irqsave(&ctx->rx.lock, flags); + stats.data_sz = ctx->rx.data_sz; + stats.frame_cnt = ctx->rx.frame_cnt; + stats.overflow_cnt = ctx->rx.overflow_cnt; + spin_unlock_irqrestore(&ctx->rx.lock, 
flags); + return copy_to_user((void __user *) arg, &stats, + sizeof(stats)); + break; +#endif + + case FFL_TTY_MODEM_RESET: + pr_debug("IO ctrl: %s(%d) reset modem\n", + current->comm, current->pid); + pr_debug("reset modem\n"); + modem_reset(ctx); + break; + + case FFL_TTY_MODEM_STATE: + data = !(ctx->reset.ongoing); + return put_user(data, (unsigned int __user *)arg); + break; + + case FFL_TTY_GET_HANGUP_REASON: + spin_lock_irqsave(&ctx->tx.lock, flags); + data = ctx->hangup.last_cause; + ctx->hangup.last_cause = 0; + spin_unlock_irqrestore(&ctx->tx.lock, flags); + return put_user(data, (unsigned int __user *)arg); + break; + + default: + return -ENOIOCTLCMD; +} + + if (increase_pool) + (void) queue_work(ffl_recycle_wq, increase_pool); + + return 0; +} + +/* + * Modem reset interrupt service routine + */ + +/** + * ffl_reset_isr - modem reset / core dump interrupt service routine + * @irq: interrupt number + * @dev: our device pointer + */ +static irqreturn_t ffl_reset_isr(int irq, void *dev) +{ + struct ffl_ctx *ctx; + struct ffl_xfer_ctx *tx_ctx; + struct hsi_client *client; + struct hsi_mid_platform_data *pd = NULL; + int status, cause = 0; + + ctx = (struct ffl_ctx *)dev; + tx_ctx = &ctx->tx; + client = ctx->client; + pd = client->device.platform_data; + + if (unlikely(!pd)) + return IRQ_HANDLED; + + if (irq == ctx->reset.irq) { + status = gpio_get_value(pd->gpio_mdm_rst_out); + dev_dbg(&client->device, "GPIO RESET_OUT %x", status); + /* Prevent issuing hang-up for the usual reset toggling whilst + * the modem is resetting */ + cause = (ctx->reset.ongoing) ? 0 : HU_RESET; + } else if (irq == ctx->reset.cd_irq) { + status = gpio_get_value(pd->gpio_fcdp_rb); + dev_dbg(&client->device, "GPIO CORE_DUMP %x", status); + /* Skip fake core dump sequences */ + cause = (ctx->reset.ongoing) ? 0 : HU_COREDUMP; + } + + if (cause) { + if (cause == HU_RESET) + ctx->reset.ongoing = 1; + ffl_throw_tty_hangup(ctx, cause); + } + + return IRQ_HANDLED; +} + +#ifdef USE_IPC_ERROR_RECOVERY +/* + * Error recovery mechanisms + */ + +/** + * do_recovery_drain_unless - helper function for firing RX or TX drain work + * @xfer_ctx: a reference to the requesting transfer context + * @condition: set of flags for preventing queueing of the work + */ +static void do_recovery_drain_unless(struct ffl_xfer_ctx *xfer_ctx, + unsigned int condition) +{ + struct ffl_ctx *main_ctx = main_ctx(xfer_ctx); + struct ffl_recovery_ctx *recovery_ctx = &main_ctx->recovery; + unsigned long flags; + + spin_lock_irqsave(&xfer_ctx->lock, flags); + if (!_ffl_ctx_has_any_flag(xfer_ctx, condition)) { + _ffl_ctx_set_flag(xfer_ctx, ERROR_RECOVERY_ONGOING_BIT); + if (xfer_ctx == &main_ctx->rx) { + del_timer(&recovery_ctx->rx_drain_timer); + queue_work(ffl_rx_drain_wq, + &recovery_ctx->do_rx_drain); + } else { + queue_work(ffl_tx_drain_wq, + &recovery_ctx->do_tx_drain); + } + } + spin_unlock_irqrestore(&xfer_ctx->lock, flags); +} + +/** + * ffl_recovery_rx_drain_timeout - timer function for RX drain recovery request + * @param: a reference to the requesting main context + */ +static void ffl_recovery_rx_drain_timeout(unsigned long int param) +{ + struct ffl_xfer_ctx *rx_ctx = (struct ffl_xfer_ctx *) param; + + do_recovery_drain_unless(rx_ctx, ERROR_RECOVERY_ONGOING_BIT); +} + +/** + * ffl_destruct_break - callback for break frame destruction + * @frame: a reference to the break frame to delete + */ +static void ffl_destruct_break(struct hsi_msg *frame) +{ + /* Do nothing ! 
*/ +} + +/** + * ffl_complete_rx_break - callback for RX break frame completion + * @frame: a reference to the completed break frame + */ +static void ffl_complete_rx_break(struct hsi_msg *frame) +{ + struct ffl_xfer_ctx *rx_ctx = &((struct ffl_ctx *)frame->context)->rx; + + do_recovery_drain_unless(rx_ctx, + ERROR_RECOVERY_ONGOING_BIT|TTY_OFF_BIT); +} + +/** + * ffl_tx_is_drained - checks if the TX path has been drained or not + * @ctx: a reference to the TX context to consider + */ +static __must_check int ffl_tx_is_drained(struct ffl_xfer_ctx *ctx) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(&ctx->lock, flags); + ret = _ffl_ctx_has_flag(ctx, ERROR_RECOVERY_TX_DRAINED_BIT); + spin_unlock_irqrestore(&ctx->lock, flags); + + return ret; +} + +/** + * ffl_recovery_tx_drain - initiate a TX draining sequence + * @work: a reference to work queue element + */ +static void ffl_recovery_tx_drain(struct work_struct *work) +{ + struct ffl_recovery_ctx *recovery_ctx; + struct ffl_ctx *main_ctx; + struct ffl_xfer_ctx *rx_ctx, *tx_ctx; + unsigned long flags; + + recovery_ctx = container_of(work, struct ffl_recovery_ctx, do_tx_drain); + main_ctx = container_of(recovery_ctx, struct ffl_ctx, recovery); + tx_ctx = &main_ctx->tx; + rx_ctx = &main_ctx->rx; + + /* Prevent re-entry in the TX drain sequence and ensure that no more TX + * messages will be sent */ + spin_lock_irqsave(&tx_ctx->lock, flags); + if (_ffl_ctx_has_flag(tx_ctx, ERROR_RECOVERY_ONGOING_BIT)) { + spin_unlock_irqrestore(&tx_ctx->lock, flags); + return; + } + _ffl_ctx_set_flag(tx_ctx, ERROR_RECOVERY_ONGOING_BIT); + spin_unlock_irqrestore(&tx_ctx->lock, flags); + + /* Do a start_tx() to ensure ACWAKE is high */ + hsi_start_tx(main_ctx->client); + + /* Wait for the TX path to be empty with a timeout */ + ffl_wait_until_ctx_sent(main_ctx, RECOVERY_TX_DRAIN_TIMEOUT_JIFFIES); + + /* Send a break message */ + hsi_async(main_ctx->client, &main_ctx->recovery.tx_break); + + /* Set the TX drain as being complete */ + spin_lock_irqsave(&tx_ctx->lock, flags); + _ffl_ctx_set_flag(tx_ctx, ERROR_RECOVERY_TX_DRAINED_BIT); + wake_up(&recovery_ctx->tx_drained_event); + spin_unlock_irqrestore(&tx_ctx->lock, flags); + + /* Schedule a RX drain if no BREAK has been received before a timeout */ + spin_lock_irqsave(&rx_ctx->lock, flags); + if (!_ffl_ctx_has_flag(rx_ctx, ERROR_RECOVERY_ONGOING_BIT)) + mod_timer(&main_ctx->recovery.rx_drain_timer, + jiffies + RECOVERY_BREAK_RESPONSE_TIMEOUT_JIFFIES); + spin_unlock_irqrestore(&rx_ctx->lock, flags); +} + +/** + * ffl_recovery_rx_drain - initiate a RX draining sequence + * @work: a reference to work queue element + */ +static void ffl_recovery_rx_drain(struct work_struct *work) +{ + struct ffl_recovery_ctx *recovery_ctx; + struct ffl_ctx *main_ctx; + struct ffl_xfer_ctx *rx_ctx, *tx_ctx; + unsigned long flags; + int err = 0; + + recovery_ctx = container_of(work, struct ffl_recovery_ctx, do_rx_drain); + main_ctx = container_of(recovery_ctx, struct ffl_ctx, recovery); + tx_ctx = &main_ctx->tx; + rx_ctx = &main_ctx->rx; + + /* Ensure all received messages are tagged as error as they will be + * flushed and these messages shall not re-enter the controller */ + spin_lock_irqsave(&rx_ctx->lock, flags); + _ffl_ctx_set_flag(rx_ctx, ERROR_RECOVERY_ONGOING_BIT); + spin_unlock_irqrestore(&rx_ctx->lock, flags); + + /* Force CAREADY low not to receive further messages and flush what's + * in the controller */ + hsi_flush(main_ctx->client); + + /* Wait for the TX side to be drained prior starting the recovery */ + 
do_recovery_drain_unless(tx_ctx, ERROR_RECOVERY_ONGOING_BIT); + wait_event_interruptible(recovery_ctx->tx_drained_event, + ffl_tx_is_drained(tx_ctx)); + + /* Wait for a guard time to ensure a proper recovery on both sides */ + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(RECOVERY_TO_NORMAL_DELAY_JIFFIES); + + /* Error recovery is no longer in use in both directions */ + spin_lock_irqsave(&rx_ctx->lock, flags); + _ffl_ctx_clear_flag(rx_ctx, ERROR_RECOVERY_ONGOING_BIT | + ERROR_RECOVERY_RX_ERROR_BIT); + if (!_ffl_ctx_has_flag(rx_ctx, TTY_OFF_BIT)) { + hsi_async(main_ctx->client, &recovery_ctx->rx_break); + err = _ffl_rx_push_controller(rx_ctx, &flags); + } + spin_unlock_irqrestore(&rx_ctx->lock, flags); + + if (unlikely(err == -EAGAIN)) + mod_timer(&rx_ctx->timer, jiffies + rx_ctx->delay); + + spin_lock_irqsave(&tx_ctx->lock, flags); + _ffl_ctx_clear_flag(tx_ctx, ERROR_RECOVERY_ONGOING_BIT | + ERROR_RECOVERY_TX_DRAINED_BIT); + if (tx_ctx->wait_len > 0) + _ffl_pop_wait_push_ctrl_safe(tx_ctx, &flags); + spin_unlock_irqrestore(&tx_ctx->lock, flags); + + /* Do a stop_tx() to release ACWAKE forcing */ + hsi_stop_tx(main_ctx->client); +} + +/** + * ffl_recovery_ctx_init - initialises a recovery context after its creation + * @ctx_recovery: a reference to the considered recovery context + */ +static void ffl_recovery_ctx_init(struct ffl_recovery_ctx *ctx_recovery) +{ + struct ffl_ctx *ctx = + container_of(ctx_recovery, struct ffl_ctx, recovery); + + init_waitqueue_head(&ctx_recovery->tx_drained_event); + init_timer(&ctx_recovery->rx_drain_timer); + ctx_recovery->rx_drain_timer.function = ffl_recovery_rx_drain_timeout; + ctx_recovery->rx_drain_timer.data = (unsigned long int) &ctx->rx; + + INIT_WORK(&ctx_recovery->do_tx_drain, ffl_recovery_tx_drain); + INIT_WORK(&ctx_recovery->do_rx_drain, ffl_recovery_rx_drain); + + ctx_recovery->rx_break.cl = ctx->client; + ctx_recovery->rx_break.context = ctx; + ctx_recovery->rx_break.complete = &ffl_complete_rx_break; + ctx_recovery->rx_break.destructor = &ffl_destruct_break; + ctx_recovery->rx_break.ttype = HSI_MSG_READ; + ctx_recovery->rx_break.break_frame = 1; + + ctx_recovery->tx_break.cl = ctx->client; + ctx_recovery->tx_break.context = ctx; + ctx_recovery->tx_break.complete = &ffl_destruct_break; + ctx_recovery->tx_break.destructor = &ffl_destruct_break; + ctx_recovery->tx_break.ttype = HSI_MSG_WRITE; + ctx_recovery->tx_break.break_frame = 1; +} + +/** + * ffl_recovery_ctx_clear - clears a recovery context prior to its deletion + * @ctx_recovery: a reference to the considered recovery context + */ +static void ffl_recovery_ctx_clear(struct ffl_recovery_ctx *ctx_recovery) +{ + /* Flush any running TX drain work (if started) first to trigger the + * RX drain timer if necessary then if this latter timer was pending, + * queue the RX drain work ASAP before flushing it (unless it has not + * been started in any case) */ + flush_work(&ctx_recovery->do_tx_drain); + if (del_timer_sync(&ctx_recovery->rx_drain_timer)) + queue_work(ffl_rx_drain_wq, &ctx_recovery->do_rx_drain); + flush_work(&ctx_recovery->do_rx_drain); +} +#endif + +/* + * Reset and hangup contexts initialisation and destruction helper functions + */ + +/** + * ffl_request_gpio - helper function for requesting a GPIO + * @gpio: the GPIO to use + * @dir: the direction of the GPIO (1: output, 0: input) + * @high_not_low: the default state of the output GPIO + * @dir_may_change: a flag stating if the direction of the GPIO can be changed + * @label: the label associated to this GPIO + * 
@name: the name of the GPIO + * @info: the information to display on error and logs + * + * Returns 0 if successful or an error code. + */ +static int ffl_request_gpio(unsigned int gpio, int dir, int high_not_low, + bool dir_may_change, const char *label, + const char *name, const char *info) +{ + int err; + + err = gpio_request(gpio, label); + if (err) + goto no_gpio_request; + + if (dir) + err = gpio_direction_output(gpio, high_not_low); + else + err = gpio_direction_input(gpio); + if (err) + goto exit_error; + + err = gpio_export(gpio, dir_may_change); + if (err) + goto exit_error; + + pr_info(DRVNAME ": GPIO %s %d\n", name, gpio); + + return 0; + +exit_error: + gpio_free(gpio); +no_gpio_request: + pr_err(DRVNAME ": unable to configure GPIO%d (%s)", gpio, info); + return err; +} + +#define ffl_request_output_gpio(label, name, info, \ + dir_may_change, high_not_low) \ + do { \ + err = ffl_request_gpio(pd->gpio_##name, 1, high_not_low, \ + dir_may_change, label, #name, info); \ + if (err) \ + goto no_##name##_gpio;\ + } while (0) + +#define ffl_request_input_gpio(label, name, info) \ + do { \ + err = ffl_request_gpio(pd->gpio_##name, 0, 0, 0, label, \ + #name, info); \ + if (err) \ + goto no_##name##_gpio;\ + } while (0) + +/** + * do_ffl_request_irq - helper function for requesting a GPIO-based interrupt + * @irq: a reference to the irq + * @gpio: the GPIO to use + * @flags: the interrupt flags + * @ctx: the FFL context to associate to the interrupt service routine + * @info: the information to display on error and logs + * + * This function is requesting a GPIO-based interrupt and setting the ISR to + * ffl_reset_isr(). + * + * Returns 0 if successful or an error code. + */ +static int do_ffl_request_irq(int *irq, unsigned int gpio, unsigned long flags, + struct ffl_ctx *ctx, const char *info) +{ + int err; + + *irq = gpio_to_irq(gpio); + if (*irq < 0) { + *irq = 0; + pr_err(DRVNAME ": unable to map IRQ from GPIO%d (%s)", + gpio, info); + return -ENODEV; + } + + err = request_irq(*irq, ffl_reset_isr, flags, DRVNAME, (void *)ctx); + if (err) + pr_err(DRVNAME ": unable to request IRQ%d from GPIO%d (%s)", + *irq, gpio, info); + else + pr_info(DRVNAME ": IRQ %d (%s)\n", *irq, info); + + return err; +} + +#define ffl_request_irq(irq_id, flags, name, info) \ + do { \ + err = do_ffl_request_irq(&ctx_reset->irq_id, pd->gpio_##name, \ + flags, ctx, info); \ + if (err) \ + goto no_##name##_irq; \ + } while (0) + +/** + * ffl_reset_ctx_init - initialises a reset context after its creation + * @ctx_reset: a reference to the considered reset context + * @pd: a reference to the HSI platform data configuration + * Returns 0 if successful or an error code. 
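As a readability aid, here is a hypothetical expansion of one ffl_request_output_gpio() invocation as used in ffl_reset_ctx_init() below; the err and pd locals and the no_mdm_rst_bbn_gpio label belong to that caller, so this is an illustrative sketch rather than additional driver code:

	/* ffl_request_output_gpio("ifxHSIModem", mdm_rst_bbn, "RESET", 1, 1);
	 * expands to roughly: */
	do {
		err = ffl_request_gpio(pd->gpio_mdm_rst_bbn, 1, 1, 1,
				       "ifxHSIModem", "mdm_rst_bbn", "RESET");
		if (err)
			goto no_mdm_rst_bbn_gpio;
	} while (0);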
+ */ +static int ffl_reset_ctx_init(struct ffl_reset_ctx *ctx_reset, + struct hsi_mid_platform_data *pd) +{ + struct ffl_ctx *ctx = container_of(ctx_reset, struct ffl_ctx, reset); + int err; + + ctx_reset->ongoing = 1; + + ffl_request_output_gpio("ifxHSIModem", mdm_rst_bbn, "RESET", 1, 1); + ffl_request_output_gpio("ifxHSIModem", mdm_pwr_on, "ON", 1, 0); + ffl_request_input_gpio("ifxHSIModem", mdm_rst_out, "RST_OUT"); + ffl_request_input_gpio("ifxHSIModem", fcdp_rb, "CORE DUMP"); + + ffl_request_irq(irq, IRQF_TRIGGER_RISING|IRQF_TRIGGER_FALLING, + mdm_rst_out, "RST_OUT"); + ffl_request_irq(cd_irq, IRQF_TRIGGER_RISING, fcdp_rb, "CORE DUMP"); + + modem_power(ctx); + + return 0; + +no_fcdp_rb_irq: + free_irq(ctx_reset->irq, (void *)ctx); +no_mdm_rst_out_irq: + gpio_free(pd->gpio_fcdp_rb); +no_fcdp_rb_gpio: + gpio_free(pd->gpio_mdm_rst_out); +no_mdm_rst_out_gpio: + gpio_free(pd->gpio_mdm_pwr_on); +no_mdm_pwr_on_gpio: + gpio_free(pd->gpio_mdm_rst_bbn); +no_mdm_rst_bbn_gpio: + return err; +} + +/** + * ffl_reset_ctx_clear - clears a reset context prior to its deletion + * @ctx_reset: a reference to the considered reset context + * @pd: a reference to the HSI platform data configuration + */ +static void ffl_reset_ctx_clear(struct ffl_reset_ctx *ctx_reset, + struct hsi_mid_platform_data *pd) +{ + struct ffl_ctx *ctx = container_of(ctx_reset, struct ffl_ctx, reset); + + free_irq(ctx_reset->cd_irq, (void *)ctx); + free_irq(ctx_reset->irq, (void *)ctx); + + gpio_free(pd->gpio_fcdp_rb); + gpio_free(pd->gpio_mdm_rst_out); + gpio_free(pd->gpio_mdm_pwr_on); + gpio_free(pd->gpio_mdm_rst_bbn); +} + +/** + * ffl_hangup_ctx_init - initialises a hangup context after its creation + * @ctx_hangup: a reference to the considered hangup context + */ +static void ffl_hangup_ctx_init(struct ffl_hangup_ctx *ctx_hangup) +{ + struct ffl_ctx *ctx = container_of(ctx_hangup, struct ffl_ctx, hangup); + + init_timer(&ctx_hangup->timer); + INIT_WORK(&ctx_hangup->work, ffl_do_tx_hangup); + ctx_hangup->timer.function = ffl_tty_tx_timeout; + ctx_hangup->timer.data = (unsigned long int) ctx; +} + +/** + * ffl_hangup_ctx_clear - clears a hangup context prior to its deletion + * @ctx_hangup: a reference to the considered hangup context + */ +static void ffl_hangup_ctx_clear(struct ffl_hangup_ctx *ctx_hangup) +{ + struct ffl_ctx *ctx = container_of(ctx_hangup, struct ffl_ctx, hangup); + struct ffl_xfer_ctx *tx_ctx = &ctx->tx; + unsigned long flags; + int is_hunging_up; + + spin_lock_irqsave(&tx_ctx->lock, flags); + is_hunging_up = (ctx_hangup->cause); + spin_unlock_irqrestore(&tx_ctx->lock, flags); + + /* No need to wait for the end of the calling work! */ + if (is_hunging_up) + return; + + if (del_timer_sync(&ctx_hangup->timer)) + cancel_work_sync(&ctx_hangup->work); + else + flush_work(&ctx_hangup->work); +} + +/* + * RX/TX context initialisation and destruction helper functions + */ + +/** + * ffl_xfer_ctx_init - initialise a TX or RX context after its creation + * @ctx: a reference to the considered TX or RX context + * @main_ctx: a reference to its related main context + * @channel: the HSI channel for FFL + * @delay: the initial delay for the timer related to the TX or RX context + * @wait_max: the maximal size of the wait FIFO for this context + * @ctrl_max: the maximal size of the HSI controller FIFO for this context + * @config: a reference to the default HSI interface configuration + * + * This helper function is simply filling in the initial data into a newly + * created TX or RX context. 
+ */ +static void ffl_xfer_ctx_init(struct ffl_xfer_ctx *ctx, + struct ffl_ctx *main_ctx, + unsigned int channel, + unsigned int delay, + unsigned int wait_max, + unsigned int ctrl_max, + const struct hsi_config *config) +{ + INIT_LIST_HEAD(&ctx->wait_frames); + INIT_LIST_HEAD(&ctx->recycled_frames); + init_timer(&ctx->timer); + spin_lock_init(&ctx->lock); + ctx->timer.data = (unsigned long) ctx; + ctx->delay = from_usecs(delay); + ctx->wait_len = 0; + ctx->ctrl_len = 0; + ctx->all_len = 0; + ctx->state = IDLE; + ctx->wait_max = wait_max; + ctx->ctrl_max = ctrl_max; + ctx->buffered = 0; + ctx->room = 0; + ctx->main_ctx = main_ctx; + ctx->data_len = FFL_DATA_LENGTH; + ctx->channel = channel; + INIT_WORK(&ctx->increase_pool, ffl_increase_pool_of_frames); +#ifdef CONFIG_HSI_FFL_TTY_STATS + ctx->data_sz = 0; + ctx->frame_cnt = 0; + ctx->overflow_cnt = 0; +#endif + ctx->config = *config; +} + +/** + * ffl_xfer_ctx_clear - clears a TX or RX context prior to its deletion + * @ctx: a reference to the considered TX or RX context + * + * This helper function is simply calling the relevant destructors and reseting + * the context information. + */ +static void ffl_xfer_ctx_clear(struct ffl_xfer_ctx *ctx) +{ + struct ffl_ctx *main_ctx = main_ctx(ctx); + unsigned long flags; + + spin_lock_irqsave(&ctx->lock, flags); + ctx->wait_max = 0; + ctx->ctrl_max = 0; + ctx->state = IDLE; + del_timer_sync(&ctx->timer); + _ffl_fifo_frames_delete(&ctx->wait_frames, main_ctx); + _ffl_fifo_frames_delete(&ctx->recycled_frames, main_ctx); + spin_unlock_irqrestore(&ctx->lock, flags); + flush_work(&ctx->increase_pool); +} + +/* + * Protocol driver handling routines + */ + +/* + * ffl_termios_init - default termios initialisation + */ +static const struct ktermios ffl_termios_init = { + .c_iflag = 0, + .c_oflag = 0, + .c_cflag = B115200 | CS8, + .c_lflag = 0, + .c_cc = INIT_C_CC, + .c_ispeed = 0, + .c_ospeed = 0 +}; + +/* + * ffl_driver_tty_ops - table of supported TTY operations + */ +static const struct tty_operations ffl_driver_tty_ops = { + .open = ffl_tty_open, + .close = ffl_tty_close, + .write = ffl_tty_write, + .write_room = ffl_tty_write_room, + .chars_in_buffer = ffl_tty_chars_in_buffer, + .ioctl = ffl_tty_ioctl, + .hangup = ffl_tty_hangup, + .wait_until_sent = ffl_wait_until_sent, + .unthrottle = ffl_rx_forward_resume, + .flush_buffer = ffl_flush_tx_buffer, +}; + +/* + * ffl_port_tty_ops - table of supported TTY port operations + */ +static const struct tty_port_operations ffl_port_tty_ops = { + .activate = ffl_tty_port_activate, + .shutdown = ffl_tty_port_shutdown, +}; + +/** + * ffl_driver_probe - creates a new context in the FFL driver + * @dev: a reference to the HSI device requiring a context + * + * Returns 0 upon success or an error in case of an error + * + * This function is creating a new context per HSI controller requiring a + * FFL protocol driver, creates the related TTY port and TTY entry in the + * filesystem. + */ +static int __init ffl_driver_probe(struct device *dev) +{ + struct hsi_client *client = to_hsi_client(dev); + struct hsi_mid_platform_data *pd = client->device.platform_data; + struct tty_port *tty_prt; + struct ffl_ctx *ctx; + int i = 0; + int l = -1; + int err; + + dev_dbg(dev, "ffl_driver_probe entered\n"); + + /* Get a free line number and check that the client is not already + * registered (is that possible anyway ?) 
*/ + for (i = FFL_TTY_MAX_LINES-1; i >= 0; --i) { + if (!(ffl_drv.ctx[i])) { + l = i; + } else { + if (unlikely(ffl_drv.ctx[i]->client == client)) { + dev_dbg(dev, "ignoring subsequent detection"); + return -ENODEV; + } + } + } + + /* No line is available... */ + if (l < 0) { + dev_dbg(dev, "no line available\n"); + return -ENODEV; + } + + /* Create the main context */ + ctx = kzalloc(sizeof(struct ffl_ctx), GFP_KERNEL); + if (unlikely(!ctx)) { + pr_err(DRVNAME ": Cannot allocate main context"); + return -ENOMEM; + } + + client->hsi_start_rx = ffl_start_rx; + client->hsi_stop_rx = ffl_stop_rx; + hsi_client_set_drvdata(client, (void *) ctx); + + ctx->index = l; + ctx->client = client; + + /* The parent of our device is the HSI port, the parent of the HSI + * port is the HSI controller device */ + ctx->controller = dev->parent->parent; + + init_waitqueue_head(&ctx->tx_full_pipe_clean_event); + init_waitqueue_head(&ctx->tx_write_pipe_clean_event); + + ffl_xfer_ctx_init(&ctx->tx, ctx, CONFIG_HSI_FFL_TTY_CHANNEL, + FFL_TX_DELAY, FFL_TX_WAIT_FIFO, FFL_TX_CTRL_FIFO, + &client->tx_cfg); + ffl_xfer_ctx_init(&ctx->rx, ctx, CONFIG_HSI_FFL_TTY_CHANNEL, + FFL_RX_DELAY, FFL_RX_WAIT_FIFO, FFL_RX_CTRL_FIFO, + &client->rx_cfg); + + ctx->tx.timer.function = ffl_stop_tx; + ctx->rx.timer.function = ffl_rx_forward_retry; + + ffl_drv.ctx[l] = ctx; + + /* Warn if no DMA capability has been found */ + if (!is_device_dma_capable(ctx->controller)) + pr_warn(DRVNAME ": HSI device is not DMA capable"); + + /* Create the TTY port */ + tty_prt = &(ctx->tty_prt); + tty_port_init(tty_prt); + tty_prt->ops = &ffl_port_tty_ops; + + /* Register the TTY device */ + if (unlikely(!tty_register_device(ffl_drv.tty_drv, l, dev))) { + err = -EFAULT; + pr_err(DRVNAME ": TTY device registration failed (%d)", err); + goto no_tty_device_registration; + } + + /* Initialise the hangup context */ + ffl_hangup_ctx_init(&ctx->hangup); + + /* Initialise the reset context */ + err = ffl_reset_ctx_init(&ctx->reset, pd); + if (unlikely(err)) + goto no_reset_ctx_initialisation; + +#ifdef USE_IPC_ERROR_RECOVERY + /* Initialise the error recovery context */ + ffl_recovery_ctx_init(&ctx->recovery); +#endif + + /* Allocate FIFO in background */ + (void) queue_work(ffl_recycle_wq, &ctx->tx.increase_pool); + (void) queue_work(ffl_recycle_wq, &ctx->rx.increase_pool); + + dev_dbg(dev, "ffl_driver_probe completed\n"); + return 0; + +no_reset_ctx_initialisation: + ffl_hangup_ctx_clear(&ctx->hangup); + tty_unregister_device(ffl_drv.tty_drv, ctx->index); + +no_tty_device_registration: + client->hsi_start_rx = NULL; + client->hsi_stop_rx = NULL; + hsi_client_set_drvdata(client, NULL); + + ffl_xfer_ctx_clear(&ctx->tx); + ffl_xfer_ctx_clear(&ctx->rx); + + ffl_drv.ctx[ctx->index] = NULL; + kfree(ctx); + + return err; +} + +/** + * ffl_driver_removes - removes a context from the FFL driver + * @dev: a reference to the device requiring the context + * + * Returns 0 on success or an error code + * + * This function is freeing all resources hold by the context attached to the + * requesting HSI device. 
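For context, a hypothetical user-space sketch of the ioctl interface served by ffl_tty_ioctl() above. The device node name is an assumption (it depends on TTYNAME and the probed line index), and the request codes come from <linux/hsi/hsi_ffl_tty.h>:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/hsi/hsi_ffl_tty.h>

	int main(void)
	{
		unsigned int len;
		int fd = open("/dev/tty_ffl0", O_RDWR | O_NOCTTY); /* assumed node name */

		if (fd < 0)
			return 1;
		/* GET ioctls return their value through the user pointer argument */
		if (ioctl(fd, FFL_TTY_GET_TX_FRAME_LEN, &len) == 0)
			printf("TX frame length: %u bytes\n", len);
		close(fd);
		return 0;
	}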
+ */ +static int __exit ffl_driver_remove(struct device *dev) +{ + struct hsi_client *client = to_hsi_client(dev); + struct ffl_ctx *ctx = + (struct ffl_ctx *) hsi_client_drvdata(client); + +#ifdef USE_IPC_ERROR_RECOVERY + ffl_recovery_ctx_clear(&ctx->recovery); +#endif + + ffl_hangup_ctx_clear(&ctx->hangup); + + ffl_reset_ctx_clear(&ctx->reset, client->device.platform_data); + + tty_unregister_device(ffl_drv.tty_drv, ctx->index); + + client->hsi_start_rx = NULL; + client->hsi_stop_rx = NULL; + hsi_client_set_drvdata(client, NULL); + + ffl_xfer_ctx_clear(&ctx->tx); + ffl_xfer_ctx_clear(&ctx->rx); + + ffl_drv.ctx[ctx->index] = NULL; + kfree(ctx); + + return 0; +} + +/* + * Protocol driver main init / exit functions + */ + +/* + * ffl_driver_setup - configuration of the FFL driver + */ +static struct hsi_client_driver ffl_driver_setup = { + .driver = { + .name = DRVNAME, + .owner = THIS_MODULE, + .probe = ffl_driver_probe, + .remove = ffl_driver_remove, + }, +}; + +/** + * ffl_driver_init - initialises the FFL driver common parts + * + * Returns 0 on success or an error code + */ +static int __init ffl_driver_init(void) +{ + struct tty_driver *tty_drv = NULL; + int err; + int i; + + /* Clear the initial context content */ + for (i = 0; i < FFL_TTY_MAX_LINES; i++) + ffl_drv.ctx[i] = NULL; + + /* Create the workqueue for allocating frames */ + ffl_recycle_wq = create_singlethread_workqueue(DRVNAME "-rc"); + if (unlikely(!ffl_recycle_wq)) { + pr_err(DRVNAME ": unable to create pool-handling workqueue"); + err = -EFAULT; + goto out; + } + + /* Create the workqueue for tx hangup */ + ffl_hangup_wq = create_singlethread_workqueue(DRVNAME "-hg"); + if (unlikely(!ffl_hangup_wq)) { + pr_err(DRVNAME ": unable to create tx hangup workqueue"); + err = -EFAULT; + goto no_tx_hangup_wq; + } + +#ifdef USE_IPC_ERROR_RECOVERY + /* Create the workqueue for TX drain recovery */ + ffl_tx_drain_wq = create_singlethread_workqueue(DRVNAME "-td"); + if (unlikely(!ffl_tx_drain_wq)) { + pr_err(DRVNAME ": unable to create rx error workqueue"); + err = -EFAULT; + goto no_tx_drain_wq; + } + + /* Create the workqueue for RX drain recovery */ + ffl_rx_drain_wq = create_singlethread_workqueue(DRVNAME "-rd"); + if (unlikely(!ffl_rx_drain_wq)) { + pr_err(DRVNAME ": unable to create rx break workqueue"); + err = -EFAULT; + goto no_rx_drain_wq; + } +#endif + + /* Allocate the TTY interface */ + tty_drv = alloc_tty_driver(FFL_TTY_MAX_LINES); + if (unlikely(!tty_drv)) { + pr_err(DRVNAME ": Cannot allocate TTY driver"); + err = -ENOMEM; + goto no_tty_driver_allocation; + } + + /* Configure the TTY */ + tty_drv->magic = TTY_DRIVER_MAGIC; + tty_drv->owner = THIS_MODULE; + tty_drv->driver_name = DRVNAME; + tty_drv->name = TTYNAME; + tty_drv->minor_start = 0; + tty_drv->num = FFL_TTY_MAX_LINES; + tty_drv->type = TTY_DRIVER_TYPE_SERIAL; + tty_drv->subtype = SERIAL_TYPE_NORMAL; + tty_drv->flags = TTY_DRIVER_REAL_RAW | + TTY_DRIVER_DYNAMIC_DEV; + tty_drv->init_termios = ffl_termios_init; + + ffl_drv.tty_drv = tty_drv; + + tty_set_operations(tty_drv, &ffl_driver_tty_ops); + + /* Register the TTY prior to probing the HSI devices */ + err = tty_register_driver(tty_drv); + if (unlikely(err)) { + pr_err(DRVNAME ": TTY driver registration failed (%d)", err); + goto no_tty_driver_registration; + } + + /* Now, register the client */ + err = hsi_register_client_driver(&ffl_driver_setup); + if (unlikely(err)) { + pr_err(DRVNAME + ": error whilst registering the " DRVNAME " driver %d", + err); + goto no_hsi_client_registration; + } + + 
pr_debug(DRVNAME ": driver initialised"); + + return 0; + +no_hsi_client_registration: + tty_unregister_driver(ffl_drv.tty_drv); +no_tty_driver_registration: + put_tty_driver(ffl_drv.tty_drv); + ffl_drv.tty_drv = NULL; +no_tty_driver_allocation: +#ifdef USE_IPC_ERROR_RECOVERY + destroy_workqueue(ffl_rx_drain_wq); +no_rx_drain_wq: + destroy_workqueue(ffl_tx_drain_wq); +no_tx_drain_wq: +#endif + destroy_workqueue(ffl_hangup_wq); +no_tx_hangup_wq: + destroy_workqueue(ffl_recycle_wq); +out: + return err; +} +module_init(ffl_driver_init); + +/** + * ffl_driver_exit - frees the resources taken by the FFL driver common parts + */ +static void __exit ffl_driver_exit(void) +{ + hsi_unregister_client_driver(&ffl_driver_setup); + + tty_unregister_driver(ffl_drv.tty_drv); + put_tty_driver(ffl_drv.tty_drv); + ffl_drv.tty_drv = NULL; + +#ifdef USE_IPC_ERROR_RECOVERY + destroy_workqueue(ffl_rx_drain_wq); + destroy_workqueue(ffl_tx_drain_wq); +#endif + destroy_workqueue(ffl_hangup_wq); + destroy_workqueue(ffl_recycle_wq); + + pr_debug(DRVNAME ": driver removed"); +} +module_exit(ffl_driver_exit); + +/* + * Module information + */ +MODULE_AUTHOR("Olivier Stoltz Douchet "); +MODULE_DESCRIPTION("Fixed frame length protocol on HSI driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("0.1-HSI-FFL"); + diff --git a/drivers/hsi/controllers/Kconfig b/drivers/hsi/controllers/Kconfig new file mode 100644 index 0000000..1c1165d --- /dev/null +++ b/drivers/hsi/controllers/Kconfig @@ -0,0 +1,22 @@ +# +# HSI controllers configuration +# +comment "HSI controllers" + +config HSI_ARASAN + tristate "intel_mid_hsi HSI hardware driver" + depends on HSI + default n + help + If you say Y here, you will enable the intel_mid_hsi HSI hardware + driver. + + If unsure, say N. + +if HSI_ARASAN + +config HSI_ARASAN_CONFIG + boolean + default y + +endif # HSI_ARASAN diff --git a/drivers/hsi/controllers/Makefile b/drivers/hsi/controllers/Makefile new file mode 100644 index 0000000..874edcf --- /dev/null +++ b/drivers/hsi/controllers/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for HSI controllers drivers +# + +obj-$(CONFIG_OMAP_SSI) += omap_ssi.o +obj-$(CONFIG_HSI_ARASAN) += intel_mid_hsi.o diff --git a/drivers/hsi/controllers/hsi_arasan.h b/drivers/hsi/controllers/hsi_arasan.h new file mode 100644 index 0000000..9ba8543 --- /dev/null +++ b/drivers/hsi/controllers/hsi_arasan.h @@ -0,0 +1,132 @@ +/* + * hsi_arasan.h + * + * Implements HSI interface for Arasan controller. + * + * Copyright (C) 2010 Intel Corporation. All rights reserved. + * + * Contact: Jim Stanley + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef _HSI_ARASAN_H_ +#define _HSI_ARASAN_H_ + +/* Platform device parameters */ +#define HSI_IOMEM_NAME "HSI_HSI_BASE" +#define HSI_DMA_NAME "HSI_DMA_BASE" + +/* Register base addresses */ +#define ARASAN_HSI_DMA_CONFIG(base, channel) (base+((channel)*4)) +#define ARASAN_HSI_DMA_TX_FIFO_SIZE(base) (base+0x40) +#define ARASAN_HSI_DMA_TX_FIFO_THRESHOLD(base) (base+0x44) +#define ARASAN_HSI_DMA_RX_FIFO_SIZE(base) (base+0x48) +#define ARASAN_HSI_DMA_RX_FIFO_THRESHOLD(base) (base+0x4C) + +#define ARASAN_HSI_CLOCK_CONTROL(base) (base+0x50) + +#define ARASAN_HSI_HSI_STATUS(base) (base+0x54) +#define ARASAN_HSI_HSI_STATUS1(base) (base+0xC4) +#define ARASAN_HSI_INTERRUPT_STATUS(base) (base+0x58) +#define ARASAN_HSI_INTERRUPT_STATUS_ENABLE(base) (base+0x5C) +#define ARASAN_HSI_INTERRUPT_SIGNAL_ENABLE(base) (base+0x60) + +#define ARASAN_HSI_PROGRAM(base) (base+0x64) +#define ARASAN_HSI_PROGRAM1(base) (base+0xC8) + +#define ARASAN_HSI_ARBITER_PRIORITY(base) (base+0x68) +#define ARASAN_HSI_ARBITER_BANDWIDTH1(base) (base+0x6C) +#define ARASAN_HSI_ARBITER_BANDWIDTH2(base) (base+0x70) + +#define ARASAN_HSI_CAPABILITY(base) (base+0x74) + +#define ARASAN_HSI_TX_DATA(base, channel) (base+((channel)*4)+0x78) +#define ARASAN_HSI_RX_DATA(base, channel) (base+((channel)*4)+0x98) + +#define ARASAN_HSI_ERROR_INTERRUPT_STATUS(base) (base+0xB8) +#define ARASAN_HSI_ERROR_INTERRUPT_STATUS_ENABLE(base) (base+0xBC) +#define ARASAN_HSI_ERROR_INTERRUPT_SIGNAL_ENABLE(base) (base+0xC0) + +#define ARASAN_HSI_VERSION(base) (base+0xFC) + +/* Key register fields */ +#define ARASAN_ALL_CHANNELS ((1<<8)-1) +#define ARASAN_ANY_CHANNEL ((1<<8)-1) +#define ARASAN_ANY_DMA_CHANNEL ((1<<8)-1) + +#define ARASAN_DMA_ENABLE (1<<31) +#define ARASAN_DMA_BURST_SIZE(s) (order_base_2(s/4)<<24) +#define ARASAN_DMA_XFER_FRAMES(s) ((s)<<4) +#define ARASAN_DMA_CHANNEL(c) ((c)<<1) +#define ARASAN_DMA_DIR(d) ((d)<<0) + +#define ARASAN_FIFO_MAX_BITS 10 +#define ARASAN_FIFO_SIZE(s, c) ((s)<<((c)*4)) +#define ARASAN_FIFO_DEPTH(r, c) (1<<(((r)>>((c)*4)) & 0xF)) + +#define ARASAN_RX_TAP_DELAY_NS(c) (min((c), 7)<<27) +#define ARASAN_RX_TAILING_BIT_COUNT(c) ((200/max((c), 50))<<24) +#define ARASAN_RX_FRAME_BURST_COUNT(c) (((c) & 0xFF)<<16) +#define ARASAN_TX_BREAK (1<<15) +#define ARASAN_DATA_TIMEOUT(t) ((t)<<11) +#define ARASAN_CLK_DIVISOR(d) ((d)<<3) +#define ARASAN_CLK_START (1<<2) +#define ARASAN_CLK_STABLE (1<<1) +#define ARASAN_CLK_ENABLE (1<<0) + +#define ARASAN_TX_EMPTY(c) (1<<((c)+24)) +#define ARASAN_ANY_RX_NOT_EMPTY (ARASAN_ANY_CHANNEL<<8) +#define ARASAN_RX_NOT_EMPTY(c) (1<<((c)+8)) +#define ARASAN_RX_READY (1<<7) +#define ARASAN_RX_WAKE (1<<4) + +#define ARASAN_TX_ENABLE (1<<31) +#define ARASAN_TX_DISABLE (0<<31) +#define ARASAN_RX_MODE(m) (((m) == HSI_MODE_FRAME)<<30) +#define ARASAN_RX_CHANNEL_ENABLE(en, c) ((en)<<(20+(c))) +#define ARASAN_TX_CHANNEL_ENABLE(en, c) ((en)<<(12+(c))) +#define ARASAN_RX_ENABLE (1<<11) +#define ARASAN_RX_DISABLE (0<<11) +#define ARASAN_RX_FLOW(f) (((f) == HSI_FLOW_PIPE)<<9) +#define ARASAN_TX_MODE(m) (((m) == HSI_MODE_FRAME)<<8) +#define ARASAN_TX_FRAME_MODE ARASAN_TX_MODE(HSI_MODE_FRAME) +#define ARASAN_RX_TIMEOUT_CNT(cnt) (((cnt)&0x7F)<<1) +#define ARASAN_RESET (1<<0) + +#define ARASAN_IRQ_ERROR (1<<31) +#define ARASAN_IRQ_ANY_DMA_COMPLETE (ARASAN_ANY_DMA_CHANNEL<<17) +#define 
ARASAN_IRQ_DMA_COMPLETE(c) (1<<((c)+17)) +#define ARASAN_IRQ_RX_WAKE (1<<16) +#define ARASAN_IRQ_ANY_RX_THRESHOLD (ARASAN_ANY_CHANNEL<<8) +#define ARASAN_IRQ_RX_THRESHOLD(c) (1<<((c)+8)) +#define ARASAN_IRQ_ANY_TX_THRESHOLD (ARASAN_ANY_CHANNEL) +#define ARASAN_IRQ_TX_THRESHOLD(c) (1<<(c)) + +#define ARASAN_IRQ_ANY_DATA_TIMEOUT (ARASAN_ANY_CHANNEL<<2) +#define ARASAN_IRQ_DATA_TIMEOUT(c) (1<<((c)+2)) +#define ARASAN_IRQ_RX_ERROR (1<<1) +#define ARASAN_IRQ_BREAK (1<<0) + +#define ARASAN_RX_CHANNEL_BITS(b) (((b) & 0x3)<<2) +#define ARASAN_RX_CHANNEL_SIZE(s) ARASAN_RX_CHANNEL_BITS(order_base_2(s)) +#define ARASAN_TX_CHANNEL_BITS(b) ((b) & 0x03) +#define ARASAN_TX_CHANNEL_SIZE(s) ARASAN_TX_CHANNEL_BITS(order_base_2(s)) +#define ARASAN_TX_CHANNEL_CNT(r) (1<<((r) & 0x3)) +#define ARASAN_RX_CHANNEL_CNT(r) (1<<(((r)>>2) & 0x3)) + +#define ARASAN_TX_BASE_CLK_KHZ(r) ((((r)>>11)&0x1FF)*1000) + +#endif /* _ARASAN_H */ diff --git a/drivers/hsi/controllers/hsi_dwahb_dma.h b/drivers/hsi/controllers/hsi_dwahb_dma.h new file mode 100644 index 0000000..ae84d7b --- /dev/null +++ b/drivers/hsi/controllers/hsi_dwahb_dma.h @@ -0,0 +1,127 @@ +/* + * ssi_dwahb_dma.h + * + * Implements interface for DW ahb DMA controller. + * + * Copyright (C) 2009 Intel Corporation. All rights reserved. + * + * Contact: Jim Stanley + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef _DWAHB_DMA_H_ +#define _DWAHB_DMA_H_ + +#define DWAHB_CHAN_CNT 8 +#define DWAHB_ALL_CHANNELS ((1< + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +/* Set the following if the modem is crashing on ACREADY de-asssertion whilst + * ACWAKE is asserted. */ +#undef PREVENT_RX_SLEEP_WHEN_NOT_SUSPENDED + +/* Set the following if wanting to schedule a later suspend on idle state */ +#define SCHEDULE_LATER_SUSPEND_ON_IDLE + +/* Set the following to disable the power management (for debugging) */ +#undef DISABLE_POWER_MANAGEMENT + +/* Set the following to prevent ACWAKE toggling (for debugging). 
This also + * disbales power management */ +#undef PREVENT_ACWAKE_TOGGLING + +/* Set the following to allow software workaround of the DMA link listing */ +#define USE_SOFWARE_WORKAROUND_FOR_DMA_LLI + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_HAS_WAKELOCK +#include +#endif +#include "hsi_arasan.h" +#include "hsi_dwahb_dma.h" + +#define HSI_MPU_IRQ_NAME "HSI_CONTROLLER_IRQ" +#define HSI_MASTER_DMA_ID 0x834 /* PCI id for DWAHB dma */ + +#define HSI_RESETDONE_TIMEOUT 10 /* 10 ms */ +#define HSI_RESETDONE_RETRIES 20 /* => max 200 ms waiting for reset */ +#define HSI_BASE_FREQUENCY 200000 /* in KHz */ + +#define CAWAKE_POLL_JIFFIES (usecs_to_jiffies(100000)) /* 100 ms */ +#define IDLE_TO_SUSPEND_DELAY 100 /* 100 ms */ +#define HSI_CLOCK_SETUP_DELAY_WAIT 200 /* 200 us is the max time */ + +#define TX_THRESHOLD_HALF +#define RX_THRESHOLD_HALF + +#define HSI_BYTES_TO_FRAMES(x) (((x) + 3) >> 2) +#define HSI_FRAMES_TO_BYTES(x) ((x) << 2) + +#ifdef TX_THRESHOLD_HALF +#define TX_THRESHOLD 0 +#define NUM_TX_FIFO_DWORDS(x) (x / 2) +#else +#define TX_THRESHOLD 0xff +#define NUM_TX_FIFO_DWORDS(x) ((3 * x) / 4) +#endif + +#ifdef RX_THRESHOLD_HALF +#define RX_THRESHOLD 0 +#define NUM_RX_FIFO_DWORDS(x) (x / 2) +#else +#define RX_THRESHOLD 0xff +#define NUM_RX_FIFO_DWORDS(x) ((3 * x) / 4) +#endif + +#ifdef PREVENT_ACWAKE_TOGGLING +#define ACWAKE_DEFAULT_STATUS ARASAN_TX_ENABLE +#ifndef DISABLE_POWER_MANAGEMENT +#define DISABLE_POWER_MANAGEMENT +#endif +#else +#define ACWAKE_DEFAULT_STATUS ARASAN_TX_DISABLE +#endif + +/* This maps each channel to a single bit in DMA and HSI channels busy fields */ +#define DMA_BUSY(ch) (1<<(ch)) +#define QUEUE_BUSY(ch) (1<<(ch)) + +/* TX, RX and PM states */ +enum { + TX_SLEEPING, + TX_READY +}; + +enum { + RX_SLEEPING, + RX_READY, + RX_CAN_SLEEP +}; + +enum { + DEVICE_READY, + DEVICE_SUSPENDED, + DEVICE_AND_IRQ_SUSPENDED +}; + +/* Master DMA config low register configuration: + * No multi-block support, no maximal burst, no AHB bus locking, hardware + * handshaking and a priority set to the channel id */ +#define HSI_DWAHB_CFG_LO_CFG(dma_chan) \ + (DWAHB_PRIORITY(dma_chan) | DWAHB_SUSPEND(0) | \ + DWAHB_DST_SW_HANDSHAKE(0) | DWAHB_SRC_SW_HANDSHAKE(0) | \ + DWAHB_LOCKING(0) | DWAHB_DST_HANDSHAKE_ACTIVE_LOW(0) | \ + DWAHB_SRC_HANDSHAKE_ACTIVE_LOW(0) | \ + DWAHB_MAX_AMBA_BURST(0) | DWAHB_SRC_RELOAD(0) | DWAHB_DST_RELOAD(0)) + +/* Master DMA config high register configuration: + * Use the channel id handshaking interface, no link list status update, no + * opcode / data only access, low latency, no prefetch */ +#define HSI_DWAHB_CFG_HI_CFG(dma_chan) \ + (DWAHB_PREFETCH(0) | DWAHB_USE_FIFO(0) | DWAHB_DATA_ONLY | \ + DWAHB_DST_STATUS_UPDATE(0) | DWAHB_SRC_STATUS_UPDATE(0) | \ + DWAHB_SRC_HW_HANDSHAKE(dma_chan) | DWAHB_DST_HW_HANDSHAKE(dma_chan)) + +/* Master DMA control low register configuration: + * No interrupt, 32-bit data, increment on both source and destination, + * 32-word bursts, no scatter, no gather, slave DMA control, and link + * listing (if requested) */ +#define HSI_DWAHB_CTL_LO_CFG(tx_not_rx, lli_enb) \ + (DWAHB_IRQ_ENABLE(0) | DWAHB_DST_WIDTH(32) | DWAHB_SRC_WIDTH(32) | \ + DWAHB_DST_INC | DWAHB_SRC_INC | DWAHB_DST_BURST(32) | \ + DWAHB_SRC_BURST(32) | DWAHB_SRC_GATHER(0) | DWAHB_DST_SCATTER(0) | \ + DWAHB_IS_NOT_FLOW_CTL(tx_not_rx) | \ + DWAHB_DST_LINK_LIST(lli_enb) | \ + DWAHB_SRC_LINK_LIST(lli_enb)) + 
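A minimal, self-contained illustration of the byte/frame helpers defined above (the two macros are repeated here so the snippet stands alone); it only checks that byte counts are rounded up to whole 32-bit HSI frames:

	#define HSI_BYTES_TO_FRAMES(x)	(((x) + 3) >> 2)
	#define HSI_FRAMES_TO_BYTES(x)	((x) << 2)

	/* 6 payload bytes occupy two 32-bit frames; two frames carry 8 bytes. */
	_Static_assert(HSI_BYTES_TO_FRAMES(6) == 2, "6 bytes -> 2 frames");
	_Static_assert(HSI_FRAMES_TO_BYTES(2) == 8, "2 frames -> 8 bytes");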
+/** + * struct intel_dma_lli - DMA link list structure + * @sar: DMA source address + * @dar: DMA destination address + * @llp: next DMA link list entry reference + * @ctl_lo: DMA control register lower word + * @ctl_hi: DMA control register upper word + */ +struct intel_dma_lli { + u32 sar; + u32 dar; + u32 llp; + u32 ctl_lo; + u32 ctl_hi; +}; + +/** + * struct intel_dma_lli_xfer - DMA transfer configuration using link listing + * @blk: reference to the block being transferred + * @llp_addr: DMA address of the first link list entry + * @lli: array of DMA link list entries + */ +struct intel_dma_lli_xfer { +#ifdef USE_SOFWARE_WORKAROUND_FOR_DMA_LLI + struct scatterlist *blk; +#endif + dma_addr_t llp_addr; + struct intel_dma_lli lli[0]; +}; + +/** + * struct intel_dma_plain_xfer - DMA transfer configuration without link list + * @size: size of the transfer in 32-bit words + * @src_addr: DMA source address + * @dst_addr: DMA destination address + */ +struct intel_dma_plain_xfer { + u32 size; + dma_addr_t src_addr; + dma_addr_t dst_addr; +}; + +/** + * struct intel_dma_xfer - Internal DMA transfer context + * @msg: reference of the message being transferred + * @mst_enable: master DMA enabling register + * @slv_enable: slave DMA enabling register + * @with_link_listing: DMA transfer with link listing context + * @without_link_listing: DMA transfer without link listing context + */ +struct intel_dma_xfer { + struct hsi_msg *msg; + u32 mst_enable; + u32 slv_enable; + union { + struct intel_dma_lli_xfer with_link_list; + struct intel_dma_plain_xfer without_link_list; + }; +}; + +/** + * struct intel_dma_ctx - Internal DMA context + * @sg_entries: the maximal number of link listing entries for this context + * @ongoing: reference to ongoing DMA transfer + * @ready: reference to next ready DMA transfer + */ +struct intel_dma_ctx { + int sg_entries; + struct intel_dma_xfer *ongoing; + struct intel_dma_xfer *ready; +}; + +/** + * struct intel_pio_ctx - Internal PIO context + * @blk: reference to the block being processed + * @offset: offset in the block being processed + */ +struct intel_pio_ctx { + struct scatterlist *blk; + unsigned int offset; +}; + +/** + * struct intel_xfer_ctx - Internal xfer context (PIO or DMA) + * @dma: DMA context storage + * @pio: PIO context storage + */ +struct intel_xfer_ctx { + union { + struct intel_dma_ctx dma; + struct intel_pio_ctx pio; + }; +}; + +/** + * struct intel_controller - Arasan HSI controller data + * @dev: device associated to the controller (HSI controller) + * @pdev: PCI dev* for HSI controller + * @dmac: PCI dev* for master DMA controller + * @ctrl_io: HSI I/O ctrl address + * @dma_io: GDD I/O ctrl address + * @irq: interrupt line index of the HSI controller + * @isr_tasklet: first-level high priority interrupt handling tasklet + * @fwd_tasklet: second level response forwarding tasklet + * @cawake_poll: timer for polling the falling edge status of the CAWAKE line + * @sw_lock: spinlock for accessing software FIFO + * @hw_lock: spinlock for accessing hardware FIFO + * @tx_queue: channel-indexed array of FIFO of messages awaiting transmission + * @rx_queue: channel-indexed array of FIFO of messages awaiting reception + * @brk_queue: FIFO for RX break messages + * @fwd_queue: FIFO of messages awaiting of being forwarded back to client + * @tx_ctx: Context for the ongoing TX transfers + * @rx_ctx: Context for the ongoing RX transfers + * @tx_queue_busy: bitmap of busy (frozen) TX queues + * @rx_queue_busy: bitmap of busy (frozen) RX queues + * 
@tx_dma_chan: mapping for TX HSI channel to dma channel (-1 if no DMA used) + * @rx_dma_chan: mapping for RX HSI channel to dma channel (-1 if no DMA used) + * @dma_ctx: mapping of DMA contexts to HSI channel contexts + * @dma_running: bitfield of running DMA transactions + * @dma_resumed: bitfield of resumed DMA transactions + * @tx_state: current state of the TX side (0 for idle, >0 for ACWAKE) + * @rx_state: current state of the RX state (0 for idle, 1 for ACWAKE) + * @suspend_state: current power state (0 for powered, >0 for suspended) + * @irq_status: current interrupt status register + * @err_status: current error interrupt status register + * @irq_cfg: current interrupt configuration register + * @err_cfg: current error interrupt configuration register + * @clk_cfg: current clock configuration register + * @prg_cfg: current program configuration register + * @tx_fifo_config_cfg: current TX FIFO control configuration register + * @rx_fifo_config_cfg: current RX FIFO control configuration register + * @arb_cfg: current arbiter priority configuration register + * @sz_cfg: current program1 configuration register + * @ip_freq: HSI controller IP frequency in kHz + * @brk_us_delay: Minimal BREAK sequence delay in us + * @stay_awake: Android wake lock for preventing entering low power mode + * @dir: debugfs HSI root directory + */ +struct intel_controller { + /* Devices and resources */ + struct device *dev; + struct device *pdev; + struct pci_dev *dmac; + void __iomem *ctrl_io; + void __iomem *dma_io; + unsigned int irq; + /* Dual-level interrupt tasklets */ + struct tasklet_struct isr_tasklet; + struct tasklet_struct fwd_tasklet; + struct timer_list cawake_poll; + /* Queues and registers access locks */ + spinlock_t sw_lock; + spinlock_t hw_lock; + /* Software FIFO */ + struct list_head tx_queue[HSI_MID_MAX_CHANNELS]; + struct list_head rx_queue[HSI_MID_MAX_CHANNELS]; + struct list_head brk_queue; + struct list_head fwd_queue; + struct intel_xfer_ctx tx_ctx[HSI_MID_MAX_CHANNELS]; + struct intel_xfer_ctx rx_ctx[HSI_MID_MAX_CHANNELS]; + u16 tx_queue_busy; + u16 rx_queue_busy; + /* Current DMA processed messages */ + s8 tx_dma_chan[HSI_MID_MAX_CHANNELS]; + s8 rx_dma_chan[HSI_MID_MAX_CHANNELS]; + struct intel_dma_ctx *dma_ctx[DWAHB_CHAN_CNT]; + u32 dma_running; + u32 dma_resumed; + /* Current RX and TX states (0 for idle) */ + int tx_state; + int rx_state; + int suspend_state; + /* HSI controller register images */ + u32 irq_status; + u32 err_status; + /* HSI controller setup */ + u32 irq_cfg; + u32 err_cfg; + u32 clk_cfg; + u32 prg_cfg; + u32 tx_fifo_cfg; + u32 rx_fifo_cfg; + u32 arb_cfg; + u32 sz_cfg; + /* HSI controller IP frequency */ + unsigned int ip_freq; + unsigned int brk_us_delay; +#ifdef CONFIG_HAS_WAKELOCK + /* Android PM support */ + struct wake_lock stay_awake; +#endif +#ifdef CONFIG_DEBUG_FS + struct dentry *dir; +#endif +}; + +/* + * Helper functions + */ + +/** + * is_using_link_list - checks if the DMA context is using link listing + * @dma_ctx: a reference to the DMA context to query + * + * Returns 0 if not using link-listing or 1 if using it. + */ +static inline int is_using_link_list(struct intel_dma_ctx *dma_ctx) +{ + return (dma_ctx->sg_entries > 1); +} + +/** + * is_in_tx_frame_mode - checks if the HSI controller is set in frame mode + * @intel_hsi: Intel HSI controller reference + * + * Returns 0 if in stream mode or ARASAN_TX_FRAME_MODE if in frame mode. 
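A short worked example of the program1 (sz_cfg) channel-count encoding decoded by hsi_tx_channel_count()/hsi_rx_channel_count() below, using the ARASAN_*_CHANNEL_* macros from hsi_arasan.h; the values are illustrative only:

	/* With 8 TX and 8 RX channels the encoded image is:
	 *   sz_cfg = ARASAN_TX_CHANNEL_BITS(3) | ARASAN_RX_CHANNEL_BITS(3) = 0x0F
	 * and the decode helpers recover the counts:
	 *   ARASAN_TX_CHANNEL_CNT(0x0F) == 1 << (0x0F & 0x3)        == 8
	 *   ARASAN_RX_CHANNEL_CNT(0x0F) == 1 << ((0x0F >> 2) & 0x3) == 8
	 */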
+ */ +static inline int is_in_tx_frame_mode(struct intel_controller *intel_hsi) +{ + return intel_hsi->prg_cfg & ARASAN_TX_FRAME_MODE; +} + +/** + * hsi_tx_channel_count - getting the number of programmed HSI TX channels + * @intel_hsi: Intel HSI controller reference + * + * Returns the number of programmed HSI TX channels. + */ +static inline int hsi_tx_channel_count(struct intel_controller *intel_hsi) +{ + return ARASAN_TX_CHANNEL_CNT(intel_hsi->sz_cfg); +} + +/** + * hsi_rx_channel_count - getting the number of programmed HSI RX channels + * @intel_hsi: Intel HSI controller reference + * + * Returns the number of programmed HSI RX channels. + */ +static inline int hsi_rx_channel_count(struct intel_controller *intel_hsi) +{ + return ARASAN_RX_CHANNEL_CNT(intel_hsi->sz_cfg); +} + +/** + * tx_fifo_depth - getting the TX FIFO depth in words of a given channel + * @intel_hsi: Intel HSI controller reference + * @channel: the HSI channel to consider + * + * Returns the size of the hardware TX FIFO or -1 if the channel is disabled. + */ +static inline unsigned int tx_fifo_depth(struct intel_controller *intel_hsi, + unsigned int channel) +{ + return ARASAN_FIFO_DEPTH(intel_hsi->tx_fifo_cfg, channel); +} + +/** + * rx_fifo_depth - getting the RX FIFO depth in words of a given channel + * @intel_hsi: Intel HSI controller reference + * @channel: the HSI channel to consider + * + * Returns the size of the hardware RX FIFO or -1 if the channel is disabled. + */ +static inline unsigned int rx_fifo_depth(struct intel_controller *intel_hsi, + unsigned int channel) +{ + return ARASAN_FIFO_DEPTH(intel_hsi->rx_fifo_cfg, channel); +} + +/** + * hsi_enable_interrupt - enabling and signalling interrupts + * @ctrl: the IO-based address of the HSI hardware + * @irq_enable: the bitfield of interrupt sources to enable + */ +static inline void hsi_enable_interrupt(void __iomem *ctrl, u32 irq_enable) +{ + iowrite32(irq_enable, ARASAN_HSI_INTERRUPT_STATUS_ENABLE(ctrl)); + iowrite32(irq_enable, ARASAN_HSI_INTERRUPT_SIGNAL_ENABLE(ctrl)); +} + +/** + * hsi_enable_error_interrupt - enabling and signalling error interrupts + * @ctrl: the IO-based address of the HSI hardware + * @irq_enable: the bitfield of error interrupt sources to enable + */ +static inline void hsi_enable_error_interrupt(void __iomem *ctrl, + u32 irq_enable) +{ + iowrite32(irq_enable, ARASAN_HSI_ERROR_INTERRUPT_STATUS_ENABLE(ctrl)); + iowrite32(irq_enable, ARASAN_HSI_ERROR_INTERRUPT_SIGNAL_ENABLE(ctrl)); +} + + +/** + * hsi_pm_wake_lock - acquire the wake lock whenever necessary + * @intel_hsi: Intel HSI controller reference + */ +static inline void hsi_pm_wake_lock(struct intel_controller *intel_hsi) + __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock) +{ +#ifdef CONFIG_HAS_WAKELOCK + unsigned long flags; + + spin_lock_irqsave(&intel_hsi->hw_lock, flags); + if (intel_hsi->suspend_state != DEVICE_READY) + wake_lock(&intel_hsi->stay_awake); + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); +#endif +} + +/** + * hsi_pm_runtime_get - getting a PM runtime reference to the HSI controller + * @intel_hsi: Intel HSI controller reference + * + * This function is also getting the wake lock should wake lock is used. 
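A brief worked note on the FIFO size encoding read back by tx_fifo_depth()/rx_fifo_depth() above; the example values are illustrative only:

	/* Each channel owns a 4-bit exponent field in the FIFO size register:
	 *   fifo_cfg |= ARASAN_FIFO_SIZE(6, 1);              encode channel 1
	 *   ARASAN_FIFO_DEPTH(fifo_cfg, 1) == 1 << 6 == 64   words, i.e. 256 bytes
	 */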
+ */ +static void hsi_pm_runtime_get(struct intel_controller *intel_hsi) +{ + hsi_pm_wake_lock(intel_hsi); + pm_runtime_get(intel_hsi->pdev); +} + +/** + * hsi_pm_runtime_get_sync - getting a synchronised PM runtime reference to the + * HSI controller + * @intel_hsi: Intel HSI controller reference + * + * This function is also getting the wake lock should wake lock is used. + */ +static void hsi_pm_runtime_get_sync(struct intel_controller *intel_hsi) +{ + hsi_pm_wake_lock(intel_hsi); + pm_runtime_get_sync(intel_hsi->pdev); +} + +/** + * assert_acwake - asserting the ACWAKE line status + * @intel_hsi: Intel HSI controller reference + * + * The actual ACWAKE assertion happens when tx_state was 0. + */ +static void assert_acwake(struct intel_controller *intel_hsi) + __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock) +{ +#ifndef PREVENT_ACWAKE_TOGGLING + void __iomem *ctrl = intel_hsi->ctrl_io; +#endif + unsigned long flags; + int do_wakeup; + + spin_lock_irqsave(&intel_hsi->hw_lock, flags); +#ifndef PREVENT_ACWAKE_TOGGLING + do_wakeup = (intel_hsi->tx_state == TX_SLEEPING); + if (do_wakeup) { + intel_hsi->prg_cfg |= ARASAN_TX_ENABLE; + if (intel_hsi->suspend_state == DEVICE_READY) + iowrite32(intel_hsi->prg_cfg, ARASAN_HSI_PROGRAM(ctrl)); + } +#else + do_wakeup = 0; +#endif + intel_hsi->tx_state++; + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); + + if (do_wakeup) + hsi_pm_runtime_get(intel_hsi); +} + +/** + * deassert_acwake - de-asserting the ACWAKE line status + * @intel_hsi: Intel HSI controller reference + * + * The actual ACWAKE de-assertion happens only when tx_state reaches 0. + * + * Returns 1 on success or 0 if the tx_state count was already 0. + */ +static int deassert_acwake(struct intel_controller *intel_hsi) + __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock) +{ + void __iomem *ctrl = intel_hsi->ctrl_io; + unsigned long flags; + int do_sleep, i; + + /* Wait for READY signal assertion prior de-asserting the WAKE signal + * in synchronised mode only !!! */ + spin_lock_irqsave(&intel_hsi->hw_lock, flags); + if ((intel_hsi->prg_cfg & ARASAN_TX_FRAME_MODE) || + (unlikely(intel_hsi->suspend_state != DEVICE_READY))) + goto do_deassert_acwake; + + /* Timeout after 10 ms whilst waiting for ready line to rise back */ + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); + i = 0; + while ((!(ioread32(ARASAN_HSI_HSI_STATUS(ctrl)) & ARASAN_RX_READY)) && + (i < 100)) { + /* Wait for 10 us */ + udelay(10); + i++; + } + spin_lock_irqsave(&intel_hsi->hw_lock, flags); + +do_deassert_acwake: + /* The deassert AC wake function is also used in the release function + * so that it can be called more times than expected should some + * stop_tx calls happen simultaneously. This makes the code cleaner and + * more robust. 
*/ + if (intel_hsi->tx_state <= TX_SLEEPING) { + intel_hsi->tx_state = TX_SLEEPING; + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); + return 0; + } + + --intel_hsi->tx_state; +#ifndef PREVENT_ACWAKE_TOGGLING + do_sleep = (intel_hsi->tx_state == TX_SLEEPING); + if (do_sleep) { + intel_hsi->prg_cfg &= ~ARASAN_TX_ENABLE; + if (likely(intel_hsi->suspend_state == DEVICE_READY)) + iowrite32(intel_hsi->prg_cfg, ARASAN_HSI_PROGRAM(ctrl)); + } +#else + do_sleep = 0; +#endif + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); + + if (do_sleep) + pm_runtime_put(intel_hsi->pdev); + + return 1; +} + +/** + * cawake_poll - polling the CAWAKE line status + * @param: hidden Intel HSI controller reference + * + * This polling timer is activated on CAWAKE rising interrupt, and re-activated + * until the CAWAKE is low and all internal RX FIFO are empty. + */ +static void cawake_poll(unsigned long param) + __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock) +{ + struct intel_controller *intel_hsi = (struct intel_controller *) param; + void __iomem *ctrl = intel_hsi->ctrl_io; + int schedule_rx_sleep = 0; + unsigned long flags; + + spin_lock_irqsave(&intel_hsi->hw_lock, flags); + + if (unlikely(intel_hsi->rx_state != RX_READY)) { + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); + return; + } + + /* This shall almost never happen, but timer will be restarted on + * resume anyway */ + if (unlikely(intel_hsi->suspend_state != DEVICE_READY)) { + pr_warn("hsi: poll whilst suspended !\n"); + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); + return; + } + + /* Prevent RX side disabling as long as CAWAKE is asserted or any RX + * hardware FIFO is not empty */ + if (ioread32(ARASAN_HSI_HSI_STATUS(ctrl)) & + (ARASAN_RX_WAKE|ARASAN_ANY_RX_NOT_EMPTY)) + mod_timer(&intel_hsi->cawake_poll, + jiffies + CAWAKE_POLL_JIFFIES); + else { + intel_hsi->irq_cfg |= ARASAN_IRQ_RX_WAKE; + hsi_enable_interrupt(ctrl, intel_hsi->irq_cfg); + intel_hsi->rx_state = RX_CAN_SLEEP; + schedule_rx_sleep = 1; + } + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); + + if (schedule_rx_sleep) + tasklet_hi_schedule(&intel_hsi->isr_tasklet); +} + +/** + * has_enabled_acready - enable the ACREADY line + * @intel_hsi: Intel HSI controller reference + * + * Returns 1 if enabled or 0 if already enabled. + */ +static int has_enabled_acready(struct intel_controller *intel_hsi) + __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock) +{ + void __iomem *ctrl = intel_hsi->ctrl_io; + unsigned long flags; + int do_wakeup; + + spin_lock_irqsave(&intel_hsi->hw_lock, flags); + + /* Do not re-wakeup the device if already woken up */ + do_wakeup = (intel_hsi->rx_state == RX_SLEEPING); + + intel_hsi->prg_cfg |= ARASAN_RX_ENABLE; + if ((do_wakeup) && (intel_hsi->suspend_state == DEVICE_READY)) { + iowrite32(intel_hsi->prg_cfg, ARASAN_HSI_PROGRAM(ctrl)); + mod_timer(&intel_hsi->cawake_poll, + jiffies + CAWAKE_POLL_JIFFIES); + } + intel_hsi->rx_state = RX_READY; + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); + + if (do_wakeup) + hsi_pm_runtime_get(intel_hsi); + + return do_wakeup; +} + +/** + * has_disabled_acready - try to disable the ACREADY line + * @intel_hsi: Intel HSI controller reference + * + * The actual RX disable can only happen if the CAWAKE line was low and all + * RX hardware FIFO are empty, in which case rx_state has been set to + * RX_CAN_SLEEP. + * + * Returns 1 if disabled or 0 if not. 
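To make the tx_state reference counting above concrete, a sketch of the intended pairing inside the controller driver; the call sites named here are hypothetical and for illustration only:

	/* Every user of the TX path raises the count and later drops it;
	 * ACWAKE is asserted on the 0 -> 1 transition and released on 1 -> 0. */
	assert_acwake(intel_hsi);	/* e.g. when a TX message is queued   */
	/* ... transfer in flight ... */
	deassert_acwake(intel_hsi);	/* e.g. when that TX message completes */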
+ */ +static int has_disabled_acready(struct intel_controller *intel_hsi) + __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock) +{ + void __iomem *ctrl = intel_hsi->ctrl_io; + int do_sleep = 0; + unsigned long flags; + + spin_lock_irqsave(&intel_hsi->hw_lock, flags); + if (intel_hsi->rx_state != RX_CAN_SLEEP) { + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); + return 0; + } + do_sleep = !(ioread32(ARASAN_HSI_HSI_STATUS(ctrl)) & + (ARASAN_RX_WAKE|ARASAN_ANY_RX_NOT_EMPTY)); + if (likely(do_sleep)) { +#ifndef PREVENT_RX_SLEEP_WHEN_NOT_SUSPENDED + intel_hsi->prg_cfg &= ~ARASAN_RX_ENABLE; + if (likely(intel_hsi->suspend_state == DEVICE_READY)) + iowrite32(intel_hsi->prg_cfg, ARASAN_HSI_PROGRAM(ctrl)); +#endif + intel_hsi->rx_state = RX_SLEEPING; + } else { + mod_timer(&intel_hsi->cawake_poll, + jiffies + CAWAKE_POLL_JIFFIES); + intel_hsi->irq_status &= ~ARASAN_IRQ_RX_WAKE; + intel_hsi->rx_state = RX_READY; + } + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); + + if (do_sleep) { + /* Wait for 2 us (more than 1 HSI frame at 20 MHz) to ensure + * that the CAREADY will not rise back too soon */ + udelay(2); + pm_runtime_put(intel_hsi->pdev); + } + + return do_sleep; +} + +/** + * force_disable_acready - force disable of the ACREADY line + * @intel_hsi: Intel HSI controller reference + */ +static void force_disable_acready(struct intel_controller *intel_hsi) + __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock) +{ + void __iomem *ctrl = intel_hsi->ctrl_io; + struct hsi_controller *hsi = to_hsi_controller(intel_hsi->dev); + int do_sleep = 0; + unsigned int i; + unsigned long flags; + + spin_lock_irqsave(&intel_hsi->hw_lock, flags); + do_sleep = (intel_hsi->rx_state != RX_SLEEPING); + if (do_sleep) { + intel_hsi->prg_cfg &= ~ARASAN_RX_ENABLE; + if (likely(intel_hsi->suspend_state == DEVICE_READY)) + iowrite32(intel_hsi->prg_cfg, ARASAN_HSI_PROGRAM(ctrl)); + intel_hsi->rx_state = RX_SLEEPING; + } + + /* Prevent the ACREADY change because of the CAWAKE toggling. 
+ * The CAWAKE event interrupt shall be re-enabled whenever the + * RX fifo is no longer empty */ + del_timer(&intel_hsi->cawake_poll); + intel_hsi->irq_status &= ~ARASAN_IRQ_RX_WAKE; + intel_hsi->irq_cfg &= ~ARASAN_IRQ_RX_WAKE; + if (likely(intel_hsi->suspend_state == DEVICE_READY)) { + iowrite32(ARASAN_IRQ_RX_WAKE, + ARASAN_HSI_INTERRUPT_STATUS(ctrl)); + hsi_enable_interrupt(ctrl, intel_hsi->irq_cfg); + } + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); + + if (do_sleep) { + for (i = 0; i < hsi->num_ports; i++) + hsi_event(&hsi->port[i], HSI_EVENT_STOP_RX); + pm_runtime_put(intel_hsi->pdev); + } +} + +/** + * unforce_disable_acready - unforce a previously disabled ACREADY line + * @intel_hsi: Intel HSI controller reference + */ +static void unforce_disable_acready(struct intel_controller *intel_hsi) + __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock) +{ + void __iomem *ctrl = intel_hsi->ctrl_io; + unsigned long flags; + + spin_lock_irqsave(&intel_hsi->hw_lock, flags); + if (unlikely((intel_hsi->rx_state == RX_SLEEPING) && + (!(intel_hsi->irq_cfg & ARASAN_IRQ_RX_WAKE)))) { + intel_hsi->irq_cfg |= ARASAN_IRQ_RX_WAKE; + if (intel_hsi->suspend_state == DEVICE_READY) + hsi_enable_interrupt(ctrl, intel_hsi->irq_cfg); + } + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); +} + +/** + * hsi_get_dma_channel - get DMA channel index from HSI channel index + * @intel_hsi: Intel HSI controller reference + * @msg: reference to the HSI message + * @hsi_channel: HSI channel for which to find DMA channel + * + * Return the DMA channel associated with an HSI channel for read or write. + * Return -1 in case there is no associated DMA channel. + */ +static int hsi_get_dma_channel(struct intel_controller *intel_hsi, + struct hsi_msg *msg, unsigned int hsi_channel) +{ + if ((hsi_channel >= HSI_MID_MAX_CHANNELS) || (!msg->sgt.nents)) + return -1; + + return (msg->ttype == HSI_MSG_READ) ? + (int) intel_hsi->rx_dma_chan[hsi_channel] : + (int) intel_hsi->tx_dma_chan[hsi_channel]; +} + +/** + * hsi_set_master_dma_cfg - setting common parts of the master DMA registers + * @dma: the IO-based address of the DMA hardware + * @dma_chan: the DMA channel to configure + * @tx_not_rx: the direction of the DMA channel (TX=1, RX=0) + * @lli_enb: link-list enable flag + */ +static inline void hsi_set_master_dma_cfg(void __iomem *dma, int dma_chan, + int tx_not_rx, int lli_enb) +{ + + /* Disable link listing */ + iowrite32(0, HSI_DWAHB_LLP(dma, dma_chan)); + + /* Set the common master DMA static configuration */ + iowrite32(HSI_DWAHB_CFG_LO_CFG(dma_chan), + HSI_DWAHB_CFG_LO(dma, dma_chan)); + + iowrite32(HSI_DWAHB_CFG_HI_CFG(dma_chan), + HSI_DWAHB_CFG_HI(dma, dma_chan)); + + iowrite32(HSI_DWAHB_CTL_LO_CFG(tx_not_rx, lli_enb), + HSI_DWAHB_CTL_LO(dma, dma_chan)); + + /* Nothing to transfer yet ! 
*/ + iowrite32(0, HSI_DWAHB_CTL_HI(dma, dma_chan)); +} + +/** + * hsi_disable_master_dma_cfg - disable the master DMA registers + * @dma: the IO-based address of the DMA hardware + */ +static inline void hsi_disable_master_dma_cfg(void __iomem *dma) +{ + int i; + + /* Disable all DMA channels */ + iowrite32(DWAHB_CHAN_DISABLE(DWAHB_ALL_CHANNELS), HSI_DWAHB_CHEN(dma)); + iowrite32(DWAHB_DISABLE, HSI_DWAHB_DMACFG(dma)); + + for (i = 0; i < DWAHB_CHAN_CNT; i++) { + /* Disable link listing */ + iowrite32(0, HSI_DWAHB_LLP(dma, i)); + + /* Suspend channel, set software handshaking and set an invalid + * hardware handshaking IF (-1) */ + iowrite32(DWAHB_SUSPEND(1) | DWAHB_SRC_SW_HANDSHAKE(1) | + DWAHB_DST_SW_HANDSHAKE(1), HSI_DWAHB_CFG_LO(dma, i)); + iowrite32(DWAHB_DATA_ONLY | DWAHB_SRC_HW_HANDSHAKE(-1) | + DWAHB_DST_HW_HANDSHAKE(-1), HSI_DWAHB_CFG_HI(dma, i)); + iowrite32(0, HSI_DWAHB_CTL_LO(dma, i)); + iowrite32(0, HSI_DWAHB_CTL_HI(dma, i)); + } +} + +/** + * hsi_ctrl_set_cfg - HSI controller hardware configure + * @intel_hsi: Intel HSI controller reference + * + * Program the hardware in accordance with the settings stored in the HSI + * controller software structure. + * + * Returns success or an error if it is not possible to reprogram the device. + */ +static int hsi_ctrl_set_cfg(struct intel_controller *intel_hsi) +{ + void __iomem *ctrl = intel_hsi->ctrl_io; + void __iomem *dma = intel_hsi->dma_io; + u32 status; + int i, dma_chan, lli_enb; + + /* If the reset bit is set then nothing has been configured yet ! */ + if (intel_hsi->prg_cfg & ARASAN_RESET) + return 0; + + /* Prepare the internal clock without enabling it */ + iowrite32(0, ARASAN_HSI_PROGRAM(ctrl)); + iowrite32(0, ARASAN_HSI_DMA_TX_FIFO_SIZE(ctrl)); + iowrite32(0, ARASAN_HSI_DMA_RX_FIFO_SIZE(ctrl)); + iowrite32((intel_hsi->clk_cfg & ~ARASAN_CLK_ENABLE), + ARASAN_HSI_CLOCK_CONTROL(ctrl)); + + /* Configure fixed DMA registers (master and slave) */ + for (i = 0; i < DWAHB_CHAN_CNT; i++) + iowrite32(0, ARASAN_HSI_DMA_CONFIG(ctrl, i)); + + iowrite32(DWAHB_ENABLE, HSI_DWAHB_DMACFG(dma)); + for (i = 0; i < HSI_MID_MAX_CHANNELS; i++) { + dma_chan = intel_hsi->tx_dma_chan[i]; + lli_enb = is_using_link_list(&intel_hsi->tx_ctx[i].dma); + if (dma_chan >= 0) + hsi_set_master_dma_cfg(dma, dma_chan, 1, lli_enb); + } + + for (i = 0; i < HSI_MID_MAX_CHANNELS; i++) { + dma_chan = intel_hsi->rx_dma_chan[i]; + lli_enb = is_using_link_list(&intel_hsi->rx_ctx[i].dma); + if (dma_chan >= 0) + hsi_set_master_dma_cfg(dma, dma_chan, 0, lli_enb); + } + + /* Enable the internal HSI clock */ + status = ioread32(ARASAN_HSI_CLOCK_CONTROL(ctrl)); + i = 0; + while (!(status & ARASAN_CLK_STABLE)) { + i++; + if (i > HSI_CLOCK_SETUP_DELAY_WAIT) + return -ETIME; + udelay(1); + status = ioread32(ARASAN_HSI_CLOCK_CONTROL(ctrl)); + } + iowrite32(intel_hsi->clk_cfg, ARASAN_HSI_CLOCK_CONTROL(ctrl)); + + /* Configure the main controller parameters (except wake status) */ +#ifdef PREVENT_ACWAKE_TOGGLING + intel_hsi->prg_cfg |= ARASAN_TX_ENABLE; + iowrite32(intel_hsi->prg_cfg & ~ARASAN_RX_ENABLE, + ARASAN_HSI_PROGRAM(ctrl)); +#else + iowrite32(intel_hsi->prg_cfg & ~(ARASAN_TX_ENABLE|ARASAN_RX_ENABLE), + ARASAN_HSI_PROGRAM(ctrl)); +#endif + + /* Configure the number of HSI channels */ + iowrite32(intel_hsi->sz_cfg, ARASAN_HSI_PROGRAM1(ctrl)); + + /* Configure the arbitration scheme */ + iowrite32(intel_hsi->arb_cfg, ARASAN_HSI_ARBITER_PRIORITY(ctrl)); + + /* Configure the hardware FIFO */ + iowrite32(intel_hsi->tx_fifo_cfg, ARASAN_HSI_DMA_TX_FIFO_SIZE(ctrl)); + 
iowrite32(TX_THRESHOLD, ARASAN_HSI_DMA_TX_FIFO_THRESHOLD(ctrl)); + iowrite32(intel_hsi->rx_fifo_cfg, ARASAN_HSI_DMA_RX_FIFO_SIZE(ctrl)); + iowrite32(RX_THRESHOLD, ARASAN_HSI_DMA_RX_FIFO_THRESHOLD(ctrl)); + + /* Start the CAWAKE poll mechanism if RX is enabled */ + if (intel_hsi->prg_cfg & ARASAN_RX_ENABLE) + mod_timer(&intel_hsi->cawake_poll, + jiffies + CAWAKE_POLL_JIFFIES); + + /* Enable then signal interrupts */ + hsi_enable_interrupt(ctrl, intel_hsi->irq_cfg); + hsi_enable_error_interrupt(ctrl, intel_hsi->err_cfg); + + /* Enable the RX and TX parts if necessary */ + iowrite32(intel_hsi->prg_cfg, ARASAN_HSI_PROGRAM(ctrl)); + + return 0; +} + +/** + * hsi_ctrl_resume - HSI controller hardware resume + * @intel_hsi: Intel HSI controller reference + * + * Program the hardware back to its prior-suspend state and re-enable IRQ. + * + * Returns success or an error if it is not possible to reprogram the device. + */ +static int hsi_ctrl_resume(struct intel_controller *intel_hsi) + __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock) +{ + unsigned long flags; + int err = 0; + + spin_lock_irqsave(&intel_hsi->hw_lock, flags); + if (intel_hsi->suspend_state != DEVICE_READY) { + err = hsi_ctrl_set_cfg(intel_hsi); + intel_hsi->dma_resumed = 0; + intel_hsi->suspend_state--; + } + + if (intel_hsi->suspend_state != DEVICE_READY) { + enable_irq(intel_hsi->irq); + intel_hsi->suspend_state--; + } + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); + + return err; +} + +/** + * hsi_ctrl_suspend - HSI controller hardware suspend + * @intel_hsi: Intel HSI controller reference + * + * Stops pending DMA transfers, disable all interrupts and shut down the + * controller clock. + * + * Returns 0 if successful or an error code + */ +static int hsi_ctrl_suspend(struct intel_controller *intel_hsi) + __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock) +{ + void __iomem *ctrl = intel_hsi->ctrl_io; + void __iomem *dma = intel_hsi->dma_io; + int i, err = 0; + unsigned long flags; + + spin_lock_irqsave(&intel_hsi->hw_lock, flags); + if (intel_hsi->suspend_state == DEVICE_READY) { + +#ifdef PREVENT_RX_SLEEP_WHEN_NOT_SUSPENDED + if (unlikely(ioread32(ARASAN_HSI_HSI_STATUS(ctrl)) & + (ARASAN_RX_WAKE|ARASAN_ANY_RX_NOT_EMPTY))) { + /* ACWAKE rising edge will be detected by the ISR */ + err = -EBUSY; + goto exit_ctrl_suspend; + } + + intel_hsi->prg_cfg &= ~ARASAN_RX_ENABLE; + iowrite32(intel_hsi->prg_cfg, ARASAN_HSI_PROGRAM(ctrl)); +#endif + + if ((intel_hsi->tx_state != TX_SLEEPING) || + (intel_hsi->rx_state != RX_SLEEPING)) { + err = -EBUSY; + goto exit_ctrl_suspend; + } + + /* Disable all DMA */ + iowrite32(DWAHB_CHAN_DISABLE(DWAHB_ALL_CHANNELS), + HSI_DWAHB_CHEN(dma)); + iowrite32(DWAHB_DISABLE, HSI_DWAHB_DMACFG(dma)); + for (i = 0; i < DWAHB_CHAN_CNT; i++) + iowrite32(0, ARASAN_HSI_DMA_CONFIG(ctrl, i)); + intel_hsi->dma_running = 0; + + /* Disable all interrupts */ + hsi_enable_interrupt(ctrl, 0); + hsi_enable_error_interrupt(ctrl, 0); + + /* Cut the clock and set the device as being suspended */ + iowrite32(0, ARASAN_HSI_PROGRAM(ctrl)); + iowrite32(0, ARASAN_HSI_CLOCK_CONTROL(ctrl)); + intel_hsi->suspend_state = DEVICE_SUSPENDED; + +#ifdef CONFIG_HAS_WAKELOCK + wake_unlock(&intel_hsi->stay_awake); +#endif + } +exit_ctrl_suspend: + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); + + return err; +} + +/** + * do_free_dma_xfer - do free a single DMA transfer context + * @dma_xfer: DMA transfer context reference to free + * @sg_entries: maximal number of supported scatter gather entries + * 
@intel_hsi: Intel HSI controller reference + */ +static void do_free_dma_xfer(struct intel_dma_xfer *dma_xfer, int sg_entries, + struct intel_controller *intel_hsi) +{ + dma_addr_t dma_addr; + int size; + + if (!dma_xfer) + return; + + if (sg_entries > 1) { + size = sg_entries * sizeof(struct intel_dma_lli); + dma_addr = (dma_addr_t) (dma_xfer->with_link_list.llp_addr); + dma_unmap_single(intel_hsi->pdev, dma_addr, size, + DMA_TO_DEVICE); + } + + kfree(dma_xfer); +} + +/** + * do_alloc_dma_xfer - allocate and initialise a single DMA transfer context + * @tx_not_rx: DMA channel direction (1 for TX, 0 for RX) + * @hsi_chan: HSI channel number + * @dma_chan: DMA channel number + * @sg_entries: maximal number of supported scatter gather entries + * @intel_hsi: Intel HSI controller reference + * + * Returns a pointer to a newly created DMA channel or NULL if not possible + */ +static +struct intel_dma_xfer *do_alloc_dma_xfer(int tx_not_rx, int hsi_chan, + int dma_chan, int sg_entries, + struct intel_controller *intel_hsi) +{ + struct intel_dma_xfer *dma_xfer; + struct intel_dma_lli_xfer *lli_xfer; + struct intel_dma_plain_xfer *plain_xfer; + int size, sg_size, header; + dma_addr_t dma_addr; + int i; + + if (sg_entries > 1) { + header = offsetof(struct intel_dma_xfer, with_link_list); + sg_size = sg_entries * sizeof(struct intel_dma_lli); + size = header + offsetof(struct intel_dma_lli_xfer, lli) + + sg_size; + + dma_xfer = kzalloc(size, GFP_ATOMIC); + if (!dma_xfer) + return NULL; + lli_xfer = &dma_xfer->with_link_list; + + dma_addr = dma_map_single(intel_hsi->pdev, lli_xfer->lli, + sg_size, DMA_TO_DEVICE); + if (!dma_addr) { + kfree(dma_xfer); + return NULL; + } + + lli_xfer->llp_addr = dma_addr; + + for (i = 0; i < sg_entries; i++) { + if (tx_not_rx) + lli_xfer->lli[i].dar = + HSI_DWAHB_TX_ADDRESS(hsi_chan); + else + lli_xfer->lli[i].sar = + HSI_DWAHB_RX_ADDRESS(hsi_chan); + + lli_xfer->lli[i].ctl_lo = + HSI_DWAHB_CTL_LO_CFG(tx_not_rx, 1); + } + } else { + header = offsetof(struct intel_dma_xfer, without_link_list); + size = header + sizeof(struct intel_dma_plain_xfer); + + dma_xfer = kzalloc(size, GFP_ATOMIC); + if (!dma_xfer) + return NULL; + plain_xfer = &dma_xfer->without_link_list; + + if (tx_not_rx) + plain_xfer->dst_addr = HSI_DWAHB_TX_ADDRESS(hsi_chan); + else + plain_xfer->src_addr = HSI_DWAHB_RX_ADDRESS(hsi_chan); + } + + dma_xfer->mst_enable = DWAHB_CHAN_START(dma_chan); + + return dma_xfer; +} + +/** + * alloc_dma_xfer - allocating and initialising a single DMA transfer context + * @tx_not_rx: DMA channel direction (1 for TX, 0 for RX) + * @hsi_chan: HSI channel number + * @dma_chan: DMA channel number + * @intel_hsi: Intel HSI controller reference + * + * Returns 0 if successful or an error code + */ +static int alloc_dma_xfer(int tx_not_rx, int hsi_chan, int dma_chan, + struct intel_controller *intel_hsi) +{ + struct intel_dma_ctx *dma_ctx = intel_hsi->dma_ctx[dma_chan]; + int sg_ents = dma_ctx->sg_entries; + + dma_ctx->ongoing = do_alloc_dma_xfer(tx_not_rx, hsi_chan, dma_chan, + sg_ents, intel_hsi); + + if (!dma_ctx->ongoing) + goto exit_error; + + dma_ctx->ready = do_alloc_dma_xfer(tx_not_rx, hsi_chan, dma_chan, + sg_ents, intel_hsi); + + if (dma_ctx->ready) + return 0; + + do_free_dma_xfer(dma_ctx->ongoing, sg_ents, intel_hsi); +exit_error: + intel_hsi->dma_ctx[dma_chan] = NULL; + + return -ENOMEM; +} + +/** + * free_xfer_ctx - freeing all contexts + * @intel_hsi: Intel HSI controller reference + */ +static void free_xfer_ctx(struct intel_controller *intel_hsi) + 
__acquires(&intel_hsi->sw_lock) __releases(&intel_hsi->sw_lock) +{ + struct intel_dma_ctx *dma_ctx; + int i, sg_ents; + unsigned long flags; + + spin_lock_irqsave(&intel_hsi->sw_lock, flags); + for (i = 0; i < DWAHB_CHAN_CNT; i++) + if (intel_hsi->dma_ctx[i]) { + dma_ctx = intel_hsi->dma_ctx[i]; + sg_ents = dma_ctx->sg_entries; + do_free_dma_xfer(dma_ctx->ongoing, sg_ents, intel_hsi); + do_free_dma_xfer(dma_ctx->ready, sg_ents, intel_hsi); + intel_hsi->dma_ctx[i]->ongoing = NULL; + intel_hsi->dma_ctx[i]->ready = NULL; + intel_hsi->dma_ctx[i] = NULL; + } + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); +} + +/** + * alloc_xfer_ctx - allocating and initialising contexts + * @intel_hsi: Intel HSI controller reference + * + * Returns 0 if successful or an error code + */ +static int alloc_xfer_ctx(struct intel_controller *intel_hsi) + __acquires(&intel_hsi->sw_lock) __releases(&intel_hsi->sw_lock) +{ + int i, lch; + unsigned long flags; + int err = 0; + + spin_lock_irqsave(&intel_hsi->sw_lock, flags); + for (i = 0; i < HSI_MID_MAX_CHANNELS; i++) { + lch = intel_hsi->tx_dma_chan[i]; + if (lch >= 0) { + intel_hsi->dma_ctx[lch] = &intel_hsi->tx_ctx[i].dma; + err = alloc_dma_xfer(1, i, lch, intel_hsi); + if (err) + break; + } + lch = intel_hsi->rx_dma_chan[i]; + if (lch >= 0) { + intel_hsi->dma_ctx[lch] = &intel_hsi->rx_ctx[i].dma; + err = alloc_dma_xfer(0, i, lch, intel_hsi); + if (err) + break; + } + } + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); + + if (unlikely(err)) + free_xfer_ctx(intel_hsi); + + return err; +} + +/** + * hsi_ctrl_clean_reset - quick and clean hardware halt + * @intel_hsi: Intel HSI controller reference + */ +static void hsi_ctrl_clean_reset(struct intel_controller *intel_hsi) + __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock) +{ + void __iomem *ctrl = intel_hsi->ctrl_io; + void __iomem *dma = intel_hsi->dma_io; + int i; + unsigned long flags; + + /* Disable the interrupt line */ + disable_irq(intel_hsi->irq); + + spin_lock_irqsave(&intel_hsi->hw_lock, flags); + + /* Remove the CAWAKE poll timer */ + del_timer_sync(&intel_hsi->cawake_poll); + + /* If suspended then there is nothing to do on the hardware side */ + if (intel_hsi->suspend_state != DEVICE_READY) + goto exit_clean_reset; + + /* Disable DMA */ + for (i = 0; i < DWAHB_CHAN_CNT; i++) + iowrite32(0, ARASAN_HSI_DMA_CONFIG(ctrl, i)); + hsi_disable_master_dma_cfg(dma); + + /* Disable IRQ */ + hsi_enable_interrupt(ctrl, 0); + hsi_enable_error_interrupt(ctrl, 0); + + /* Kill RX and TX wake sources and disable all channels */ + iowrite32(0, ARASAN_HSI_PROGRAM(ctrl)); + + /* Cut the clock and set the device as being not configured */ + iowrite32(0, ARASAN_HSI_CLOCK_CONTROL(ctrl)); + +exit_clean_reset: + if (intel_hsi->rx_state == RX_READY) + intel_hsi->rx_state = RX_CAN_SLEEP; + intel_hsi->dma_running = 0; + intel_hsi->irq_status = 0; + intel_hsi->err_status = 0; + intel_hsi->prg_cfg = ARASAN_RESET; + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); + + while (deassert_acwake(intel_hsi)) + ; + (void) has_disabled_acready(intel_hsi); + + /* Do not forget to re-enable the interrupt */ + enable_irq(intel_hsi->irq); + + /* Free all contexts to restart from scratch */ + free_xfer_ctx(intel_hsi); +} + +/** + * hsi_ctrl_full_reset - hardware soft reset + * @intel_hsi: Intel HSI controller reference + * + * Returns 0 if successful, -EIO if it doesn't work. 
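+ *
+ * The RESET bit is polled using a sleeping wait (schedule_timeout), so this
+ * function must only be called from process context, e.g. from the probe
+ * path.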
+ */ +static int hsi_ctrl_full_reset(struct intel_controller *intel_hsi) +{ + void __iomem *ctrl = intel_hsi->ctrl_io; + void __iomem *dma = intel_hsi->dma_io; + u32 reset_ongoing = 1; + unsigned int ip_freq; + int retries = 0; + + /* Read the IP frequency once at the beginning (if defined) */ + ip_freq = ARASAN_TX_BASE_CLK_KHZ(ioread32(ARASAN_HSI_CAPABILITY(ctrl))); + if (ip_freq == 0) + ip_freq = HSI_BASE_FREQUENCY; + intel_hsi->ip_freq = ip_freq; + + /* Perform software reset then loop until controller is ready */ + iowrite32(ARASAN_RESET, ARASAN_HSI_PROGRAM(ctrl)); + while ((reset_ongoing) && (retries < HSI_RESETDONE_RETRIES)) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(HSI_RESETDONE_TIMEOUT)); + reset_ongoing = ioread32(ARASAN_HSI_PROGRAM(ctrl)) & + ARASAN_RESET; + retries++; + } + iowrite32(0, ARASAN_HSI_PROGRAM(ctrl)); + + if (reset_ongoing) { + dev_err(intel_hsi->dev, "HSI reset failed"); + return -EIO; + } + + /* Disable all master DMA channels */ + hsi_disable_master_dma_cfg(dma); + + /* Tag the controller has being reset */ + intel_hsi->prg_cfg = ARASAN_RESET; + + return 0; +} + +#ifdef CONFIG_DEBUG_FS + +static int hsi_debug_show(struct seq_file *m, void *p) +{ + struct hsi_port *port = m->private; + struct intel_controller *intel_hsi = hsi_port_drvdata(port); + void __iomem *ctrl = intel_hsi->ctrl_io; + int ch; + + hsi_pm_runtime_get_sync(intel_hsi); + seq_printf(m, "REVISION\t\t: 0x%08x\n", + ioread32(ARASAN_HSI_VERSION(ctrl))); + for (ch = 0; ch < DWAHB_CHAN_CNT; ch++) { + seq_printf(m, "DMA CONFIG %d\t\t: 0x%08x\n", ch, + ioread32(ARASAN_HSI_DMA_CONFIG(ctrl, ch))); + } + seq_printf(m, "TXFIFO CTL1\t\t: 0x%08x\n", + ioread32(ARASAN_HSI_DMA_TX_FIFO_SIZE(ctrl))); + seq_printf(m, "TXFIFO CTL2\t\t: 0x%08x\n", + ioread32(ARASAN_HSI_DMA_TX_FIFO_THRESHOLD(ctrl))); + seq_printf(m, "RXFIFO CTL1\t\t: 0x%08x\n", + ioread32(ARASAN_HSI_DMA_RX_FIFO_SIZE(ctrl))); + seq_printf(m, "RXFIFO CTL2\t\t: 0x%08x\n", + ioread32(ARASAN_HSI_DMA_RX_FIFO_THRESHOLD(ctrl))); + seq_printf(m, "CLOCK CONTROL\t\t: 0x%08x\n", + ioread32(ARASAN_HSI_CLOCK_CONTROL(ctrl))); + seq_printf(m, "STATUS\t\t\t: 0x%08x\n", + ioread32(ARASAN_HSI_HSI_STATUS(ctrl))); + seq_printf(m, "STATUS1\t\t\t: 0x%08x\n", + ioread32(ARASAN_HSI_HSI_STATUS1(ctrl))); + seq_printf(m, "PROGRAM\t\t\t: 0x%08x\n", + ioread32(ARASAN_HSI_PROGRAM(ctrl))); + seq_printf(m, "PROGRAM1\t\t: 0x%08x\n", + ioread32(ARASAN_HSI_PROGRAM1(ctrl))); + seq_printf(m, "INTERRUPT STATUS\t: 0x%08x\n", + ioread32(ARASAN_HSI_INTERRUPT_STATUS(ctrl))); + seq_printf(m, "INTERRUPT STATUS ENABLE\t: 0x%08x\n", + ioread32(ARASAN_HSI_INTERRUPT_STATUS_ENABLE(ctrl))); + seq_printf(m, "INTERRUPT SIGNAL ENABLE\t: 0x%08x\n", + ioread32(ARASAN_HSI_INTERRUPT_SIGNAL_ENABLE(ctrl))); + seq_printf(m, "ARBITER PRIORITY\t: 0x%08x\n", + ioread32(ARASAN_HSI_ARBITER_PRIORITY(ctrl))); + seq_printf(m, "ARBITER BANDWIDTH1\t: 0x%08x\n", + ioread32(ARASAN_HSI_ARBITER_BANDWIDTH1(ctrl))); + seq_printf(m, "ARBITER BANDWIDTH2\t: 0x%08x\n", + ioread32(ARASAN_HSI_ARBITER_BANDWIDTH2(ctrl))); + seq_printf(m, "ERROR INT STATUS\t: 0x%08x\n", + ioread32(ARASAN_HSI_ERROR_INTERRUPT_STATUS(ctrl))); + seq_printf(m, "ERROR INT STATUS ENABLE\t: 0x%08x\n", + ioread32(ARASAN_HSI_ERROR_INTERRUPT_STATUS_ENABLE(ctrl))); + seq_printf(m, "ERROR INT SIGNAL ENABLE\t: 0x%08x\n", + ioread32(ARASAN_HSI_ERROR_INTERRUPT_SIGNAL_ENABLE(ctrl))); + pm_runtime_put(intel_hsi->pdev); + + return 0; +} + +#define HSI_DEBUG_GDD_PRINT(F) \ + seq_printf(m, #F "\t\t: 0x%08x\n", ioread32(HSI_DWAHB_ ## 
F(dma))) +#define HSI_DEBUG_GDD_PRINT2(F, i) \ + seq_printf(m, #F " %d\t\t: 0x%08x\n", i,\ + ioread32(HSI_DWAHB_ ## F(dma, i))) + +static int hsi_debug_dma_show(struct seq_file *m, void *p) +{ + struct hsi_controller *hsi = m->private; + struct intel_controller *intel_hsi = hsi_controller_drvdata(hsi); + void __iomem *dma = intel_hsi->dma_io; + int i; + + hsi_pm_runtime_get_sync(intel_hsi); + for (i = 0; i < DWAHB_CHAN_CNT; i++) { + HSI_DEBUG_GDD_PRINT2(SAR, i); + HSI_DEBUG_GDD_PRINT2(DAR, i); + HSI_DEBUG_GDD_PRINT2(CTL_LO, i); + HSI_DEBUG_GDD_PRINT2(CTL_HI, i); + HSI_DEBUG_GDD_PRINT2(CFG_LO, i); + HSI_DEBUG_GDD_PRINT2(CFG_HI, i); + } + + HSI_DEBUG_GDD_PRINT(DMACFG); + HSI_DEBUG_GDD_PRINT(CHEN); + HSI_DEBUG_GDD_PRINT(STATUSINT); + + HSI_DEBUG_GDD_PRINT(STATUSTFR); + HSI_DEBUG_GDD_PRINT(STATUSBLOCK); + HSI_DEBUG_GDD_PRINT(STATUSSRCTRAN); + HSI_DEBUG_GDD_PRINT(STATUSDSTTRAN); + HSI_DEBUG_GDD_PRINT(STATUSERR); + + HSI_DEBUG_GDD_PRINT(MASKTFR); + HSI_DEBUG_GDD_PRINT(MASKBLOCK); + HSI_DEBUG_GDD_PRINT(MASKSRCTRAN); + HSI_DEBUG_GDD_PRINT(MASKDSTTRAN); + HSI_DEBUG_GDD_PRINT(MASKERR); + pm_runtime_put(intel_hsi->pdev); + + return 0; +} + +static int hsi_regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, hsi_debug_show, inode->i_private); +} + +static int hsi_dma_regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, hsi_debug_dma_show, inode->i_private); +} + +static const struct file_operations hsi_regs_fops = { + .open = hsi_regs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations hsi_dma_regs_fops = { + .open = hsi_dma_regs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init hsi_debug_add_ctrl(struct hsi_controller *hsi) +{ + struct intel_controller *intel_hsi = hsi_controller_drvdata(hsi); + struct dentry *dir; + + /* HSI controller */ + intel_hsi->dir = debugfs_create_dir(dev_name(&hsi->device), NULL); + if (IS_ERR(intel_hsi->dir)) + return PTR_ERR(intel_hsi->dir); + debugfs_create_file("regs", S_IRUGO, intel_hsi->dir, hsi, + &hsi_regs_fops); + + /* HSI slave DMA */ + dir = debugfs_create_dir("dma", intel_hsi->dir); + if (IS_ERR(dir)) + goto rback; + debugfs_create_file("regs", S_IRUGO, dir, hsi, &hsi_dma_regs_fops); + + return 0; +rback: + debugfs_remove_recursive(intel_hsi->dir); + + return PTR_ERR(dir); +} +#endif /* CONFIG_DEBUG_FS */ + +/** + * do_hsi_prepare_dma - prepare a DMA context + * @msg: reference to the message + * @lch: DMA channel to consider + * @intel_hsi: Intel HSI controller reference + */ +static void do_hsi_prepare_dma(struct hsi_msg *msg, int lch, + struct intel_controller *intel_hsi) +{ + struct intel_dma_ctx *dma_ctx = intel_hsi->dma_ctx[lch]; + struct intel_dma_xfer *ready_xfer = dma_ctx->ready; + struct intel_dma_lli_xfer *lli_xfer; + struct intel_dma_plain_xfer *plain_xfer; + struct sg_table *sgt = &msg->sgt; + struct scatterlist *sgl = sgt->sgl; + u32 rx_not_tx = (msg->ttype == HSI_MSG_READ); + u32 size; + struct scatterlist *sg; + u32 len, next_llp; + int i; + + if (is_using_link_list(dma_ctx)) { + lli_xfer = &ready_xfer->with_link_list; + size = 0; + next_llp = (u32) lli_xfer->llp_addr; +#ifdef USE_SOFWARE_WORKAROUND_FOR_DMA_LLI + lli_xfer->blk = sgl; +#endif + + for_each_sg(sgl, sg, sgt->nents, i) { + next_llp += sizeof(struct intel_dma_lli); + len = HSI_BYTES_TO_FRAMES(sg->length); + size += len; + + if (rx_not_tx) + lli_xfer->lli[i].dar = sg_dma_address(sg); + else + lli_xfer->lli[i].sar 
= sg_dma_address(sg); + lli_xfer->lli[i].llp = (sg_is_last(sg)) ? 0 : next_llp; + lli_xfer->lli[i].ctl_hi = len; + } + + msg->actual_len = HSI_FRAMES_TO_BYTES(size); +#ifdef USE_SOFWARE_WORKAROUND_FOR_DMA_LLI + size = 0; /* on slave, size is updated on each block xfer */ +#endif + } else { + size = HSI_BYTES_TO_FRAMES(sgl->length); + plain_xfer = &ready_xfer->without_link_list; + plain_xfer->size = size; + if (rx_not_tx) + plain_xfer->dst_addr = sg_dma_address(msg->sgt.sgl); + else + plain_xfer->src_addr = sg_dma_address(msg->sgt.sgl); + + msg->actual_len = HSI_FRAMES_TO_BYTES(size); + } + + ready_xfer->slv_enable = ARASAN_DMA_DIR(rx_not_tx) | + ARASAN_DMA_CHANNEL(msg->channel) | + ARASAN_DMA_XFER_FRAMES(size) | + ARASAN_DMA_BURST_SIZE(32) | ARASAN_DMA_ENABLE; + + ready_xfer->msg = msg; +} + +/** + * try_hsi_prepare_dma - try preparing a ready DMA context + * @queue: reference to queue ready to start transfer + * @lch: DMA channel to consider + * @intel_hsi: Intel HSI controller reference + */ +static void hsi_try_prepare_dma(struct list_head *queue, int lch, + struct intel_controller *intel_hsi) + __acquires(&intel_hsi->sw_lock) __releases(&intel_hsi->sw_lock) +{ + unsigned long flags; + struct hsi_msg *ongoing_msg, *ready_msg; + + spin_lock_irqsave(&intel_hsi->sw_lock, flags); + + /* If some DMA context is already ready, we are fine */ + if (intel_hsi->dma_ctx[lch]->ready->msg) + goto all_dma_prepared; + + ongoing_msg = intel_hsi->dma_ctx[lch]->ongoing->msg; + if (ongoing_msg) { + ready_msg = list_entry(ongoing_msg->link.next, + struct hsi_msg, link); + /* Done if there is a single ongoing message in the queue */ + if (&(ready_msg->link) == queue) + goto all_dma_prepared; + } else { + if (list_empty(queue)) + goto all_dma_prepared; + ready_msg = list_first_entry(queue, struct hsi_msg, link); + } + + if (likely(ready_msg->sgt.nents)) + do_hsi_prepare_dma(ready_msg, lch, intel_hsi); + +all_dma_prepared: + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); +} + +/** + * hsi_start_pio - start PIO data transfer + * @msg: Pointer to message to transfer + * @intel_hsi: Intel HSI controller reference + */ +static void hsi_start_pio(struct hsi_msg *msg, + struct intel_controller *intel_hsi) + __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock) +{ + int channel = msg->channel; + void __iomem *ctrl = intel_hsi->ctrl_io; + unsigned long flags; + u32 irq_en, err_en; + + irq_en = (msg->ttype == HSI_MSG_WRITE) ? + ARASAN_IRQ_TX_THRESHOLD(channel) : + ARASAN_IRQ_RX_THRESHOLD(channel); + err_en = (msg->ttype == HSI_MSG_WRITE) ? 
+ 0 : ARASAN_IRQ_DATA_TIMEOUT(channel); + + /* Enable the threshold reached signal */ + spin_lock_irqsave(&intel_hsi->hw_lock, flags); + intel_hsi->irq_cfg |= irq_en; + intel_hsi->err_cfg |= err_en; + if (likely(intel_hsi->suspend_state == DEVICE_READY)) { + hsi_enable_interrupt(ctrl, intel_hsi->irq_cfg); + if (err_en) + hsi_enable_error_interrupt(ctrl, intel_hsi->err_cfg); + } + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); +} + +/** + * do_hsi_start_dma - start/restart DMA data transfer helper function + * @msg: reference to the message to transfer + * @lch: DMA channel to consider + * @intel_hsi: Intel HSI controller reference + * @resuming: flag stating if this is a first start or a resume restart + */ +static void do_hsi_start_dma(struct hsi_msg *msg, int lch, + struct intel_controller *intel_hsi, int resuming) + __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock) +{ + struct intel_dma_ctx *dma_ctx = intel_hsi->dma_ctx[lch]; + struct intel_dma_xfer *dma_xfer = dma_ctx->ongoing; + struct intel_dma_lli_xfer *lli_xfer; + struct intel_dma_plain_xfer *plain_xfer; + void __iomem *ctrl = intel_hsi->ctrl_io; + void __iomem *dma = intel_hsi->dma_io; + u32 mask = DMA_BUSY(lch); + u32 blk_length_overwrite = 0; + unsigned int blk_length; + unsigned long flags; + int nothing_to_do; + + spin_lock_irqsave(&intel_hsi->hw_lock, flags); + if (resuming) + nothing_to_do = ((intel_hsi->dma_resumed & mask) || + (msg->status != HSI_STATUS_PROCEEDING)); + else + nothing_to_do = ((intel_hsi->suspend_state != DEVICE_READY) || + (intel_hsi->dma_running & mask)); + + if (nothing_to_do) + goto do_start_dma_done; + + if (is_using_link_list(dma_ctx)) { + /* Set the link list pointer */ + lli_xfer = &dma_xfer->with_link_list; + iowrite32(lli_xfer->llp_addr, HSI_DWAHB_LLP(dma, lch)); +#ifdef USE_SOFWARE_WORKAROUND_FOR_DMA_LLI + /* Overwrite the block length */ + blk_length = HSI_BYTES_TO_FRAMES(lli_xfer->blk->length); + blk_length_overwrite = ARASAN_DMA_XFER_FRAMES(blk_length); +#endif + } else { + /* Clear 'done' bit and set the transfer size and addresses */ + plain_xfer = &dma_xfer->without_link_list; + iowrite32(plain_xfer->size, HSI_DWAHB_CTL_HI(dma, lch)); + iowrite32(plain_xfer->src_addr, HSI_DWAHB_SAR(dma, lch)); + iowrite32(plain_xfer->dst_addr, HSI_DWAHB_DAR(dma, lch)); + } + + /* Enable slave then master DMA to start the transfer */ +#ifdef USE_SOFWARE_WORKAROUND_FOR_DMA_LLI + iowrite32(dma_xfer->slv_enable | blk_length_overwrite, + ARASAN_HSI_DMA_CONFIG(ctrl, lch)); +#else + iowrite32(dma_xfer->slv_enable, ARASAN_HSI_DMA_CONFIG(ctrl, lch)); +#endif + iowrite32(dma_xfer->mst_enable, HSI_DWAHB_CHEN(dma)); + + intel_hsi->dma_running |= mask; + intel_hsi->dma_resumed |= mask; + +do_start_dma_done: + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); +} + +/** + * hsi_start_dma - start DMA data transfer + * @msg: reference to the message to transfer + * @lch: DMA channel to consider + * @intel_hsi: Intel HSI controller reference + */ +static void hsi_start_dma(struct hsi_msg *msg, int lch, + struct intel_controller *intel_hsi) + __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock) +{ + do_hsi_start_dma(msg, lch, intel_hsi, 0); +} + +/** + * hsi_restart_dma - restarting DMA data transfer further to a resume + * @msg: reference to the message to transfer + * @lch: DMA channel to consider + * @intel_hsi: Intel HSI controller reference + */ +static void hsi_restart_dma(struct hsi_msg *msg, int lch, + struct intel_controller *intel_hsi) + __acquires(&intel_hsi->hw_lock) 
__releases(&intel_hsi->hw_lock) +{ + do_hsi_start_dma(msg, lch, intel_hsi, 1); +} + +/** + * hsi_transfer - starting transfer from TX or RX queue + * @intel_hsi: Intel HSI controller reference + * @tx_not_rx: direction to consider (RX = 0, TX = anything else) + * @hsi_channel: HSI channel to consider + * @dma_channel: DMA channel to consider (<0 if no DMA) + */ +static void hsi_transfer(struct intel_controller *intel_hsi, int tx_not_rx, + unsigned int hsi_channel, int dma_channel) + __acquires(&intel_hsi->sw_lock) __releases(&intel_hsi->sw_lock) +{ + struct list_head *queue; + struct hsi_msg *msg; + unsigned long flags; + struct intel_dma_xfer *done_dma_xfer; + struct intel_pio_ctx *pio_ctx; + + queue = (tx_not_rx) ? + &intel_hsi->tx_queue[hsi_channel] : + &intel_hsi->rx_queue[hsi_channel]; + + spin_lock_irqsave(&intel_hsi->sw_lock, flags); + if (list_empty(queue)) { + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); + return; + } + + msg = list_first_entry(queue, struct hsi_msg, link); + if (msg->status != HSI_STATUS_QUEUED) { + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); + return; + } + + msg->status = HSI_STATUS_PROCEEDING; + + if (dma_channel >= 0) { + if (intel_hsi->dma_ctx[dma_channel]->ready->msg == NULL) + do_hsi_prepare_dma(msg, dma_channel, intel_hsi); + + /* The ongoing DMA is now the ready DMA message */ + done_dma_xfer = intel_hsi->dma_ctx[dma_channel]->ongoing; + intel_hsi->dma_ctx[dma_channel]->ongoing = + intel_hsi->dma_ctx[dma_channel]->ready; + intel_hsi->dma_ctx[dma_channel]->ready = done_dma_xfer; + } else { + msg->actual_len = 0; + pio_ctx = (tx_not_rx) ? + &intel_hsi->tx_ctx[hsi_channel].pio : + &intel_hsi->rx_ctx[hsi_channel].pio; + pio_ctx->blk = (!msg->sgt.nents) ? NULL : msg->sgt.sgl; + pio_ctx->offset = 0; + } + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); + + /* Assert ACWAKE (deasserted on complete or destruct) */ + if (tx_not_rx) + assert_acwake(intel_hsi); + else + unforce_disable_acready(intel_hsi); + + if (dma_channel < 0) + hsi_start_pio(msg, intel_hsi); + else { + hsi_start_dma(msg, dma_channel, intel_hsi); + hsi_try_prepare_dma(queue, dma_channel, intel_hsi); + } +} + +/** + * hsi_resume_dma_transfers - resuming DMA transfers hold on suspend + * @intel_hsi: Intel HSI controller reference + */ +static void hsi_resume_dma_transfers(struct intel_controller *intel_hsi) +{ + struct hsi_msg *msg; + int i; + + for (i = 0; i < DWAHB_CHAN_CNT; i++) { + msg = (intel_hsi->dma_ctx[i]) ? 
+ intel_hsi->dma_ctx[i]->ongoing->msg : NULL; + if (msg) + hsi_restart_dma(msg, i, intel_hsi); + } +} + +/** + * hsi_break_complete - break interrupt callback + * @intel_hsi: Intel HSI controller reference + */ +static void hsi_break_complete(struct intel_controller *intel_hsi) + __acquires(&intel_hsi->sw_lock) __releases(&intel_hsi->sw_lock) +{ + struct list_head *queue; + struct list_head *node; + struct hsi_msg *msg; + unsigned long flags; + + dev_dbg(intel_hsi->dev, "HWBREAK received\n"); + + queue = &intel_hsi->brk_queue; + spin_lock_irqsave(&intel_hsi->sw_lock, flags); + node = queue->next; + while (node != queue) { + msg = list_entry(node, struct hsi_msg, link); + list_del(&msg->link); + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); + msg->status = HSI_STATUS_COMPLETED; + msg->complete(msg); + spin_lock_irqsave(&intel_hsi->sw_lock, flags); + node = queue->next; + } + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); +} + +/** + * hsi_rx_error - handle RX error interrupt sources + * @intel_hsi: Intel HSI controller reference + */ +static void hsi_rx_error(struct intel_controller *intel_hsi) + __acquires(&intel_hsi->sw_lock) __releases(&intel_hsi->sw_lock) +{ + struct list_head *queue; + struct hsi_msg *msg = NULL; + unsigned int i; + unsigned long flags; + + pr_err("hsi: rx error\n"); + for (i = 0; i < hsi_rx_channel_count(intel_hsi); i++) { + queue = &intel_hsi->rx_queue[i]; + spin_lock_irqsave(&intel_hsi->sw_lock, flags); + if (!list_empty(queue)) { + msg = list_first_entry(queue, struct hsi_msg, link); + msg->status = HSI_STATUS_ERROR; + } + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); + } +} + +/** + * hsi_timeout - handle all timeout interrupt sources + * @intel_hsi: Intel HSI controller reference + * @timeout_reg: interrupt timeout register value + * + * Returns the timeout channels that have been cleared + */ +static u32 hsi_timeout(struct intel_controller *intel_hsi, u32 timeout_reg) + __acquires(&intel_hsi->sw_lock) __releases(&intel_hsi->sw_lock) + __acquires(&intel_hsi->sw_lock) __releases(&intel_hsi->sw_lock) +{ + void __iomem *ctrl = intel_hsi->ctrl_io; + struct list_head *queue; + struct hsi_msg *msg = NULL; + struct intel_pio_ctx *pio_ctx; + unsigned int i; + u32 *buf; + u32 timeout_clr, timeout_mask; + unsigned long flags; + + timeout_clr = 0; + + /* handle data timeout errors */ + /* ch0 timeout is bit 2 of int status reg */ + timeout_mask = ARASAN_IRQ_DATA_TIMEOUT(0); + for (i = 0; i < hsi_rx_channel_count(intel_hsi); + i++, timeout_mask <<= 1) { + if (!(timeout_reg & timeout_mask)) + continue; + + queue = &intel_hsi->rx_queue[i]; + pio_ctx = &intel_hsi->rx_ctx[i].pio; + +hsi_pio_timeout_try: + spin_lock_irqsave(&intel_hsi->sw_lock, flags); + + /* if no msg waiting for read, leave and wait for it */ + if (list_empty(queue)) + goto hsi_pio_timeout_next; + + msg = list_first_entry(queue, struct hsi_msg, link); + if (unlikely(msg->status != HSI_STATUS_PROCEEDING)) + goto hsi_pio_timeout_next; + + if (unlikely(!msg->sgt.nents)) { + msg->actual_len = 0; + msg->status = HSI_STATUS_COMPLETED; + goto hsi_pio_timeout_done; + } + + timeout_clr |= timeout_mask; + + while ((ioread32(ARASAN_HSI_HSI_STATUS(ctrl)) & + ARASAN_RX_NOT_EMPTY(i)) && + (msg->status == HSI_STATUS_PROCEEDING)) { + if (likely(pio_ctx->blk->length > 0)) { + buf = sg_virt(pio_ctx->blk) + pio_ctx->offset*4; + *buf = ioread32(ARASAN_HSI_RX_DATA(ctrl, i)); + msg->actual_len += HSI_FRAMES_TO_BYTES(1); + pio_ctx->offset += 1; + } + + if (pio_ctx->offset >= + 
HSI_BYTES_TO_FRAMES(pio_ctx->blk->length)) { + if (!sg_is_last(pio_ctx->blk)) { + pio_ctx->blk = sg_next(pio_ctx->blk); + pio_ctx->offset = 0; + } else + msg->status = HSI_STATUS_COMPLETED; + } + } + + if (msg->status == HSI_STATUS_COMPLETED) { +hsi_pio_timeout_done: + list_del(&msg->link); + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); + msg->complete(msg); + hsi_transfer(intel_hsi, 0, i, -1); + goto hsi_pio_timeout_try; + } + +hsi_pio_timeout_next: + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); + } + + return timeout_clr; +} + +/** + * hsi_async_break - send break message or queue break receive msg + * @msg: reference to the HSI break message + * + * Return 0 if successful, -EINVAL if not in frame mode. + */ +static int hsi_async_break(struct hsi_msg *msg) + __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock) + __acquires(&intel_hsi->sw_lock) __releases(&intel_hsi->sw_lock) +{ + struct hsi_port *port = hsi_get_port(msg->cl); + struct intel_controller *intel_hsi = hsi_port_drvdata(port); + void __iomem *ctrl = intel_hsi->ctrl_io; + unsigned long flags; + + /* Return an error if not in frame mode */ + if (unlikely(!is_in_tx_frame_mode(intel_hsi))) + return -EINVAL; + + hsi_pm_runtime_get_sync(intel_hsi); + if (msg->ttype == HSI_MSG_WRITE) { + assert_acwake(intel_hsi); + spin_lock_irqsave(&intel_hsi->hw_lock, flags); + intel_hsi->clk_cfg |= ARASAN_TX_BREAK; + iowrite32(intel_hsi->clk_cfg, ARASAN_HSI_CLOCK_CONTROL(ctrl)); + /* Dummy read to ensure that at least the minimal delay for a + * break sequence will be met */ + (void) ioread32(ARASAN_HSI_CLOCK_CONTROL(ctrl)); + udelay(intel_hsi->brk_us_delay); + intel_hsi->clk_cfg &= ~ARASAN_TX_BREAK; + iowrite32(intel_hsi->clk_cfg, ARASAN_HSI_CLOCK_CONTROL(ctrl)); + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); + msg->status = HSI_STATUS_COMPLETED; + msg->complete(msg); + (void) deassert_acwake(intel_hsi); + } else { + spin_lock_irqsave(&intel_hsi->sw_lock, flags); + list_add_tail(&msg->link, &intel_hsi->brk_queue); + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); + } + pm_runtime_put(intel_hsi->pdev); + + return 0; +} + +/** + * hsi_mid_async - queue a HSI message and start transfer if possible + * @msg: reference to the HSI message + * + * Queue message to send when possible. + * + * Returns 0 if successful, -EINVAL if message pointer is NULL or channel + * number is invalid, or transfer error if any. 
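+ *
+ * A minimal client-side sketch of how this entry point is typically reached
+ * through the HSI core helpers (buffer, length and completion handler names
+ * below are purely illustrative):
+ *
+ *	struct hsi_msg *msg = hsi_alloc_msg(1, GFP_KERNEL);
+ *	if (!msg)
+ *		return -ENOMEM;
+ *	sg_init_one(msg->sgt.sgl, tx_buf, tx_len);
+ *	msg->channel = 0;
+ *	msg->ttype = HSI_MSG_WRITE;
+ *	msg->complete = tx_done;
+ *	err = hsi_async(cl, msg);	/* lands here through port->async() */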
+ */ +static int hsi_mid_async(struct hsi_msg *msg) + __acquires(&intel_hsi->sw_lock) __releases(&intel_hsi->sw_lock) +{ + struct hsi_port *port = hsi_get_port(msg->cl); + struct intel_controller *intel_hsi = hsi_port_drvdata(port); + struct list_head *queue; + u16 *queue_busy; + unsigned int hsi_channel, nents; + int dma_channel; + u8 dir; + int tx_not_rx, old_status, err; + unsigned long flags; + + if (msg->break_frame) + return hsi_async_break(msg); + + hsi_channel = msg->channel; + dma_channel = hsi_get_dma_channel(intel_hsi, msg, hsi_channel); + + if (msg->ttype == HSI_MSG_WRITE) { + if (hsi_channel >= hsi_tx_channel_count(intel_hsi)) + return -EINVAL; + tx_not_rx = 1; + queue = &intel_hsi->tx_queue[hsi_channel]; + queue_busy = &intel_hsi->tx_queue_busy; + dir = DMA_TO_DEVICE; + } else { + if (hsi_channel >= hsi_rx_channel_count(intel_hsi)) + return -EINVAL; + tx_not_rx = 0; + queue = &intel_hsi->rx_queue[hsi_channel]; + queue_busy = &intel_hsi->rx_queue_busy; + dir = DMA_FROM_DEVICE; + } + + nents = msg->sgt.nents; + if (dma_channel >= 0) { + if (nents > intel_hsi->dma_ctx[dma_channel]->sg_entries) + return -ENOSYS; + + err = dma_map_sg(intel_hsi->pdev, msg->sgt.sgl, nents, dir); + if (err < 0) + return err; + } + + old_status = msg->status; + msg->status = HSI_STATUS_QUEUED; + + spin_lock_irqsave(&intel_hsi->sw_lock, flags); + if (unlikely((*queue_busy) & QUEUE_BUSY(hsi_channel))) { + msg->status = old_status; + if (dma_channel >= 0) + dma_unmap_sg(intel_hsi->pdev, msg->sgt.sgl, nents, dir); + err = -EBUSY; + } else { + list_add_tail(&msg->link, queue); + err = 0; + } + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); + + if (!err) + hsi_transfer(intel_hsi, tx_not_rx, hsi_channel, dma_channel); + + return err; +} + +/** + * hsi_destruct_msg - helper function for cleanly destructing a message + * @msg: reference to the message to destruct + * @dma_channel: DMA channel to consider (<0 if in PIO mode) + * @intel_hsi: Intel HSI controller reference + */ +static void hsi_destruct_msg(struct hsi_msg *msg, int dma_channel, + struct intel_controller *intel_hsi) +{ + u8 dir; + + if (msg->ttype == HSI_MSG_WRITE) + (void) deassert_acwake(intel_hsi); + + if (dma_channel >= 0) { + dir = (msg->ttype == HSI_MSG_READ) ? + DMA_FROM_DEVICE : DMA_TO_DEVICE; + dma_unmap_sg(intel_hsi->pdev, msg->sgt.sgl, + msg->sgt.nents, dir); + } + + if (msg->destructor) + msg->destructor(msg); + else + hsi_free_msg(msg); +} + +/** + * hsi_flush_queue - flushing all messages of a client from a queue + * @queue: reference to the message queue to flush + * @cl: HSI client reference + * @intel_hsi: Intel HSI controller reference + */ +static void hsi_flush_queue(struct list_head *queue, struct hsi_client *cl, + struct intel_controller *intel_hsi) +{ + unsigned int hsi_channel; + int dma_channel; + struct list_head *node; + struct hsi_msg *msg; + unsigned long flags; + + spin_lock_irqsave(&intel_hsi->sw_lock, flags); + do { + node = queue->prev; + while (node != queue) { + msg = list_entry(node, struct hsi_msg, link); + if (cl != msg->cl) + goto prev_node; + + /* Do not remove the ongoing DMA message yet ! 
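+ * It is only flagged as a break frame here, so that hsi_cleanup_dma() or
+ * the forwarding tasklet can destruct it once the DMA engine has actually
+ * released it.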
*/ + hsi_channel = msg->channel; + dma_channel = hsi_get_dma_channel(intel_hsi, msg, + hsi_channel); + if (dma_channel < 0) + goto del_node; + + if (intel_hsi->dma_ctx[dma_channel]->ongoing->msg == + msg) { + msg->break_frame = 1; + goto prev_node; + } + + if (intel_hsi->dma_ctx[dma_channel]->ready->msg == msg) + intel_hsi->dma_ctx[dma_channel]->ready->msg = + NULL; + +del_node: + list_del(node); + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); + + hsi_destruct_msg(msg, dma_channel, intel_hsi); + + spin_lock_irqsave(&intel_hsi->sw_lock, flags); + break; +prev_node: + node = node->prev; + } + } while (node != queue); + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); +} + +/** + * hsi_cleanup_dma - cleanup DMA activities related to a client + * @intel_hsi: Intel HSI controller reference + * @cl: HSI client reference + */ +static void hsi_cleanup_dma(struct intel_controller *intel_hsi, + struct hsi_client *cl) + __acquires(&intel_hsi->sw_lock) __releases(&intel_hsi->sw_lock) + __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock) +{ + void __iomem *ctrl = intel_hsi->ctrl_io; + void __iomem *dma = intel_hsi->dma_io; + struct hsi_msg *msg; + unsigned int hsi_channel; + int tx_not_rx, i; + unsigned long flags; + + for (i = 0; i < DWAHB_CHAN_CNT; i++) { + spin_lock_irqsave(&intel_hsi->sw_lock, flags); + msg = (intel_hsi->dma_ctx[i]) ? + intel_hsi->dma_ctx[i]->ongoing->msg : NULL; + if ((!msg) || (msg->cl != cl)) { + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); + continue; + } + intel_hsi->dma_ctx[i]->ongoing->msg = NULL; + msg->break_frame = 0; + msg->status = HSI_STATUS_ERROR; + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); + + spin_lock_irqsave(&intel_hsi->hw_lock, flags); + if (likely(intel_hsi->suspend_state == DEVICE_READY)) { + iowrite32(0, ARASAN_HSI_DMA_CONFIG(ctrl, i)); + iowrite32(DWAHB_CHAN_STOP(i), HSI_DWAHB_CHEN(dma)); + } + intel_hsi->dma_running &= ~DMA_BUSY(i); + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); + + spin_lock_irqsave(&intel_hsi->sw_lock, flags); + list_del(&msg->link); + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); + + /* Restart transfers of other clients */ + tx_not_rx = (msg->ttype == HSI_MSG_WRITE); + hsi_channel = msg->channel; + hsi_destruct_msg(msg, i, intel_hsi); + hsi_transfer(intel_hsi, tx_not_rx, hsi_channel, i); + } +} + +/** + * hsi_mid_setup - setting up the controller from client configuration + * @cl: HSI client reference + * + * This stores the hardware setup and applies it in conformance with the + * client settings. + * + * Return success or an error code if the cleint configuration is invalid. 
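+ *
+ * Whether a channel runs in DMA or PIO mode is derived from the platform
+ * data: a channel with a strictly positive FIFO size but a negative DMA
+ * channel index falls back to PIO. A purely illustrative TX setup could
+ * look like:
+ *
+ *	.tx_fifo_sizes   = { 1024, 1024,  64, -1, ... },
+ *	.tx_dma_channels = {    0,    1,  -1, -1, ... },	/* ch2 -> PIO */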
+ */ +static int hsi_mid_setup(struct hsi_client *cl) + __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock) +{ + struct hsi_port *port = hsi_get_port(cl); + struct intel_controller *intel_hsi = hsi_port_drvdata(port); + struct hsi_mid_platform_data *pd; + unsigned int divisor, rx_timeout, data_timeout; + int i; + u32 tx_sz, tx_en, rx_sz, rx_en, rx_pio; + unsigned long flags; + int err = 0; + + /* Local register values */ + u32 irq_cfg, err_cfg, clk_cfg, prg_cfg; + u32 tx_fifo_cfg, rx_fifo_cfg, arb_cfg, sz_cfg; + s8 tx_dma_chan[HSI_MID_MAX_CHANNELS]; + s8 rx_dma_chan[HSI_MID_MAX_CHANNELS]; + + /* Read the platform data to initialise the device */ + pd = (struct hsi_mid_platform_data *)(cl->device.platform_data); + if (pd == NULL) { + dev_dbg(&port->device, "platform data not found\n"); + return -EINVAL; + } + + /* Compute the arbiter control register */ + arb_cfg = cl->tx_cfg.arb_mode; + + /* Compute the RX timeout value (for inserting RX error) */ + rx_timeout = rounddown_pow_of_two(200000/intel_hsi->ip_freq); + + /* Compute the data timeout value */ + data_timeout = 0; + for (i = 0; i < HSI_MID_MAX_CHANNELS; i++) + if ((pd->rx_fifo_sizes[i] > 0) && (pd->rx_dma_channels[i] < 0)) + data_timeout += pd->rx_fifo_sizes[i]; + + /* Give every RX HSI frame at least 128 TX clock cycles to arrive */ + data_timeout = NUM_RX_FIFO_DWORDS(data_timeout * 128); + data_timeout = order_base_2(data_timeout/8192); + + /* Compute the clock control register */ + divisor = max(intel_hsi->ip_freq/max(cl->tx_cfg.speed, 1u), 1u); + divisor = min(divisor, 256u); + divisor = rounddown_pow_of_two(divisor); + clk_cfg = ARASAN_CLK_ENABLE | ARASAN_CLK_START | + ARASAN_CLK_DIVISOR(divisor/2) | + ARASAN_DATA_TIMEOUT(data_timeout) | + ARASAN_RX_FRAME_BURST_COUNT(256) | + ARASAN_RX_TAILING_BIT_COUNT(66) | + ARASAN_RX_TAP_DELAY_NS(3); + + /* A HSI break frame shall be at least 38 TX cycles long */ + intel_hsi->brk_us_delay = ((38000*divisor)/intel_hsi->ip_freq)+1; + + /* Compute the program1 register */ + sz_cfg = ARASAN_TX_CHANNEL_SIZE(cl->tx_cfg.channels) | + ARASAN_RX_CHANNEL_SIZE(cl->rx_cfg.channels); + + /* Compute the program, FIFO , DMA and interrupt registers */ + tx_fifo_cfg = 0; + rx_fifo_cfg = 0; + prg_cfg = ARASAN_RX_TIMEOUT_CNT(rx_timeout) | + ARASAN_TX_MODE(cl->tx_cfg.mode) | + ARASAN_RX_FLOW(cl->rx_cfg.flow) | + ARASAN_RX_MODE(cl->rx_cfg.mode); + irq_cfg = ARASAN_IRQ_RX_WAKE; + err_cfg = ARASAN_IRQ_BREAK | ARASAN_IRQ_RX_ERROR; + for (i = 0; i < HSI_MID_MAX_CHANNELS; i++) { + tx_en = (pd->tx_fifo_sizes[i] > 0); + tx_sz = (tx_en) ? min(order_base_2(pd->tx_fifo_sizes[i]), + ARASAN_FIFO_MAX_BITS) : 0; + rx_en = (pd->rx_fifo_sizes[i] > 0); + rx_sz = (rx_en) ? min(order_base_2(pd->rx_fifo_sizes[i]), + ARASAN_FIFO_MAX_BITS) : 0; + rx_pio = ((rx_en) && (pd->rx_dma_channels[i] < 0)) ? + ARASAN_IRQ_DATA_TIMEOUT(i) : 0; + + prg_cfg |= ARASAN_TX_CHANNEL_ENABLE(tx_en, i); + prg_cfg |= ARASAN_RX_CHANNEL_ENABLE(rx_en, i); + tx_fifo_cfg |= ARASAN_FIFO_SIZE(tx_sz, i); + rx_fifo_cfg |= ARASAN_FIFO_SIZE(rx_sz, i); + tx_dma_chan[i] = (tx_en) ? pd->tx_dma_channels[i] : -1; + if (tx_dma_chan[i] >= 0) + irq_cfg |= ARASAN_IRQ_DMA_COMPLETE(tx_dma_chan[i]); + rx_dma_chan[i] = (rx_en) ? 
pd->rx_dma_channels[i] : -1; + if (rx_dma_chan[i] >= 0) + irq_cfg |= ARASAN_IRQ_DMA_COMPLETE(rx_dma_chan[i]); + } + + /* Check if the configuration is compatible with the current one (if + * any) */ + spin_lock_irqsave(&intel_hsi->hw_lock, flags); + if (!(intel_hsi->prg_cfg & ARASAN_RESET)) { + for (i = 0; i < HSI_MID_MAX_CHANNELS; i++) + if ((tx_dma_chan[i] != intel_hsi->tx_dma_chan[i]) || + (rx_dma_chan[i] != intel_hsi->rx_dma_chan[i])) + err = -EINVAL; + + if (((irq_cfg & ARASAN_IRQ_ANY_DMA_COMPLETE) != + (intel_hsi->irq_cfg & ARASAN_IRQ_ANY_DMA_COMPLETE)) || + (clk_cfg != (intel_hsi->clk_cfg & ~ARASAN_TX_BREAK)) || + (prg_cfg != (intel_hsi->prg_cfg & + ~(ARASAN_TX_ENABLE|ARASAN_RX_ENABLE))) || + (tx_fifo_cfg != intel_hsi->tx_fifo_cfg) || + (rx_fifo_cfg != intel_hsi->rx_fifo_cfg) || + (arb_cfg != intel_hsi->arb_cfg) || + (sz_cfg != intel_hsi->sz_cfg)) + err = -EINVAL; + + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); + return err; + } + + /* Setup the HSI controller accordingly */ + intel_hsi->irq_cfg = irq_cfg; + intel_hsi->err_cfg = err_cfg; + intel_hsi->clk_cfg = clk_cfg; + /* Keep the current RX and TX wake status */ + intel_hsi->prg_cfg &= ARASAN_TX_ENABLE|ARASAN_RX_ENABLE; + intel_hsi->prg_cfg |= prg_cfg; + intel_hsi->tx_fifo_cfg = tx_fifo_cfg; + intel_hsi->rx_fifo_cfg = rx_fifo_cfg; + intel_hsi->arb_cfg = arb_cfg; + intel_hsi->sz_cfg = sz_cfg; + + for (i = 0; i < HSI_MID_MAX_CHANNELS; i++) { + intel_hsi->tx_dma_chan[i] = tx_dma_chan[i]; + intel_hsi->rx_dma_chan[i] = rx_dma_chan[i]; + if (tx_dma_chan[i] >= 0) + intel_hsi->tx_ctx[i].dma.sg_entries = + max(pd->tx_sg_entries[i], 1); + if (rx_dma_chan[i] >= 0) + intel_hsi->rx_ctx[i].dma.sg_entries = + max(pd->rx_sg_entries[i], 1); + } + + /* The controller will be configured on resume if necessary */ + if (unlikely(intel_hsi->suspend_state == DEVICE_READY)) + err = hsi_ctrl_set_cfg(intel_hsi); + + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); + + /* Prepare the necessary DMA contexts */ + if (!err) + err = alloc_xfer_ctx(intel_hsi); + + return err; +} + +/** + * hsi_mid_flush - flushing resources belonging to a client + * @cl: HSI client reference + * + * Returns success. 
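+ *
+ * This is typically reached through the hsi_flush() helper and is also
+ * called internally by hsi_mid_release(), so it has to cope with messages
+ * still owned by the DMA engine (see hsi_cleanup_dma()).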
+ */
+static int hsi_mid_flush(struct hsi_client *cl)
+ __acquires(&intel_hsi->sw_lock) __releases(&intel_hsi->sw_lock)
+ __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+ struct intel_controller *intel_hsi = hsi_port_drvdata(port);
+ void __iomem *ctrl = intel_hsi->ctrl_io;
+ unsigned int i, unforce = 0;
+ unsigned long flags;
+
+ /* Prevent any new message in the software queues */
+ spin_lock_irqsave(&intel_hsi->sw_lock, flags);
+ intel_hsi->tx_queue_busy = ARASAN_ALL_CHANNELS;
+ intel_hsi->rx_queue_busy = ARASAN_ALL_CHANNELS;
+ spin_unlock_irqrestore(&intel_hsi->sw_lock, flags);
+
+ /* Wake the device so that it does not react to CAWAKE and hw is
+ * accessible */
+ hsi_pm_runtime_get_sync(intel_hsi);
+
+ /* Disable the ACREADY line not to be disturbed during flush */
+ force_disable_acready(intel_hsi);
+
+ hsi_flush_queue(&intel_hsi->brk_queue, cl, intel_hsi);
+ for (i = 0; i < hsi_tx_channel_count(intel_hsi); i++)
+ hsi_flush_queue(&intel_hsi->tx_queue[i], cl, intel_hsi);
+ for (i = 0; i < hsi_rx_channel_count(intel_hsi); i++)
+ hsi_flush_queue(&intel_hsi->rx_queue[i], cl, intel_hsi);
+ hsi_cleanup_dma(intel_hsi, cl);
+ hsi_flush_queue(&intel_hsi->fwd_queue, cl, intel_hsi);
+
+ /* Flush all RX HW FIFO which do not have any SW message queued */
+ spin_lock_irqsave(&intel_hsi->sw_lock, flags);
+ for (i = 0; i < hsi_rx_channel_count(intel_hsi); i++)
+ if (list_empty(&intel_hsi->rx_queue[i])) {
+ while (ioread32(ARASAN_HSI_HSI_STATUS(ctrl)) &
+ ARASAN_RX_NOT_EMPTY(i))
+ (void) ioread32(ARASAN_HSI_RX_DATA(ctrl, i));
+ } else
+ unforce = 1;
+ intel_hsi->tx_queue_busy = 0;
+ intel_hsi->rx_queue_busy = 0;
+ spin_unlock_irqrestore(&intel_hsi->sw_lock, flags);
+
+ /* Unforce the ACREADY disable if any RX queue is not empty */
+ if (unforce)
+ unforce_disable_acready(intel_hsi);
+
+ /* Get back to the original HSI controller power status */
+ pm_runtime_put(intel_hsi->pdev);
+
+ return 0;
+}
+
+/**
+ * hsi_mid_release - releasing resources belonging to a client
+ * @cl: HSI client reference
+ *
+ * This also resets the hardware upon release of the last client.
+ *
+ * Returns success.
+ */
+static int hsi_mid_release(struct hsi_client *cl)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+ struct intel_controller *intel_hsi = hsi_port_drvdata(port);
+
+ /* Now cleanup all the queues related to the client */
+ hsi_mid_flush(cl);
+
+ /* Reset the controller if this client is the last in the list */
+ mutex_lock(&port->lock);
+ if (port->claimed <= 1)
+ hsi_ctrl_clean_reset(intel_hsi);
+ mutex_unlock(&port->lock);
+
+ return 0;
+}
+
+/**
+ * hsi_mid_start_tx - asserting the ACWAKE line
+ * @cl: HSI client reference
+ *
+ * Returns success.
+ */
+static int hsi_mid_start_tx(struct hsi_client *cl)
+{
+ struct hsi_port *port = hsi_get_port(cl);
+ struct intel_controller *intel_hsi = hsi_port_drvdata(port);
+
+ assert_acwake(intel_hsi);
+
+ return 0;
+}
+
+/**
+ * hsi_mid_stop_tx - de-asserting the ACWAKE line
+ * @cl: HSI client reference
+ *
+ * The implementation will only de-assert the line if the TX path is empty and
+ * if there is no start_tx left outstanding.
+ *
+ * Returns success.
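+ *
+ * A hypothetical client sequence using the hsi_start_tx()/hsi_stop_tx()
+ * wrappers (the internal ACWAKE counter only lets the line drop once every
+ * assertion has been balanced and the TX queue has drained):
+ *
+ *	hsi_start_tx(cl);		/* ACWAKE asserted */
+ *	hsi_async_write(cl, msg);	/* keeps it asserted while queued */
+ *	hsi_stop_tx(cl);		/* line drops once msg completes */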
+ */ +static int hsi_mid_stop_tx(struct hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + struct intel_controller *intel_hsi = hsi_port_drvdata(port); + + (void) deassert_acwake(intel_hsi); + + return 0; +} + +/** + * enable_acready - CA_WAKE assertion event handler + * @hsi: Intel HSI controller reference + */ +static void enable_acready(struct intel_controller *intel_hsi) +{ + struct hsi_controller *hsi = to_hsi_controller(intel_hsi->dev); + unsigned int i; + + if (has_enabled_acready(intel_hsi)) + for (i = 0; i < hsi->num_ports; i++) + hsi_event(&hsi->port[i], HSI_EVENT_START_RX); +} + +/** + * try_disable_acready - CA_WAKE de-assertion event handler + * @hsi: Intel HSI controller reference + */ +static void try_disable_acready(struct intel_controller *intel_hsi) +{ + struct hsi_controller *hsi = to_hsi_controller(intel_hsi->dev); + unsigned int i; + + if (has_disabled_acready(intel_hsi)) + for (i = 0; i < hsi->num_ports; i++) + hsi_event(&hsi->port[i], HSI_EVENT_STOP_RX); +} + +/** + * hsi_pio_xfer_complete - PIO threshold reached interrupt management + * @hsi: Intel HSI controller reference + * @lch: DMA channel + * @tx_not_rx: direction of the transfer (RX = 0, TX != 0) + * + * Returns 0 on completion or some interrupt enable bitfield for re-enabling + * the PIO interrupt if there is still room in the current HSI message. + */ +static inline u32 hsi_pio_xfer_complete(struct intel_controller *intel_hsi, + unsigned int ch, int tx_not_rx) + __acquires(&intel_hsi->sw_lock) __releases(&intel_hsi->sw_lock) +{ + struct list_head *queue; + void __iomem *ctrl = intel_hsi->ctrl_io; + void __iomem *fifo; + struct hsi_msg *msg; + struct intel_pio_ctx *pio_ctx; + u32 *buf; + u32 avail, blk_len, sz; + unsigned long flags; + + queue = (tx_not_rx) ? + &intel_hsi->tx_queue[ch] : &intel_hsi->rx_queue[ch]; + + spin_lock_irqsave(&intel_hsi->sw_lock, flags); + if (list_empty(queue)) { + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); + return 0; + } + msg = list_first_entry(queue, struct hsi_msg, link); + if (unlikely(!msg->sgt.nents)) { + msg->actual_len = 0; + goto hsi_pio_xfer_done; + } + + pio_ctx = (tx_not_rx) ? + &intel_hsi->tx_ctx[ch].pio : &intel_hsi->rx_ctx[ch].pio; + + if (msg->status == HSI_STATUS_PROCEEDING) { + avail = (tx_not_rx) ? + NUM_TX_FIFO_DWORDS(tx_fifo_depth(intel_hsi, ch)) : + NUM_RX_FIFO_DWORDS(rx_fifo_depth(intel_hsi, ch)); + fifo = (tx_not_rx) ? + ARASAN_HSI_TX_DATA(ctrl, ch) : + ARASAN_HSI_RX_DATA(ctrl, ch); + + while ((avail > 0) || (unlikely(!pio_ctx->blk->length))) { + buf = sg_virt(pio_ctx->blk) + (pio_ctx->offset*4); + blk_len = HSI_BYTES_TO_FRAMES(pio_ctx->blk->length); + sz = min(avail, blk_len - pio_ctx->offset); + msg->actual_len += HSI_FRAMES_TO_BYTES(sz); + avail -= sz; + pio_ctx->offset += sz; + for (; sz > 0; sz--) { + if (tx_not_rx) + iowrite32(*buf, fifo); + else + *buf = ioread32(fifo); + buf++; + } + + if (pio_ctx->offset >= blk_len) { + pio_ctx->offset = 0; + if (sg_is_last(pio_ctx->blk)) + goto hsi_pio_xfer_done; + pio_ctx->blk = sg_next(pio_ctx->blk); + } + } + } + + if ((pio_ctx->offset < HSI_BYTES_TO_FRAMES(pio_ctx->blk->length)) || + ((tx_not_rx) && + (!(ioread32(ARASAN_HSI_HSI_STATUS(ctrl))&ARASAN_TX_EMPTY(ch))))) { + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); + return (tx_not_rx) ? 
+ ARASAN_IRQ_TX_THRESHOLD(ch) :
+ ARASAN_IRQ_RX_THRESHOLD(ch);
+ }
+
+hsi_pio_xfer_done:
+ msg->status = HSI_STATUS_COMPLETED;
+ list_del(&msg->link);
+ spin_unlock_irqrestore(&intel_hsi->sw_lock, flags);
+
+ hsi_transfer(intel_hsi, tx_not_rx, ch, -1);
+ if (tx_not_rx)
+ (void) deassert_acwake(intel_hsi);
+ msg->complete(msg);
+
+ return 0;
+}
+
+/**
+ * hsi_pio_rx_complete - Rx threshold reached interrupt management
+ * @intel_hsi: Intel HSI controller reference
+ * @ch: HSI channel number.
+ *
+ * Returns 0 on completion or some interrupt enable bitfield for re-enabling
+ * the PIO interrupt if there is still room in the current HSI message.
+ */
+static u32 hsi_pio_rx_complete(struct intel_controller *intel_hsi,
+ unsigned int ch)
+ __acquires(&intel_hsi->sw_lock) __releases(&intel_hsi->sw_lock)
+{
+ return hsi_pio_xfer_complete(intel_hsi, ch, 0);
+}
+
+/**
+ * hsi_pio_tx_complete - Tx threshold reached interrupt management
+ * @intel_hsi: Intel HSI controller reference
+ * @ch: HSI channel number.
+ *
+ * Returns 0 on completion or some interrupt enable bitfield for re-enabling
+ * the PIO if there is still data to transfer from the current HSI message.
+ */
+static u32 hsi_pio_tx_complete(struct intel_controller *intel_hsi,
+ unsigned int ch)
+ __acquires(&intel_hsi->sw_lock) __releases(&intel_hsi->sw_lock)
+{
+ return hsi_pio_xfer_complete(intel_hsi, ch, 1);
+}
+
+/**
+ * hsi_dma_complete - DMA complete status handler
+ * @intel_hsi: Intel HSI controller reference
+ * @lch: DMA channel.
+ *
+ * Returns the number of managed DMA transfers.
+ */
+static int hsi_dma_complete(struct intel_controller *intel_hsi,
+ unsigned int lch)
+ __acquires(&intel_hsi->sw_lock) __releases(&intel_hsi->sw_lock)
+ __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock)
+{
+ struct intel_dma_ctx *dma_ctx = intel_hsi->dma_ctx[lch];
+ struct intel_dma_xfer *ongoing_xfer = dma_ctx->ongoing;
+ void __iomem *ctrl = intel_hsi->ctrl_io;
+ struct hsi_msg *msg;
+ int tx_not_rx;
+ unsigned int hsi_channel;
+ unsigned long flags;
+#ifdef USE_SOFWARE_WORKAROUND_FOR_DMA_LLI
+ struct intel_dma_lli_xfer *lli_xfer;
+ unsigned int blk_sz = 0;
+#endif
+
+ spin_lock_irqsave(&intel_hsi->sw_lock, flags);
+ msg = ongoing_xfer->msg;
+#ifdef USE_SOFWARE_WORKAROUND_FOR_DMA_LLI
+ lli_xfer = &ongoing_xfer->with_link_list;
+ if ((is_using_link_list(dma_ctx)) && (msg) &&
+ (!sg_is_last(lli_xfer->blk))) {
+ lli_xfer->blk = sg_next(lli_xfer->blk);
+ blk_sz = HSI_BYTES_TO_FRAMES(lli_xfer->blk->length);
+ msg = NULL;
+ } else
+#endif
+ intel_hsi->dma_ctx[lch]->ongoing->msg = NULL;
+ if ((msg) && (msg->status != HSI_STATUS_ERROR))
+ msg->status = HSI_STATUS_COMPLETED;
+ spin_unlock_irqrestore(&intel_hsi->sw_lock, flags);
+
+#ifdef USE_SOFWARE_WORKAROUND_FOR_DMA_LLI
+ if (blk_sz) {
+ spin_lock_irqsave(&intel_hsi->hw_lock, flags);
+ iowrite32(ongoing_xfer->slv_enable |
+ ARASAN_DMA_XFER_FRAMES(blk_sz),
+ ARASAN_HSI_DMA_CONFIG(ctrl, lch));
+ spin_unlock_irqrestore(&intel_hsi->hw_lock, flags);
+ }
+#endif
+
+ if (unlikely(!msg))
+ return 0;
+
+ /* It is safe to disable the DMA channel right now, as no DMA transfer
+ * can start on this channel as long as the current message is not
+ * popped from the list, which happens later on!
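+ * (the message is only moved to the forwarding queue a few lines below,
+ * once the slave DMA configuration has been cleared)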
*/ + spin_lock_irqsave(&intel_hsi->hw_lock, flags); + iowrite32(0, ARASAN_HSI_DMA_CONFIG(ctrl, lch)); + intel_hsi->dma_running &= ~DMA_BUSY(lch); + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); + + tx_not_rx = (msg->ttype == HSI_MSG_WRITE); + hsi_channel = msg->channel; + + spin_lock_irqsave(&intel_hsi->sw_lock, flags); + list_del(&msg->link); + list_add_tail(&msg->link, &intel_hsi->fwd_queue); + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); + + hsi_transfer(intel_hsi, tx_not_rx, hsi_channel, lch); + + return 1; +} + +/** + * hsi_isr_tasklet - low-latency interrupt management out of interrupt state + * @hsi: Intel HSI controller reference + */ +static void hsi_isr_tasklet(unsigned long hsi) + __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock) + __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock) +{ + struct intel_controller *intel_hsi = (struct intel_controller *) hsi; + void __iomem *ctrl = intel_hsi->ctrl_io; + unsigned int ch; + u32 irq_status, err_status; + u32 irq_cfg = 0; + u32 err_cfg = 0; + u32 dma_mask = ARASAN_IRQ_DMA_COMPLETE(0); + u32 tx_mask = ARASAN_IRQ_TX_THRESHOLD(0); + u32 rx_mask = ARASAN_IRQ_RX_THRESHOLD(0); + unsigned long flags; + int do_fwd = 0; + + /* Get a local copy of the current interrupt status */ + spin_lock_irqsave(&intel_hsi->hw_lock, flags); + irq_status = intel_hsi->irq_status; + err_status = intel_hsi->err_status; + intel_hsi->irq_status = 0; + intel_hsi->err_status = 0; + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); + + if (irq_status & ARASAN_IRQ_RX_WAKE) + enable_acready(intel_hsi); + + if (err_status & ARASAN_IRQ_RX_ERROR) + hsi_rx_error(intel_hsi); + + for (ch = 0; ch < DWAHB_CHAN_CNT; ch++) { + if (irq_status & dma_mask) + do_fwd |= hsi_dma_complete(intel_hsi, ch); + dma_mask <<= 1; + } + + for (ch = 0; ch < HSI_MID_MAX_CHANNELS; ch++) { + if (irq_status & tx_mask) + irq_cfg |= hsi_pio_tx_complete(intel_hsi, ch); + if (irq_status & rx_mask) + irq_cfg |= hsi_pio_rx_complete(intel_hsi, ch); + tx_mask <<= 1; + rx_mask <<= 1; + } + + if (err_status & ARASAN_IRQ_ANY_DATA_TIMEOUT) + err_cfg = hsi_timeout(intel_hsi, err_status); + + if (err_status & ARASAN_IRQ_BREAK) + hsi_break_complete(intel_hsi); + + try_disable_acready(intel_hsi); + + if (do_fwd) + tasklet_schedule(&intel_hsi->fwd_tasklet); + + /* Re-enable relevant interrupts */ + if (irq_cfg || err_cfg) { + spin_lock_irqsave(&intel_hsi->hw_lock, flags); + if (irq_cfg) { + intel_hsi->irq_cfg |= irq_cfg; + hsi_enable_interrupt(ctrl, intel_hsi->irq_cfg); + } + if (err_cfg) { + intel_hsi->err_cfg |= err_cfg; + hsi_enable_error_interrupt(ctrl, intel_hsi->err_cfg); + } + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); + } +} + +/** + * hsi_fwd_tasklet - forwarding tasklet to send HSI messages back to client + * @hsi: Intel HSI controller reference + */ +static void hsi_fwd_tasklet(unsigned long hsi) + __acquires(&intel_hsi->sw_lock) __releases(&intel_hsi->sw_lock) + __acquires(&intel_hsi->sw_lock) __releases(&intel_hsi->sw_lock) +{ + struct intel_controller *intel_hsi = (struct intel_controller *) hsi; + struct hsi_msg *msg; + unsigned int dir; + unsigned long flags; + + spin_lock_irqsave(&intel_hsi->sw_lock, flags); + while (!list_empty(&intel_hsi->fwd_queue)) { + msg = list_first_entry(&intel_hsi->fwd_queue, + struct hsi_msg, link); + list_del(&msg->link); + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); + + dir = (msg->ttype == HSI_MSG_READ) ? 
+ DMA_FROM_DEVICE : DMA_TO_DEVICE; + dma_unmap_sg(intel_hsi->pdev, msg->sgt.sgl, + msg->sgt.nents, dir); + + if (msg->ttype == HSI_MSG_WRITE) + (void) deassert_acwake(intel_hsi); + + if (unlikely(msg->break_frame)) { + msg->break_frame = 0; + if (msg->destructor) + msg->destructor(msg); + else + hsi_free_msg(msg); + } else + msg->complete(msg); + spin_lock_irqsave(&intel_hsi->sw_lock, flags); + } + spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); +} + +/** + * hsi_isr - HSI controller interrupt service routine + * @irq: IRQ number + * @hsi: Intel HSI controller reference + * + * Clears and stores the interrupt sources and schedules a tasklet for handling + * them efficiently. + * + * Returns IRQ_HANDLED as the interrupt sources are handled in all cases. + */ +static irqreturn_t hsi_isr(int irq, void *hsi) + __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock) +{ + struct intel_controller *intel_hsi = (struct intel_controller *) hsi; + void __iomem *ctrl = intel_hsi->ctrl_io; + u32 irq_status, err_status, irq_disable, err_disable; + + spin_lock(&intel_hsi->hw_lock); + /* The only interrupt source when suspended is an external wakeup, so + * notify it to the interrupt tasklet */ + if (unlikely(intel_hsi->suspend_state != DEVICE_READY)) { + intel_hsi->suspend_state = DEVICE_AND_IRQ_SUSPENDED; + disable_irq_nosync(intel_hsi->irq); + intel_hsi->irq_status |= ARASAN_IRQ_RX_WAKE; + goto exit_irq; + } + + irq_status = ioread32(ARASAN_HSI_INTERRUPT_STATUS(ctrl)); + err_status = (irq_status & ARASAN_IRQ_ERROR) ? + ioread32(ARASAN_HSI_ERROR_INTERRUPT_STATUS(ctrl)) : 0; + + irq_disable = irq_status & + (ARASAN_IRQ_RX_WAKE | ARASAN_IRQ_ANY_RX_THRESHOLD | + ARASAN_IRQ_ANY_TX_THRESHOLD); + + err_disable = err_status & ARASAN_IRQ_ANY_DATA_TIMEOUT; + + if (irq_disable) { + intel_hsi->irq_cfg &= ~irq_disable; + hsi_enable_interrupt(ctrl, intel_hsi->irq_cfg); + } + + if (irq_status) { + iowrite32(irq_status, ARASAN_HSI_INTERRUPT_STATUS(ctrl)); + intel_hsi->irq_status |= irq_status; + } + + if (err_disable) { + intel_hsi->err_cfg &= ~err_disable; + hsi_enable_error_interrupt(ctrl, intel_hsi->err_cfg); + } + + if (err_status) { + iowrite32(err_status, ARASAN_HSI_ERROR_INTERRUPT_STATUS(ctrl)); + intel_hsi->err_status |= err_status; + } + +exit_irq: + spin_unlock(&intel_hsi->hw_lock); + tasklet_hi_schedule(&intel_hsi->isr_tasklet); + + return IRQ_HANDLED; +} + +/** + * hsi_ports_init - initialise the HSI port callback functions + * @hsi: HSI controller reference + * @intel_hsi: Intel HSI controller reference + */ +static void hsi_ports_init(struct hsi_controller *hsi, + struct intel_controller *intel_hsi) +{ + struct hsi_port *port; + unsigned int i; + + for (i = 0; i < hsi->num_ports; i++) { + port = &hsi->port[i]; + port->async = hsi_mid_async; + port->setup = hsi_mid_setup; + port->flush = hsi_mid_flush; + port->start_tx = hsi_mid_start_tx; + port->stop_tx = hsi_mid_stop_tx; + port->release = hsi_mid_release; + hsi_port_set_drvdata(port, intel_hsi); + } +} + +/** + * hsi_ports_init - exit the HSI port callback functions + * @hsi: HSI controller reference + */ +static void hsi_ports_exit(struct hsi_controller *hsi) +{ + struct hsi_port *port; + unsigned int i; + + for (i = 0; i < hsi->num_ports; i++) { + port = &hsi->port[i]; + port->async = NULL; + port->setup = NULL; + port->flush = NULL; + port->start_tx = NULL; + port->stop_tx = NULL; + port->release = NULL; + hsi_port_set_drvdata(port, NULL); + } +} + +/** + * hsi_unmap_resources - reserve hardware resources for the driver + * @intel_hsi: 
Intel HSI controller reference + * @pdev: PCI device reference + * + * Returns success or an error code if any resource cannot be reserved. + */ +static int hsi_map_resources(struct intel_controller *intel_hsi, + struct pci_dev *pdev) +{ + int err; + int pci_bar = 0; + unsigned long paddr; + u32 iolen; + + /* get hsi controller io resource and map it */ + intel_hsi->pdev = &pdev->dev; + + paddr = pci_resource_start(pdev, pci_bar); + iolen = pci_resource_len(pdev, pci_bar); + err = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev)); + if (err) + goto no_sys_region; + + intel_hsi->ctrl_io = ioremap_nocache(paddr, iolen); + if (!intel_hsi->ctrl_io) { + err = -EPERM; + goto no_sys_remap; + } + + /* Get master DMA info */ + intel_hsi->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, + HSI_MASTER_DMA_ID, NULL); + if (!intel_hsi->dmac) { + err = -EPERM; + goto no_dmac_device; + } + + paddr = pci_resource_start(intel_hsi->dmac, pci_bar); + iolen = pci_resource_len(intel_hsi->dmac, pci_bar); + err = pci_request_region(intel_hsi->dmac, pci_bar, + dev_name(&intel_hsi->dmac->dev)); + if (err) + goto no_dmac_region; + + intel_hsi->dma_io = ioremap_nocache(paddr, iolen); + if (!intel_hsi->dma_io) { + err = -EPERM; + goto no_dmac_remap; + } + + return 0; + +no_dmac_remap: + pci_release_region(intel_hsi->dmac, pci_bar); +no_dmac_region: + pci_dev_put(intel_hsi->dmac); +no_dmac_device: + iounmap(intel_hsi->ctrl_io); +no_sys_remap: + pci_release_region(pdev, pci_bar); +no_sys_region: + + return err; +} + +/** + * hsi_unmap_resources - free the hardware resources taken by the driver + * @intel_hsi: Intel HSI controller reference + * @pdev: PCI device reference + */ +static void hsi_unmap_resources(struct intel_controller *intel_hsi, + struct pci_dev *pdev) +{ + int pci_bar = 0; + + iounmap(intel_hsi->dma_io); + pci_release_region(intel_hsi->dmac, pci_bar); + pci_dev_put(intel_hsi->dmac); + iounmap(intel_hsi->ctrl_io); + pci_release_region(pdev, pci_bar); +} + +/** + * hsi_controller_init - initialise the controller structure + * @intel_hsi: Intel HSI controller reference + * + * Returns success or an error code if the controller IRQ cannot be requested. 
+ */ +static int hsi_controller_init(struct intel_controller *intel_hsi) +{ + unsigned int ch; + int err; + + for (ch = 0; ch < HSI_MID_MAX_CHANNELS; ch++) { + INIT_LIST_HEAD(&intel_hsi->tx_queue[ch]); + INIT_LIST_HEAD(&intel_hsi->rx_queue[ch]); + } + INIT_LIST_HEAD(&intel_hsi->brk_queue); + INIT_LIST_HEAD(&intel_hsi->fwd_queue); + + spin_lock_init(&intel_hsi->sw_lock); + spin_lock_init(&intel_hsi->hw_lock); + +#ifdef CONFIG_HAS_WAKELOCK + wake_lock_init(&intel_hsi->stay_awake, WAKE_LOCK_SUSPEND, + "hsi_wakelock"); +#ifdef DISABLE_POWER_MANAGEMENT + wake_lock(&intel_hsi->stay_awake); +#endif +#endif + + tasklet_init(&intel_hsi->isr_tasklet, hsi_isr_tasklet, + (unsigned long) intel_hsi); + tasklet_init(&intel_hsi->fwd_tasklet, hsi_fwd_tasklet, + (unsigned long) intel_hsi); + + init_timer(&intel_hsi->cawake_poll); + intel_hsi->cawake_poll.data = (unsigned long) intel_hsi; + intel_hsi->cawake_poll.function = cawake_poll; + + err = request_irq(intel_hsi->irq, hsi_isr, + IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND, + HSI_MPU_IRQ_NAME, intel_hsi); + if (err < 0) { + dev_err(intel_hsi->dev, "Request IRQ %d failed (%d)\n", + intel_hsi->irq, err); + +#ifdef CONFIG_HAS_WAKELOCK +#ifdef DISABLE_POWER_MANAGEMENT + wake_unlock(&intel_hsi->stay_awake); +#endif + wake_lock_destroy(&intel_hsi->stay_awake); +#endif + } + + return err; +} + +/** + * hsi_controller_exit - set the controller driver to a reset state + * @intel_hsi: Intel HSI controller reference + */ +static void hsi_controller_exit(struct intel_controller *intel_hsi) +{ + /* Reset the HSI hardware */ + hsi_ctrl_clean_reset(intel_hsi); + + /* Free the interrupt */ + free_irq(intel_hsi->irq, intel_hsi); + + /* Kill the tasklets */ + tasklet_kill(&intel_hsi->isr_tasklet); + tasklet_kill(&intel_hsi->fwd_tasklet); + +#ifdef CONFIG_HAS_WAKELOCK +#ifdef DISABLE_POWER_MANAGEMENT + wake_unlock(&intel_hsi->stay_awake); +#endif + wake_lock_destroy(&intel_hsi->stay_awake); +#endif +} + +/** + * hsi_rtpm_idle - runtime power management idle callback + * @dev: PCI device reference + * + * Returns -EBUSY and schedule a cancellable delayed suspend if + * SCHEDULE_LATER_SUSPEND_ON_IDLE is set otherwise returns 0 as the device is + * actually idle. + */ +static int hsi_rtpm_idle(struct device *dev) +{ +#ifdef SCHEDULE_LATER_SUSPEND_ON_IDLE + pm_schedule_suspend(dev, IDLE_TO_SUSPEND_DELAY); + + /* Set the device as being busy not to immediately go on suspend */ + return -EBUSY; +#else + /* The device is actually idle */ + return 0; +#endif +} + +/** + * hsi_rtpm_suspend - runtime power management suspend callback + * @dev: PCI device reference + * + * Returns success or an error code if suspend to RAM is failing or impossible. + */ +static int hsi_rtpm_suspend(struct device *dev) +{ + struct hsi_controller *hsi = + (struct hsi_controller *)dev_get_drvdata(dev); + struct intel_controller *intel_hsi = + (struct intel_controller *)hsi_controller_drvdata(hsi); + + dev_dbg(dev, "hsi enter runtime suspend\n"); + return hsi_ctrl_suspend(intel_hsi); +} + +/** + * hsi_rtpm_resume - runtime power management resume callback + * @dev: PCI device reference + * + * Returns success or an error code if resuming from RAM is failing. 
+ */ +static int hsi_rtpm_resume(struct device *dev) +{ + struct hsi_controller *hsi = + (struct hsi_controller *)dev_get_drvdata(dev); + struct intel_controller *intel_hsi = + (struct intel_controller *)hsi_controller_drvdata(hsi); + int err; + + dev_dbg(dev, "hsi enter runtime resume\n"); + err = hsi_ctrl_resume(intel_hsi); + if (!err) + hsi_resume_dma_transfers(intel_hsi); + + return err; +} + +#ifdef CONFIG_SUSPEND +/** + * hsi_pm_suspend - called at system suspend request + * @dev: PCI device reference + * + * Returns success or an error code if suspend to RAM is failing or impossible. + */ +static int hsi_pm_suspend(struct device *dev) +{ + struct hsi_controller *hsi = + (struct hsi_controller *)dev_get_drvdata(dev); + struct intel_controller *intel_hsi = + (struct intel_controller *)hsi_controller_drvdata(hsi); + + dev_dbg(dev, "hsi enter suspend\n"); + return hsi_ctrl_suspend(intel_hsi); +} + +/** + * hsi_pm_resume - called at system resume request + * @dev: PCI device reference + * + * Returns success or an error code if resuming from RAM is failing. + */ +static int hsi_pm_resume(struct device *dev) +{ + struct hsi_controller *hsi = + (struct hsi_controller *)dev_get_drvdata(dev); + struct intel_controller *intel_hsi = + (struct intel_controller *)hsi_controller_drvdata(hsi); + int err; + + dev_dbg(dev, "hsi enter resume\n"); + err = hsi_ctrl_resume(intel_hsi); + if (!err) + hsi_resume_dma_transfers(intel_hsi); + + return err; +} + +#else /* CONFIG_SUSPEND */ +#define hsi_pm_suspend NULL +#define hsi_pm_resume NULL +#endif /* CONFIG_SUSPEND */ + +#ifndef DISABLE_POWER_MANAGEMENT +/** + * hsi_rtpm_init - initialising the runtime power management + * @intel_hsi: Intel HSI controller reference + */ +static void hsi_rtpm_init(struct intel_controller *intel_hsi) +{ + struct device *dev = intel_hsi->pdev; + + pm_runtime_allow(dev); + pm_runtime_put_noidle(dev); +} + +/** + * hsi_rtpm_exit - exiting the runtime power management + * @intel_hsi: Intel HSI controller reference + */ +static void hsi_rtpm_exit(struct intel_controller *intel_hsi) +{ + struct device *dev = intel_hsi->pdev; + + pm_runtime_forbid(dev); + pm_runtime_get_noresume(dev); +} +#endif + +/** + * hsi_add_controller - make and init intel_hsi controller + * @hsi: HSI controller reference + * @pdev: PCI device reference + * + * Allocate intel_hsi controller, attach to hsi_controller, activate + * PCI device and map memory for HSI and master DMA, init ports, and + * register controller with HSI (perform board info scan there). + * + * Returns success or an error code if any initialisation is failing. 
+ */ +static int __init hsi_add_controller(struct hsi_controller *hsi, + struct pci_dev *pdev) +{ + struct intel_controller *intel_hsi; + int err; + + intel_hsi = kzalloc(sizeof(*intel_hsi), GFP_KERNEL); + if (!intel_hsi) { + pr_err("not enough memory for intel hsi\n"); + return -ENOMEM; + } + hsi->id = 0; + hsi->num_ports = 1; + hsi->device.parent = &pdev->dev; + hsi->device.dma_mask = pdev->dev.dma_mask; + + dev_set_name(&hsi->device, "hsi%d", hsi->id); + hsi_controller_set_drvdata(hsi, intel_hsi); + intel_hsi->dev = &hsi->device; + intel_hsi->irq = pdev->irq; + + err = pci_enable_device(pdev); + if (err) { + pr_err("pci enable fail %d\n", err); + goto fail_pci_enable_device; + } + + err = hsi_map_resources(intel_hsi, pdev); + if (err) + goto fail_map_resources; + + hsi_ports_init(hsi, intel_hsi); + err = hsi_controller_init(intel_hsi); + if (err < 0) + goto fail_controller_init; + + err = hsi_ctrl_full_reset(intel_hsi); + if (err < 0) + goto fail_controller_reset; + +#ifdef CONFIG_DEBUG_FS + err = hsi_debug_add_ctrl(hsi); + if (err < 0) + goto fail_add_debug; +#endif + + err = hsi_register_controller(hsi); + if (err < 0) + goto fail_controller_register; + + pci_set_drvdata(pdev, (void *)hsi); +#ifndef DISABLE_POWER_MANAGEMENT + hsi_rtpm_init(intel_hsi); +#endif + + return 0; + +fail_controller_register: +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(intel_hsi->dir); +fail_add_debug: +#endif +fail_controller_reset: + hsi_controller_exit(intel_hsi); +fail_controller_init: + hsi_unmap_resources(intel_hsi, pdev); +fail_map_resources: + hsi_ports_exit(hsi); + pci_disable_device(pdev); +fail_pci_enable_device: + hsi_controller_set_drvdata(hsi, NULL); + kfree(intel_hsi); + return err; +} + +/** + * hsi_remove_controller - stop controller and unregister with HSI + * @hsi: HSI controller reference + * @pdev: PCI device reference + * + * Stop controller and unregister with HSI + */ +static void hsi_remove_controller(struct hsi_controller *hsi, + struct pci_dev *pdev) +{ + struct intel_controller *intel_hsi = + (struct intel_controller *)hsi_controller_drvdata(hsi); + +#ifndef DISABLE_POWER_MANAGEMENT + hsi_rtpm_exit(intel_hsi); +#endif + pci_set_drvdata(pdev, NULL); + hsi_unregister_controller(hsi); +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(intel_hsi->dir); +#endif + hsi_controller_exit(intel_hsi); + hsi_unmap_resources(intel_hsi, pdev); + hsi_ports_exit(hsi); + pci_disable_device(pdev); + hsi_controller_set_drvdata(hsi, NULL); + kfree(intel_hsi); +} + +/** + * intel_hsi_probe - device PCI probe + * @pdev: PCI device reference + * @ent: PCI device id reference + * + * Allocate, add controller to the HSI framework and initialise its hardware. + * + * Returns success or an error code if any initialisation is failing. + */ +static int intel_hsi_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct hsi_controller *hsi; + int err = 0; + + hsi = hsi_alloc_controller(1, GFP_KERNEL); + if (!hsi) { + pr_err("No memory for hsi controller\n"); + return -ENOMEM; + } + + err = hsi_add_controller(hsi, pdev); + if (err < 0) + goto fail_add_controller; + + return 0; + +fail_add_controller: + kfree(hsi); + pr_err("hsi controller probe error exit"); + return err; +} + +/** + * intel_hsi_remove - called during PCI device exit + * @pdev: PCI device reference + * + * Remove the HSI controller from the HSI framework and free its memory. 
+ */ +static void __devexit intel_hsi_remove(struct pci_dev *pdev) +{ + struct hsi_controller *hsi = + (struct hsi_controller *) pci_get_drvdata(pdev); + + if (hsi) { + hsi_remove_controller(hsi, pdev); + kfree(hsi); + } +} + +/** + * struct intel_mid_hsi_rtpm - runtime power management callbacks + */ +static const struct dev_pm_ops intel_mid_hsi_rtpm = { + .suspend = hsi_pm_suspend, + .resume = hsi_pm_resume, + SET_RUNTIME_PM_OPS( + hsi_rtpm_suspend, + hsi_rtpm_resume, + hsi_rtpm_idle + ) +}; + +/** + * struct pci_ids - PCI IDs handled by the driver (ID of HSI controller) + */ +static const struct pci_device_id pci_ids[] __devinitdata = { + { PCI_VDEVICE(INTEL, 0x833) }, /* HSI */ + { } +}; + +/** + * struct intel_hsi_driver - PCI structure for driver + */ +static struct pci_driver intel_hsi_driver = { + .driver = { + .pm = &intel_mid_hsi_rtpm, + }, + .name = "intel_hsi", + .id_table = pci_ids, + .probe = intel_hsi_probe, + .remove = __devexit_p(intel_hsi_remove), +}; + +/** + * intel_hsi_init - HSI controller driver entry point and initialisation + * + * Returns success or an error code if the PCI driver registration is failing. + */ +static int __init intel_hsi_init(void) +{ + pr_info("init Intel HSI controller driver\n"); + return pci_register_driver(&intel_hsi_driver); +} +module_init(intel_hsi_init); + +/** + * intel_hsi_exit - frees resources taken by the HSI controller driver + */ +static void __exit intel_hsi_exit(void) +{ + pr_info("Intel HSI controller driver removed\n"); + pci_unregister_driver(&intel_hsi_driver); +} +module_exit(intel_hsi_exit); + +MODULE_ALIAS("pci:intel_hsi"); +MODULE_AUTHOR("Olivier Stoltz Douchet "); +MODULE_DESCRIPTION("Intel mid HSI Controller Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c new file mode 100644 index 0000000..8c976e0 --- /dev/null +++ b/drivers/hsi/hsi.c @@ -0,0 +1,516 @@ +/* + * hsi.c + * + * HSI core. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * + * Contact: Carlos Chinea + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ +#include +#include +#include +#include +#include +#include +#include + +struct hsi_cl_info { + struct list_head list; + struct hsi_board_info info; +}; + +static LIST_HEAD(hsi_board_list); + +static struct device_type hsi_ctrl = { + .name = "hsi_controller", +}; + +static struct device_type hsi_cl = { + .name = "hsi_client", +}; + +static struct device_type hsi_port = { + .name = "hsi_port", +}; + +static ssize_t modalias_show(struct device *dev, struct device_attribute *a, + char *buf) +{ + return sprintf(buf, "hsi:%s\n", dev_name(dev)); +} + +static struct device_attribute hsi_bus_dev_attrs[] = { + __ATTR_RO(modalias), + __ATTR_NULL, +}; + +static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + add_uevent_var(env, "MODALIAS=hsi:%s", dev_name(dev)); + + return 0; +} + +static int hsi_bus_match(struct device *dev, struct device_driver *driver) +{ + return strcmp(dev_name(dev), driver->name) == 0; +} + +static struct bus_type hsi_bus_type = { + .name = "hsi", + .dev_attrs = hsi_bus_dev_attrs, + .match = hsi_bus_match, + .uevent = hsi_bus_uevent, +}; + +static void hsi_client_release(struct device *dev) +{ + kfree(to_hsi_client(dev)); +} + +static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info) +{ + struct hsi_client *cl; + unsigned long flags; + + cl = kzalloc(sizeof(*cl), GFP_KERNEL); + if (!cl) + return; + cl->device.type = &hsi_cl; + cl->tx_cfg = info->tx_cfg; + cl->rx_cfg = info->rx_cfg; + cl->device.bus = &hsi_bus_type; + cl->device.parent = &port->device; + cl->device.release = hsi_client_release; + dev_set_name(&cl->device, info->name); + cl->device.platform_data = info->platform_data; + spin_lock_irqsave(&port->clock, flags); + list_add_tail(&cl->link, &port->clients); + spin_unlock_irqrestore(&port->clock, flags); + if (info->archdata) + cl->device.archdata = *info->archdata; + if (device_register(&cl->device) < 0) { + pr_err("hsi: failed to register client: %s\n", info->name); + kfree(cl); + } +} + +/** + * hsi_register_board_info - Register HSI clients information + * @info: Array of HSI clients on the board + * @len: Length of the array + * + * HSI clients are statically declared and registered on board files. + * + * HSI clients will be automatically registered to the HSI bus once the + * controller and the port where the clients wishes to attach are registered + * to it. + * + * Return -errno on failure, 0 on success. 
+ */ +int __init hsi_register_board_info(struct hsi_board_info const *info, + unsigned int len) +{ + struct hsi_cl_info *cl_info; + + cl_info = kzalloc(sizeof(*cl_info) * len, GFP_KERNEL); + if (!cl_info) + return -ENOMEM; + + for (; len; len--, info++, cl_info++) { + cl_info->info = *info; + list_add_tail(&cl_info->list, &hsi_board_list); + } + + return 0; +} + +static void hsi_scan_board_info(struct hsi_controller *hsi) +{ + struct hsi_cl_info *cl_info; + struct hsi_port *p; + + list_for_each_entry(cl_info, &hsi_board_list, list) + if (cl_info->info.hsi_id == hsi->id) { + p = hsi_find_port_num(hsi, cl_info->info.port); + if (!p) + continue; + hsi_new_client(p, &cl_info->info); + } +} + +static int hsi_remove_client(struct device *dev, void *data) +{ + struct hsi_client *cl = to_hsi_client(dev); + struct hsi_port *port = to_hsi_port(dev->parent); + unsigned long flags; + + spin_lock_irqsave(&port->clock, flags); + list_del(&cl->link); + spin_unlock_irqrestore(&port->clock, flags); + device_unregister(dev); + + return 0; +} + +static int hsi_remove_port(struct device *dev, void *data) +{ + device_for_each_child(dev, NULL, hsi_remove_client); + device_unregister(dev); + + return 0; +} + +static void hsi_controller_release(struct device *dev) +{ +} + +static void hsi_port_release(struct device *dev) +{ +} + +/** + * hsi_unregister_controller - Unregister an HSI controller + * @hsi: The HSI controller to register + */ +void hsi_unregister_controller(struct hsi_controller *hsi) +{ + device_for_each_child(&hsi->device, NULL, hsi_remove_port); + device_unregister(&hsi->device); +} +EXPORT_SYMBOL_GPL(hsi_unregister_controller); + +/** + * hsi_register_controller - Register an HSI controller and its ports + * @hsi: The HSI controller to register + * + * Returns -errno on failure, 0 on success. + */ +int hsi_register_controller(struct hsi_controller *hsi) +{ + unsigned int i; + int err; + + hsi->device.type = &hsi_ctrl; + hsi->device.bus = &hsi_bus_type; + hsi->device.release = hsi_controller_release; + err = device_register(&hsi->device); + if (err < 0) + return err; + for (i = 0; i < hsi->num_ports; i++) { + hsi->port[i].device.parent = &hsi->device; + hsi->port[i].device.bus = &hsi_bus_type; + hsi->port[i].device.release = hsi_port_release; + hsi->port[i].device.type = &hsi_port; + INIT_LIST_HEAD(&hsi->port[i].clients); + spin_lock_init(&hsi->port[i].clock); + err = device_register(&hsi->port[i].device); + if (err < 0) + goto out; + } + /* Populate HSI bus with HSI clients */ + hsi_scan_board_info(hsi); + + return 0; +out: + hsi_unregister_controller(hsi); + + return err; +} +EXPORT_SYMBOL_GPL(hsi_register_controller); + +/** + * hsi_register_client_driver - Register an HSI client to the HSI bus + * @drv: HSI client driver to register + * + * Returns -errno on failure, 0 on success. + */ +int hsi_register_client_driver(struct hsi_client_driver *drv) +{ + drv->driver.bus = &hsi_bus_type; + + return driver_register(&drv->driver); +} +EXPORT_SYMBOL_GPL(hsi_register_client_driver); + +static inline int hsi_dummy_msg(struct hsi_msg *msg) +{ + return 0; +} + +static inline int hsi_dummy_cl(struct hsi_client *cl) +{ + return 0; +} + +/** + * hsi_alloc_controller - Allocate an HSI controller and its ports + * @n_ports: Number of ports on the HSI controller + * @flags: Kernel allocation flags + * + * Return NULL on failure or a pointer to an hsi_controller on success. 
+ */ +struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags) +{ + struct hsi_controller *hsi; + struct hsi_port *port; + unsigned int i; + + if (!n_ports) + return NULL; + + port = kzalloc(sizeof(*port)*n_ports, flags); + if (!port) + return NULL; + hsi = kzalloc(sizeof(*hsi), flags); + if (!hsi) + goto out; + for (i = 0; i < n_ports; i++) { + dev_set_name(&port[i].device, "port%d", i); + port[i].num = i; + port[i].async = hsi_dummy_msg; + port[i].setup = hsi_dummy_cl; + port[i].flush = hsi_dummy_cl; + port[i].start_tx = hsi_dummy_cl; + port[i].stop_tx = hsi_dummy_cl; + port[i].release = hsi_dummy_cl; + mutex_init(&port[i].lock); + } + hsi->num_ports = n_ports; + hsi->port = port; + + return hsi; +out: + kfree(port); + + return NULL; +} +EXPORT_SYMBOL_GPL(hsi_alloc_controller); + +/** + * hsi_free_controller - Free an HSI controller + * @hsi: Pointer to HSI controller + */ +void hsi_free_controller(struct hsi_controller *hsi) +{ + if (!hsi) + return; + + kfree(hsi->port); + kfree(hsi); +} +EXPORT_SYMBOL_GPL(hsi_free_controller); + +/** + * hsi_free_msg - Free an HSI message + * @msg: Pointer to the HSI message + * + * Client is responsible to free the buffers pointed by the scatterlists. + */ +void hsi_free_msg(struct hsi_msg *msg) +{ + if (!msg) + return; + sg_free_table(&msg->sgt); + kfree(msg); +} +EXPORT_SYMBOL_GPL(hsi_free_msg); + +/** + * hsi_alloc_msg - Allocate an HSI message + * @nents: Number of memory entries + * @flags: Kernel allocation flags + * + * nents can be 0. This mainly makes sense for read transfer. + * In that case, HSI drivers will call the complete callback when + * there is data to be read without consuming it. + * + * Return NULL on failure or a pointer to an hsi_msg on success. + */ +struct hsi_msg *hsi_alloc_msg(unsigned int nents, gfp_t flags) +{ + struct hsi_msg *msg; + int err; + + msg = kzalloc(sizeof(*msg), flags); + if (!msg) + return NULL; + + if (!nents) + return msg; + + err = sg_alloc_table(&msg->sgt, nents, flags); + if (unlikely(err)) { + kfree(msg); + msg = NULL; + } + + return msg; +} +EXPORT_SYMBOL_GPL(hsi_alloc_msg); + +/** + * hsi_async - Submit an HSI transfer to the controller + * @cl: HSI client sending the transfer + * @msg: The HSI transfer passed to controller + * + * The HSI message must have the channel, ttype, complete and destructor + * fields set beforehand. If nents > 0 then the client has to initialize + * also the scatterlists to point to the buffers to write to or read from. + * + * HSI controllers relay on pre-allocated buffers from their clients and they + * do not allocate buffers on their own. + * + * Once the HSI message transfer finishes, the HSI controller calls the + * complete callback with the status and actual_len fields of the HSI message + * updated. The complete callback can be called before returning from + * hsi_async. + * + * Returns -errno on failure or 0 on success + */ +int hsi_async(struct hsi_client *cl, struct hsi_msg *msg) +{ + struct hsi_port *port = hsi_get_port(cl); + + if (!hsi_port_claimed(cl)) + return -EACCES; + + WARN_ON_ONCE(!msg->destructor || !msg->complete); + msg->cl = cl; + + return port->async(msg); +} +EXPORT_SYMBOL_GPL(hsi_async); + +/** + * hsi_claim_port - Claim the HSI client's port + * @cl: HSI client that wants to claim its port + * @share: Flag to indicate if the client wants to share the port or not. + * + * Returns -errno on failure, 0 on success. 
+ */ +int hsi_claim_port(struct hsi_client *cl, unsigned int share) +{ + struct hsi_port *port = hsi_get_port(cl); + int err = 0; + + mutex_lock(&port->lock); + if ((port->claimed) && (!port->shared || !share)) { + err = -EBUSY; + goto out; + } + port->claimed++; + port->shared = !!share; + cl->pclaimed = 1; +out: + mutex_unlock(&port->lock); + + return err; +} +EXPORT_SYMBOL_GPL(hsi_claim_port); + +/** + * hsi_release_port - Release the HSI client's port + * @cl: HSI client which previously claimed its port + */ +void hsi_release_port(struct hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + + /* Allow HW driver to do some cleanup */ + port->release(cl); + mutex_lock(&port->lock); + if (cl->pclaimed) + port->claimed--; + BUG_ON(port->claimed < 0); + cl->pclaimed = 0; + if (!port->claimed) + port->shared = 0; + mutex_unlock(&port->lock); +} +EXPORT_SYMBOL_GPL(hsi_release_port); + +static int hsi_start_rx(struct hsi_client *cl, void *data) +{ + if (cl->hsi_start_rx) + (*cl->hsi_start_rx)(cl); + + return 0; +} + +static int hsi_stop_rx(struct hsi_client *cl, void *data) +{ + if (cl->hsi_stop_rx) + (*cl->hsi_stop_rx)(cl); + + return 0; +} + +static int hsi_port_for_each_client(struct hsi_port *port, void *data, + int (*fn)(struct hsi_client *cl, void *data)) +{ + struct hsi_client *cl; + unsigned long flags; + + spin_lock_irqsave(&port->clock, flags); + list_for_each_entry(cl, &port->clients, link) { + spin_unlock_irqrestore(&port->clock, flags); + (*fn)(cl, data); + spin_lock_irqsave(&port->clock, flags); + } + spin_unlock_irqrestore(&port->clock, flags); + + return 0; +} + +/** + * hsi_event -Notifies clients about port events + * @port: Port where the event occurred + * @event: The event type + * + * Clients should not be concerned about wake line behavior. However, due + * to a race condition in HSI HW protocol, clients need to be notified + * about wake line changes, so they can implement a workaround for it. + * + * Events: + * HSI_EVENT_START_RX - Incoming wake line high + * HSI_EVENT_STOP_RX - Incoming wake line down + */ +void hsi_event(struct hsi_port *port, unsigned int event) +{ + int (*fn)(struct hsi_client *cl, void *data); + + switch (event) { + case HSI_EVENT_START_RX: + fn = hsi_start_rx; + break; + case HSI_EVENT_STOP_RX: + fn = hsi_stop_rx; + break; + default: + return; + } + hsi_port_for_each_client(port, NULL, fn); +} +EXPORT_SYMBOL_GPL(hsi_event); + +static int __init hsi_init(void) +{ + return bus_register(&hsi_bus_type); +} +postcore_initcall(hsi_init); diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h new file mode 100644 index 0000000..54ae22a --- /dev/null +++ b/include/linux/hsi/hsi.h @@ -0,0 +1,376 @@ +/* + * hsi.h + * + * HSI core header file. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * + * Contact: Carlos Chinea + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __LINUX_HSI_H__ +#define __LINUX_HSI_H__ + +#include +#include +#include +#include +#include + +/* HSI message ttype */ +#define HSI_MSG_READ 0 +#define HSI_MSG_WRITE 1 + +/* HSI configuration values */ +#define HSI_MODE_STREAM 1 +#define HSI_MODE_FRAME 2 +#define HSI_FLOW_SYNC 0 /* Synchronized flow */ +#define HSI_FLOW_PIPE 1 /* Pipelined flow */ +#define HSI_ARB_RR 0 /* Round-robin arbitration */ +#define HSI_ARB_PRIO 1 /* Channel priority arbitration */ + +#define HSI_MAX_CHANNELS 16 + +/* HSI message status codes */ +enum { + HSI_STATUS_COMPLETED, /* Message transfer is completed */ + HSI_STATUS_PENDING, /* Message pending to be read/write (POLL) */ + HSI_STATUS_PROCEEDING, /* Message transfer is ongoing */ + HSI_STATUS_QUEUED, /* Message waiting to be served */ + HSI_STATUS_ERROR, /* Error when message transfer was ongoing */ +}; + +/* HSI port event codes */ +enum { + HSI_EVENT_START_RX, + HSI_EVENT_STOP_RX, +}; + +/** + * struct hsi_config - Configuration for RX/TX HSI modules + * @mode: Bit transmission mode (STREAM or FRAME) + * @flow: Flow type (SYNCHRONIZED or PIPELINE) + * @channels: Number of channels to use [1..16] + * @speed: Max bit transmission speed (Kbit/s) + * @arb_mode: Arbitration mode for TX frame (Round robin, priority) + */ +struct hsi_config { + unsigned int mode; + unsigned int flow; + unsigned int channels; + unsigned int speed; + unsigned int arb_mode; /* TX only */ +}; + +/** + * struct hsi_board_info - HSI client board info + * @name: Name for the HSI device + * @hsi_id: HSI controller id where the client sits + * @port: Port number in the controller where the client sits + * @tx_cfg: HSI TX configuration + * @rx_cfg: HSI RX configuration + * @platform_data: Platform related data + * @archdata: Architecture-dependent device data + */ +struct hsi_board_info { + const char *name; + int hsi_id; + unsigned int port; + struct hsi_config tx_cfg; + struct hsi_config rx_cfg; + void *platform_data; + struct dev_archdata *archdata; +}; + +#ifdef CONFIG_HSI +extern int hsi_register_board_info(struct hsi_board_info const *info, + unsigned int len); +#else +static inline int hsi_register_board_info(struct hsi_board_info const *info, + unsigned int len) +{ + return 0; +} +#endif + +/** + * struct hsi_client - HSI client attached to an HSI port + * @device: Driver model representation of the device + * @tx_cfg: HSI TX configuration + * @rx_cfg: HSI RX configuration + * @hsi_start_rx: Called after incoming wake line goes high + * @hsi_stop_rx: Called after incoming wake line goes low + */ +struct hsi_client { + struct device device; + struct hsi_config tx_cfg; + struct hsi_config rx_cfg; + void (*hsi_start_rx)(struct hsi_client *cl); + void (*hsi_stop_rx)(struct hsi_client *cl); + /* private: */ + unsigned int pclaimed:1; + struct list_head link; +}; + +#define to_hsi_client(dev) container_of(dev, struct hsi_client, device) + +static inline void hsi_client_set_drvdata(struct hsi_client *cl, void *data) +{ + dev_set_drvdata(&cl->device, data); +} + +static inline void *hsi_client_drvdata(struct hsi_client *cl) +{ + return dev_get_drvdata(&cl->device); +} + +/** + * struct hsi_client_driver - Driver associated to an HSI client + * @driver: Driver model representation of the driver + */ +struct hsi_client_driver { + struct device_driver driver; 
+};
+
+#define to_hsi_client_driver(drv) container_of(drv, struct hsi_client_driver,\
+ driver)
+
+int hsi_register_client_driver(struct hsi_client_driver *drv);
+
+static inline void hsi_unregister_client_driver(struct hsi_client_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+
+/**
+ * struct hsi_msg - HSI message descriptor
+ * @link: Free to use by the current descriptor owner
+ * @cl: HSI device client that issues the transfer
+ * @sgt: Head of the scatterlist array
+ * @context: Client context data associated to the transfer
+ * @complete: Transfer completion callback
+ * @destructor: Destructor to free resources when flushing
+ * @status: Status of the transfer when completed
+ * @actual_len: Actual length of data transferred on completion
+ * @channel: Channel where to TX/RX the message
+ * @ttype: Transfer type (TX if set, RX otherwise)
+ * @break_frame: if true HSI will send/receive a break frame (FRAME MODE)
+ */
+struct hsi_msg {
+ struct list_head link;
+ struct hsi_client *cl;
+ struct sg_table sgt;
+ void *context;
+
+ void (*complete)(struct hsi_msg *msg);
+ void (*destructor)(struct hsi_msg *msg);
+
+ int status;
+ unsigned int actual_len;
+ unsigned int channel;
+ unsigned int ttype:1;
+ unsigned int break_frame:1;
+};
+
+struct hsi_msg *hsi_alloc_msg(unsigned int n_frag, gfp_t flags);
+void hsi_free_msg(struct hsi_msg *msg);
+
+/**
+ * struct hsi_port - HSI port device
+ * @device: Driver model representation of the device
+ * @tx_cfg: Current TX path configuration
+ * @rx_cfg: Current RX path configuration
+ * @num: Port number
+ * @shared: Set when port can be shared by different clients
+ * @claimed: Reference count of clients which claimed the port
+ * @lock: Serialize port claim
+ * @async: Asynchronous transfer callback
+ * @setup: Callback to set the HSI client configuration
+ * @flush: Callback to clean the HW state and destroy all pending transfers
+ * @start_tx: Callback to inform that a client wants to TX data
+ * @stop_tx: Callback to inform that a client no longer wishes to TX data
+ * @release: Callback to inform that a client no longer uses the port
+ * @clients: List of hsi_clients using the port.
+ * @clock: Lock to serialize access to the clients list.
+ */ +struct hsi_port { + struct device device; + struct hsi_config tx_cfg; + struct hsi_config rx_cfg; + unsigned int num; + unsigned int shared:1; + int claimed; + struct mutex lock; + int (*async)(struct hsi_msg *msg); + int (*setup)(struct hsi_client *cl); + int (*flush)(struct hsi_client *cl); + int (*start_tx)(struct hsi_client *cl); + int (*stop_tx)(struct hsi_client *cl); + int (*release)(struct hsi_client *cl); + struct list_head clients; + spinlock_t clock; +}; + +#define to_hsi_port(dev) container_of(dev, struct hsi_port, device) +#define hsi_get_port(cl) to_hsi_port((cl)->device.parent) + +void hsi_event(struct hsi_port *port, unsigned int event); +int hsi_claim_port(struct hsi_client *cl, unsigned int share); +void hsi_release_port(struct hsi_client *cl); + +static inline int hsi_port_claimed(struct hsi_client *cl) +{ + return cl->pclaimed; +} + +static inline void hsi_port_set_drvdata(struct hsi_port *port, void *data) +{ + dev_set_drvdata(&port->device, data); +} + +static inline void *hsi_port_drvdata(struct hsi_port *port) +{ + return dev_get_drvdata(&port->device); +} + +/** + * struct hsi_controller - HSI controller device + * @device: Driver model representation of the device + * @id: HSI controller ID + * @num_ports: Number of ports in the HSI controller + * @port: Array of HSI ports + */ +struct hsi_controller { + struct device device; + int id; + unsigned int num_ports; + struct hsi_port *port; +}; + +#define to_hsi_controller(dev) container_of(dev, struct hsi_controller, device) + +struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags); +void hsi_free_controller(struct hsi_controller *hsi); +int hsi_register_controller(struct hsi_controller *hsi); +void hsi_unregister_controller(struct hsi_controller *hsi); + +static inline void hsi_controller_set_drvdata(struct hsi_controller *hsi, + void *data) +{ + dev_set_drvdata(&hsi->device, data); +} + +static inline void *hsi_controller_drvdata(struct hsi_controller *hsi) +{ + return dev_get_drvdata(&hsi->device); +} + +static inline struct hsi_port *hsi_find_port_num(struct hsi_controller *hsi, + unsigned int num) +{ + return (num < hsi->num_ports) ? &hsi->port[num] : NULL; +} + +/* + * API for HSI clients + */ +int hsi_async(struct hsi_client *cl, struct hsi_msg *msg); + +/** + * hsi_setup - Configure the client's port + * @cl: Pointer to the HSI client + * + * When sharing ports, clients should either relay on a single + * client setup or have the same setup for all of them. + * + * Return -errno on failure, 0 on success + */ +static inline int hsi_setup(struct hsi_client *cl) +{ + if (!hsi_port_claimed(cl)) + return -EACCES; + return hsi_get_port(cl)->setup(cl); +} + +/** + * hsi_flush - Flush all pending transactions on the client's port + * @cl: Pointer to the HSI client + * + * This function will destroy all pending hsi_msg in the port and reset + * the HW port so it is ready to receive and transmit from a clean state. 
+ * + * Return -errno on failure, 0 on success + */ +static inline int hsi_flush(struct hsi_client *cl) +{ + if (!hsi_port_claimed(cl)) + return -EACCES; + return hsi_get_port(cl)->flush(cl); +} + +/** + * hsi_async_read - Submit a read transfer + * @cl: Pointer to the HSI client + * @msg: HSI message descriptor of the transfer + * + * Return -errno on failure, 0 on success + */ +static inline int hsi_async_read(struct hsi_client *cl, struct hsi_msg *msg) +{ + msg->ttype = HSI_MSG_READ; + return hsi_async(cl, msg); +} + +/** + * hsi_async_write - Submit a write transfer + * @cl: Pointer to the HSI client + * @msg: HSI message descriptor of the transfer + * + * Return -errno on failure, 0 on success + */ +static inline int hsi_async_write(struct hsi_client *cl, struct hsi_msg *msg) +{ + msg->ttype = HSI_MSG_WRITE; + return hsi_async(cl, msg); +} + +/** + * hsi_start_tx - Signal the port that the client wants to start a TX + * @cl: Pointer to the HSI client + * + * Return -errno on failure, 0 on success + */ +static inline int hsi_start_tx(struct hsi_client *cl) +{ + if (!hsi_port_claimed(cl)) + return -EACCES; + return hsi_get_port(cl)->start_tx(cl); +} + +/** + * hsi_stop_tx - Signal the port that the client no longer wants to transmit + * @cl: Pointer to the HSI client + * + * Return -errno on failure, 0 on success + */ +static inline int hsi_stop_tx(struct hsi_client *cl) +{ + if (!hsi_port_claimed(cl)) + return -EACCES; + return hsi_get_port(cl)->stop_tx(cl); +} +#endif /* __LINUX_HSI_H__ */ diff --git a/include/linux/hsi/hsi_char.h b/include/linux/hsi/hsi_char.h new file mode 100644 index 0000000..b550362 --- /dev/null +++ b/include/linux/hsi/hsi_char.h @@ -0,0 +1,66 @@ +/* + * hsi_char.h + * + * Part of the HSI character device driver. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * + * Contact: Andras Domokos + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + + +#ifndef __HSI_CHAR_H +#define __HSI_CHAR_H + +#define HSI_CHAR_MAGIC 'k' +#define HSC_IOW(num, dtype) _IOW(HSI_CHAR_MAGIC, num, dtype) +#define HSC_IOR(num, dtype) _IOR(HSI_CHAR_MAGIC, num, dtype) +#define HSC_IOWR(num, dtype) _IOWR(HSI_CHAR_MAGIC, num, dtype) +#define HSC_IO(num) _IO(HSI_CHAR_MAGIC, num) + +#define HSC_RESET HSC_IO(16) +#define HSC_SET_PM HSC_IO(17) +#define HSC_SEND_BREAK HSC_IO(18) +#define HSC_SET_RX HSC_IOW(19, struct hsc_rx_config) +#define HSC_GET_RX HSC_IOW(20, struct hsc_rx_config) +#define HSC_SET_TX HSC_IOW(21, struct hsc_tx_config) +#define HSC_GET_TX HSC_IOW(22, struct hsc_tx_config) + +#define HSC_PM_DISABLE 0 +#define HSC_PM_ENABLE 1 + +#define HSC_MODE_STREAM 1 +#define HSC_MODE_FRAME 2 +#define HSC_FLOW_SYNC 0 +#define HSC_ARB_RR 0 +#define HSC_ARB_PRIO 1 + +struct hsc_rx_config { + uint32_t mode; + uint32_t flow; + uint32_t channels; +}; + +struct hsc_tx_config { + uint32_t mode; + uint32_t flow; + uint32_t channels; + uint32_t speed; + uint32_t arb_mode; +}; + +#endif /* __HSI_CHAR_H */ diff --git a/include/linux/hsi/hsi_ffl_tty.h b/include/linux/hsi/hsi_ffl_tty.h new file mode 100644 index 0000000..77a7361 --- /dev/null +++ b/include/linux/hsi/hsi_ffl_tty.h @@ -0,0 +1,297 @@ +/* + * + * hsi_ffl_tty.h + * + * Fixed frame length modem protocol over HSI: IOCTL definitions + * + * Copyright (C) 2010 Intel Corporation. All rights reserved. + * + * Contact: Olivier Stoltz Douchet + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ +#ifndef _HSI_FFL_TTY_H +#define _HSI_FFL_TTY_H + +#include + +/* reasons for hanging up tty */ +enum { + HU_TIMEOUT = 1, + HU_RESET = 2, + HU_COREDUMP = 4, +}; + +/** + * struct hsi_ffl_stats - statistics related to the TX and RX side + * @data_sz: total size of actual transferred data + * @frame_cnt: total number of transferred frames + * @overflow_cnt: total number of transfer stalls due to FIFO full + */ +struct hsi_ffl_stats { + unsigned long long data_sz; + unsigned int frame_cnt; + unsigned int overflow_cnt; +}; + +#define FFL_TTY_MAGIC 0x77 + +/* + * FFL_TTY_RESET_TX - reset the TX state machine (flushes it) + */ +#define FFL_TTY_RESET_TX _IO(FFL_TTY_MAGIC, 0) + +/* + * FFL_TTY_RESET_RX - reset the RX state machine (flushes it) + */ +#define FFL_TTY_RESET_RX _IO(FFL_TTY_MAGIC, 1) + +/* + * FFL_TTY_GET_TX_STATE - get the current state of the TX state machine + */ +#define FFL_TTY_GET_TX_STATE _IOR(FFL_TTY_MAGIC, 2, unsigned int) + +/* + * FFL_TTY_GET_RX_STATE - get the current state of the RX state machine + */ +#define FFL_TTY_GET_RX_STATE _IOR(FFL_TTY_MAGIC, 3, unsigned int) + +/* + * FFL_TTY_MODEM_RESET - reset the modem (solicited reset) + * Shared with SPI + */ +#define FFL_TTY_MODEM_RESET _IO(FFL_TTY_MAGIC, 4) + +/* + * FFL_TTY_MODEM_STATE - return 1 if first transmission completed + * Shared with SPI + */ +#define FFL_TTY_MODEM_STATE _IOR(FFL_TTY_MAGIC, 5, int) + +/* + * FFL_TTY_GET_HANGUP_REASON - return reason for latest hangup + * Shared with SPI + */ +#define FFL_TTY_GET_HANGUP_REASON _IOR(FFL_TTY_MAGIC, 6, int) + +/* + * FFL_TTY_SET_TX_WAIT_MAX - set the maximal size of the TX waiting FIFO + */ +#define FFL_TTY_SET_TX_WAIT_MAX _IOW(FFL_TTY_MAGIC, 8, unsigned int) + +/* + * FFL_TTY_GET_TX_WAIT_MAX - get the maximal size of the TX waiting FIFO + */ +#define FFL_TTY_GET_TX_WAIT_MAX _IOR(FFL_TTY_MAGIC, 8, unsigned int) + +/* + * FFL_TTY_SET_RX_WAIT_MAX - set the maximal size of the RX waiting FIFO + */ +#define FFL_TTY_SET_RX_WAIT_MAX _IOW(FFL_TTY_MAGIC, 9, unsigned int) + +/* + * FFL_TTY_GET_RX_WAIT_MAX - get the maximal size of the RX waiting FIFO + */ +#define FFL_TTY_GET_RX_WAIT_MAX _IOR(FFL_TTY_MAGIC, 9, unsigned int) + +/* + * FFL_TTY_SET_TX_CTRL_MAX - set the maximal size of the TX controller FIFO + */ +#define FFL_TTY_SET_TX_CTRL_MAX _IOW(FFL_TTY_MAGIC, 10, unsigned int) + +/* + * FFL_TTY_GET_TX_CTRL_MAX - get the maximal size of the TX controller FIFO + */ +#define FFL_TTY_GET_TX_CTRL_MAX _IOR(FFL_TTY_MAGIC, 10, unsigned int) + +/* + * FFL_TTY_SET_RX_CTRL_MAX - set the maximal size of the RX controller FIFO + */ +#define FFL_TTY_SET_RX_CTRL_MAX _IOW(FFL_TTY_MAGIC, 11, unsigned int) + +/* + * FFL_TTY_GET_RX_CTRL_MAX - get the maximal size of the RX controller FIFO + */ +#define FFL_TTY_GET_RX_CTRL_MAX _IOR(FFL_TTY_MAGIC, 11, unsigned int) + +/* + * FFL_TTY_SET_TX_DELAY - set the TX delay in us + */ +#define FFL_TTY_SET_TX_DELAY _IOW(FFL_TTY_MAGIC, 12, unsigned int) + +/* + * FFL_TTY_GET_TX_DELAY - get the TX delay in us + */ +#define FFL_TTY_GET_TX_DELAY _IOR(FFL_TTY_MAGIC, 12, unsigned int) + +/* + * FFL_TTY_SET_RX_DELAY - set the RX delay in us + */ +#define FFL_TTY_SET_RX_DELAY _IOW(FFL_TTY_MAGIC, 13, unsigned int) + +/* + * FFL_TTY_GET_RX_DELAY - get the RX delay in us + */ +#define FFL_TTY_GET_RX_DELAY _IOR(FFL_TTY_MAGIC, 13, unsigned int) + 
+/* + * FFL_TTY_SET_TX_FLOW - set the TX flow type (PIPE, SYNC) + */ +#define FFL_TTY_SET_TX_FLOW _IOW(FFL_TTY_MAGIC, 16, unsigned int) + +/* + * FFL_TTY_GET_TX_FLOW - get the TX flow type (PIPE, SYNC) + */ +#define FFL_TTY_GET_TX_FLOW _IOR(FFL_TTY_MAGIC, 16, unsigned int) + +/* + * FFL_TTY_SET_RX_FLOW - set the RX flow type (PIPE, SYNC) + */ +#define FFL_TTY_SET_RX_FLOW _IOW(FFL_TTY_MAGIC, 17, unsigned int) + +/* + * FFL_TTY_GET_RX_FLOW - get the RX flow type (PIPE, SYNC) + */ +#define FFL_TTY_GET_RX_FLOW _IOR(FFL_TTY_MAGIC, 17, unsigned int) + +/* + * FFL_TTY_SET_TX_MODE - set the TX mode type (FRAME, STREAM) + */ +#define FFL_TTY_SET_TX_MODE _IOW(FFL_TTY_MAGIC, 18, unsigned int) + +/* + * FFL_TTY_GET_TX_MODE - get the TX mode type (FRAME, STREAM) + */ +#define FFL_TTY_GET_TX_MODE _IOR(FFL_TTY_MAGIC, 18, unsigned int) + +/* + * FFL_TTY_SET_RX_MODE - set the RX mode type (FRAME, STREAM) + */ +#define FFL_TTY_SET_RX_MODE _IOW(FFL_TTY_MAGIC, 19, unsigned int) + +/* + * FFL_TTY_GET_RX_MODE - get the RX mode type (FRAME, STREAM) + */ +#define FFL_TTY_GET_RX_MODE _IOR(FFL_TTY_MAGIC, 19, unsigned int) + +/* + * FFL_TTY_SET_TX_CHANNELS - set the maximal number of TX channels + */ +#define FFL_TTY_SET_TX_CHANNELS _IOW(FFL_TTY_MAGIC, 20, unsigned int) + +/* + * FFL_TTY_GET_TX_CHANNELS - get the maximal number of TX channels + */ +#define FFL_TTY_GET_TX_CHANNELS _IOR(FFL_TTY_MAGIC, 20, unsigned int) + +/* + * FFL_TTY_SET_RX_CHANNELS - set the maximal number of RX channels + */ +#define FFL_TTY_SET_RX_CHANNELS _IOW(FFL_TTY_MAGIC, 21, unsigned int) + +/* + * FFL_TTY_GET_RX_CHANNELS - get the maximal number of RX channels + */ +#define FFL_TTY_GET_RX_CHANNELS _IOR(FFL_TTY_MAGIC, 21, unsigned int) + +/* + * FFL_TTY_SET_TX_CHANNEL - set the FFL TX channel + */ +#define FFL_TTY_SET_TX_CHANNEL _IOW(FFL_TTY_MAGIC, 22, unsigned int) + +/* + * FFL_TTY_GET_TX_CHANNEL - get the FFL TX channel + */ +#define FFL_TTY_GET_TX_CHANNEL _IOR(FFL_TTY_MAGIC, 22, unsigned int) + +/* + * FFL_TTY_SET_RX_CHANNEL - set the FFL RX channel + */ +#define FFL_TTY_SET_RX_CHANNEL _IOW(FFL_TTY_MAGIC, 23, unsigned int) + +/* + * FFL_TTY_GET_RX_CHANNEL - get the FFL RX channel + */ +#define FFL_TTY_GET_RX_CHANNEL _IOR(FFL_TTY_MAGIC, 23, unsigned int) + +/* + * FFL_TTY_SET_TX_FRAME_LEN - set the FFL TX frame length + */ +#define FFL_TTY_SET_TX_FRAME_LEN _IOW(FFL_TTY_MAGIC, 24, unsigned int) + +/* + * FFL_TTY_GET_TX_FRAME_LEN - get the FFL TX frame length + */ +#define FFL_TTY_GET_TX_FRAME_LEN _IOR(FFL_TTY_MAGIC, 24, unsigned int) + +/* + * FFL_TTY_SET_RX_FRAME_LEN - set the FFL RX frame length + */ +#define FFL_TTY_SET_RX_FRAME_LEN _IOW(FFL_TTY_MAGIC, 25, unsigned int) + +/* + * FFL_TTY_GET_RX_FRAME_LEN - get the FFL RX frame length + */ +#define FFL_TTY_GET_RX_FRAME_LEN _IOR(FFL_TTY_MAGIC, 25, unsigned int) + +/* + * FFL_TTY_SET_TX_ARB_MODE - set the FFL TX arbitration (RR ou priority) + */ +#define FFL_TTY_SET_TX_ARB_MODE _IOW(FFL_TTY_MAGIC, 28, unsigned int) + +/* + * FFL_TTY_GET_TX_ARB_MODE - get the FFL TX arbitration (RR or priority) + */ +#define FFL_TTY_GET_TX_ARB_MODE _IOR(FFL_TTY_MAGIC, 28, unsigned int) + +/* + * FFL_TTY_SET_TX_FREQUENCY - set the maximum FFL TX frequency (in kbit/s) + */ +#define FFL_TTY_SET_TX_FREQUENCY _IOW(FFL_TTY_MAGIC, 30, unsigned int) + +/* + * FFL_TTY_GET_TX_FREQUENCY - get the maximum FFL TX frequency (in kbit/s) + */ +#define FFL_TTY_GET_TX_FREQUENCY _IOR(FFL_TTY_MAGIC, 30, unsigned int) + +/* + * FFL_TTY_RESET_TX_STATS - reset the TX statistics + */ +#define FFL_TTY_RESET_TX_STATS 
_IO(FFL_TTY_MAGIC, 32) + +/* + * FFL_TTY_GET_TX_STATS - get the TX statistics + */ +#define FFL_TTY_GET_TX_STATS _IOR(FFL_TTY_MAGIC, 32, \ + struct hsi_ffl_stats) + +/* + * FFL_TTY_RESET_RX_STATS - reset the RX statistics + */ +#define FFL_TTY_RESET_RX_STATS _IO(FFL_TTY_MAGIC, 33) + +/* + * FFL_TTY_GET_RX_STATS - get the RX statistics + */ +#define FFL_TTY_GET_RX_STATS _IOR(FFL_TTY_MAGIC, 33, \ + struct hsi_ffl_stats) + +/* + * FFL_TTY_SEND_BREAK - send a BREAK frame to the modem + */ +#define FFL_TTY_SEND_BREAK _IO(FFL_TTY_MAGIC, 34) + +#endif /* _HSI_FFL_TTY_H */ + diff --git a/include/linux/hsi/intel_mid_hsi.h b/include/linux/hsi/intel_mid_hsi.h new file mode 100644 index 0000000..5b101d4 --- /dev/null +++ b/include/linux/hsi/intel_mid_hsi.h @@ -0,0 +1,60 @@ +/* + * intel_mid_hsi.h + * + * Header for the Intel HSI controller driver. + * + * Copyright (C) 2010, 2011 Intel Corporation. All rights reserved. + * + * Contact: Jim Stanley + * Contact: Olivier Stoltz Douchet + * Modified from OMAP SSI driver + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ +#ifndef __INTEL_MID_HSI_H__ +#define __INTEL_MID_HSI_H__ + +#define HSI_MID_MAX_CHANNELS 8 + +/** + * struct hsi_mid_platform_data - HSI platform specific data for clients + * @rx_dma_channels: HSI-channel indexed list of RX DMA channel (-1 if no DMA) + * @rx_sg_entries: HSI-channel indexed list of RX scatter gather entries + * @rx_fifo_size: HSI-channel indexed list of RX FIFO size in HSI frames + * @tx_dma_channels: HSI-channel indexed list of TX DMA channel (-1 if no DMA) + * @tx_sg_entries: HSI-channel indexed list of TX scatter gather entries + * @tx_fifo_size: HSI-channel indexed list of TX FIFO size in HSI frames + * @gpio_mdm_rst_out: GPIO index for modem reset input + * @gpio_mdm_pwr_on: GPIO index for modem power on + * @gpio_mdm_rst_bbn: GPIO index for modem reset request + * @gpio_fcdp_rb: GPIO index for modem core dump + */ +struct hsi_mid_platform_data { + int rx_dma_channels[HSI_MID_MAX_CHANNELS]; + int rx_sg_entries[HSI_MID_MAX_CHANNELS]; + int rx_fifo_sizes[HSI_MID_MAX_CHANNELS]; + int tx_dma_channels[HSI_MID_MAX_CHANNELS]; + int tx_sg_entries[HSI_MID_MAX_CHANNELS]; + int tx_fifo_sizes[HSI_MID_MAX_CHANNELS]; + + /* FIXME: the next four entries need to go in a separate client specific + * section */ + int gpio_mdm_rst_out; + int gpio_mdm_pwr_on; + int gpio_mdm_rst_bbn; + int gpio_fcdp_rb; +}; + +#endif /* __INTEL_MID_HSI_H__ */ -- 2.7.4
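
For reference, a board file wires a client to this controller by filling in a
struct hsi_board_info (pointing its platform_data at a struct
hsi_mid_platform_data as defined above) and handing the array to
hsi_register_board_info() before the controller registers. The sketch below
only shows the shape of that description; the client name, channel layout,
FIFO sizes and speeds are illustrative values, not the ones used on any real
board.

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/hsi/hsi.h>
	#include <linux/hsi/intel_mid_hsi.h>

	/* Illustrative: channel 0 in PIO mode, channel 1 on DMA channel 0 */
	static struct hsi_mid_platform_data example_mid_pdata = {
		.tx_dma_channels = { -1,  0, -1, -1, -1, -1, -1, -1 },
		.rx_dma_channels = { -1,  0, -1, -1, -1, -1, -1, -1 },
		.tx_sg_entries   = {  1, 64,  1,  1,  1,  1,  1,  1 },
		.rx_sg_entries   = {  1, 64,  1,  1,  1,  1,  1,  1 },
		.tx_fifo_sizes   = { 128, 256, -1, -1, -1, -1, -1, -1 },
		.rx_fifo_sizes   = { 128, 256, -1, -1, -1, -1, -1, -1 },
	};

	static struct hsi_board_info example_hsi_devices[] __initdata = {
		{
			/* illustrative; must match the client driver name */
			.name = "hsi_char",
			.hsi_id = 0,
			.port = 0,
			.platform_data = &example_mid_pdata,
			.tx_cfg = {
				.mode = HSI_MODE_FRAME,
				.flow = HSI_FLOW_SYNC,
				.channels = 8,
				.speed = 100000,
				.arb_mode = HSI_ARB_RR,
			},
			.rx_cfg = {
				.mode = HSI_MODE_FRAME,
				.flow = HSI_FLOW_SYNC,
				.channels = 8,
				.speed = 100000,
			},
		},
	};

	/* Called from the board's arch init code, before the PCI probe runs */
	static int __init example_board_hsi_init(void)
	{
		return hsi_register_board_info(example_hsi_devices,
					       ARRAY_SIZE(example_hsi_devices));
	}

A client driver then binds to such a device by registering a struct
hsi_client_driver whose driver.name equals the board info name, since
hsi_bus_match() simply compares the device name against the driver name.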