From 08adb2e63485a1031287ee8756259cd5f0c49d98 Mon Sep 17 00:00:00 2001 From: Olivier Stoltz Douchet Date: Tue, 27 Mar 2012 16:42:33 +0200 Subject: [PATCH] intel_mid_hsi.c: improving hsi_pm_runtime_get_sync and start tx synchronicity BZ: 30613 The purpose of this patch is twofold: making both the hsi_pm_runtime_get_sync() call and the start TX callback function synchronous with the PM management. To that end, the hsi_pm_runtime_get_sync() and start_tx() functions now wait for the hsi_pm_resume() callback completion event, ensuring that the IP is actually resumed when these functions return. Relying solely on the wake_lock and pm_runtime_get_sync() is not sufficient for this purpose because of race conditions between the Linux runtime PM state machine and Android's PM state machine. The TX path of the FFL modem protocol driver has been updated to handle the synchronous start TX call. Since the TTY write function can be called from non-sleepable contexts, start TX is now performed in a separate work item, outside the ffl_tty_write() function. Using the single-threaded TX workqueue to manage start TX and stop TX also serialises this handling and eases the implementation of the stop TX to start TX pulse delay. This patch also takes the opportunity to switch the modem protocol driver from the existing 5 workqueues to a 3-workqueue implementation (not counting the start TX and stop TX workqueue), which makes much more sense. Change-Id: I414757e6312c6648f6007ad17ac160bde758441b Signed-off-by: Olivier Stoltz Douchet Reviewed-on: http://android.intel.com:8080/40948 Reviewed-by: Roulliere, Pierre Reviewed-by: Predon, Frederic Reviewed-by: Lucas, Guillaume Reviewed-by: Lebsir, SamiX Tested-by: Lebsir, SamiX Reviewed-by: buildbot Tested-by: buildbot --- drivers/hsi/clients/hsi_ffl_tty.c | 273 ++++++++++++++++++-------------- drivers/hsi/controllers/intel_mid_hsi.c | 112 ++++++++----- 2 files changed, 222 insertions(+), 163 deletions(-) diff --git a/drivers/hsi/clients/hsi_ffl_tty.c b/drivers/hsi/clients/hsi_ffl_tty.c index 454a8b9..f2c7644 100644 --- a/drivers/hsi/clients/hsi_ffl_tty.c +++ b/drivers/hsi/clients/hsi_ffl_tty.c @@ -72,6 +72,9 @@ /* ACWAKE minimal pulse udelay in us (set to 0 if none is necessary) */ #define ACWAKE_MINIMAL_PULSE_UDELAY 800 +/* ACWAKE to DATA transmission minimal delay in us (0 if none is necessary) */ +#define ACWAKE_TO_DATA_MINIMAL_UDELAY 100 + /* Initial minimal buffering size (in bytes) */ #define FFL_MIN_TX_BUFFERING 65536 #define FFL_MIN_RX_BUFFERING 65536 @@ -117,8 +120,8 @@ /* RX and TX state machine definitions */ enum { IDLE, + READY, ACTIVE, - TTY, }; #define FFL_GLOBAL_STATE_SZ 2 @@ -140,7 +143,7 @@ enum { #define V1P35_OFF 4 #define V1P35_ON 6 #define COLD_BOOT_DELAY_OFF 20000 /* 20 ms (use of usleep_range) */ -#define COLD_BOOT_DELAY_ON 10000 /* 10 ms (use of usleep_range) */ +#define COLD_BOOT_DELAY_ON 10000 /* 10 ms (use of usleep_range) */ /* Forward declaration for ffl_xfer_ctx structure */ struct ffl_ctx; @@ -259,6 +262,8 @@ struct ffl_recovery_ctx { * clean or about to be clean * @tx_write_pipe_clean_event: event signalling that the write part of the TX * pipeline is clean (no more write can occur) + * @start_tx: work for making synchronous TX start request + * @stop_tx: work for making synchronous TX stop request * @tx: current TX context * @rx: current RX context * @do_tty_forward: dedicated TTY forwarding work structure @@ -273,9 +278,8 @@ struct ffl_ctx { struct tty_port tty_prt; wait_queue_head_t
tx_full_pipe_clean_event; wait_queue_head_t tx_write_pipe_clean_event; -#if (ACWAKE_MINIMAL_PULSE_UDELAY > 0) - ktime_t ktime_stop_tx; -#endif + struct work_struct start_tx; + struct work_struct stop_tx; struct ffl_xfer_ctx tx; struct ffl_xfer_ctx rx; struct work_struct do_tty_forward; @@ -303,22 +307,14 @@ struct ffl_driver { /* Protocol driver instance */ static struct ffl_driver ffl_drv; -/* Workqueue for submitting frame-recycling background tasks */ -static struct workqueue_struct *ffl_recycle_wq; +/* Workqueue for submitting tx background tasks */ +static struct workqueue_struct *ffl_tx_wq; -/* Workqueue for submitting tx timeout hangup background tasks */ -static struct workqueue_struct *ffl_hangup_wq; +/* Workqueue for submitting rx background tasks */ +static struct workqueue_struct *ffl_rx_wq; -#ifdef USE_IPC_ERROR_RECOVERY -/* Workqueue for submitting TX draining upon recovery background tasks */ -static struct workqueue_struct *ffl_tx_drain_wq; - -/* Workqueue for submitting RX draining upon recovery background tasks */ -static struct workqueue_struct *ffl_rx_drain_wq; -#endif - -/* Workqueue for tty buffer flush */ -static struct workqueue_struct *ffl_forwarding_wq; +/* Workqueue for submitting hangup background tasks */ +static struct workqueue_struct *ffl_hu_wq; /* * Modem power / reset managers @@ -926,6 +922,7 @@ static int _ffl_from_wait_to_ctrl(struct ffl_xfer_ctx *ctx, mod_timer(&main_ctx->hangup.timer, jiffies + usecs_to_jiffies(TTY_HANGUP_DELAY)); del_timer(&ctx->timer); + _ffl_ctx_set_state(ctx, ACTIVE); } #ifdef CONFIG_HSI_FFL_TTY_STATS ctx->data_sz += actual_len; @@ -1175,32 +1172,91 @@ static __must_check int ffl_modem_is_awake(struct ffl_xfer_ctx *ctx) */ /** + * ffl_do_start_tx - making a synchronous HSI TX start request + * @work: a reference to work queue element + */ +static void ffl_do_start_tx(struct work_struct *work) +{ + struct ffl_ctx *main_ctx; + struct ffl_xfer_ctx *ctx; + unsigned long flags; + int exit; + + main_ctx = container_of(work, struct ffl_ctx, start_tx); + ctx = &main_ctx->tx; + + spin_lock_irqsave(&ctx->lock, flags); + exit = !_ffl_ctx_is_state(ctx, IDLE); + spin_unlock_irqrestore(&ctx->lock, flags); + + if (unlikely(exit)) + return; + + exit = hsi_start_tx(main_ctx->client); + + if (unlikely(exit)) { + pr_err(DRVNAME ": hsi_start_tx error (%d)", exit); + return; + } + +#if (ACWAKE_TO_DATA_MINIMAL_UDELAY > 0) + /* Prevent too soon a data write further to a ACWAKE */ + udelay(ACWAKE_TO_DATA_MINIMAL_UDELAY); +#endif + + spin_lock_irqsave(&ctx->lock, flags); + /* The HSI controller is ready, push as many frames as possible */ + _ffl_ctx_set_state(ctx, READY); + _ffl_pop_wait_push_ctrl(ctx, &flags); + spin_unlock_irqrestore(&ctx->lock, flags); +} + +/** + * ffl_do_stop_tx - making a synchronous HSI TX stop request + * @work: a reference to work queue element + */ +static void ffl_do_stop_tx(struct work_struct *work) +{ + struct ffl_ctx *main_ctx; + struct ffl_xfer_ctx *ctx; + unsigned long flags; + int exit; + + main_ctx = container_of(work, struct ffl_ctx, stop_tx); + ctx = &main_ctx->tx; + + exit = hsi_stop_tx(main_ctx->client); + + if (unlikely(exit)) { + pr_err(DRVNAME ": hsi_stop_tx error (%d)", exit); + spin_lock_irqsave(&ctx->lock, flags); + _ffl_ctx_set_state(ctx, READY); + spin_unlock_irqrestore(&ctx->lock, flags); + return; + } + +#if (ACWAKE_MINIMAL_PULSE_UDELAY > 0) + /* Prevent too small a ACWAKE pulse */ + udelay(ACWAKE_MINIMAL_PULSE_UDELAY); +#endif +} + +/** * _ffl_start_tx - update the TX state machine on every new transfer * 
@ctx: a reference to the FFL TX context to consider - * @flags: a reference to the flag used by the external spinlock, passed in to - * unlock it and end the atomic context temporarily. * * This helper function updates the TX state if it is currently idle and * inform the HSI framework and attached controller. */ -static void _ffl_start_tx(struct ffl_xfer_ctx *ctx, unsigned long *flags) +static void _ffl_start_tx(struct ffl_xfer_ctx *ctx) { - struct ffl_ctx *main_ctx = container_of(ctx, struct ffl_ctx, tx); - int err; + struct ffl_ctx *main_ctx; if (_ffl_ctx_is_state(ctx, IDLE)) { - _ffl_ctx_set_state(ctx, ACTIVE); - spin_unlock_irqrestore(&ctx->lock, *flags); -#if (ACWAKE_MINIMAL_PULSE_UDELAY > 0) - while (ktime_us_delta(ktime_get(), main_ctx->ktime_stop_tx) < - ACWAKE_MINIMAL_PULSE_UDELAY) - cpu_relax(); -#endif - err = hsi_start_tx(main_ctx->client); - spin_lock_irqsave(&ctx->lock, *flags); - if (unlikely(err)) - _ffl_ctx_set_state(ctx, IDLE); + main_ctx = container_of(ctx, struct ffl_ctx, tx); + queue_work(ffl_tx_wq, &main_ctx->start_tx); } else { + _ffl_ctx_set_state(ctx, READY); del_timer(&ctx->timer); } } @@ -1209,25 +1265,18 @@ static void _ffl_start_tx(struct ffl_xfer_ctx *ctx, unsigned long *flags) * _ffl_stop_tx - update the TX state machine after expiration of the TX active * timeout further to a no outstanding TX transaction status * @ctx: a reference to the FFL TX context to consider - * @flags: a reference to the flag used by the external spinlock, passed in to - * unlock it and end the atomic context temporarily. * * This helper function updates the TX state if it is currently active and * inform the HSI framework and attached controller. */ -static void _ffl_stop_tx(struct ffl_xfer_ctx *ctx, unsigned long *flags) +static void _ffl_stop_tx(struct ffl_xfer_ctx *ctx) { - struct ffl_ctx *main_ctx; + struct ffl_ctx *main_ctx; if (_ffl_ctx_is_state(ctx, ACTIVE)) { - _ffl_ctx_set_state(ctx, IDLE); main_ctx = container_of(ctx, struct ffl_ctx, tx); - spin_unlock_irqrestore(&ctx->lock, *flags); - hsi_stop_tx(main_ctx->client); -#if (ACWAKE_MINIMAL_PULSE_UDELAY > 0) - main_ctx->ktime_stop_tx = ktime_get(); -#endif - spin_lock_irqsave(&ctx->lock, *flags); + _ffl_ctx_set_state(ctx, IDLE); + queue_work(ffl_tx_wq, &main_ctx->stop_tx); } } @@ -1245,7 +1294,7 @@ static void ffl_stop_tx(unsigned long param) unsigned long flags; spin_lock_irqsave(&ctx->lock, flags); - _ffl_stop_tx(ctx, &flags); + _ffl_stop_tx(ctx); spin_unlock_irqrestore(&ctx->lock, flags); } @@ -1280,7 +1329,7 @@ static void ffl_start_rx(struct hsi_client *cl) static inline void _ffl_state_rx_not_active(struct ffl_xfer_ctx *ctx) { if (!_ffl_ctx_is_empty(ctx)) - _ffl_ctx_set_state(ctx, TTY); + _ffl_ctx_set_state(ctx, READY); else _ffl_ctx_set_state(ctx, IDLE); } @@ -1503,8 +1552,8 @@ static void _ffl_rx_fifo_wait_recycle(struct ffl_xfer_ctx *ctx) * ffl_increase_pool_of_frames - background work aimed at creating new frames * @work: a reference to the work context * - * This function is called as a background job (in the ffl_recycle_wq work - * queue) for performing the frame resource allocation (which can then sleep). + * This function is called as a background job (in the ffl_rx_wq/ffl_tx_wq work + * queues) for performing the frame resource allocation (which can then sleep). * * An error message is sent upon the failure of FFL_FRAME_ALLOC_RETRY_MAX_CNT * allocation requests. 
@@ -1847,7 +1896,7 @@ static void ffl_complete_rx(struct hsi_msg *frame) #endif _ffl_fifo_wait_push(ctx, frame); spin_unlock_irqrestore(&ctx->lock, flags); - queue_work(ffl_forwarding_wq, &main_ctx->do_tty_forward); + queue_work(ffl_rx_wq, &main_ctx->do_tty_forward); } /** @@ -1863,7 +1912,7 @@ static void ffl_rx_forward_retry(unsigned long param) struct ffl_ctx *main_ctx = container_of(ctx, struct ffl_ctx, rx); - queue_work(ffl_forwarding_wq, &main_ctx->do_tty_forward); + queue_work(ffl_rx_wq, &main_ctx->do_tty_forward); } /** @@ -1881,7 +1930,7 @@ static void ffl_rx_forward_resume(struct tty_struct *tty) main_ctx = (struct ffl_ctx *) tty->driver_data; if (main_ctx) - queue_work(ffl_forwarding_wq, &main_ctx->do_tty_forward); + queue_work(ffl_rx_wq, &main_ctx->do_tty_forward); } /* @@ -2058,9 +2107,13 @@ static void ffl_tty_port_shutdown(struct tty_port *port) del_timer_sync(&tx_ctx->timer); spin_lock_irqsave(&tx_ctx->lock, flags); _ffl_tx_fifo_wait_recycle(tx_ctx); - _ffl_stop_tx(tx_ctx, &flags); + _ffl_stop_tx(tx_ctx); spin_unlock_irqrestore(&tx_ctx->lock, flags); + /* Flush the ACWAKE works */ + flush_work_sync(&ctx->start_tx); + flush_work_sync(&ctx->stop_tx); + hsi_release_port(ctx->client); } @@ -2143,7 +2196,7 @@ static void ffl_tty_tx_timeout(unsigned long int param) if (do_hangup) { pr_err(DRVNAME ": TX timeout"); - queue_work(ffl_hangup_wq, &ctx->hangup.work); + queue_work(ffl_hu_wq, &ctx->hangup.work); } } @@ -2274,7 +2327,7 @@ static int do_ffl_tty_write(struct ffl_xfer_ctx *ctx, unsigned char *buf, /* Do a start TX on new frames only and after having marked * the current frame as pending, e.g. don't touch ! */ if (offset == 0) - _ffl_start_tx(ctx, &flags); + _ffl_start_tx(ctx); } else { _ffl_ctx_set_flag(ctx, TX_TTY_WRITE_PENDING_BIT); #ifdef CONFIG_HSI_FFL_TTY_STATS @@ -2298,7 +2351,8 @@ static int do_ffl_tty_write(struct ffl_xfer_ctx *ctx, unsigned char *buf, ctx->buffered += copied; ctx->room -= copied; frame->status = HSI_STATUS_COMPLETED; - _ffl_pop_wait_push_ctrl(ctx, &flags); + if (!_ffl_ctx_is_state(ctx, IDLE)) + _ffl_pop_wait_push_ctrl(ctx, &flags); } else { /* ERROR frames have already been popped from the wait FIFO */ _ffl_free_frame(ctx, frame); @@ -2426,6 +2480,7 @@ static int ffl_tty_ioctl(struct tty_struct *tty, { struct ffl_ctx *ctx = (struct ffl_ctx *) tty->driver_data; struct work_struct *increase_pool = NULL; + static struct workqueue_struct *increase_pool_wq; unsigned int data; #ifdef CONFIG_HSI_FFL_TTY_STATS struct hsi_ffl_stats stats; @@ -2461,6 +2516,7 @@ static int ffl_tty_ioctl(struct tty_struct *tty, if (arg > ctx->tx.wait_max) increase_pool = &ctx->tx.increase_pool; ctx->tx.wait_max = arg; + increase_pool_wq = ffl_tx_wq; spin_unlock_irqrestore(&ctx->tx.lock, flags); } else { dev_dbg(&ctx->client->device, @@ -2483,6 +2539,7 @@ static int ffl_tty_ioctl(struct tty_struct *tty, if (arg > ctx->rx.ctrl_max) increase_pool = &ctx->rx.increase_pool; ctx->rx.wait_max = arg; + increase_pool_wq = ffl_rx_wq; spin_unlock_irqrestore(&ctx->rx.lock, flags); } else { dev_dbg(&ctx->client->device, @@ -2505,6 +2562,7 @@ static int ffl_tty_ioctl(struct tty_struct *tty, if (arg > ctx->tx.ctrl_max) increase_pool = &ctx->tx.increase_pool; ctx->tx.ctrl_max = arg; + increase_pool_wq = ffl_tx_wq; spin_unlock_irqrestore(&ctx->tx.lock, flags); } else { dev_dbg(&ctx->client->device, @@ -2527,6 +2585,7 @@ static int ffl_tty_ioctl(struct tty_struct *tty, if (arg > ctx->rx.ctrl_max) increase_pool = &ctx->rx.increase_pool; ctx->rx.ctrl_max = arg; + increase_pool_wq = ffl_rx_wq; 
spin_unlock_irqrestore(&ctx->rx.lock, flags); } else { dev_dbg(&ctx->client->device, @@ -2838,7 +2897,7 @@ static int ffl_tty_ioctl(struct tty_struct *tty, } if (increase_pool) - (void) queue_work(ffl_recycle_wq, increase_pool); + (void) queue_work(increase_pool_wq, increase_pool); return 0; } @@ -2907,7 +2966,7 @@ static int is_modem_reset(char *val, const struct kernel_param *kp) reset_ongoing |= (ctx->reset.ongoing << i); } - return sprintf(val, "%d", reset_ongoing); + return sprintf(val, "%lu", reset_ongoing); } /** @@ -2990,7 +3049,7 @@ static int hangup_reasons(char *val, const struct kernel_param *kp) } } - return sprintf(val, "%d", hangup_reasons); + return sprintf(val, "%lu", hangup_reasons); } /** @@ -3136,7 +3195,7 @@ static irqreturn_t ffl_reset_isr(int irq, void *dev) spin_unlock(&tx_ctx->lock); if (do_hangup) - queue_work(ffl_hangup_wq, &ctx->hangup.work); + queue_work(ffl_hu_wq, &ctx->hangup.work); } return IRQ_HANDLED; @@ -3164,11 +3223,9 @@ static void do_recovery_drain_unless(struct ffl_xfer_ctx *xfer_ctx, _ffl_ctx_set_flag(xfer_ctx, ERROR_RECOVERY_ONGOING_BIT); if (xfer_ctx == &main_ctx->rx) { del_timer(&recovery_ctx->rx_drain_timer); - queue_work(ffl_rx_drain_wq, - &recovery_ctx->do_rx_drain); + queue_work(ffl_rx_wq, &recovery_ctx->do_rx_drain); } else { - queue_work(ffl_tx_drain_wq, - &recovery_ctx->do_tx_drain); + queue_work(ffl_tx_wq, &recovery_ctx->do_tx_drain); } } spin_unlock_irqrestore(&xfer_ctx->lock, flags); @@ -3377,7 +3434,7 @@ static void ffl_recovery_ctx_clear(struct ffl_recovery_ctx *ctx_recovery) * been started in any case) */ flush_work(&ctx_recovery->do_tx_drain); if (del_timer_sync(&ctx_recovery->rx_drain_timer)) - queue_work(ffl_rx_drain_wq, &ctx_recovery->do_rx_drain); + queue_work(ffl_rx_wq, &ctx_recovery->do_rx_drain); flush_work(&ctx_recovery->do_rx_drain); } #endif @@ -3781,6 +3838,9 @@ static int __init ffl_driver_probe(struct device *dev) &client->rx_cfg); INIT_WORK(&ctx->do_tty_forward, ffl_do_tty_forward); + INIT_WORK(&ctx->start_tx, ffl_do_start_tx); + INIT_WORK(&ctx->stop_tx, ffl_do_stop_tx); + ctx->tx.timer.function = ffl_stop_tx; ctx->rx.timer.function = ffl_rx_forward_retry; @@ -3816,8 +3876,8 @@ static int __init ffl_driver_probe(struct device *dev) #endif /* Allocate FIFO in background */ - (void) queue_work(ffl_recycle_wq, &ctx->tx.increase_pool); - (void) queue_work(ffl_recycle_wq, &ctx->rx.increase_pool); + (void) queue_work(ffl_tx_wq, &ctx->tx.increase_pool); + (void) queue_work(ffl_rx_wq, &ctx->rx.increase_pool); dev_dbg(dev, "ffl_driver_probe completed\n"); return 0; @@ -3911,46 +3971,29 @@ static int __init ffl_driver_init(void) for (i = 0; i < FFL_TTY_MAX_LINES; i++) ffl_drv.ctx[i] = NULL; - /* Create the workqueue for allocating frames */ - ffl_recycle_wq = create_singlethread_workqueue(DRVNAME "-rc"); - if (unlikely(!ffl_recycle_wq)) { - pr_err(DRVNAME ": unable to create pool-handling workqueue"); + /* Create a single thread workqueue for serialising tx background + * tasks */ + ffl_tx_wq = alloc_workqueue(DRVNAME "-tq", WQ_UNBOUND, 1); + if (unlikely(!ffl_tx_wq)) { + pr_err(DRVNAME ": unable to create FFL TX-side workqueue"); err = -EFAULT; - goto out; + goto no_tx_wq; } - /* Create the workqueue for tx hangup */ - ffl_hangup_wq = create_freezable_workqueue(DRVNAME "-hg"); - if (unlikely(!ffl_hangup_wq)) { - pr_err(DRVNAME ": unable to create tx hangup workqueue"); + /* Create a high priority workqueue for rx background tasks */ + ffl_rx_wq = alloc_workqueue(DRVNAME "-rq", WQ_HIGHPRI, 1); + if (unlikely(!ffl_rx_wq)) { + 
pr_err(DRVNAME ": unable to create FFL RX-side workqueue"); err = -EFAULT; - goto no_tx_hangup_wq; + goto no_rx_wq; } -#ifdef USE_IPC_ERROR_RECOVERY - /* Create the workqueue for TX drain recovery */ - ffl_tx_drain_wq = create_singlethread_workqueue(DRVNAME "-td"); - if (unlikely(!ffl_tx_drain_wq)) { - pr_err(DRVNAME ": unable to create rx error workqueue"); + /* Create a single thread workqueue for hangup background tasks */ + ffl_hu_wq = alloc_workqueue(DRVNAME "-hq", WQ_UNBOUND, 1); + if (unlikely(!ffl_hu_wq)) { + pr_err(DRVNAME ": unable to create FFL hangup workqueue"); err = -EFAULT; - goto no_tx_drain_wq; - } - - /* Create the workqueue for RX drain recovery */ - ffl_rx_drain_wq = create_singlethread_workqueue(DRVNAME "-rd"); - if (unlikely(!ffl_rx_drain_wq)) { - pr_err(DRVNAME ": unable to create rx break workqueue"); - err = -EFAULT; - goto no_rx_drain_wq; - } -#endif - - /* Create the workqueue for TTY line discipline buffer flush */ - ffl_forwarding_wq = create_singlethread_workqueue(DRVNAME "-fw"); - if (unlikely(!ffl_forwarding_wq)) { - pr_err(DRVNAME ": unable to create TTY forwarding workqueue"); - err = -EFAULT; - goto no_ffl_forwarding_wq; + goto no_hu_wq; } /* Allocate the TTY interface */ @@ -4004,18 +4047,12 @@ no_tty_driver_registration: put_tty_driver(ffl_drv.tty_drv); ffl_drv.tty_drv = NULL; no_tty_driver_allocation: - destroy_workqueue(ffl_forwarding_wq); -no_ffl_forwarding_wq: -#ifdef USE_IPC_ERROR_RECOVERY - destroy_workqueue(ffl_rx_drain_wq); -no_rx_drain_wq: - destroy_workqueue(ffl_tx_drain_wq); -no_tx_drain_wq: -#endif - destroy_workqueue(ffl_hangup_wq); -no_tx_hangup_wq: - destroy_workqueue(ffl_recycle_wq); -out: + destroy_workqueue(ffl_hu_wq); +no_hu_wq: + destroy_workqueue(ffl_rx_wq); +no_rx_wq: + destroy_workqueue(ffl_tx_wq); +no_tx_wq: return err; } module_init(ffl_driver_init); @@ -4031,13 +4068,9 @@ static void __exit ffl_driver_exit(void) put_tty_driver(ffl_drv.tty_drv); ffl_drv.tty_drv = NULL; - destroy_workqueue(ffl_forwarding_wq); -#ifdef USE_IPC_ERROR_RECOVERY - destroy_workqueue(ffl_rx_drain_wq); - destroy_workqueue(ffl_tx_drain_wq); -#endif - destroy_workqueue(ffl_hangup_wq); - destroy_workqueue(ffl_recycle_wq); + destroy_workqueue(ffl_hu_wq); + destroy_workqueue(ffl_rx_wq); + destroy_workqueue(ffl_tx_wq); pr_debug(DRVNAME ": driver removed"); } diff --git a/drivers/hsi/controllers/intel_mid_hsi.c b/drivers/hsi/controllers/intel_mid_hsi.c index d360c0a..e0eac6b 100644 --- a/drivers/hsi/controllers/intel_mid_hsi.c +++ b/drivers/hsi/controllers/intel_mid_hsi.c @@ -126,6 +126,11 @@ enum { }; enum { + HSI_PM_ASYNC, + HSI_PM_SYNC +}; + +enum { RX_SLEEPING, RX_READY, RX_CAN_SLEEP @@ -301,7 +306,8 @@ struct intel_xfer_ctx { * @arb_cfg: current arbiter priority configuration register * @sz_cfg: current program1 configuration register * @ip_freq: HSI controller IP frequency in kHz - * @brk_us_delay: Minimal BREAK sequence delay in us + * @brk_us_delay: minimal BREAK sequence delay in us + * @ip_resumed: event signalling that the IP has actually been resumed * @stay_awake: Android wake lock for preventing entering low power mode * @dir: debugfs HSI root directory */ @@ -354,6 +360,7 @@ struct intel_controller { /* HSI controller IP frequency */ unsigned int ip_freq; unsigned int brk_us_delay; + wait_queue_head_t ip_resumed; #ifdef CONFIG_HAS_WAKELOCK /* Android PM support */ struct wake_lock stay_awake; @@ -460,56 +467,65 @@ static inline void hsi_enable_error_interrupt(void __iomem *ctrl, iowrite32(irq_enable, 
ARASAN_HSI_ERROR_INTERRUPT_SIGNAL_ENABLE(ctrl)); } - /** - * hsi_pm_wake_lock - acquire the wake lock whenever necessary + * hsi_is_resumed - checks if the HSI controller IP has been resumed or not * @intel_hsi: Intel HSI controller reference + * + * This helper function is returning a non-zero value if the HSI IP has been + * resumed or 0 if still suspended. */ -static inline void hsi_pm_wake_lock(struct intel_controller *intel_hsi) +static __must_check int hsi_is_resumed(struct intel_controller *intel_hsi) __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock) { -#ifdef CONFIG_HAS_WAKELOCK unsigned long flags; + int ip_resumed; spin_lock_irqsave(&intel_hsi->hw_lock, flags); - if (intel_hsi->suspend_state != DEVICE_READY) - wake_lock(&intel_hsi->stay_awake); + ip_resumed = (intel_hsi->suspend_state == DEVICE_READY); spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); -#endif + + return ip_resumed; } /** * hsi_pm_runtime_get - getting a PM runtime reference to the HSI controller * @intel_hsi: Intel HSI controller reference + * @mode: mode of the runtime PM reference get * * This function is also getting the wake lock should wake lock is used. */ -static void hsi_pm_runtime_get(struct intel_controller *intel_hsi) +static void hsi_pm_runtime_get(struct intel_controller *intel_hsi, int mode) { - hsi_pm_wake_lock(intel_hsi); - pm_runtime_get(intel_hsi->pdev); -} +#ifdef CONFIG_HAS_WAKELOCK + unsigned long flags; +#endif -/** - * hsi_pm_runtime_get_sync - getting a synchronised PM runtime reference to the - * HSI controller - * @intel_hsi: Intel HSI controller reference - * - * This function is also getting the wake lock should wake lock is used. - */ -static void hsi_pm_runtime_get_sync(struct intel_controller *intel_hsi) -{ - hsi_pm_wake_lock(intel_hsi); - pm_runtime_get_sync(intel_hsi->pdev); + might_sleep_if(mode == HSI_PM_SYNC); + +#ifdef CONFIG_HAS_WAKELOCK + spin_lock_irqsave(&intel_hsi->hw_lock, flags); + if (intel_hsi->suspend_state != DEVICE_READY) + wake_lock(&intel_hsi->stay_awake); + spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); +#endif + + if (mode == HSI_PM_ASYNC) { + pm_runtime_get(intel_hsi->pdev); + } else { + pm_runtime_get_sync(intel_hsi->pdev); + wait_event_interruptible(intel_hsi->ip_resumed, + hsi_is_resumed(intel_hsi)); + } } /** * assert_acwake - asserting the ACWAKE line status * @intel_hsi: Intel HSI controller reference + * @mode: mode of the ACWAKE assertion * * The actual ACWAKE assertion happens when tx_state was 0. 
*/ -static void assert_acwake(struct intel_controller *intel_hsi) +static void assert_acwake(struct intel_controller *intel_hsi, int mode) __acquires(&intel_hsi->hw_lock) __releases(&intel_hsi->hw_lock) { #ifndef PREVENT_ACWAKE_TOGGLING @@ -533,7 +549,7 @@ static void assert_acwake(struct intel_controller *intel_hsi) spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); if (do_wakeup) - hsi_pm_runtime_get(intel_hsi); + hsi_pm_runtime_get(intel_hsi, mode); } /** @@ -675,7 +691,7 @@ static int has_enabled_acready(struct intel_controller *intel_hsi) spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); if (do_wakeup) - hsi_pm_runtime_get(intel_hsi); + hsi_pm_runtime_get(intel_hsi, HSI_PM_ASYNC); return do_wakeup; } @@ -988,6 +1004,8 @@ static int hsi_ctrl_resume(struct intel_controller *intel_hsi) } spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); + wake_up(&intel_hsi->ip_resumed); + return err; } @@ -1265,15 +1283,20 @@ static void hsi_ctrl_clean_reset(struct intel_controller *intel_hsi) /* Disable the interrupt line */ disable_irq(intel_hsi->irq); + /* Deassert ACWAKE and ACREADY as shutting down */ + while (deassert_acwake(intel_hsi)) + ; + force_disable_acready(intel_hsi); + + /* Remove the CAWAKE poll timer */ + del_timer_sync(&intel_hsi->cawake_poll); + /* Disable (and flush) all tasklets */ tasklet_disable(&intel_hsi->isr_tasklet); tasklet_disable(&intel_hsi->fwd_tasklet); spin_lock_irqsave(&intel_hsi->hw_lock, flags); - /* Remove the CAWAKE poll timer */ - del_timer_sync(&intel_hsi->cawake_poll); - /* If suspended then there is nothing to do on the hardware side */ if (intel_hsi->suspend_state != DEVICE_READY) goto exit_clean_reset; @@ -1291,18 +1314,12 @@ static void hsi_ctrl_clean_reset(struct intel_controller *intel_hsi) iowrite32(0, ARASAN_HSI_PROGRAM(ctrl)); exit_clean_reset: - if (intel_hsi->rx_state == RX_READY) - intel_hsi->rx_state = RX_CAN_SLEEP; intel_hsi->dma_running = 0; intel_hsi->irq_status = 0; intel_hsi->err_status = 0; intel_hsi->prg_cfg = ARASAN_RESET; spin_unlock_irqrestore(&intel_hsi->hw_lock, flags); - while (deassert_acwake(intel_hsi)) - ; - (void) has_disabled_acready(intel_hsi); - /* Re-enable all tasklets */ tasklet_enable(&intel_hsi->fwd_tasklet); tasklet_enable(&intel_hsi->isr_tasklet); @@ -1368,7 +1385,7 @@ static int hsi_debug_show(struct seq_file *m, void *p) void __iomem *ctrl = intel_hsi->ctrl_io; int ch; - hsi_pm_runtime_get_sync(intel_hsi); + hsi_pm_runtime_get(intel_hsi, HSI_PM_SYNC); seq_printf(m, "REVISION\t\t: 0x%08x\n", ioread32(ARASAN_HSI_VERSION(ctrl))); for (ch = 0; ch < DWAHB_CHAN_CNT; ch++) { @@ -1429,7 +1446,7 @@ static int hsi_debug_dma_show(struct seq_file *m, void *p) void __iomem *dma = intel_hsi->dma_io; int i; - hsi_pm_runtime_get_sync(intel_hsi); + hsi_pm_runtime_get(intel_hsi, HSI_PM_SYNC); for (i = 0; i < DWAHB_CHAN_CNT; i++) { HSI_DEBUG_GDD_PRINT2(SAR, i); HSI_DEBUG_GDD_PRINT2(DAR, i); @@ -1795,7 +1812,7 @@ static void hsi_transfer(struct intel_controller *intel_hsi, int tx_not_rx, /* Assert ACWAKE (deasserted on complete or destruct) */ if (tx_not_rx) - assert_acwake(intel_hsi); + assert_acwake(intel_hsi, HSI_PM_ASYNC); else unforce_disable_acready(intel_hsi); @@ -1985,9 +2002,9 @@ static int hsi_async_break(struct hsi_msg *msg) if (unlikely(!is_in_tx_frame_mode(intel_hsi))) return -EINVAL; - hsi_pm_runtime_get_sync(intel_hsi); if (msg->ttype == HSI_MSG_WRITE) { - assert_acwake(intel_hsi); + hsi_pm_runtime_get(intel_hsi, HSI_PM_SYNC); + assert_acwake(intel_hsi, HSI_PM_ASYNC); spin_lock_irqsave(&intel_hsi->hw_lock, flags); 
intel_hsi->clk_cfg |= ARASAN_TX_BREAK; iowrite32(intel_hsi->clk_cfg, ARASAN_HSI_CLOCK_CONTROL(ctrl)); @@ -2001,12 +2018,12 @@ static int hsi_async_break(struct hsi_msg *msg) msg->status = HSI_STATUS_COMPLETED; msg->complete(msg); (void) deassert_acwake(intel_hsi); + pm_runtime_put(intel_hsi->pdev); } else { spin_lock_irqsave(&intel_hsi->sw_lock, flags); list_add_tail(&msg->link, &intel_hsi->brk_queue); spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); } - pm_runtime_put(intel_hsi->pdev); return 0; } @@ -2400,7 +2417,7 @@ static int hsi_mid_flush(struct hsi_client *cl) spin_unlock_irqrestore(&intel_hsi->sw_lock, flags); /* Wake the device not to react on the CAWAKE and to access hw */ - hsi_pm_runtime_get_sync(intel_hsi); + hsi_pm_runtime_get(intel_hsi, HSI_PM_SYNC); /* Disable the ACREADY line not to be disturbed during flush */ force_disable_acready(intel_hsi); @@ -2472,7 +2489,7 @@ static int hsi_mid_start_tx(struct hsi_client *cl) struct hsi_port *port = hsi_get_port(cl); struct intel_controller *intel_hsi = hsi_port_drvdata(port); - assert_acwake(intel_hsi); + assert_acwake(intel_hsi, HSI_PM_SYNC); return 0; } @@ -2860,6 +2877,13 @@ static irqreturn_t hsi_isr(int irq, void *hsi) if (unlikely(intel_hsi->suspend_state != DEVICE_READY)) { intel_hsi->suspend_state = DEVICE_AND_IRQ_SUSPENDED; disable_irq_nosync(intel_hsi->irq); + + /* Ignore this wakeup signal if not configured */ + if (intel_hsi->prg_cfg & ARASAN_RESET) { + spin_unlock(&intel_hsi->hw_lock); + return IRQ_HANDLED; + } + intel_hsi->irq_status |= ARASAN_IRQ_RX_WAKE; goto exit_irq; } @@ -3054,6 +3078,8 @@ static int hsi_controller_init(struct intel_controller *intel_hsi) spin_lock_init(&intel_hsi->sw_lock); spin_lock_init(&intel_hsi->hw_lock); + init_waitqueue_head(&intel_hsi->ip_resumed); + #ifdef CONFIG_HAS_WAKELOCK wake_lock_init(&intel_hsi->stay_awake, WAKE_LOCK_SUSPEND, "hsi_wakelock"); -- 2.7.4
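
For readers unfamiliar with the wait-for-resume handshake used above, here is a minimal, self-contained sketch of the pattern (all demo_* names are illustrative and not part of the driver; only the waitqueue handshake is shown, without the wake lock and pm_runtime handling that the real hsi_pm_runtime_get() also performs):

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

enum { DEMO_DEVICE_SUSPENDED, DEMO_DEVICE_READY };

struct demo_ctrl {
	spinlock_t hw_lock;
	int suspend_state;
	wait_queue_head_t ip_resumed;
};

static struct demo_ctrl demo;

/* Counterpart of hsi_is_resumed(): sample the suspend state under the lock */
static int demo_is_resumed(struct demo_ctrl *c)
{
	unsigned long flags;
	int resumed;

	spin_lock_irqsave(&c->hw_lock, flags);
	resumed = (c->suspend_state == DEMO_DEVICE_READY);
	spin_unlock_irqrestore(&c->hw_lock, flags);

	return resumed;
}

/* Counterpart of hsi_ctrl_resume(): runs from the runtime PM resume path */
static void demo_resume(struct demo_ctrl *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->hw_lock, flags);
	c->suspend_state = DEMO_DEVICE_READY;
	spin_unlock_irqrestore(&c->hw_lock, flags);

	/* Release any caller blocked in demo_pm_get_sync() */
	wake_up(&c->ip_resumed);
}

/* Counterpart of the HSI_PM_SYNC branch of hsi_pm_runtime_get() */
static void demo_pm_get_sync(struct demo_ctrl *c)
{
	/* The real driver calls pm_runtime_get_sync() here, then waits until
	 * the resume callback has really completed. */
	wait_event_interruptible(c->ip_resumed, demo_is_resumed(c));
}

static int __init demo_init(void)
{
	spin_lock_init(&demo.hw_lock);
	demo.suspend_state = DEMO_DEVICE_SUSPENDED;
	init_waitqueue_head(&demo.ip_resumed);

	/* In the driver the resume path and the getter run concurrently;
	 * here they are simply called in sequence to exercise the code. */
	demo_resume(&demo);
	demo_pm_get_sync(&demo);
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");

The key point is that the waiter re-checks the suspend state under the same lock the resume path uses to update it, so a wake-up cannot be missed between pm_runtime_get_sync() returning and the IP actually being ready.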
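
Similarly, the serialised start TX / stop TX handling added to hsi_ffl_tty.c can be reduced to the sketch below (again an illustration under simplifying assumptions: the demo_* names, the empty work bodies and the fixed delays stand in for ffl_do_start_tx()/ffl_do_stop_tx(); the single-threaded workqueue with max_active = 1 provides the ordering that makes the udelay()-based pulse shaping safe):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/delay.h>

#define DEMO_ACWAKE_TO_DATA_UDELAY 100	/* us, as ACWAKE_TO_DATA_MINIMAL_UDELAY */
#define DEMO_ACWAKE_PULSE_UDELAY 800	/* us, as ACWAKE_MINIMAL_PULSE_UDELAY */

static struct workqueue_struct *demo_tx_wq;

static void demo_do_start_tx(struct work_struct *work)
{
	/* hsi_start_tx() would be called here (it may sleep, hence a work) */
	udelay(DEMO_ACWAKE_TO_DATA_UDELAY);	/* no data too soon after ACWAKE */
	/* then push the frames waiting in the TX FIFO */
}

static void demo_do_stop_tx(struct work_struct *work)
{
	/* hsi_stop_tx() would be called here */
	udelay(DEMO_ACWAKE_PULSE_UDELAY);	/* enforce a minimal ACWAKE pulse */
}

static DECLARE_WORK(demo_start_tx, demo_do_start_tx);
static DECLARE_WORK(demo_stop_tx, demo_do_stop_tx);

static int __init demo_init(void)
{
	/* max_active = 1 keeps start/stop strictly ordered, as in ffl_tx_wq */
	demo_tx_wq = alloc_workqueue("demo-tx", WQ_UNBOUND, 1);
	if (!demo_tx_wq)
		return -ENOMEM;

	/* A TX request from atomic context just queues the work ... */
	queue_work(demo_tx_wq, &demo_start_tx);
	/* ... and the matching stop is serialised behind it */
	queue_work(demo_tx_wq, &demo_stop_tx);
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	flush_workqueue(demo_tx_wq);
	destroy_workqueue(demo_tx_wq);
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");

Queuing both requests on the same max_active = 1 workqueue is what replaces the old ktime_stop_tx bookkeeping: a stop followed by a start can never overlap, so the minimal ACWAKE pulse and the ACWAKE-to-data delay fall out of simple udelay() calls in the work handlers.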