// SPDX-License-Identifier: GPL-2.0
/*
 * MHI Endpoint bus stack
 *
 * Copyright (C) 2022 Linaro Ltd.
 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mhi_ep.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include "internal.h"

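/* Polling interval (ms) and maximum poll count while waiting for the host to set M0 state */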
#define M0_WAIT_DELAY_MS	100
#define M0_WAIT_COUNT		100

static DEFINE_IDA(mhi_ep_cntrl_ida);

static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
static int mhi_ep_destroy_device(struct device *dev, void *data);

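/*
 * Send an event to the host over the given event ring, starting the ring on
 * first use. The host interrupt is raised only if the BEI flag was not set
 * in the TRE being acknowledged.
 */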
static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
			     struct mhi_ring_element *el, bool bei)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	union mhi_ep_ring_ctx *ctx;
	struct mhi_ep_ring *ring;
	int ret;

	mutex_lock(&mhi_cntrl->event_lock);
	ring = &mhi_cntrl->mhi_event[ring_idx].ring;
	ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx];
	if (!ring->started) {
		ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx);
		if (ret) {
			dev_err(dev, "Error starting event ring (%u)\n", ring_idx);
			goto err_unlock;
		}
	}

	/* Add element to the event ring */
	ret = mhi_ep_ring_add_element(ring, el);
	if (ret) {
		dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx);
		goto err_unlock;
	}

	mutex_unlock(&mhi_cntrl->event_lock);

	/*
	 * Raise IRQ to host only if the BEI flag is not set in TRE. Host might
	 * set this flag for interrupt moderation as per MHI protocol.
	 */
	if (!bei)
		mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);

	return 0;

err_unlock:
	mutex_unlock(&mhi_cntrl->event_lock);

	return ret;
}

static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
					struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code)
{
	struct mhi_ring_element *event;
	int ret;

	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
	if (!event)
		return -ENOMEM;

	event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre));
	event->dword[0] = MHI_TRE_EV_DWORD0(code, len);
	event->dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);

	ret = mhi_ep_send_event(mhi_cntrl, ring->er_index, event, MHI_TRE_DATA_GET_BEI(tre));
	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

	return ret;
}

int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
{
	struct mhi_ring_element *event;
	int ret;

	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
	if (!event)
		return -ENOMEM;

	event->dword[0] = MHI_SC_EV_DWORD0(state);
	event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);

	ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

	return ret;
}

int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env)
{
	struct mhi_ring_element *event;
	int ret;

	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
	if (!event)
		return -ENOMEM;

	event->dword[0] = MHI_EE_EV_DWORD0(exec_env);
	event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);

	ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

	return ret;
}

static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
{
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
	struct mhi_ring_element *event;
	int ret;

	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
	if (!event)
		return -ENOMEM;

	event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element));
	event->dword[0] = MHI_CC_EV_DWORD0(code);
	event->dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);

	ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0);
	kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event);

	return ret;
}

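/*
 * Process a single command ring element issued by the host (channel START,
 * STOP or RESET), update the cached channel context and reply with a
 * command completion event.
 */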
static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	struct mhi_ep_ring *ch_ring;
	u32 tmp, ch_id;
	int ret;

	ch_id = MHI_TRE_GET_CMD_CHID(el);

	/* Check if the channel is supported by the controller */
	if ((ch_id >= mhi_cntrl->max_chan) || !mhi_cntrl->mhi_chan[ch_id].name) {
		dev_dbg(dev, "Channel (%u) not supported!\n", ch_id);
		return -ENODEV;
	}

	mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
	ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;

	switch (MHI_TRE_GET_CMD_TYPE(el)) {
	case MHI_PKT_TYPE_START_CHAN_CMD:
		dev_dbg(dev, "Received START command for channel (%u)\n", ch_id);

		mutex_lock(&mhi_chan->lock);
		/* Initialize and configure the corresponding channel ring */
		if (!ch_ring->started) {
			ret = mhi_ep_ring_start(mhi_cntrl, ch_ring,
				(union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]);
			if (ret) {
				dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id);
				ret = mhi_ep_send_cmd_comp_event(mhi_cntrl,
								 MHI_EV_CC_UNDEFINED_ERR);
				if (ret)
					dev_err(dev, "Error sending completion event: %d\n", ret);

				goto err_unlock;
			}
		}

		/* Set channel state to RUNNING */
		mhi_chan->state = MHI_CH_STATE_RUNNING;
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
		mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

		ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
		if (ret) {
			dev_err(dev, "Error sending command completion event (%u)\n",
				MHI_EV_CC_SUCCESS);
			goto err_unlock;
		}

		mutex_unlock(&mhi_chan->lock);

		/*
		 * Create MHI device only during UL channel start. Since the MHI
		 * channels operate in a pair, we'll associate both UL and DL
		 * channels to the same device.
		 *
		 * We also need to check for mhi_dev != NULL because, the host
		 * will issue START_CHAN command during resume and we don't
		 * destroy the device during suspend.
		 */
		if (!(ch_id % 2) && !mhi_chan->mhi_dev) {
			ret = mhi_ep_create_device(mhi_cntrl, ch_id);
			if (ret) {
				dev_err(dev, "Error creating device for channel (%u)\n", ch_id);
				mhi_ep_handle_syserr(mhi_cntrl);
				return ret;
			}
		}

		/* Finally, enable DB for the channel */
		mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id);
		break;
	case MHI_PKT_TYPE_STOP_CHAN_CMD:
		dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
		if (!ch_ring->started) {
			dev_err(dev, "Channel (%u) not opened\n", ch_id);
			return -ENODEV;
		}

		mutex_lock(&mhi_chan->lock);
		/* Disable DB for the channel */
		mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id);

		/* Send channel disconnect status to client drivers */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		/* Set channel state to STOP */
		mhi_chan->state = MHI_CH_STATE_STOP;
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP);
		mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

		ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
		if (ret) {
			dev_err(dev, "Error sending command completion event (%u)\n",
				MHI_EV_CC_SUCCESS);
			goto err_unlock;
		}

		mutex_unlock(&mhi_chan->lock);
		break;
	case MHI_PKT_TYPE_RESET_CHAN_CMD:
		dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id);
		if (!ch_ring->started) {
			dev_err(dev, "Channel (%u) not opened\n", ch_id);
			return -ENODEV;
		}

		mutex_lock(&mhi_chan->lock);
		/* Stop and reset the transfer ring */
		mhi_ep_ring_reset(mhi_cntrl, ch_ring);

		/* Send channel disconnect status to client driver */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		/* Set channel state to DISABLED */
		mhi_chan->state = MHI_CH_STATE_DISABLED;
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
		mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);

		ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
		if (ret) {
			dev_err(dev, "Error sending command completion event (%u)\n",
				MHI_EV_CC_SUCCESS);
			goto err_unlock;
		}

		mutex_unlock(&mhi_chan->lock);
		break;
	default:
		dev_err(dev, "Invalid command received: %lu for channel (%u)\n",
			MHI_TRE_GET_CMD_TYPE(el), ch_id);
		return -EINVAL;
	}

	return 0;

err_unlock:
	mutex_unlock(&mhi_chan->lock);

	return ret;
}

bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir)
{
	struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan :
								mhi_dev->ul_chan;
	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;

	return !!(ring->rd_offset == ring->wr_offset);
}
EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);

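/*
 * Copy data from the host buffers described by the transfer ring elements
 * into the local buffer, keeping track of partially consumed TREs so that
 * a TD larger than the buffer can be drained across multiple calls.
 */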
static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
			       struct mhi_ep_ring *ring,
			       struct mhi_result *result,
			       u32 len)
{
	struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	size_t tr_len, read_offset, write_offset;
	struct mhi_ring_element *el;
	bool tr_done = false;
	void *write_addr;
	u64 read_addr;
	u32 buf_left;
	int ret;

	buf_left = len;

	do {
		/* Don't process the transfer ring if the channel is not in RUNNING state */
		if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
			dev_err(dev, "Channel not available\n");
			return -ENODEV;
		}

		el = &ring->ring_cache[ring->rd_offset];

		/* Check if there is data pending to be read from previous read operation */
		if (mhi_chan->tre_bytes_left) {
			dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
			tr_len = min(buf_left, mhi_chan->tre_bytes_left);
		} else {
			mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
			mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
			mhi_chan->tre_bytes_left = mhi_chan->tre_size;

			tr_len = min(buf_left, mhi_chan->tre_size);
		}

		read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
		write_offset = len - buf_left;
		read_addr = mhi_chan->tre_loc + read_offset;
		write_addr = result->buf_addr + write_offset;

		dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id);
		ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len);
		if (ret < 0) {
			dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n");
			return ret;
		}

		buf_left -= tr_len;
		mhi_chan->tre_bytes_left -= tr_len;

		/*
		 * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been
		 * read completely:
		 *
		 * 1. Send completion event to the host based on the flags set in TRE.
		 * 2. Increment the local read offset of the transfer ring.
		 */
		if (!mhi_chan->tre_bytes_left) {
			/*
			 * The host will split the data packet into multiple TREs if it can't fit
			 * the packet in a single TRE. In that case, CHAIN flag will be set by the
			 * host for all TREs except the last one.
			 */
			if (MHI_TRE_DATA_GET_CHAIN(el)) {
				/*
				 * IEOB (Interrupt on End of Block) flag will be set by the host if
				 * it expects the completion event for all TREs of a TD.
				 */
				if (MHI_TRE_DATA_GET_IEOB(el)) {
					ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
								     MHI_TRE_DATA_GET_LEN(el),
								     MHI_EV_CC_EOB);
					if (ret < 0) {
						dev_err(&mhi_chan->mhi_dev->dev,
							"Error sending transfer compl. event\n");
						return ret;
					}
				}
			} else {
				/*
				 * IEOT (Interrupt on End of Transfer) flag will be set by the host
				 * for the last TRE of the TD and expects the completion event for
				 * the same.
				 */
				if (MHI_TRE_DATA_GET_IEOT(el)) {
					ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el,
								     MHI_TRE_DATA_GET_LEN(el),
								     MHI_EV_CC_EOT);
					if (ret < 0) {
						dev_err(&mhi_chan->mhi_dev->dev,
							"Error sending transfer compl. event\n");
						return ret;
					}
				}

				tr_done = true;
			}

			mhi_ep_ring_inc_index(ring);
		}

		result->bytes_xferd += tr_len;
	} while (buf_left && !tr_done);

	return 0;
}

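/*
 * Process a channel ring: for DL channels just notify the client driver so
 * it can queue data towards the host, for UL channels read the pending data
 * and pass it to the client driver's xfer_cb until the ring is drained.
 */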
static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el)
{
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
	struct mhi_result result = {};
	u32 len = MHI_EP_DEFAULT_MTU;
	struct mhi_ep_chan *mhi_chan;
	int ret;

	mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];

	/*
	 * Bail out if transfer callback is not registered for the channel.
	 * This is most likely due to the client driver not loaded at this point.
	 */
	if (!mhi_chan->xfer_cb) {
		dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n");
		return -ENODEV;
	}

	if (ring->ch_id % 2) {
		/* DL channel */
		result.dir = mhi_chan->dir;
		mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
	} else {
		/* UL channel */
		result.buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
		if (!result.buf_addr)
			return -ENOMEM;

		do {
			ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
			if (ret < 0) {
				dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
				kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
				return ret;
			}

			result.dir = mhi_chan->dir;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
			result.bytes_xferd = 0;
			memset(result.buf_addr, 0, len);

			/* Read until the ring becomes empty */
		} while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));

		kmem_cache_free(mhi_cntrl->tre_buf_cache, result.buf_addr);
	}

	return 0;
}

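/*
 * Write the SKB payload to the host memory described by the TREs queued on
 * the DL transfer ring. A packet ending within a TRE is completed with EOT;
 * a packet spilling over to the next TRE is completed with OVERFLOW so that
 * the host can fix up the packet boundary.
 */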
/* TODO: Handle partially formed TDs */
int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
{
	struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan;
	struct device *dev = &mhi_chan->mhi_dev->dev;
	struct mhi_ring_element *el;
	u32 buf_left, read_offset;
	struct mhi_ep_ring *ring;
	enum mhi_ev_ccs code;
	void *read_addr;
	u64 write_addr;
	size_t tr_len;
	u32 tre_len;
	int ret;

	buf_left = skb->len;
	ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;

	mutex_lock(&mhi_chan->lock);

	do {
		/* Don't process the transfer ring if the channel is not in RUNNING state */
		if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
			dev_err(dev, "Channel not available\n");
			ret = -ENODEV;
			goto err_exit;
		}

		if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) {
			dev_err(dev, "TRE not available!\n");
			ret = -ENOSPC;
			goto err_exit;
		}

		el = &ring->ring_cache[ring->rd_offset];
		tre_len = MHI_TRE_DATA_GET_LEN(el);

		tr_len = min(buf_left, tre_len);
		read_offset = skb->len - buf_left;
		read_addr = skb->data + read_offset;
		write_addr = MHI_TRE_DATA_GET_PTR(el);

		dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id);
		ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len);
		if (ret < 0) {
			dev_err(dev, "Error writing to the channel\n");
			goto err_exit;
		}

		buf_left -= tr_len;
		/*
		 * For all TREs queued by the host for DL channel, only the EOT flag will be set.
		 * If the packet doesn't fit into a single TRE, send the OVERFLOW event to
		 * the host so that the host can adjust the packet boundary to next TREs. Else send
		 * the EOT event to the host indicating the packet boundary.
		 */
		if (buf_left)
			code = MHI_EV_CC_OVERFLOW;
		else
			code = MHI_EV_CC_EOT;

		ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code);
		if (ret) {
			dev_err(dev, "Error sending transfer completion event\n");
			goto err_exit;
		}

		mhi_ep_ring_inc_index(ring);
	} while (buf_left);

	mutex_unlock(&mhi_chan->lock);

	return 0;

err_exit:
	mutex_unlock(&mhi_chan->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_queue_skb);

static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
{
	size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Update the number of event rings (NER) programmed by the host */
	mhi_ep_mmio_update_ner(mhi_cntrl);

	dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n",
		 mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings);

	ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
	ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
	cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;

	/* Get the channel context base pointer from host */
	mhi_ep_mmio_get_chc_base(mhi_cntrl);

	/* Allocate and map memory for caching host channel context */
	ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa,
				   &mhi_cntrl->ch_ctx_cache_phys,
				   (void __iomem **) &mhi_cntrl->ch_ctx_cache,
				   ch_ctx_host_size);
	if (ret) {
		dev_err(dev, "Failed to allocate and map ch_ctx_cache\n");
		return ret;
	}

	/* Get the event context base pointer from host */
	mhi_ep_mmio_get_erc_base(mhi_cntrl);

	/* Allocate and map memory for caching host event context */
	ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa,
				   &mhi_cntrl->ev_ctx_cache_phys,
				   (void __iomem **) &mhi_cntrl->ev_ctx_cache,
				   ev_ctx_host_size);
	if (ret) {
		dev_err(dev, "Failed to allocate and map ev_ctx_cache\n");
		goto err_ch_ctx;
	}

	/* Get the command context base pointer from host */
	mhi_ep_mmio_get_crc_base(mhi_cntrl);

	/* Allocate and map memory for caching host command context */
	ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa,
				   &mhi_cntrl->cmd_ctx_cache_phys,
				   (void __iomem **) &mhi_cntrl->cmd_ctx_cache,
				   cmd_ctx_host_size);
	if (ret) {
		dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n");
		goto err_ev_ctx;
	}

	/* Initialize command ring */
	ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring,
				(union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache);
	if (ret) {
		dev_err(dev, "Failed to start the command ring\n");
		goto err_cmd_ctx;
	}

	return ret;

err_cmd_ctx:
	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);

err_ev_ctx:
	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);

err_ch_ctx:
	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);

	return ret;
}

static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
{
	size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size;

	ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan;
	ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings;
	cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS;

	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size);

	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size);

	mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys,
			      (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size);
}

static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl)
{
	/*
	 * Doorbell interrupts are enabled when the corresponding channel gets started.
	 * Enabling all interrupts here triggers spurious irqs as some of the interrupts
	 * associated with hw channels always get triggered.
	 */
	mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl);
	mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl);
}

static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	bool mhi_reset;
	u32 count = 0;
	int ret;

	/* Wait for Host to set the M0 state */
	do {
		msleep(M0_WAIT_DELAY_MS);
		mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
		if (mhi_reset) {
			/* Clear the MHI reset if host is in reset state */
			mhi_ep_mmio_clear_reset(mhi_cntrl);
			dev_info(dev, "Detected Host reset while waiting for M0\n");
		}
		count++;
	} while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT);

	if (state != MHI_STATE_M0) {
		dev_err(dev, "Host failed to enter M0\n");
		return -ETIMEDOUT;
	}

	ret = mhi_ep_cache_host_cfg(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Failed to cache host config\n");
		return ret;
	}

	mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);

	/* Enable all interrupts now */
	mhi_ep_enable_int(mhi_cntrl);

	return 0;
}

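/*
 * Worker servicing the command ring doorbell: fetch the current write offset
 * from the host and process all new command ring elements.
 */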
static void mhi_ep_cmd_ring_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work);
	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ring_element *el;
	int ret;

	/* Update the write offset for the ring */
	ret = mhi_ep_update_wr_offset(ring);
	if (ret) {
		dev_err(dev, "Error updating write offset for ring\n");
		return;
	}

	/* Sanity check to make sure there are elements in the ring */
	if (ring->rd_offset == ring->wr_offset)
		return;

	/*
	 * Process command ring element till write offset. In case of an error, just try to
	 * process next element.
	 */
	while (ring->rd_offset != ring->wr_offset) {
		el = &ring->ring_cache[ring->rd_offset];

		ret = mhi_ep_process_cmd_ring(ring, el);
		if (ret && ret != -ENODEV)
			dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset);

		mhi_ep_ring_inc_index(ring);
	}
}

static void mhi_ep_ch_ring_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ep_ring_item *itr, *tmp;
	struct mhi_ring_element *el;
	struct mhi_ep_ring *ring;
	struct mhi_ep_chan *chan;
	unsigned long flags;
	LIST_HEAD(head);
	int ret;

	spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
	list_splice_tail_init(&mhi_cntrl->ch_db_list, &head);
	spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);

	/* Process each queued channel ring. In case of an error, just process next element. */
	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		ring = itr->ring;

		chan = &mhi_cntrl->mhi_chan[ring->ch_id];
		mutex_lock(&chan->lock);

		/*
		 * The ring could've stopped while we waited to grab the (chan->lock), so do
		 * a sanity check before going further.
		 */
		if (!ring->started) {
			mutex_unlock(&chan->lock);
			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
			continue;
		}

		/* Update the write offset for the ring */
		ret = mhi_ep_update_wr_offset(ring);
		if (ret) {
			dev_err(dev, "Error updating write offset for ring\n");
			mutex_unlock(&chan->lock);
			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
			continue;
		}

		/* Sanity check to make sure there are elements in the ring */
		if (ring->rd_offset == ring->wr_offset) {
			mutex_unlock(&chan->lock);
			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
			continue;
		}

		el = &ring->ring_cache[ring->rd_offset];

		dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
		ret = mhi_ep_process_ch_ring(ring, el);
		if (ret) {
			dev_err(dev, "Error processing ring for channel (%u): %d\n",
				ring->ch_id, ret);
			mutex_unlock(&chan->lock);
			kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
			continue;
		}

		mutex_unlock(&chan->lock);
		kmem_cache_free(mhi_cntrl->ring_item_cache, itr);
	}
}

static void mhi_ep_state_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_ep_state_transition *itr, *tmp;
	unsigned long flags;
	LIST_HEAD(head);
	int ret;

	spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
	list_splice_tail_init(&mhi_cntrl->st_transition_list, &head);
	spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		dev_dbg(dev, "Handling MHI state transition to %s\n",
			 mhi_state_str(itr->state));

		switch (itr->state) {
		case MHI_STATE_M0:
			ret = mhi_ep_set_m0_state(mhi_cntrl);
			if (ret)
				dev_err(dev, "Failed to transition to M0 state\n");
			break;
		case MHI_STATE_M3:
			ret = mhi_ep_set_m3_state(mhi_cntrl);
			if (ret)
				dev_err(dev, "Failed to transition to M3 state\n");
			break;
		default:
			dev_err(dev, "Invalid MHI state transition: %d\n", itr->state);
			break;
		}

		kfree(itr);
	}
}

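/*
 * Queue a ring item for every channel doorbell bit set in the given status
 * register and kick the channel ring worker to process them.
 */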
static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int,
				    u32 ch_idx)
{
	struct mhi_ep_ring_item *item;
	struct mhi_ep_ring *ring;
	bool work = !!ch_int;
	LIST_HEAD(head);
	u32 i;

	/* First add the ring items to a local list */
	for_each_set_bit(i, &ch_int, 32) {
		/* Channel index varies for each register: 0, 32, 64, 96 */
		u32 ch_id = ch_idx + i;

		ring = &mhi_cntrl->mhi_chan[ch_id].ring;
		item = kmem_cache_zalloc(mhi_cntrl->ring_item_cache, GFP_ATOMIC);
		if (!item)
			return;

		item->ring = ring;
		list_add_tail(&item->node, &head);
	}

	/* Now, splice the local list into ch_db_list and queue the work item */
	if (work) {
		spin_lock(&mhi_cntrl->list_lock);
		list_splice_tail_init(&head, &mhi_cntrl->ch_db_list);
		spin_unlock(&mhi_cntrl->list_lock);

		queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work);
	}
}

/*
 * Channel interrupt statuses are contained in 4 registers each of 32bit length.
 * For checking all interrupts, we need to loop through each register and then
 * check for bits set.
 */
static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
{
	u32 ch_int, ch_idx, i;

	/* Bail out if there is no channel doorbell interrupt */
	if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl))
		return;

	for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
		ch_idx = i * MHI_MASK_CH_LEN;

		/* Only process channel interrupt if the mask is enabled */
		ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask;
		if (ch_int) {
			mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx);
			mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
					  mhi_cntrl->chdb[i].status);
		}
	}
}

static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl,
					  enum mhi_state state)
{
	struct mhi_ep_state_transition *item;

	item = kzalloc(sizeof(*item), GFP_ATOMIC);
	if (!item)
		return;

	item->state = state;
	spin_lock(&mhi_cntrl->list_lock);
	list_add_tail(&item->node, &mhi_cntrl->st_transition_list);
	spin_unlock(&mhi_cntrl->list_lock);

	queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work);
}

/*
 * Interrupt handler that services interrupts raised by the host writing to
 * MHICTRL and Command ring doorbell (CRDB) registers for state change and
 * channel interrupts.
 */
static irqreturn_t mhi_ep_irq(int irq, void *data)
{
	struct mhi_ep_cntrl *mhi_cntrl = data;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	u32 int_value;
	bool mhi_reset;

	/* Acknowledge the ctrl interrupt */
	int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS);
	mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value);

	/* Check for ctrl interrupt */
	if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) {
		dev_dbg(dev, "Processing ctrl interrupt\n");
		mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
		if (mhi_reset) {
			dev_info(dev, "Host triggered MHI reset!\n");
			disable_irq_nosync(mhi_cntrl->irq);
			schedule_work(&mhi_cntrl->reset_work);
			return IRQ_HANDLED;
		}

		mhi_ep_process_ctrl_interrupt(mhi_cntrl, state);
	}

	/* Check for command doorbell interrupt */
	if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) {
		dev_dbg(dev, "Processing command doorbell interrupt\n");
		queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work);
	}

	/* Check for channel interrupts */
	mhi_ep_check_channel_interrupt(mhi_cntrl);

	return IRQ_HANDLED;
}

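/*
 * Tear down all channel, event and command rings and notify the client
 * drivers about the disconnection. Called from mhi_ep_power_down() while
 * the stack is enabled.
 */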
static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_ring *ch_ring, *ev_ring;
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	int i;

	/* Stop all the channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];
		if (!mhi_chan->ring.started)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Send channel disconnect status to client drivers */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		mhi_chan->state = MHI_CH_STATE_DISABLED;
		mutex_unlock(&mhi_chan->lock);
	}

	flush_workqueue(mhi_cntrl->wq);

	/* Destroy devices associated with all channels */
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device);

	/* Stop and reset the transfer rings */
	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];
		if (!mhi_chan->ring.started)
			continue;

		ch_ring = &mhi_cntrl->mhi_chan[i].ring;
		mutex_lock(&mhi_chan->lock);
		mhi_ep_ring_reset(mhi_cntrl, ch_ring);
		mutex_unlock(&mhi_chan->lock);
	}

	/* Stop and reset the event rings */
	for (i = 0; i < mhi_cntrl->event_rings; i++) {
		ev_ring = &mhi_cntrl->mhi_event[i].ring;
		if (!ev_ring->started)
			continue;

		mutex_lock(&mhi_cntrl->event_lock);
		mhi_ep_ring_reset(mhi_cntrl, ev_ring);
		mutex_unlock(&mhi_cntrl->event_lock);
	}

	/* Stop and reset the command ring */
	mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring);

	mhi_ep_free_host_cfg(mhi_cntrl);
	mhi_ep_mmio_mask_interrupts(mhi_cntrl);

	mhi_cntrl->enabled = false;
}

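/*
 * Worker handling the host-triggered MHI RESET: power down the stack, reset
 * MMIO and power the stack back up only when recovering from SYS_ERR.
 */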
static void mhi_ep_reset_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
	enum mhi_state cur_state;

	mhi_ep_power_down(mhi_cntrl);

	mutex_lock(&mhi_cntrl->state_lock);

	/* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
	mhi_ep_mmio_reset(mhi_cntrl);
	cur_state = mhi_cntrl->mhi_state;

	/*
	 * Only proceed further if the reset is due to SYS_ERR. The host will
	 * issue reset during shutdown also and we don't need to do re-init in
	 * that case.
	 */
	if (cur_state == MHI_STATE_SYS_ERR)
		mhi_ep_power_up(mhi_cntrl);

	mutex_unlock(&mhi_cntrl->state_lock);
}

/*
 * We don't need to do anything special other than setting the MHI SYS_ERR
 * state. The host will reset all contexts and issue MHI RESET so that we
 * could also recover from error state.
 */
void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
	if (ret)
		return;

	/* Signal host that the device went to SYS_ERR state */
	ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR);
	if (ret)
		dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret);
}

int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	/*
	 * Mask all interrupts until the state machine is ready. Interrupts will
	 * be enabled later with mhi_ep_enable().
	 */
	mhi_ep_mmio_mask_interrupts(mhi_cntrl);
	mhi_ep_mmio_init(mhi_cntrl);

	mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)),
					GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Initialize command, channel and event rings */
	mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0);
	for (i = 0; i < mhi_cntrl->max_chan; i++)
		mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i);
	for (i = 0; i < mhi_cntrl->event_rings; i++)
		mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i);

	mhi_cntrl->mhi_state = MHI_STATE_RESET;

	/* Set AMSS EE before signaling ready state */
	mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);

	/* All set, notify the host that we are ready */
	ret = mhi_ep_set_ready_state(mhi_cntrl);
	if (ret)
		goto err_free_event;

	dev_dbg(dev, "READY state notification sent to the host\n");

	ret = mhi_ep_enable(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Failed to enable MHI endpoint\n");
		goto err_free_event;
	}

	enable_irq(mhi_cntrl->irq);
	mhi_cntrl->enabled = true;

	return 0;

err_free_event:
	kfree(mhi_cntrl->mhi_event);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_power_up);

void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
{
	if (mhi_cntrl->enabled) {
		mhi_ep_abort_transfer(mhi_cntrl);
		kfree(mhi_cntrl->mhi_event);
		disable_irq(mhi_cntrl->irq);
	}
}
EXPORT_SYMBOL_GPL(mhi_ep_power_down);

void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_chan *mhi_chan;
	u32 tmp;
	int i;

	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];

		if (!mhi_chan->mhi_dev)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Skip if the channel is not currently running */
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
		if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) {
			mutex_unlock(&mhi_chan->lock);
			continue;
		}

		dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
		/* Set channel state to SUSPENDED */
		mhi_chan->state = MHI_CH_STATE_SUSPENDED;
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
		mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
		mutex_unlock(&mhi_chan->lock);
	}
}

void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_chan *mhi_chan;
	u32 tmp;
	int i;

	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];

		if (!mhi_chan->mhi_dev)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Skip if the channel is not currently suspended */
		tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg);
		if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) {
			mutex_unlock(&mhi_chan->lock);
			continue;
		}

		dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
		/* Set channel state to RUNNING */
		mhi_chan->state = MHI_CH_STATE_RUNNING;
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
		mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
		mutex_unlock(&mhi_chan->lock);
	}
}

static void mhi_ep_release_device(struct device *dev)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);

	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		mhi_dev->mhi_cntrl->mhi_dev = NULL;

	/*
	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created in mhi_ep_create_device()
	 * if the mhi_dev associated with it is NULL.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}

static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl,
						 enum mhi_device_type dev_type)
{
	struct mhi_ep_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_ep_bus_type;
	dev->release = mhi_ep_release_device;

	/* Controller device is always allocated first */
	if (dev_type == MHI_DEVICE_CONTROLLER)
		/* for MHI controller device, parent is the bus device (e.g. PCI EPF) */
		dev->parent = mhi_cntrl->cntrl_dev;
	else
		/* for MHI client devices, parent is the MHI controller device */
		dev->parent = &mhi_cntrl->mhi_dev->dev;

	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_type = dev_type;

	return mhi_dev;
}

/*
 * MHI channels are always defined in pairs with UL as the even numbered
 * channel and DL as odd numbered one. This function gets UL channel (primary)
 * as the ch_id and always looks after the next entry in channel list for
 * the corresponding DL channel (secondary).
 */
static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
{
	struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
	struct device *dev = mhi_cntrl->cntrl_dev;
	struct mhi_ep_device *mhi_dev;
	int ret;

	/* Check if the channel name is same for both UL and DL */
	if (strcmp(mhi_chan->name, mhi_chan[1].name)) {
		dev_err(dev, "UL and DL channel names are not same: (%s) != (%s)\n",
			mhi_chan->name, mhi_chan[1].name);
		return -EINVAL;
	}

	mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER);
	if (IS_ERR(mhi_dev))
		return PTR_ERR(mhi_dev);

	/* Configure primary channel */
	mhi_dev->ul_chan = mhi_chan;
	get_device(&mhi_dev->dev);
	mhi_chan->mhi_dev = mhi_dev;

	/* Configure secondary channel as well */
	mhi_chan++;
	mhi_dev->dl_chan = mhi_chan;
	get_device(&mhi_dev->dev);
	mhi_chan->mhi_dev = mhi_dev;

	/* Channel name is same for both UL and DL */
	mhi_dev->name = mhi_chan->name;
	ret = dev_set_name(&mhi_dev->dev, "%s_%s",
			   dev_name(&mhi_cntrl->mhi_dev->dev),
			   mhi_dev->name);
	if (ret) {
		put_device(&mhi_dev->dev);
		return ret;
	}

	ret = device_add(&mhi_dev->dev);
	if (ret)
		put_device(&mhi_dev->dev);

	return ret;
}

static int mhi_ep_destroy_device(struct device *dev, void *data)
{
	struct mhi_ep_device *mhi_dev;
	struct mhi_ep_cntrl *mhi_cntrl;
	struct mhi_ep_chan *ul_chan, *dl_chan;

	if (dev->bus != &mhi_ep_bus_type)
		return 0;

	mhi_dev = to_mhi_ep_device(dev);
	mhi_cntrl = mhi_dev->mhi_cntrl;

	/* Only destroy devices created for channels */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	ul_chan = mhi_dev->ul_chan;
	dl_chan = mhi_dev->dl_chan;

	if (ul_chan)
		put_device(&ul_chan->mhi_dev->dev);

	if (dl_chan)
		put_device(&dl_chan->mhi_dev->dev);

	dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n",
		 mhi_dev->name);

	/* Notify the client and remove the device from MHI bus */
	device_del(dev);
	put_device(dev);

	return 0;
}

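/*
 * Populate the per-channel bookkeeping (name, direction, lock) from the
 * channel configuration provided by the controller driver.
 */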
static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl,
			    const struct mhi_ep_cntrl_config *config)
{
	const struct mhi_ep_channel_config *ch_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	u32 chan, i;
	int ret = -EINVAL;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * Allocate max_channels supported by the MHI endpoint and populate
	 * only the defined channels
	 */
	mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan),
				      GFP_KERNEL);
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	for (i = 0; i < config->num_channels; i++) {
		struct mhi_ep_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n",
				chan, mhi_cntrl->max_chan);
			goto error_chan_cfg;
		}

		/* Bi-directional and direction less channels are not supported */
		if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) {
			dev_err(dev, "Invalid direction (%u) for channel (%u)\n",
				ch_cfg->dir, chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;
		mhi_chan->dir = ch_cfg->dir;
		mutex_init(&mhi_chan->lock);
	}

	return 0;

error_chan_cfg:
	kfree(mhi_cntrl->mhi_chan);

	return ret;
}

/*
 * Allocate channel and command rings here. Event rings will be allocated
 * in mhi_ep_power_up() as the config comes from the host.
 */
int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
			       const struct mhi_ep_cntrl_config *config)
{
	struct mhi_ep_device *mhi_dev;
	int ret;

	if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
		return -EINVAL;

	ret = mhi_ep_chan_init(mhi_cntrl, config);
	if (ret)
		return ret;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto err_free_ch;
	}

	mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
							sizeof(struct mhi_ring_element), 0,
							SLAB_CACHE_DMA, NULL);
	if (!mhi_cntrl->ev_ring_el_cache) {
		ret = -ENOMEM;
		goto err_free_cmd;
	}

	mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0,
						      SLAB_CACHE_DMA, NULL);
	if (!mhi_cntrl->tre_buf_cache) {
		ret = -ENOMEM;
		goto err_destroy_ev_ring_el_cache;
	}

	mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item",
							sizeof(struct mhi_ep_ring_item), 0,
							0, NULL);
	if (!mhi_cntrl->ring_item_cache) {
		ret = -ENOMEM;
		goto err_destroy_tre_buf_cache;
	}

	INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
	INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
	INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
	INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker);

	mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
	if (!mhi_cntrl->wq) {
		ret = -ENOMEM;
		goto err_destroy_ring_item_cache;
	}

	INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
	INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
	spin_lock_init(&mhi_cntrl->list_lock);
	mutex_init(&mhi_cntrl->state_lock);
	mutex_init(&mhi_cntrl->event_lock);

	/* Set MHI version and AMSS EE before enumeration */
	mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version);
	mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);

	/* Set controller index */
	ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL);
	if (ret < 0)
		goto err_destroy_wq;

	mhi_cntrl->index = ret;

	irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN);
	ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH,
			  "doorbell_irq", mhi_cntrl);
	if (ret) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n");
		goto err_ida_free;
	}

	/* Allocate the controller device */
	mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n");
		ret = PTR_ERR(mhi_dev);
		goto err_free_irq;
	}

	ret = dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index);
	if (ret)
		goto err_put_dev;

	mhi_dev->name = dev_name(&mhi_dev->dev);
	mhi_cntrl->mhi_dev = mhi_dev;

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto err_put_dev;

	dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n");

	return 0;

err_put_dev:
	put_device(&mhi_dev->dev);
err_free_irq:
	free_irq(mhi_cntrl->irq, mhi_cntrl);
err_ida_free:
	ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
err_destroy_wq:
	destroy_workqueue(mhi_cntrl->wq);
err_destroy_ring_item_cache:
	kmem_cache_destroy(mhi_cntrl->ring_item_cache);
err_destroy_tre_buf_cache:
	kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
err_destroy_ev_ring_el_cache:
	kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
err_free_cmd:
	kfree(mhi_cntrl->mhi_cmd);
err_free_ch:
	kfree(mhi_cntrl->mhi_chan);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_ep_register_controller);

/*
 * It is expected that the controller drivers will power down the MHI EP stack
 * using "mhi_ep_power_down()" before calling this function to unregister themselves.
 */
void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
{
	struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev;

	destroy_workqueue(mhi_cntrl->wq);

	free_irq(mhi_cntrl->irq, mhi_cntrl);

	kmem_cache_destroy(mhi_cntrl->tre_buf_cache);
	kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache);
	kmem_cache_destroy(mhi_cntrl->ring_item_cache);
	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);

	ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
}
EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller);

static int mhi_ep_driver_probe(struct device *dev)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
	struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
	struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan;

	ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
	dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;

	return mhi_drv->probe(mhi_dev, mhi_dev->id);
}

static int mhi_ep_driver_remove(struct device *dev)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
	struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
	struct mhi_result result = {};
	struct mhi_ep_chan *mhi_chan;
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Disconnect the channels associated with the driver */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Send channel disconnect status to the client driver */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		mhi_chan->state = MHI_CH_STATE_DISABLED;
		mhi_chan->xfer_cb = NULL;
		mutex_unlock(&mhi_chan->lock);
	}

	/* Remove the client driver now */
	mhi_drv->remove(mhi_dev);

	return 0;
}

int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	/* Client drivers should have callbacks defined for both channels */
	if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb)
		return -EINVAL;

	driver->bus = &mhi_ep_bus_type;
	driver->owner = owner;
	driver->probe = mhi_ep_driver_probe;
	driver->remove = mhi_ep_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_ep_driver_register);

void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister);

static int mhi_ep_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT,
					mhi_dev->name);
}

static int mhi_ep_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
	struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
}

struct bus_type mhi_ep_bus_type = {
	.name = "mhi_ep",
	.dev_name = "mhi_ep",
	.match = mhi_ep_match,
	.uevent = mhi_ep_uevent,
};

static int __init mhi_ep_init(void)
{
	return bus_register(&mhi_ep_bus_type);
}

static void __exit mhi_ep_exit(void)
{
	bus_unregister(&mhi_ep_bus_type);
}

postcore_initcall(mhi_ep_init);
module_exit(mhi_ep_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Bus Endpoint stack");
MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");