// SPDX-License-Identifier: GPL-2.0
/*
 * PCI EPF driver for MHI Endpoint devices
 *
 * Copyright (C) 2023 Linaro Ltd.
 * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 */
9 #include <linux/dmaengine.h>
10 #include <linux/mhi_ep.h>
11 #include <linux/module.h>
12 #include <linux/of_dma.h>
13 #include <linux/platform_device.h>
14 #include <linux/pci-epc.h>
15 #include <linux/pci-epf.h>
#define MHI_VERSION_1_0 0x01000000

/* Resolve an embedded struct mhi_ep_cntrl back to its struct pci_epf_mhi */
#define to_epf_mhi(cntrl) container_of(cntrl, struct pci_epf_mhi, cntrl)

/* Platform specific flags */
#define MHI_EPF_USE_DMA BIT(0)
24 struct pci_epf_mhi_ep_info {
25 const struct mhi_ep_cntrl_config *config;
26 struct pci_epf_header *epf_header;
27 enum pci_barno bar_num;
/*
 * Describe one MHI channel by its fixed channel number, name and DMA
 * direction. UL channels use DMA_TO_DEVICE, DL channels DMA_FROM_DEVICE.
 *
 * NOTE(review): the initializer body was lost in extraction; the field
 * names match struct mhi_ep_channel_config -- verify against the original.
 */
#define MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, direction)	\
	{							\
		.num = ch_num,					\
		.name = ch_name,				\
		.dir = direction,				\
	}

#define MHI_EP_CHANNEL_CONFIG_UL(ch_num, ch_name)	\
	MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, DMA_TO_DEVICE)

#define MHI_EP_CHANNEL_CONFIG_DL(ch_num, ch_name)	\
	MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, DMA_FROM_DEVICE)
47 static const struct mhi_ep_channel_config mhi_v1_channels[] = {
48 MHI_EP_CHANNEL_CONFIG_UL(0, "LOOPBACK"),
49 MHI_EP_CHANNEL_CONFIG_DL(1, "LOOPBACK"),
50 MHI_EP_CHANNEL_CONFIG_UL(2, "SAHARA"),
51 MHI_EP_CHANNEL_CONFIG_DL(3, "SAHARA"),
52 MHI_EP_CHANNEL_CONFIG_UL(4, "DIAG"),
53 MHI_EP_CHANNEL_CONFIG_DL(5, "DIAG"),
54 MHI_EP_CHANNEL_CONFIG_UL(6, "SSR"),
55 MHI_EP_CHANNEL_CONFIG_DL(7, "SSR"),
56 MHI_EP_CHANNEL_CONFIG_UL(8, "QDSS"),
57 MHI_EP_CHANNEL_CONFIG_DL(9, "QDSS"),
58 MHI_EP_CHANNEL_CONFIG_UL(10, "EFS"),
59 MHI_EP_CHANNEL_CONFIG_DL(11, "EFS"),
60 MHI_EP_CHANNEL_CONFIG_UL(12, "MBIM"),
61 MHI_EP_CHANNEL_CONFIG_DL(13, "MBIM"),
62 MHI_EP_CHANNEL_CONFIG_UL(14, "QMI"),
63 MHI_EP_CHANNEL_CONFIG_DL(15, "QMI"),
64 MHI_EP_CHANNEL_CONFIG_UL(16, "QMI"),
65 MHI_EP_CHANNEL_CONFIG_DL(17, "QMI"),
66 MHI_EP_CHANNEL_CONFIG_UL(18, "IP-CTRL-1"),
67 MHI_EP_CHANNEL_CONFIG_DL(19, "IP-CTRL-1"),
68 MHI_EP_CHANNEL_CONFIG_UL(20, "IPCR"),
69 MHI_EP_CHANNEL_CONFIG_DL(21, "IPCR"),
70 MHI_EP_CHANNEL_CONFIG_UL(32, "DUN"),
71 MHI_EP_CHANNEL_CONFIG_DL(33, "DUN"),
72 MHI_EP_CHANNEL_CONFIG_UL(46, "IP_SW0"),
73 MHI_EP_CHANNEL_CONFIG_DL(47, "IP_SW0"),
76 static const struct mhi_ep_cntrl_config mhi_v1_config = {
78 .num_channels = ARRAY_SIZE(mhi_v1_channels),
79 .ch_cfg = mhi_v1_channels,
80 .mhi_version = MHI_VERSION_1_0,
83 static struct pci_epf_header sdx55_header = {
84 .vendorid = PCI_VENDOR_ID_QCOM,
86 .baseclass_code = PCI_BASE_CLASS_COMMUNICATION,
87 .subclass_code = PCI_CLASS_COMMUNICATION_MODEM & 0xff,
88 .interrupt_pin = PCI_INTERRUPT_INTA,
91 static const struct pci_epf_mhi_ep_info sdx55_info = {
92 .config = &mhi_v1_config,
93 .epf_header = &sdx55_header,
95 .epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
100 static struct pci_epf_header sm8450_header = {
101 .vendorid = PCI_VENDOR_ID_QCOM,
103 .baseclass_code = PCI_CLASS_OTHERS,
104 .interrupt_pin = PCI_INTERRUPT_INTA,
107 static const struct pci_epf_mhi_ep_info sm8450_info = {
108 .config = &mhi_v1_config,
109 .epf_header = &sm8450_header,
111 .epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
114 .flags = MHI_EPF_USE_DMA,
118 const struct pci_epc_features *epc_features;
119 const struct pci_epf_mhi_ep_info *info;
120 struct mhi_ep_cntrl mhi_cntrl;
124 resource_size_t mmio_phys;
125 struct dma_chan *dma_chan_tx;
126 struct dma_chan *dma_chan_rx;
131 static size_t get_align_offset(struct pci_epf_mhi *epf_mhi, u64 addr)
133 return addr & (epf_mhi->epc_features->align -1);
136 static int __pci_epf_mhi_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
137 phys_addr_t *paddr, void __iomem **vaddr,
138 size_t offset, size_t size)
140 struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
141 struct pci_epf *epf = epf_mhi->epf;
142 struct pci_epc *epc = epf->epc;
145 *vaddr = pci_epc_mem_alloc_addr(epc, paddr, size + offset);
149 ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, *paddr,
150 pci_addr - offset, size + offset);
152 pci_epc_mem_free_addr(epc, *paddr, *vaddr, size + offset);
156 *paddr = *paddr + offset;
157 *vaddr = *vaddr + offset;
162 static int pci_epf_mhi_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
163 phys_addr_t *paddr, void __iomem **vaddr,
166 struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
167 size_t offset = get_align_offset(epf_mhi, pci_addr);
169 return __pci_epf_mhi_alloc_map(mhi_cntrl, pci_addr, paddr, vaddr,
173 static void __pci_epf_mhi_unmap_free(struct mhi_ep_cntrl *mhi_cntrl,
174 u64 pci_addr, phys_addr_t paddr,
175 void __iomem *vaddr, size_t offset,
178 struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
179 struct pci_epf *epf = epf_mhi->epf;
180 struct pci_epc *epc = epf->epc;
182 pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, paddr - offset);
183 pci_epc_mem_free_addr(epc, paddr - offset, vaddr - offset,
187 static void pci_epf_mhi_unmap_free(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
188 phys_addr_t paddr, void __iomem *vaddr,
191 struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
192 size_t offset = get_align_offset(epf_mhi, pci_addr);
194 __pci_epf_mhi_unmap_free(mhi_cntrl, pci_addr, paddr, vaddr, offset,
198 static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector)
200 struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
201 struct pci_epf *epf = epf_mhi->epf;
202 struct pci_epc *epc = epf->epc;
205 * MHI supplies 0 based MSI vectors but the API expects the vector
206 * number to start from 1, so we need to increment the vector by 1.
208 pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_EPC_IRQ_MSI,
212 static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl,
213 struct mhi_ep_buf_info *buf_info)
215 struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
216 size_t offset = get_align_offset(epf_mhi, buf_info->host_addr);
217 void __iomem *tre_buf;
218 phys_addr_t tre_phys;
221 mutex_lock(&epf_mhi->lock);
223 ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys,
224 &tre_buf, offset, buf_info->size);
226 mutex_unlock(&epf_mhi->lock);
230 memcpy_fromio(buf_info->dev_addr, tre_buf, buf_info->size);
232 __pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys,
233 tre_buf, offset, buf_info->size);
235 mutex_unlock(&epf_mhi->lock);
240 static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl,
241 struct mhi_ep_buf_info *buf_info)
243 struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
244 size_t offset = get_align_offset(epf_mhi, buf_info->host_addr);
245 void __iomem *tre_buf;
246 phys_addr_t tre_phys;
249 mutex_lock(&epf_mhi->lock);
251 ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys,
252 &tre_buf, offset, buf_info->size);
254 mutex_unlock(&epf_mhi->lock);
258 memcpy_toio(tre_buf, buf_info->dev_addr, buf_info->size);
260 __pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys,
261 tre_buf, offset, buf_info->size);
263 mutex_unlock(&epf_mhi->lock);
268 static void pci_epf_mhi_dma_callback(void *param)
273 static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl,
274 struct mhi_ep_buf_info *buf_info)
276 struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
277 struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
278 struct dma_chan *chan = epf_mhi->dma_chan_rx;
279 struct device *dev = &epf_mhi->epf->dev;
280 DECLARE_COMPLETION_ONSTACK(complete);
281 struct dma_async_tx_descriptor *desc;
282 struct dma_slave_config config = {};
287 if (buf_info->size < SZ_4K)
288 return pci_epf_mhi_iatu_read(mhi_cntrl, buf_info);
290 mutex_lock(&epf_mhi->lock);
292 config.direction = DMA_DEV_TO_MEM;
293 config.src_addr = buf_info->host_addr;
295 ret = dmaengine_slave_config(chan, &config);
297 dev_err(dev, "Failed to configure DMA channel\n");
301 dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
303 ret = dma_mapping_error(dma_dev, dst_addr);
305 dev_err(dev, "Failed to map remote memory\n");
309 desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size,
311 DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
313 dev_err(dev, "Failed to prepare DMA\n");
318 desc->callback = pci_epf_mhi_dma_callback;
319 desc->callback_param = &complete;
321 cookie = dmaengine_submit(desc);
322 ret = dma_submit_error(cookie);
324 dev_err(dev, "Failed to do DMA submit\n");
328 dma_async_issue_pending(chan);
329 ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
331 dev_err(dev, "DMA transfer timeout\n");
332 dmaengine_terminate_sync(chan);
337 dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE);
339 mutex_unlock(&epf_mhi->lock);
344 static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl,
345 struct mhi_ep_buf_info *buf_info)
347 struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
348 struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
349 struct dma_chan *chan = epf_mhi->dma_chan_tx;
350 struct device *dev = &epf_mhi->epf->dev;
351 DECLARE_COMPLETION_ONSTACK(complete);
352 struct dma_async_tx_descriptor *desc;
353 struct dma_slave_config config = {};
358 if (buf_info->size < SZ_4K)
359 return pci_epf_mhi_iatu_write(mhi_cntrl, buf_info);
361 mutex_lock(&epf_mhi->lock);
363 config.direction = DMA_MEM_TO_DEV;
364 config.dst_addr = buf_info->host_addr;
366 ret = dmaengine_slave_config(chan, &config);
368 dev_err(dev, "Failed to configure DMA channel\n");
372 src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size,
374 ret = dma_mapping_error(dma_dev, src_addr);
376 dev_err(dev, "Failed to map remote memory\n");
380 desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size,
382 DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
384 dev_err(dev, "Failed to prepare DMA\n");
389 desc->callback = pci_epf_mhi_dma_callback;
390 desc->callback_param = &complete;
392 cookie = dmaengine_submit(desc);
393 ret = dma_submit_error(cookie);
395 dev_err(dev, "Failed to do DMA submit\n");
399 dma_async_issue_pending(chan);
400 ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
402 dev_err(dev, "DMA transfer timeout\n");
403 dmaengine_terminate_sync(chan);
408 dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_FROM_DEVICE);
410 mutex_unlock(&epf_mhi->lock);
415 struct epf_dma_filter {
420 static bool pci_epf_mhi_filter(struct dma_chan *chan, void *node)
422 struct epf_dma_filter *filter = node;
423 struct dma_slave_caps caps;
425 memset(&caps, 0, sizeof(caps));
426 dma_get_slave_caps(chan, &caps);
428 return chan->device->dev == filter->dev && filter->dma_mask &
432 static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi)
434 struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
435 struct device *dev = &epf_mhi->epf->dev;
436 struct epf_dma_filter filter;
440 dma_cap_set(DMA_SLAVE, mask);
442 filter.dev = dma_dev;
443 filter.dma_mask = BIT(DMA_MEM_TO_DEV);
444 epf_mhi->dma_chan_tx = dma_request_channel(mask, pci_epf_mhi_filter,
446 if (IS_ERR_OR_NULL(epf_mhi->dma_chan_tx)) {
447 dev_err(dev, "Failed to request tx channel\n");
451 filter.dma_mask = BIT(DMA_DEV_TO_MEM);
452 epf_mhi->dma_chan_rx = dma_request_channel(mask, pci_epf_mhi_filter,
454 if (IS_ERR_OR_NULL(epf_mhi->dma_chan_rx)) {
455 dev_err(dev, "Failed to request rx channel\n");
456 dma_release_channel(epf_mhi->dma_chan_tx);
457 epf_mhi->dma_chan_tx = NULL;
464 static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi)
466 dma_release_channel(epf_mhi->dma_chan_tx);
467 dma_release_channel(epf_mhi->dma_chan_rx);
468 epf_mhi->dma_chan_tx = NULL;
469 epf_mhi->dma_chan_rx = NULL;
472 static int pci_epf_mhi_core_init(struct pci_epf *epf)
474 struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
475 const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
476 struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
477 struct pci_epc *epc = epf->epc;
478 struct device *dev = &epf->dev;
481 epf_bar->phys_addr = epf_mhi->mmio_phys;
482 epf_bar->size = epf_mhi->mmio_size;
483 epf_bar->barno = info->bar_num;
484 epf_bar->flags = info->epf_flags;
485 ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
487 dev_err(dev, "Failed to set BAR: %d\n", ret);
491 ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
492 order_base_2(info->msi_count));
494 dev_err(dev, "Failed to set MSI configuration: %d\n", ret);
498 ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no,
501 dev_err(dev, "Failed to set Configuration header: %d\n", ret);
505 epf_mhi->epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
506 if (!epf_mhi->epc_features)
512 static int pci_epf_mhi_link_up(struct pci_epf *epf)
514 struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
515 const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
516 struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
517 struct pci_epc *epc = epf->epc;
518 struct device *dev = &epf->dev;
521 if (info->flags & MHI_EPF_USE_DMA) {
522 ret = pci_epf_mhi_dma_init(epf_mhi);
524 dev_err(dev, "Failed to initialize DMA: %d\n", ret);
529 mhi_cntrl->mmio = epf_mhi->mmio;
530 mhi_cntrl->irq = epf_mhi->irq;
531 mhi_cntrl->mru = info->mru;
533 /* Assign the struct dev of PCI EP as MHI controller device */
534 mhi_cntrl->cntrl_dev = epc->dev.parent;
535 mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
536 mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
537 mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
538 if (info->flags & MHI_EPF_USE_DMA) {
539 mhi_cntrl->read_from_host = pci_epf_mhi_edma_read;
540 mhi_cntrl->write_to_host = pci_epf_mhi_edma_write;
542 mhi_cntrl->read_from_host = pci_epf_mhi_iatu_read;
543 mhi_cntrl->write_to_host = pci_epf_mhi_iatu_write;
546 /* Register the MHI EP controller */
547 ret = mhi_ep_register_controller(mhi_cntrl, info->config);
549 dev_err(dev, "Failed to register MHI EP controller: %d\n", ret);
550 if (info->flags & MHI_EPF_USE_DMA)
551 pci_epf_mhi_dma_deinit(epf_mhi);
558 static int pci_epf_mhi_link_down(struct pci_epf *epf)
560 struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
561 const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
562 struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
564 if (mhi_cntrl->mhi_dev) {
565 mhi_ep_power_down(mhi_cntrl);
566 if (info->flags & MHI_EPF_USE_DMA)
567 pci_epf_mhi_dma_deinit(epf_mhi);
568 mhi_ep_unregister_controller(mhi_cntrl);
574 static int pci_epf_mhi_bme(struct pci_epf *epf)
576 struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
577 const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
578 struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
579 struct device *dev = &epf->dev;
583 * Power up the MHI EP stack if link is up and stack is in power down
586 if (!mhi_cntrl->enabled && mhi_cntrl->mhi_dev) {
587 ret = mhi_ep_power_up(mhi_cntrl);
589 dev_err(dev, "Failed to power up MHI EP: %d\n", ret);
590 if (info->flags & MHI_EPF_USE_DMA)
591 pci_epf_mhi_dma_deinit(epf_mhi);
592 mhi_ep_unregister_controller(mhi_cntrl);
599 static int pci_epf_mhi_bind(struct pci_epf *epf)
601 struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
602 struct pci_epc *epc = epf->epc;
603 struct platform_device *pdev = to_platform_device(epc->dev.parent);
604 struct resource *res;
607 /* Get MMIO base address from Endpoint controller */
608 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio");
609 epf_mhi->mmio_phys = res->start;
610 epf_mhi->mmio_size = resource_size(res);
612 epf_mhi->mmio = ioremap(epf_mhi->mmio_phys, epf_mhi->mmio_size);
616 ret = platform_get_irq_byname(pdev, "doorbell");
618 iounmap(epf_mhi->mmio);
627 static void pci_epf_mhi_unbind(struct pci_epf *epf)
629 struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
630 const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
631 struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
632 struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
633 struct pci_epc *epc = epf->epc;
636 * Forcefully power down the MHI EP stack. Only way to bring the MHI EP
637 * stack back to working state after successive bind is by getting BME
640 if (mhi_cntrl->mhi_dev) {
641 mhi_ep_power_down(mhi_cntrl);
642 if (info->flags & MHI_EPF_USE_DMA)
643 pci_epf_mhi_dma_deinit(epf_mhi);
644 mhi_ep_unregister_controller(mhi_cntrl);
647 iounmap(epf_mhi->mmio);
648 pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
651 static struct pci_epc_event_ops pci_epf_mhi_event_ops = {
652 .core_init = pci_epf_mhi_core_init,
653 .link_up = pci_epf_mhi_link_up,
654 .link_down = pci_epf_mhi_link_down,
655 .bme = pci_epf_mhi_bme,
658 static int pci_epf_mhi_probe(struct pci_epf *epf,
659 const struct pci_epf_device_id *id)
661 struct pci_epf_mhi_ep_info *info =
662 (struct pci_epf_mhi_ep_info *)id->driver_data;
663 struct pci_epf_mhi *epf_mhi;
664 struct device *dev = &epf->dev;
666 epf_mhi = devm_kzalloc(dev, sizeof(*epf_mhi), GFP_KERNEL);
670 epf->header = info->epf_header;
671 epf_mhi->info = info;
674 epf->event_ops = &pci_epf_mhi_event_ops;
676 mutex_init(&epf_mhi->lock);
678 epf_set_drvdata(epf, epf_mhi);
683 static const struct pci_epf_device_id pci_epf_mhi_ids[] = {
684 { .name = "sdx55", .driver_data = (kernel_ulong_t)&sdx55_info },
685 { .name = "sm8450", .driver_data = (kernel_ulong_t)&sm8450_info },
689 static struct pci_epf_ops pci_epf_mhi_ops = {
690 .unbind = pci_epf_mhi_unbind,
691 .bind = pci_epf_mhi_bind,
694 static struct pci_epf_driver pci_epf_mhi_driver = {
695 .driver.name = "pci_epf_mhi",
696 .probe = pci_epf_mhi_probe,
697 .id_table = pci_epf_mhi_ids,
698 .ops = &pci_epf_mhi_ops,
699 .owner = THIS_MODULE,
702 static int __init pci_epf_mhi_init(void)
704 return pci_epf_register_driver(&pci_epf_mhi_driver);
706 module_init(pci_epf_mhi_init);
708 static void __exit pci_epf_mhi_exit(void)
710 pci_epf_unregister_driver(&pci_epf_mhi_driver);
712 module_exit(pci_epf_mhi_exit);
714 MODULE_DESCRIPTION("PCI EPF driver for MHI Endpoint devices");
715 MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
716 MODULE_LICENSE("GPL");