/*
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>

#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"
#include "hw-me-regs.h"
#include "hw-me.h"

/* mei_me_pci_tbl - PCI Device ID Table */
static const struct pci_device_id mei_me_pci_tbl[] = {
	{MEI_PCI_DEVICE(MEI_DEV_ID_82946GZ, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G35, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82Q965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82G965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GM965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_82GME965, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q35, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82G33, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82Q33, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_82X38, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_3200, mei_me_legacy_cfg)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_6, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_7, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_8, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_9, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9_10, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_1, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_2, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_3, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH9M_4, mei_me_legacy_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_1, mei_me_ich_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_2, mei_me_ich_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, mei_me_ich_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, mei_me_ich_cfg)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, mei_me_pch_cpt_pbg_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, mei_me_pch_cpt_pbg_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, mei_me_pch_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, mei_me_pch8_sps_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, mei_me_pch8_sps_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch8_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_pch8_sps_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch8_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, mei_me_pch8_cfg)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT, mei_me_pch8_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)},

	{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)},
	{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_me_pci_tbl);
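
/*
 * Runtime PM domain helpers: the real implementations are defined further
 * down for CONFIG_PM builds; the empty stubs below let probe()/remove()
 * call them unconditionally when power management is compiled out.
 */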
#ifdef CONFIG_PM
static inline void mei_me_set_pm_domain(struct mei_device *dev);
static inline void mei_me_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_me_set_pm_domain(struct mei_device *dev) {}
static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */

/**
 * mei_me_quirk_probe - probe for devices that don't have a valid ME interface
 *
 * @pdev: PCI device structure
 * @cfg: per generation config
 *
 * Return: true if ME Interface is valid, false otherwise
 */
static bool mei_me_quirk_probe(struct pci_dev *pdev,
				const struct mei_cfg *cfg)
{
	if (cfg->quirk_probe && cfg->quirk_probe(pdev)) {
		dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
		return false;
	}

	return true;
}

/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_me_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
	struct mei_device *dev;
	struct mei_me_hw *hw;
	unsigned int irqflags;
	int err;

	if (!mei_me_quirk_probe(pdev, cfg))
		return -ENODEV;

	/* enable pci device */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}
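
	/*
	 * Prefer 64-bit DMA masks and fall back to 32-bit if the platform
	 * cannot satisfy them; probing is aborted when neither works.
	 */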
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto release_regions;
	}

	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(pdev, cfg);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_me_hw(dev);
	/* mapping IO device memory */
	hw->mem_addr = pci_iomap(pdev, 0, 0);
	if (!hw->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	/* request and enable interrupt */
	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;
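	/*
	 * An MSI vector is exclusive to this device, so IRQF_ONESHOT keeps
	 * the interrupt masked until the thread handler finishes; a legacy
	 * INTx line may be shared with other devices, hence IRQF_SHARED.
	 */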

	err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			irqflags, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto disable_msi;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto stop;

	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	/*
	 * For hardware that is not wake capable, the runtime PM framework
	 * can't be used at the PCI device level.
	 * Use domain runtime PM callbacks instead.
	 */
	if (!pci_dev_run_wake(pdev))
		mei_me_set_pm_domain(dev);
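
	/*
	 * When power gating is supported, drop the runtime PM reference the
	 * PCI core holds across probe so that autosuspend can eventually
	 * idle the device.
	 */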
	if (mei_pg_is_enabled(dev))
		pm_runtime_put_noidle(&pdev->dev);

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;
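
	/*
	 * Error unwind: each label releases the resources acquired before
	 * the corresponding failure point, in reverse order.
	 */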
stop:
	mei_stop(dev);
release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, hw->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_me_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_me_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_me_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	if (mei_pg_is_enabled(dev))
		pm_runtime_get_noresume(&pdev->dev);

	hw = to_me_hw(dev);
	dev_dbg(&pdev->dev, "stop\n");
	mei_stop(dev);

	if (!pci_dev_run_wake(pdev))
		mei_me_unset_pm_domain(dev);

	/* disable interrupts */
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);
	pci_iounmap(pdev, hw->mem_addr);

	mei_deregister(dev);
	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

#ifdef CONFIG_PM_SLEEP
static int mei_me_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);

	if (!dev)
		return -ENODEV;

	dev_dbg(&pdev->dev, "suspend\n");
	mei_stop(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return 0;
}
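
/*
 * On system resume the MSI vector and the interrupt handler are re-acquired
 * before the host/firmware handshake is restarted via mei_restart().
 */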
static int mei_me_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	unsigned int irqflags;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);
	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;

	/* request and enable interrupt */
	err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			irqflags, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	err = mei_restart(dev);
	if (err)
		return err;

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */
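
/*
 * Runtime PM: the device is power gated via mei_me_pg_enter_sync() only when
 * the host has no writes in flight, and is brought back out of power gating
 * with mei_me_pg_exit_sync() on runtime resume.
 */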
#ifdef CONFIG_PM
static int mei_me_pm_runtime_idle(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;

	dev_dbg(&pdev->dev, "rpm: me: runtime_idle\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;
	if (mei_write_is_idle(dev))
		pm_runtime_autosuspend(device);

	return -EBUSY;
}

static int mei_me_pm_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int ret;

	dev_dbg(&pdev->dev, "rpm: me: runtime suspend\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);
	if (mei_write_is_idle(dev))
		ret = mei_me_pg_enter_sync(dev);
	else
		ret = -EAGAIN;
	mutex_unlock(&dev->device_lock);

	dev_dbg(&pdev->dev, "rpm: me: runtime suspend ret=%d\n", ret);

	return ret;
}

static int mei_me_pm_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int ret;

	dev_dbg(&pdev->dev, "rpm: me: runtime resume\n");

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);
	ret = mei_me_pg_exit_sync(dev);
	mutex_unlock(&dev->device_lock);

	dev_dbg(&pdev->dev, "rpm: me: runtime resume ret = %d\n", ret);

	return ret;
}

/**
 * mei_me_set_pm_domain - fill and set pm domain structure for device
 * @dev: mei_device
 */
static inline void mei_me_set_pm_domain(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pdev->dev.bus && pdev->dev.bus->pm) {
		dev->pg_domain.ops = *pdev->dev.bus->pm;

		dev->pg_domain.ops.runtime_suspend = mei_me_pm_runtime_suspend;
		dev->pg_domain.ops.runtime_resume = mei_me_pm_runtime_resume;
		dev->pg_domain.ops.runtime_idle = mei_me_pm_runtime_idle;

		dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
	}
}

/**
 * mei_me_unset_pm_domain - clean pm domain structure for device
 * @dev: mei_device
 */
static inline void mei_me_unset_pm_domain(struct mei_device *dev)
{
	/* stop using pm callbacks if any */
	dev_pm_domain_set(dev->dev, NULL);
}

static const struct dev_pm_ops mei_me_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend,
				mei_me_pci_resume)
	SET_RUNTIME_PM_OPS(
		mei_me_pm_runtime_suspend,
		mei_me_pm_runtime_resume,
		mei_me_pm_runtime_idle)
};

#define MEI_ME_PM_OPS	(&mei_me_pm_ops)
#else
#define MEI_ME_PM_OPS	NULL
#endif /* CONFIG_PM */

/*
 * PCI driver structure
 */
static struct pci_driver mei_me_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_me_pci_tbl,
	.probe = mei_me_probe,
	.remove = mei_me_remove,
	.shutdown = mei_me_remove,
	.driver.pm = MEI_ME_PM_OPS,
};

module_pci_driver(mei_me_driver);
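
/*
 * module_pci_driver() expands to the module_init()/module_exit() pair that
 * registers and unregisters mei_me_driver with the PCI core.
 */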

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");