// SPDX-License-Identifier: GPL-2.0-only
/*
 * STA2x11 mfd for GPIO, SCTL and APBREG
 *
 * Copyright (c) 2009-2011 Wind River Systems, Inc.
 * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini, Davide Ciminaghi)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/sta2x11-mfd.h>
#include <linux/regmap.h>

#include <asm/sta2x11.h>

static inline int __reg_within_range(unsigned int r,
				     unsigned int start,
				     unsigned int end)
{
	return ((r >= start) && (r <= end));
}

/* This describes STA2X11 MFD chip for us, we may have several */
struct sta2x11_mfd {
	struct sta2x11_instance *instance;
	struct regmap *regmap[sta2x11_n_mfd_plat_devs];
	spinlock_t lock[sta2x11_n_mfd_plat_devs];
	struct list_head list;
	void __iomem *regs[sta2x11_n_mfd_plat_devs];
};

static LIST_HEAD(sta2x11_mfd_list);

/* Functions to act on the list */
static struct sta2x11_mfd *sta2x11_mfd_find(struct pci_dev *pdev)
{
	struct sta2x11_instance *instance;
	struct sta2x11_mfd *mfd;

	if (!pdev && !list_empty(&sta2x11_mfd_list)) {
		pr_warn("%s: Unspecified device, using first instance\n",
			__func__);
		return list_entry(sta2x11_mfd_list.next,
				  struct sta2x11_mfd, list);
	}

	instance = sta2x11_get_instance(pdev);
	if (!instance)
		return NULL;
	list_for_each_entry(mfd, &sta2x11_mfd_list, list) {
		if (mfd->instance == instance)
			return mfd;
	}
	return NULL;
}

static int sta2x11_mfd_add(struct pci_dev *pdev, gfp_t flags)
{
	int i;
	struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
	struct sta2x11_instance *instance;

	if (mfd)
		return -EBUSY;
	instance = sta2x11_get_instance(pdev);
	if (!instance)
		return -EINVAL;
	mfd = kzalloc(sizeof(*mfd), flags);
	if (!mfd)
		return -ENOMEM;
	INIT_LIST_HEAD(&mfd->list);
	for (i = 0; i < ARRAY_SIZE(mfd->lock); i++)
		spin_lock_init(&mfd->lock[i]);
	mfd->instance = instance;
	list_add(&mfd->list, &sta2x11_mfd_list);
	return 0;
}

/* This function is exported and is not expected to fail */
u32 __sta2x11_mfd_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val,
		       enum sta2x11_mfd_plat_dev index)
{
	struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
	u32 r;
	unsigned long flags;
	void __iomem *regs;

	if (!mfd) {
		dev_warn(&pdev->dev, ": can't access sctl regs\n");
		return 0;
	}

	regs = mfd->regs[index];
	if (!regs) {
		dev_warn(&pdev->dev, ": system ctl not initialized\n");
		return 0;
	}
	spin_lock_irqsave(&mfd->lock[index], flags);
	r = readl(regs + reg);
	r &= ~mask;
	r |= val;
	if (mask)
		writel(r, regs + reg);
	spin_unlock_irqrestore(&mfd->lock[index], flags);
	return r;
}
EXPORT_SYMBOL(__sta2x11_mfd_mask);
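
/*
 * Illustrative use (a sketch, not part of this driver): callers normally go
 * through the per-block inline wrappers declared in <linux/mfd/sta2x11-mfd.h>
 * (names assumed from that header), e.g.
 *
 *	sta2x11_sctl_mask(pdev, reg, bits_to_clear, bits_to_set);
 *
 * which boils down to __sta2x11_mfd_mask(pdev, reg, mask, val, sta2x11_sctl):
 * a spinlock-protected read-modify-write of one 32-bit register.
 */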

int sta2x11_mfd_get_regs_data(struct platform_device *dev,
			      enum sta2x11_mfd_plat_dev index,
			      void __iomem **regs,
			      spinlock_t **lock)
{
	struct pci_dev *pdev = *(struct pci_dev **)dev_get_platdata(&dev->dev);
	struct sta2x11_mfd *mfd;

	if (!pdev)
		return -ENODEV;
	mfd = sta2x11_mfd_find(pdev);
	if (!mfd)
		return -ENODEV;
	if (index >= sta2x11_n_mfd_plat_devs)
		return -ENODEV;
	*regs = mfd->regs[index];
	*lock = &mfd->lock[index];
	pr_debug("%s %d *regs = %p\n", __func__, __LINE__, *regs);
	return *regs ? 0 : -ENODEV;
}
EXPORT_SYMBOL(sta2x11_mfd_get_regs_data);
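
/*
 * Hypothetical usage sketch for one of the platform sub-drivers (identifiers
 * below are illustrative only): grab the register window and the spinlock
 * that guards it before doing raw readl()/writel() accesses.
 *
 *	void __iomem *regs;
 *	spinlock_t *lock;
 *
 *	if (sta2x11_mfd_get_regs_data(pdev, sta2x11_sctl, &regs, &lock))
 *		return -ENODEV;
 */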

/*
 * Special sta2x11-mfd regmap lock/unlock functions
 */

static void sta2x11_regmap_lock(void *__lock)
{
	spinlock_t *lock = __lock;

	spin_lock(lock);
}

static void sta2x11_regmap_unlock(void *__lock)
{
	spinlock_t *lock = __lock;

	spin_unlock(lock);
}

/* OTP (one time programmable) registers do not require locking */
static void sta2x11_regmap_nolock(void *__lock)
{
}
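
/*
 * The lock/unlock callbacks above replace regmap's default mutex with a
 * driver-supplied spinlock (passed via ->lock_arg in
 * sta2x11_mfd_platform_probe() below); the "nolock" variant is used for the
 * read-only OTP block (SCR), which needs no serialization.
 */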

static const char *sta2x11_mfd_names[sta2x11_n_mfd_plat_devs] = {
	[sta2x11_sctl] = STA2X11_MFD_SCTL_NAME,
	[sta2x11_apbreg] = STA2X11_MFD_APBREG_NAME,
	[sta2x11_apb_soc_regs] = STA2X11_MFD_APB_SOC_REGS_NAME,
	[sta2x11_scr] = STA2X11_MFD_SCR_NAME,
};

static bool sta2x11_sctl_writeable_reg(struct device *dev, unsigned int reg)
{
	return !__reg_within_range(reg, SCTL_SCPCIECSBRST, SCTL_SCRSTSTA);
}

static struct regmap_config sta2x11_sctl_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.lock = sta2x11_regmap_lock,
	.unlock = sta2x11_regmap_unlock,
	.max_register = SCTL_SCRSTSTA,
	.writeable_reg = sta2x11_sctl_writeable_reg,
};

static bool sta2x11_scr_readable_reg(struct device *dev, unsigned int reg)
{
	return (reg == STA2X11_SECR_CR) ||
		__reg_within_range(reg, STA2X11_SECR_FVR0, STA2X11_SECR_FVR1);
}

static bool sta2x11_scr_writeable_reg(struct device *dev, unsigned int reg)
{
	return false;
}

static struct regmap_config sta2x11_scr_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.lock = sta2x11_regmap_nolock,
	.unlock = sta2x11_regmap_nolock,
	.max_register = STA2X11_SECR_FVR1,
	.readable_reg = sta2x11_scr_readable_reg,
	.writeable_reg = sta2x11_scr_writeable_reg,
};

static bool sta2x11_apbreg_readable_reg(struct device *dev, unsigned int reg)
{
	/* Two blocks (CAN and MLB, SARAC) 0x100 bytes apart */
	if (reg >= APBREG_BSR_SARAC)
		reg -= APBREG_BSR_SARAC;

	switch (reg) {
	case APBREG_BSR:
	case APBREG_PAER:
	case APBREG_PWAC:
	case APBREG_PRAC:
	case APBREG_PCG:
	case APBREG_PUR:
	case APBREG_EMU_PCG:
		return true;
	default:
		return false;
	}
}

static bool sta2x11_apbreg_writeable_reg(struct device *dev, unsigned int reg)
{
	if (reg >= APBREG_BSR_SARAC)
		reg -= APBREG_BSR_SARAC;
	if (!sta2x11_apbreg_readable_reg(dev, reg))
		return false;
	return reg != APBREG_PAER;
}

static struct regmap_config sta2x11_apbreg_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.lock = sta2x11_regmap_lock,
	.unlock = sta2x11_regmap_unlock,
	.max_register = APBREG_EMU_PCG_SARAC,
	.readable_reg = sta2x11_apbreg_readable_reg,
	.writeable_reg = sta2x11_apbreg_writeable_reg,
};

static bool sta2x11_apb_soc_regs_readable_reg(struct device *dev,
					      unsigned int reg)
{
	return reg <= PCIE_SoC_INT_ROUTER_STATUS3_REG ||
		__reg_within_range(reg, DMA_IP_CTRL_REG, SPARE3_RESERVED) ||
		__reg_within_range(reg, MASTER_LOCK_REG,
				   SYSTEM_CONFIG_STATUS_REG) ||
		reg == MSP_CLK_CTRL_REG ||
		__reg_within_range(reg, COMPENSATION_REG1, TEST_CTL_REG);
}

static bool sta2x11_apb_soc_regs_writeable_reg(struct device *dev,
					       unsigned int reg)
{
	if (!sta2x11_apb_soc_regs_readable_reg(dev, reg))
		return false;
	switch (reg) {
	case PCIE_COMMON_CLOCK_CONFIG_0_4_0:
	case SYSTEM_CONFIG_STATUS_REG:
	case COMPENSATION_REG1:
	case PCIE_SoC_INT_ROUTER_STATUS0_REG...PCIE_SoC_INT_ROUTER_STATUS3_REG:
	case PCIE_PM_STATUS_0_PORT_0_4...PCIE_PM_STATUS_7_0_EP4:
		return false;
	default:
		return true;
	}
}

static struct regmap_config sta2x11_apb_soc_regs_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.lock = sta2x11_regmap_lock,
	.unlock = sta2x11_regmap_unlock,
	.max_register = TEST_CTL_REG,
	.readable_reg = sta2x11_apb_soc_regs_readable_reg,
	.writeable_reg = sta2x11_apb_soc_regs_writeable_reg,
};

static struct regmap_config *
sta2x11_mfd_regmap_configs[sta2x11_n_mfd_plat_devs] = {
	[sta2x11_sctl] = &sta2x11_sctl_regmap_config,
	[sta2x11_apbreg] = &sta2x11_apbreg_regmap_config,
	[sta2x11_apb_soc_regs] = &sta2x11_apb_soc_regs_regmap_config,
	[sta2x11_scr] = &sta2x11_scr_regmap_config,
};
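
/*
 * One regmap per platform cell: sta2x11_mfd_platform_probe() below picks the
 * matching entry from this table when it maps the cell's registers.
 */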

/* Probe for the four platform devices */

static int sta2x11_mfd_platform_probe(struct platform_device *dev,
				      enum sta2x11_mfd_plat_dev index)
{
	struct pci_dev **pdev;
	struct sta2x11_mfd *mfd;
	struct resource *res;
	const char *name = sta2x11_mfd_names[index];
	struct regmap_config *regmap_config = sta2x11_mfd_regmap_configs[index];

	pdev = dev_get_platdata(&dev->dev);
	mfd = sta2x11_mfd_find(*pdev);
	if (!mfd)
		return -ENODEV;
	if (!regmap_config)
		return -ENODEV;

	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENOMEM;

	if (!request_mem_region(res->start, resource_size(res), name))
		return -EBUSY;

	mfd->regs[index] = ioremap(res->start, resource_size(res));
	if (!mfd->regs[index]) {
		release_mem_region(res->start, resource_size(res));
		return -ENOMEM;
	}
	regmap_config->lock_arg = &mfd->lock;
	/*
	 * No caching, registers could be reached both via regmap and via
	 * void __iomem *
	 */
	regmap_config->cache_type = REGCACHE_NONE;
	mfd->regmap[index] = devm_regmap_init_mmio(&dev->dev, mfd->regs[index],
						   regmap_config);
	WARN_ON(IS_ERR(mfd->regmap[index]));

	return 0;
}

static int sta2x11_sctl_probe(struct platform_device *dev)
{
	return sta2x11_mfd_platform_probe(dev, sta2x11_sctl);
}

static int sta2x11_apbreg_probe(struct platform_device *dev)
{
	return sta2x11_mfd_platform_probe(dev, sta2x11_apbreg);
}

static int sta2x11_apb_soc_regs_probe(struct platform_device *dev)
{
	return sta2x11_mfd_platform_probe(dev, sta2x11_apb_soc_regs);
}

static int sta2x11_scr_probe(struct platform_device *dev)
{
	return sta2x11_mfd_platform_probe(dev, sta2x11_scr);
}

/* The four platform drivers */
static struct platform_driver sta2x11_sctl_platform_driver = {
	.driver = {
		.name	= STA2X11_MFD_SCTL_NAME,
	},
	.probe		= sta2x11_sctl_probe,
};

static struct platform_driver sta2x11_platform_driver = {
	.driver = {
		.name	= STA2X11_MFD_APBREG_NAME,
	},
	.probe		= sta2x11_apbreg_probe,
};

static struct platform_driver sta2x11_apb_soc_regs_platform_driver = {
	.driver = {
		.name	= STA2X11_MFD_APB_SOC_REGS_NAME,
	},
	.probe		= sta2x11_apb_soc_regs_probe,
};

static struct platform_driver sta2x11_scr_platform_driver = {
	.driver = {
		.name	= STA2X11_MFD_SCR_NAME,
	},
	.probe		= sta2x11_scr_probe,
};

static struct platform_driver * const drivers[] = {
	&sta2x11_platform_driver,
	&sta2x11_sctl_platform_driver,
	&sta2x11_apb_soc_regs_platform_driver,
	&sta2x11_scr_platform_driver,
};

static int __init sta2x11_drivers_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}

/*
 * What follows are the PCI devices that host the above pdevs.
 * Each logic block is 4kB and they are all consecutive: we use this info.
 */

/* Mfd 0 device */

/* Mfd 0, Bar 0 */
enum mfd0_bar0_cells {
	STA2X11_GPIO_0 = 0,
	STA2X11_GPIO_1,
	STA2X11_GPIO_2,
	STA2X11_GPIO_3,
	STA2X11_SCTL,
	STA2X11_SCR,
	STA2X11_TIME,
};

/* Mfd 0, Bar 1 */
enum mfd0_bar1_cells {
	STA2X11_APBREG = 0,
};

#define CELL_4K(_name, _cell) {					\
		.name = _name,					\
		.start = _cell * 4096, .end = _cell * 4096 + 4095, \
		.flags = IORESOURCE_MEM,			\
	}
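
/*
 * Worked example: with STA2X11_SCTL == 4 (the fifth consecutive 4kB cell in
 * BAR 0), CELL_4K(STA2X11_MFD_SCTL_NAME, STA2X11_SCTL) describes the window
 * at offsets 0x4000..0x4fff of the BAR.
 */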

static const struct resource gpio_resources[] = {
	{
		/* 4 consecutive cells, 1 driver */
		.name = STA2X11_MFD_GPIO_NAME,
		.start = 0,
		.end = (4 * 4096) - 1,
		.flags = IORESOURCE_MEM,
	}
};

static const struct resource sctl_resources[] = {
	CELL_4K(STA2X11_MFD_SCTL_NAME, STA2X11_SCTL),
};

static const struct resource scr_resources[] = {
	CELL_4K(STA2X11_MFD_SCR_NAME, STA2X11_SCR),
};

static const struct resource time_resources[] = {
	CELL_4K(STA2X11_MFD_TIME_NAME, STA2X11_TIME),
};

static const struct resource apbreg_resources[] = {
	CELL_4K(STA2X11_MFD_APBREG_NAME, STA2X11_APBREG),
};

#define DEV(_name, _r) \
	{ .name = _name, .num_resources = ARRAY_SIZE(_r), .resources = _r, }

static struct mfd_cell sta2x11_mfd0_bar0[] = {
	/* offset 0: we add pdata later */
	DEV(STA2X11_MFD_GPIO_NAME, gpio_resources),
	DEV(STA2X11_MFD_SCTL_NAME, sctl_resources),
	DEV(STA2X11_MFD_SCR_NAME, scr_resources),
	DEV(STA2X11_MFD_TIME_NAME, time_resources),
};

static struct mfd_cell sta2x11_mfd0_bar1[] = {
	DEV(STA2X11_MFD_APBREG_NAME, apbreg_resources),
};

/* Mfd 1 devices */

/* Mfd 1, Bar 0 */
enum mfd1_bar0_cells {
	STA2X11_VIC = 0,
};

/* Mfd 1, Bar 1 */
enum mfd1_bar1_cells {
	STA2X11_APB_SOC_REGS = 0,
};

static const struct resource vic_resources[] = {
	CELL_4K(STA2X11_MFD_VIC_NAME, STA2X11_VIC),
};

static const struct resource apb_soc_regs_resources[] = {
	CELL_4K(STA2X11_MFD_APB_SOC_REGS_NAME, STA2X11_APB_SOC_REGS),
};

static struct mfd_cell sta2x11_mfd1_bar0[] = {
	DEV(STA2X11_MFD_VIC_NAME, vic_resources),
};

static struct mfd_cell sta2x11_mfd1_bar1[] = {
	DEV(STA2X11_MFD_APB_SOC_REGS_NAME, apb_soc_regs_resources),
};

static int sta2x11_mfd_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int sta2x11_mfd_resume(struct pci_dev *pdev)
{
	int err;

	pci_set_power_state(pdev, PCI_D0);
	err = pci_enable_device(pdev);
	if (err)
		return err;
	pci_restore_state(pdev);

	return 0;
}
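
/*
 * Legacy PCI PM callbacks (.suspend/.resume rather than dev_pm_ops): save
 * state and power the function down on suspend, re-enable and restore it on
 * resume.
 */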

struct sta2x11_mfd_bar_setup_data {
	struct mfd_cell		*cells;
	int			ncells;
};

struct sta2x11_mfd_setup_data {
	struct sta2x11_mfd_bar_setup_data bars[2];
};

#define STA2X11_MFD0 0
#define STA2X11_MFD1 1

static struct sta2x11_mfd_setup_data mfd_setup_data[] = {
	/* Mfd 0: gpio, sctl, scr, timers / apbregs */
	[STA2X11_MFD0] = {
		.bars = {
			[0] = {
				.cells = sta2x11_mfd0_bar0,
				.ncells = ARRAY_SIZE(sta2x11_mfd0_bar0),
			},
			[1] = {
				.cells = sta2x11_mfd0_bar1,
				.ncells = ARRAY_SIZE(sta2x11_mfd0_bar1),
			},
		},
	},
	/* Mfd 1: vic / apb-soc-regs */
	[STA2X11_MFD1] = {
		.bars = {
			[0] = {
				.cells = sta2x11_mfd1_bar0,
				.ncells = ARRAY_SIZE(sta2x11_mfd1_bar0),
			},
			[1] = {
				.cells = sta2x11_mfd1_bar1,
				.ncells = ARRAY_SIZE(sta2x11_mfd1_bar1),
			},
		},
	},
};

static void sta2x11_mfd_setup(struct pci_dev *pdev,
			      struct sta2x11_mfd_setup_data *sd)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(sd->bars); i++)
		for (j = 0; j < sd->bars[i].ncells; j++) {
			sd->bars[i].cells[j].pdata_size = sizeof(pdev);
			sd->bars[i].cells[j].platform_data = &pdev;
		}
}
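
/*
 * Each cell's platform data is the pointer to the owning PCI device: this is
 * what the platform probes above recover with
 * *(struct pci_dev **)dev_get_platdata(&dev->dev).
 */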

static int sta2x11_mfd_probe(struct pci_dev *pdev,
			     const struct pci_device_id *pci_id)
{
	int err, i;
	struct sta2x11_mfd_setup_data *setup_data;

	dev_info(&pdev->dev, "%s\n", __func__);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Can't enable device.\n");
		return err;
	}

	err = pci_enable_msi(pdev);
	if (err)
		dev_info(&pdev->dev, "Enable msi failed\n");

	setup_data = pci_id->device == PCI_DEVICE_ID_STMICRO_GPIO ?
		&mfd_setup_data[STA2X11_MFD0] :
		&mfd_setup_data[STA2X11_MFD1];

	/* platform data is the pci device for all of them */
	sta2x11_mfd_setup(pdev, setup_data);

	/* Record this pdev before mfd_add_devices: their probe looks for it */
	if (!sta2x11_mfd_find(pdev))
		sta2x11_mfd_add(pdev, GFP_KERNEL);

	/* Just 2 bars for all mfd's at present */
	for (i = 0; i < 2; i++) {
		err = mfd_add_devices(&pdev->dev, -1,
				      setup_data->bars[i].cells,
				      setup_data->bars[i].ncells,
				      &pdev->resource[i],
				      0, NULL);
		if (err) {
			dev_err(&pdev->dev,
				"mfd_add_devices[%d] failed: %d\n", i, err);
			goto err_disable;
		}
	}

	return 0;

err_disable:
	mfd_remove_devices(&pdev->dev);
	pci_disable_device(pdev);
	pci_disable_msi(pdev);
	return err;
}

static const struct pci_device_id sta2x11_mfd_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_GPIO)},
	{PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_VIC)},
	{0,},
};
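
/*
 * The GPIO PCI function hosts MFD0 (gpio, sctl, scr, time + apbreg), the VIC
 * function hosts MFD1 (vic + apb-soc-regs); see the setup_data selection in
 * sta2x11_mfd_probe() above.
 */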

static struct pci_driver sta2x11_mfd_driver = {
	.name =		"sta2x11-mfd",
	.id_table =	sta2x11_mfd_tbl,
	.probe =	sta2x11_mfd_probe,
	.suspend =	sta2x11_mfd_suspend,
	.resume =	sta2x11_mfd_resume,
};

static int __init sta2x11_mfd_init(void)
{
	pr_info("%s\n", __func__);
	return pci_register_driver(&sta2x11_mfd_driver);
}

/*
 * All of this must be ready before "normal" devices like MMCI appear.
 * But MFD (the pci device) can't be too early. The following choice
 * prepares platform drivers very early and probes the PCI device later,
 * but before other PCI devices.
 */
subsys_initcall(sta2x11_drivers_init);
rootfs_initcall(sta2x11_mfd_init);