// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the Intel SCU IPC mechanism
 *
 * (C) Copyright 2008-2010,2015 Intel Corporation
 * Author: Sreedhara DS (sreedhara.ds@intel.com)
 *
 * The SCU running on the ARC processor communicates with other entities
 * running on the IA core through an IPC mechanism, which in turn handles
 * the messaging between the IA core and the SCU. The SCU has two IPC
 * mechanisms, IPC-1 and IPC-2. IPC-1 is used between the IA32 core and the
 * SCU, whereas IPC-2 is used between the P-Unit and the SCU. This driver
 * deals with IPC-1 and provides an API for power control unit registers
 * (e.g. MSIC) along with other APIs.
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <asm/intel_scu_ipc.h>
/* IPC defines the following message types */
#define IPCMSG_PCNTRL 0xff /* Power controller unit read/write */

/* Command id associated with message IPCMSG_PCNTRL */
#define IPC_CMD_PCNTRL_W 0 /* Register write */
#define IPC_CMD_PCNTRL_R 1 /* Register read */
#define IPC_CMD_PCNTRL_M 2 /* Register read-modify-write */
/*
 * IPC register summary
 *
 * IPC register blocks are memory mapped at a fixed address of PCI BAR 0.
 * To read or write information to the SCU, the driver writes to the IPC-1
 * memory mapped registers. The IPC mechanism is as follows:
 *
 * 1. IA core cDMI interface claims this transaction and converts it to a
 *    Transaction Layer Packet (TLP) message which is sent across the cDMI.
 *
 * 2. South Complex cDMI block receives this message and writes it to
 *    the IPC-1 register block, causing an interrupt to the SCU.
 *
 * 3. SCU firmware decodes this interrupt and IPC message and the appropriate
 *    message handler is called within firmware.
 */
#define IPC_WWBUF_SIZE 20 /* IPC Write buffer Size */
#define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */
#define IPC_IOC 0x100 /* IPC command register IOC bit */
struct intel_scu_ipc_dev {
	struct device dev;
	struct resource mem;
	struct module *owner;
	int irq;
	void __iomem *ipc_base;
	struct completion cmd_complete;
};
#define IPC_STATUS 0x04
#define IPC_STATUS_IRQ BIT(2)
#define IPC_STATUS_ERR BIT(1)
#define IPC_STATUS_BUSY BIT(0)
/*
 * IPC Write/Read Buffers:
 * 16 byte buffer for sending and receiving data to and from SCU.
 */
#define IPC_WRITE_BUFFER 0x80
#define IPC_READ_BUFFER 0x90
/* Timeout in jiffies */
#define IPC_TIMEOUT (5 * HZ)

static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple calls to SCU */
static struct class intel_scu_ipc_class = {
	.name = "intel_scu_ipc",
};
/**
 * intel_scu_ipc_dev_get() - Get SCU IPC instance
 *
 * The recommended new API takes the SCU IPC instance as a parameter, and
 * drivers call this function to get that instance. Calling it also makes
 * sure the driver providing the IPC functionality cannot be unloaded while
 * the caller holds the instance.
 *
 * Call intel_scu_ipc_dev_put() to release the instance.
 *
 * Returns %NULL if SCU IPC is not currently available.
 */
struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void)
	struct intel_scu_ipc_dev *scu = NULL;

	mutex_lock(&ipclock);
		get_device(&ipcdev->dev);
		/* Prevent the IPC provider from being unloaded while it is being used */
		if (!try_module_get(ipcdev->owner))
			put_device(&ipcdev->dev);

	mutex_unlock(&ipclock);
EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_get);
/**
 * intel_scu_ipc_dev_put() - Put SCU IPC instance
 * @scu: SCU IPC instance
 *
 * This function releases the SCU IPC instance retrieved from
 * intel_scu_ipc_dev_get() and allows the driver providing IPC to be
 * unloaded.
 */
void intel_scu_ipc_dev_put(struct intel_scu_ipc_dev *scu)
	module_put(scu->owner);
	put_device(&scu->dev);
EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_put);
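/*
 * Usage sketch (illustrative only, not part of this driver): a consumer
 * driver typically pairs intel_scu_ipc_dev_get() with
 * intel_scu_ipc_dev_put(), for example in its probe and remove paths.
 * The consumer_probe() function below is hypothetical.
 *
 *	static int consumer_probe(struct platform_device *pdev)
 *	{
 *		struct intel_scu_ipc_dev *scu;
 *
 *		scu = intel_scu_ipc_dev_get();
 *		if (!scu)
 *			return -EPROBE_DEFER;	// SCU IPC provider not registered yet
 *
 *		// ... use the SCU IPC accessors below ...
 *
 *		intel_scu_ipc_dev_put(scu);	// from the matching remove path
 *		return 0;
 *	}
 */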
struct intel_scu_ipc_devres {
	struct intel_scu_ipc_dev *scu;
};

static void devm_intel_scu_ipc_dev_release(struct device *dev, void *res)
	struct intel_scu_ipc_devres *dr = res;
	struct intel_scu_ipc_dev *scu = dr->scu;

	intel_scu_ipc_dev_put(scu);
/**
 * devm_intel_scu_ipc_dev_get() - Allocate managed SCU IPC device
 * @dev: Device requesting the SCU IPC device
 *
 * The recommended new API takes the SCU IPC instance as a parameter, and
 * drivers call this function to get that instance. Calling it also makes
 * sure the driver providing the IPC functionality cannot be unloaded while
 * the caller holds the instance.
 *
 * Returns %NULL if SCU IPC is not currently available.
 */
struct intel_scu_ipc_dev *devm_intel_scu_ipc_dev_get(struct device *dev)
	struct intel_scu_ipc_devres *dr;
	struct intel_scu_ipc_dev *scu;

	dr = devres_alloc(devm_intel_scu_ipc_dev_release, sizeof(*dr), GFP_KERNEL);
	scu = intel_scu_ipc_dev_get();
EXPORT_SYMBOL_GPL(devm_intel_scu_ipc_dev_get);
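/*
 * Usage sketch (illustrative only): with the managed variant the put is
 * done automatically through devres when @dev is unbound, so a hypothetical
 * consumer probe reduces to:
 *
 *	scu = devm_intel_scu_ipc_dev_get(&pdev->dev);
 *	if (!scu)
 *		return -EPROBE_DEFER;
 */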
/*
 * Command Register (Write Only):
 * A write to this register results in an interrupt to the SCU core processor.
 * Format:
 * |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)|
 */
static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
	reinit_completion(&scu->cmd_complete);
	writel(cmd | IPC_IOC, scu->ipc_base);
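/*
 * Example (numbers purely illustrative): per the command layout documented
 * above ipc_command(), a command 0xE8 with sub-command id 1 that carries
 * 2 size units of payload would be issued as
 *
 *	ipc_command(scu, (2 << 16) | (1 << 12) | 0xE8);
 *
 * ipc_command() additionally ORs in IPC_IOC (bit 8) so that the SCU raises
 * an interrupt once the command completes.
 */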
/*
 * IPC Write Buffer (Write Only):
 * 16-byte buffer for sending data associated with IPC command to
 * SCU. Size of the data is specified in the IPC_COMMAND_REG register.
 */
static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset)
	writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset);
/*
 * Status Register (Read Only):
 * Driver will read this register to get the ready/busy status of the IPC
 * block and error status of the IPC command that was just processed by SCU.
 * Format:
 * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
 */
static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu)
	return __raw_readl(scu->ipc_base + IPC_STATUS);
/* Read ipc byte data */
static inline u8 ipc_data_readb(struct intel_scu_ipc_dev *scu, u32 offset)
	return readb(scu->ipc_base + IPC_READ_BUFFER + offset);

/* Read ipc u32 data */
static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
	return readl(scu->ipc_base + IPC_READ_BUFFER + offset);
/* Wait until the SCU status is no longer busy */
static inline int busy_loop(struct intel_scu_ipc_dev *scu)
	unsigned long end = jiffies + IPC_TIMEOUT;

		status = ipc_read_status(scu);
		if (!(status & IPC_STATUS_BUSY))
			return (status & IPC_STATUS_ERR) ? -EIO : 0;

		usleep_range(50, 100);
	} while (time_before(jiffies, end));
/* Wait till the IPC IOC interrupt is received or IPC_TIMEOUT expires */
static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
	if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT))

	status = ipc_read_status(scu);
	if (status & IPC_STATUS_ERR)

static int intel_scu_ipc_check_status(struct intel_scu_ipc_dev *scu)
	return scu->irq > 0 ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
/* Read/Write power control (PMIC in Langwell, MSIC in PenWell) registers */
static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
			u32 count, u32 op, u32 id)
	u8 cbuf[IPC_WWBUF_SIZE];
	u32 *wbuf = (u32 *)&cbuf;

	memset(cbuf, 0, sizeof(cbuf));

	mutex_lock(&ipclock);
		mutex_unlock(&ipclock);

	for (nc = 0; nc < count; nc++, offset += 2) {
		cbuf[offset] = addr[nc];
		cbuf[offset + 1] = addr[nc] >> 8;

	if (id == IPC_CMD_PCNTRL_R) {
		for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
			ipc_data_writel(scu, wbuf[nc], offset);
		ipc_command(scu, (count * 2) << 16 | id << 12 | 0 << 8 | op);
	} else if (id == IPC_CMD_PCNTRL_W) {
		for (nc = 0; nc < count; nc++, offset += 1)
			cbuf[offset] = data[nc];
		for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
			ipc_data_writel(scu, wbuf[nc], offset);
		ipc_command(scu, (count * 3) << 16 | id << 12 | 0 << 8 | op);
	} else if (id == IPC_CMD_PCNTRL_M) {
		cbuf[offset] = data[0];
		cbuf[offset + 1] = data[1];
		ipc_data_writel(scu, wbuf[0], 0); /* Write wbuff */
		ipc_command(scu, 4 << 16 | id << 12 | 0 << 8 | op);

	err = intel_scu_ipc_check_status(scu);
	if (!err && id == IPC_CMD_PCNTRL_R) { /* Read rbuf */
		/* Workaround: values are read as 0 without memcpy_fromio */
		memcpy_fromio(cbuf, scu->ipc_base + 0x90, 16);
		for (nc = 0; nc < count; nc++)
			data[nc] = ipc_data_readb(scu, nc);

	mutex_unlock(&ipclock);
/**
 * intel_scu_ipc_dev_ioread8() - Read a byte via the SCU
 * @scu: Optional SCU IPC instance
 * @addr: Register on SCU
 * @data: Return pointer for read byte
 *
 * Read a single register. Returns %0 on success or an error code. All
 * locking between SCU accesses is handled for the caller.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_ioread8(struct intel_scu_ipc_dev *scu, u16 addr, u8 *data)
	return pwr_reg_rdwr(scu, &addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
EXPORT_SYMBOL(intel_scu_ipc_dev_ioread8);
/**
 * intel_scu_ipc_dev_iowrite8() - Write a byte via the SCU
 * @scu: Optional SCU IPC instance
 * @addr: Register on SCU
 * @data: Byte to write
 *
 * Write a single register. Returns %0 on success or an error code. All
 * locking between SCU accesses is handled for the caller.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_iowrite8(struct intel_scu_ipc_dev *scu, u16 addr, u8 data)
	return pwr_reg_rdwr(scu, &addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
EXPORT_SYMBOL(intel_scu_ipc_dev_iowrite8);
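/*
 * Usage sketch (illustrative only): reading and writing a single PMIC
 * register through the SCU, with @scu obtained via intel_scu_ipc_dev_get().
 * The register address 0x030 is hypothetical.
 *
 *	u8 val;
 *	int ret;
 *
 *	ret = intel_scu_ipc_dev_ioread8(scu, 0x030, &val);
 *	if (ret)
 *		return ret;
 *
 *	ret = intel_scu_ipc_dev_iowrite8(scu, 0x030, val | BIT(0));
 *	if (ret)
 *		return ret;
 *
 * A read-modify-write sequence like this is better expressed with
 * intel_scu_ipc_dev_update() below, which performs it in one IPC call.
 */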
/**
 * intel_scu_ipc_dev_readv() - Read a set of registers
 * @scu: Optional SCU IPC instance
 * @addr: Register list
 * @data: Bytes to return
 * @len: Length of array
 *
 * Read registers. Returns %0 on success or an error code. All locking
 * between SCU accesses is handled for the caller.
 *
 * The largest array length permitted by the hardware is 5 items.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_readv(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
			    size_t len)
	return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
EXPORT_SYMBOL(intel_scu_ipc_dev_readv);
/**
 * intel_scu_ipc_dev_writev() - Write a set of registers
 * @scu: Optional SCU IPC instance
 * @addr: Register list
 * @data: Bytes to write
 * @len: Length of array
 *
 * Write registers. Returns %0 on success or an error code. All locking
 * between SCU accesses is handled for the caller.
 *
 * The largest array length permitted by the hardware is 5 items.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_writev(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
			     size_t len)
	return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
EXPORT_SYMBOL(intel_scu_ipc_dev_writev);
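/*
 * Usage sketch (illustrative only): reading a small set of PMIC registers
 * in one IPC transaction. The register addresses are hypothetical; the
 * hardware limit is 5 registers per call.
 *
 *	u16 regs[3] = { 0x030, 0x031, 0x032 };
 *	u8 vals[3];
 *	int ret;
 *
 *	ret = intel_scu_ipc_dev_readv(scu, regs, vals, ARRAY_SIZE(regs));
 *	if (ret)
 *		return ret;
 *
 * Writing works the same way with intel_scu_ipc_dev_writev(), where vals[]
 * holds the bytes to write to the corresponding regs[].
 */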
/**
 * intel_scu_ipc_dev_update() - Update a register
 * @scu: Optional SCU IPC instance
 * @addr: Register address
 * @data: Bits to update
 * @mask: Mask of bits to update
 *
 * Read-modify-write a power control unit register. @data holds the new bit
 * values and @mask is a bitmap selecting which bits to update: %0 = leave
 * the bit unmodified, %1 = modify the bit. Returns %0 on success or an
 * error code.
 *
 * This function may sleep. Locking between SCU accesses is handled
 * for the caller.
 */
int intel_scu_ipc_dev_update(struct intel_scu_ipc_dev *scu, u16 addr, u8 data,
			     u8 mask)
	u8 tmp[2] = { data, mask };
	return pwr_reg_rdwr(scu, &addr, tmp, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
EXPORT_SYMBOL(intel_scu_ipc_dev_update);
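/*
 * Usage sketch (illustrative only): set bit 0 and clear bit 1 of a
 * hypothetical PMIC register 0x030 while leaving all other bits untouched.
 * Only the bits set in the mask (0x03) are modified by the SCU.
 *
 *	ret = intel_scu_ipc_dev_update(scu, 0x030, 0x01, 0x03);
 */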
/**
 * intel_scu_ipc_dev_simple_command() - Send a simple command
 * @scu: Optional SCU IPC instance
 * @cmd: Command
 * @sub: Sub type
 *
 * Issue a simple command to the SCU. Do not use this interface if you must
 * then access data as any data values may be overwritten by another SCU
 * access by the time this function returns.
 *
 * This function may sleep. Locking for SCU accesses is handled for the
 * caller.
 */
int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
				     int sub)
	mutex_lock(&ipclock);
		mutex_unlock(&ipclock);

	cmdval = sub << 12 | cmd;
	ipc_command(scu, cmdval);
	err = intel_scu_ipc_check_status(scu);
	mutex_unlock(&ipclock);
		dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
EXPORT_SYMBOL(intel_scu_ipc_dev_simple_command);
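/*
 * Usage sketch (illustrative only): firing a command that carries no data
 * payload. The command and sub-command values below are made up; real
 * values come from the SCU firmware interface definitions.
 *
 *	ret = intel_scu_ipc_dev_simple_command(scu, 0xE3, 0);
 *	if (ret)
 *		dev_warn(dev, "SCU simple command failed: %d\n", ret);
 */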
/**
 * intel_scu_ipc_dev_command_with_size() - Command with data
 * @scu: Optional SCU IPC instance
 * @cmd: Command
 * @sub: Sub type
 * @in: Input data
 * @inlen: Input length in bytes
 * @size: Input size written to the IPC command register in whatever
 *	  units (dword, byte) the particular firmware requires. Normally
 *	  should be the same as @inlen.
 * @out: Output data
 * @outlen: Output length in bytes
 *
 * Issue a command to the SCU which involves data transfers. Do the
 * data copies under the lock but leave it for the caller to interpret.
 */
int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
					int sub, const void *in, size_t inlen,
					size_t size, void *out, size_t outlen)
	size_t outbuflen = DIV_ROUND_UP(outlen, sizeof(u32));
	size_t inbuflen = DIV_ROUND_UP(inlen, sizeof(u32));
	u32 cmdval, inbuf[4] = {};

	if (inbuflen > 4 || outbuflen > 4)

	mutex_lock(&ipclock);
		mutex_unlock(&ipclock);

	memcpy(inbuf, in, inlen);
	for (i = 0; i < inbuflen; i++)
		ipc_data_writel(scu, inbuf[i], 4 * i);

	cmdval = (size << 16) | (sub << 12) | cmd;
	ipc_command(scu, cmdval);
	err = intel_scu_ipc_check_status(scu);

		for (i = 0; i < outbuflen; i++)
			outbuf[i] = ipc_data_readl(scu, 4 * i);

		memcpy(out, outbuf, outlen);

	mutex_unlock(&ipclock);
		dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
EXPORT_SYMBOL(intel_scu_ipc_dev_command_with_size);
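/*
 * Usage sketch (illustrative only): sending an 8-byte payload and reading
 * back 4 bytes. The command and sub-command values are made up, and @size
 * is assumed here to equal the input length in bytes; the units actually
 * expected depend on the firmware.
 *
 *	u8 in[8] = { 0 }, out[4];
 *	int ret;
 *
 *	ret = intel_scu_ipc_dev_command_with_size(scu, 0xE7, 1, in, sizeof(in),
 *						  sizeof(in), out, sizeof(out));
 *	if (ret)
 *		return ret;
 */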
/*
 * The interrupt handler is called when the IOC bit of IPC_COMMAND_REG is set
 * to 1. In that case the caller waits for this handler to run; the handler
 * acknowledges the interrupt in the status register and completes
 * cmd_complete, which unblocks the waiting caller. It is used whenever the
 * SCU IPC provider supplies an interrupt line.
 */
static irqreturn_t ioc(int irq, void *dev_id)
	struct intel_scu_ipc_dev *scu = dev_id;
	int status = ipc_read_status(scu);

	writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS);
	complete(&scu->cmd_complete);
static void intel_scu_ipc_release(struct device *dev)
	struct intel_scu_ipc_dev *scu;

	scu = container_of(dev, struct intel_scu_ipc_dev, dev);
		free_irq(scu->irq, scu);
	iounmap(scu->ipc_base);
	release_mem_region(scu->mem.start, resource_size(&scu->mem));
/**
 * __intel_scu_ipc_register() - Register SCU IPC device
 * @parent: Parent device
 * @scu_data: Data used to configure SCU IPC
 * @owner: Module registering the SCU IPC device
 *
 * Call this function to register SCU IPC mechanism under @parent.
 * Returns pointer to the new SCU IPC device or ERR_PTR() in case of
 * failure. The caller may use the returned instance if it needs to do
 * SCU IPC calls itself.
 */
struct intel_scu_ipc_dev *
__intel_scu_ipc_register(struct device *parent,
			 const struct intel_scu_ipc_data *scu_data,
			 struct module *owner)
	struct intel_scu_ipc_dev *scu;
	void __iomem *ipc_base;

	mutex_lock(&ipclock);
	/* We support only one IPC */

	scu = kzalloc(sizeof(*scu), GFP_KERNEL);

	scu->dev.parent = parent;
	scu->dev.class = &intel_scu_ipc_class;
	scu->dev.release = intel_scu_ipc_release;
	dev_set_name(&scu->dev, "intel_scu_ipc");

	if (!request_mem_region(scu_data->mem.start, resource_size(&scu_data->mem),

	ipc_base = ioremap(scu_data->mem.start, resource_size(&scu_data->mem));

	scu->ipc_base = ipc_base;
	scu->mem = scu_data->mem;
	scu->irq = scu_data->irq;
	init_completion(&scu->cmd_complete);

		err = request_irq(scu->irq, ioc, 0, "intel_scu_ipc", scu);

	/*
	 * After this point intel_scu_ipc_release() takes care of
	 * releasing the SCU IPC resources once refcount drops to zero.
	 */
	err = device_register(&scu->dev);
		put_device(&scu->dev);

	/* Assign device at last */
	mutex_unlock(&ipclock);

	release_mem_region(scu_data->mem.start, resource_size(&scu_data->mem));

	mutex_unlock(&ipclock);
EXPORT_SYMBOL_GPL(__intel_scu_ipc_register);
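/*
 * Usage sketch (illustrative only): a provider driver (for example a PCI
 * driver that owns the SCU IPC BAR) fills struct intel_scu_ipc_data with
 * the memory resource and IRQ it discovered and registers the IPC instance.
 * The pdev below and its resource/IRQ choice are hypothetical.
 *
 *	struct intel_scu_ipc_data scu_data = {};
 *	struct intel_scu_ipc_dev *scu;
 *
 *	scu_data.mem = pdev->resource[0];
 *	scu_data.irq = pdev->irq;
 *
 *	scu = __intel_scu_ipc_register(&pdev->dev, &scu_data, THIS_MODULE);
 *	if (IS_ERR(scu))
 *		return PTR_ERR(scu);
 */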
/**
 * intel_scu_ipc_unregister() - Unregister SCU IPC
 * @scu: SCU IPC handle
 *
 * This unregisters the SCU IPC device and releases the acquired
 * resources once the refcount goes to zero.
 */
void intel_scu_ipc_unregister(struct intel_scu_ipc_dev *scu)
	mutex_lock(&ipclock);
	if (!WARN_ON(!ipcdev)) {
		device_unregister(&scu->dev);
	mutex_unlock(&ipclock);
EXPORT_SYMBOL_GPL(intel_scu_ipc_unregister);
static void devm_intel_scu_ipc_unregister(struct device *dev, void *res)
	struct intel_scu_ipc_devres *dr = res;
	struct intel_scu_ipc_dev *scu = dr->scu;

	intel_scu_ipc_unregister(scu);
/**
 * __devm_intel_scu_ipc_register() - Register managed SCU IPC device
 * @parent: Parent device
 * @scu_data: Data used to configure SCU IPC
 * @owner: Module registering the SCU IPC device
 *
 * Call this function to register managed SCU IPC mechanism under
 * @parent. Returns pointer to the new SCU IPC device or ERR_PTR() in
 * case of failure. The caller may use the returned instance if it needs
 * to do SCU IPC calls itself.
 */
struct intel_scu_ipc_dev *
__devm_intel_scu_ipc_register(struct device *parent,
			      const struct intel_scu_ipc_data *scu_data,
			      struct module *owner)
	struct intel_scu_ipc_devres *dr;
	struct intel_scu_ipc_dev *scu;

	dr = devres_alloc(devm_intel_scu_ipc_unregister, sizeof(*dr), GFP_KERNEL);
	scu = __intel_scu_ipc_register(parent, scu_data, owner);

	devres_add(parent, dr);
EXPORT_SYMBOL_GPL(__devm_intel_scu_ipc_register);
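/*
 * Usage sketch (illustrative only): same as the registration example above,
 * but devres takes care of unregistering when the parent device is unbound:
 *
 *	scu = __devm_intel_scu_ipc_register(&pdev->dev, &scu_data, THIS_MODULE);
 *	if (IS_ERR(scu))
 *		return PTR_ERR(scu);
 */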
static int __init intel_scu_ipc_init(void)
	return class_register(&intel_scu_ipc_class);
subsys_initcall(intel_scu_ipc_init);

static void __exit intel_scu_ipc_exit(void)
	class_unregister(&intel_scu_ipc_class);
module_exit(intel_scu_ipc_exit);