// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016, Linaro Ltd
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/slab.h>
#include <linux/rpmsg.h>
#include <linux/idr.h>
#include <linux/circ_buf.h>
#include <linux/soc/qcom/smem.h>
#include <linux/sizes.h>
#include <linux/delay.h>
#include <linux/regmap.h>
#include <linux/workqueue.h>
#include <linux/list.h>

#include <linux/rpmsg/qcom_glink.h>

#include "qcom_glink_native.h"

#define FIFO_FULL_RESERVE 8
#define FIFO_ALIGNMENT 8
#define TX_BLOCKED_CMD_RESERVE 8 /* size of struct read_notif_request */

#define SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR	478
#define SMEM_GLINK_NATIVE_XPRT_FIFO_0		479
#define SMEM_GLINK_NATIVE_XPRT_FIFO_1		480

struct qcom_glink_smem {
	struct device dev;
	int irq;
	struct qcom_glink *glink;
	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;
	u32 remote_pid;
};

struct glink_smem_pipe {
	struct qcom_glink_pipe native;
	__le32 *tail;
	__le32 *head;
	void *fifo;
	struct qcom_glink_smem *smem;
};

#define to_smem_pipe(p) container_of(p, struct glink_smem_pipe, native)

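/* Report how many bytes are pending in the shared-memory RX FIFO */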
static size_t glink_smem_rx_avail(struct qcom_glink_pipe *np)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	struct qcom_glink_smem *smem = pipe->smem;
	u32 head, tail;
	size_t len;
	void *fifo;

	if (!pipe->fifo) {
		/* The remote allocates the RX FIFO, so look it up lazily */
		fifo = qcom_smem_get(smem->remote_pid,
				     SMEM_GLINK_NATIVE_XPRT_FIFO_1, &len);
		if (IS_ERR(fifo)) {
			pr_err("failed to acquire RX fifo handle: %ld\n",
			       PTR_ERR(fifo));
			return 0;
		}

		pipe->fifo = fifo;
		pipe->native.length = len;
	}

	head = le32_to_cpu(*pipe->head);
	tail = le32_to_cpu(*pipe->tail);

	if (head < tail)
		return pipe->native.length - tail + head;
	else
		return head - tail;
}

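/* Copy @count bytes out of the RX FIFO at @offset, without consuming them */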
static void glink_smem_rx_peek(struct qcom_glink_pipe *np,
			       void *data, unsigned int offset, size_t count)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	size_t len;
	u32 tail;

	tail = le32_to_cpu(*pipe->tail);
	tail += offset;
	if (tail >= pipe->native.length)
		tail -= pipe->native.length;

	len = min_t(size_t, count, pipe->native.length - tail);
	if (len)
		memcpy_fromio(data, pipe->fifo + tail, len);

	/* Wrap around to the start of the ring for the remainder */
	if (len != count)
		memcpy_fromio(data + len, pipe->fifo, (count - len));
}

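/* Consume @count bytes by moving the RX tail index forward */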
static void glink_smem_rx_advance(struct qcom_glink_pipe *np,
				  size_t count)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	u32 tail;

	tail = le32_to_cpu(*pipe->tail);

	tail += count;
	if (tail >= pipe->native.length)
		tail -= pipe->native.length;

	*pipe->tail = cpu_to_le32(tail);
}

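/* Report writable space in the TX FIFO, minus the full/blocked reserves */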
static size_t glink_smem_tx_avail(struct qcom_glink_pipe *np)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	u32 head, tail, avail;

	head = le32_to_cpu(*pipe->head);
	tail = le32_to_cpu(*pipe->tail);

	if (tail <= head)
		avail = pipe->native.length - head + tail;
	else
		avail = tail - head;

	if (avail < (FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE))
		avail = 0;
	else
		avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE;

	return avail;
}

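/* Copy one buffer into the TX FIFO at @head, handling ring wrap-around */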
static unsigned int glink_smem_tx_write_one(struct glink_smem_pipe *pipe,
					    unsigned int head,
					    const void *data, size_t count)
{
	size_t len;

	len = min_t(size_t, count, pipe->native.length - head);
	if (len)
		memcpy(pipe->fifo + head, data, len);

	if (len != count)
		memcpy(pipe->fifo, data + len, count - len);

	head += count;
	if (head >= pipe->native.length)
		head -= pipe->native.length;

	return head;
}

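/* Write a message (header followed by payload) and publish the new head */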
static void glink_smem_tx_write(struct qcom_glink_pipe *glink_pipe,
				const void *hdr, size_t hlen,
				const void *data, size_t dlen)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(glink_pipe);
	unsigned int head;

	head = le32_to_cpu(*pipe->head);

	head = glink_smem_tx_write_one(pipe, head, hdr, hlen);
	head = glink_smem_tx_write_one(pipe, head, data, dlen);

	/* Ensure head is always aligned to 8 bytes */
	head = ALIGN(head, 8);
	if (head >= pipe->native.length)
		head -= pipe->native.length;

	/* Ensure ordering of fifo and head update */
	wmb();

	*pipe->head = cpu_to_le32(head);
}

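/* Signal the remote processor that new data is available in the TX FIFO */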
static void glink_smem_tx_kick(struct qcom_glink_pipe *glink_pipe)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(glink_pipe);
	struct qcom_glink_smem *smem = pipe->smem;

	mbox_send_message(smem->mbox_chan, NULL);
	mbox_client_txdone(smem->mbox_chan, 0);
}

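/* Interrupt raised by the remote: hand incoming data to the native layer */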
static irqreturn_t qcom_glink_smem_intr(int irq, void *data)
{
	struct qcom_glink_smem *smem = data;

	qcom_glink_native_rx(smem->glink);

	return IRQ_HANDLED;
}

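/* Device release callback: free the edge once the last reference is gone */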
static void qcom_glink_smem_release(struct device *dev)
{
	struct qcom_glink_smem *smem = container_of(dev, struct qcom_glink_smem, dev);

	kfree(smem);
}

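/*
 * qcom_glink_smem_register() - create a glink edge backed by SMEM FIFOs
 * @parent: parent device of the glink edge
 * @node: device tree node describing the edge
 *
 * Allocates the shared descriptors and TX FIFO in SMEM, wires up the
 * incoming interrupt and outgoing mailbox, and registers the edge with
 * the glink native core.
 */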
struct qcom_glink_smem *qcom_glink_smem_register(struct device *parent,
						 struct device_node *node)
{
	struct glink_smem_pipe *rx_pipe;
	struct glink_smem_pipe *tx_pipe;
	struct qcom_glink_smem *smem;
	struct qcom_glink *glink;
	struct device *dev;
	u32 remote_pid;
	__le32 *descs;
	size_t size;
	int ret;

	smem = kzalloc(sizeof(*smem), GFP_KERNEL);
	if (!smem)
		return ERR_PTR(-ENOMEM);

	dev = &smem->dev;

	dev->parent = parent;
	dev->of_node = node;
	dev->release = qcom_glink_smem_release;
	dev_set_name(dev, "%s:%pOFn", dev_name(parent->parent), node);
	ret = device_register(dev);
	if (ret) {
		pr_err("failed to register glink edge\n");
		put_device(dev);
		return ERR_PTR(ret);
	}

	ret = of_property_read_u32(dev->of_node, "qcom,remote-pid",
				   &remote_pid);
	if (ret) {
		dev_err(dev, "failed to parse qcom,remote-pid\n");
		goto err_put_dev;
	}
	smem->remote_pid = remote_pid;

	rx_pipe = devm_kzalloc(dev, sizeof(*rx_pipe), GFP_KERNEL);
	tx_pipe = devm_kzalloc(dev, sizeof(*tx_pipe), GFP_KERNEL);
	if (!rx_pipe || !tx_pipe) {
		ret = -ENOMEM;
		goto err_put_dev;
	}

	ret = qcom_smem_alloc(remote_pid,
			      SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, 32);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "failed to allocate glink descriptors\n");
		goto err_put_dev;
	}

	descs = qcom_smem_get(remote_pid,
			      SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, &size);
	if (IS_ERR(descs)) {
		dev_err(dev, "failed to acquire xprt descriptor\n");
		ret = PTR_ERR(descs);
		goto err_put_dev;
	}

	if (size != 32) {
		dev_err(dev, "glink descriptor of invalid size\n");
		ret = -EINVAL;
		goto err_put_dev;
	}

	tx_pipe->tail = &descs[0];
	tx_pipe->head = &descs[1];
	rx_pipe->tail = &descs[2];
	rx_pipe->head = &descs[3];

	ret = qcom_smem_alloc(remote_pid, SMEM_GLINK_NATIVE_XPRT_FIFO_0,
			      SZ_16K);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "failed to allocate TX fifo\n");
		goto err_put_dev;
	}

	tx_pipe->fifo = qcom_smem_get(remote_pid, SMEM_GLINK_NATIVE_XPRT_FIFO_0,
				      &tx_pipe->native.length);
	if (IS_ERR(tx_pipe->fifo)) {
		dev_err(dev, "failed to acquire TX fifo\n");
		ret = PTR_ERR(tx_pipe->fifo);
		goto err_put_dev;
	}

	smem->irq = of_irq_get(smem->dev.of_node, 0);
	ret = devm_request_irq(&smem->dev, smem->irq, qcom_glink_smem_intr,
			       IRQF_NO_SUSPEND | IRQF_NO_AUTOEN,
			       "glink-smem", smem);
	if (ret) {
		dev_err(&smem->dev, "failed to request IRQ\n");
		goto err_put_dev;
	}

	smem->mbox_client.dev = &smem->dev;
	smem->mbox_client.knows_txdone = true;
	smem->mbox_chan = mbox_request_channel(&smem->mbox_client, 0);
	if (IS_ERR(smem->mbox_chan)) {
		ret = dev_err_probe(&smem->dev, PTR_ERR(smem->mbox_chan),
				    "failed to acquire IPC channel\n");
		goto err_put_dev;
	}

	rx_pipe->smem = smem;
	rx_pipe->native.avail = glink_smem_rx_avail;
	rx_pipe->native.peek = glink_smem_rx_peek;
	rx_pipe->native.advance = glink_smem_rx_advance;

	tx_pipe->smem = smem;
	tx_pipe->native.avail = glink_smem_tx_avail;
	tx_pipe->native.write = glink_smem_tx_write;
	tx_pipe->native.kick = glink_smem_tx_kick;

	/* Reset the indices owned by the local side */
	*rx_pipe->tail = 0;
	*tx_pipe->head = 0;

	glink = qcom_glink_native_probe(dev,
					GLINK_FEATURE_INTENT_REUSE,
					&rx_pipe->native, &tx_pipe->native,
					false);
	if (IS_ERR(glink)) {
		ret = PTR_ERR(glink);
		goto err_free_mbox;
	}

	smem->glink = glink;

	enable_irq(smem->irq);

	return smem;

err_free_mbox:
	mbox_free_channel(smem->mbox_chan);

err_put_dev:
	device_unregister(dev);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(qcom_glink_smem_register);

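/* Tear down a glink edge previously created by qcom_glink_smem_register() */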
void qcom_glink_smem_unregister(struct qcom_glink_smem *smem)
{
	struct qcom_glink *glink = smem->glink;

	disable_irq(smem->irq);

	qcom_glink_native_remove(glink);

	mbox_free_channel(smem->mbox_chan);
	device_unregister(&smem->dev);
}
EXPORT_SYMBOL_GPL(qcom_glink_smem_unregister);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@linaro.org>");
MODULE_DESCRIPTION("Qualcomm GLINK SMEM driver");
MODULE_LICENSE("GPL v2");