/*
 * Copyright (c) 2016, Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
#include <linux/slab.h>
#include <linux/rpmsg.h>
#include <linux/idr.h>
#include <linux/circ_buf.h>
#include <linux/soc/qcom/smem.h>
#include <linux/sizes.h>
#include <linux/delay.h>
#include <linux/regmap.h>
#include <linux/workqueue.h>
#include <linux/list.h>

#include <linux/rpmsg/qcom_glink.h>

#include "qcom_glink_native.h"

#define FIFO_FULL_RESERVE 8
#define FIFO_ALIGNMENT 8
#define TX_BLOCKED_CMD_RESERVE 8 /* size of struct read_notif_request */

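/* SMEM item ids of the shared index descriptor and the two FIFO data areas */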
#define SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR	478
#define SMEM_GLINK_NATIVE_XPRT_FIFO_0		479
#define SMEM_GLINK_NATIVE_XPRT_FIFO_1		480

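/*
 * struct glink_smem_pipe - SMEM backed unidirectional glink FIFO
 * @native:	common pipe descriptor (length plus rx/tx callbacks)
 * @tail:	pointer to the tail index in the shared descriptor
 * @head:	pointer to the head index in the shared descriptor
 * @fifo:	base of the SMEM region holding the FIFO data
 * @remote_pid:	SMEM processor id of the remote endpoint
 */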
struct glink_smem_pipe {
	struct qcom_glink_pipe native;

	__le32 *tail;
	__le32 *head;

	void *fifo;

	int remote_pid;
};

#define to_smem_pipe(p) container_of(p, struct glink_smem_pipe, native)

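/*
 * Return the number of bytes queued in the RX FIFO. The RX FIFO item is
 * expected to be allocated by the remote end, so it is looked up lazily
 * here on first use rather than at registration time.
 */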
static size_t glink_smem_rx_avail(struct qcom_glink_pipe *np)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	size_t len;
	void *fifo;
	u32 head;
	u32 tail;

	if (!pipe->fifo) {
		fifo = qcom_smem_get(pipe->remote_pid,
				     SMEM_GLINK_NATIVE_XPRT_FIFO_1, &len);
		if (IS_ERR(fifo)) {
			pr_err("failed to acquire RX fifo handle: %ld\n",
			       PTR_ERR(fifo));
			return 0;
		}

		pipe->fifo = fifo;
		pipe->native.length = len;
	}

	head = le32_to_cpu(*pipe->head);
	tail = le32_to_cpu(*pipe->tail);

	if (head < tail)
		return pipe->native.length - tail + head;
	else
		return head - tail;
}

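/*
 * Copy @count bytes, starting @offset bytes past the current tail, into
 * @data, handling wrap-around of the circular FIFO. The tail index itself
 * is not advanced.
 */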
static void glink_smem_rx_peak(struct qcom_glink_pipe *np,
			       void *data, unsigned int offset, size_t count)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	size_t len;
	u32 tail;

	tail = le32_to_cpu(*pipe->tail);
	tail += offset;
	if (tail >= pipe->native.length)
		tail -= pipe->native.length;

	len = min_t(size_t, count, pipe->native.length - tail);
	if (len)
		memcpy_fromio(data, pipe->fifo + tail, len);

	if (len != count)
		memcpy_fromio(data + len, pipe->fifo, (count - len));
}

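/* Advance the RX tail index by @count bytes, wrapping at the end of the FIFO. */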
static void glink_smem_rx_advance(struct qcom_glink_pipe *np,
				  size_t count)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	u32 tail;

	tail = le32_to_cpu(*pipe->tail);

	tail += count;
	if (tail >= pipe->native.length)
		tail -= pipe->native.length;

	*pipe->tail = cpu_to_le32(tail);
}

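/*
 * Return the number of bytes that may currently be written to the TX FIFO,
 * keeping FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE bytes in reserve so
 * that a read-notification command can always be queued.
 */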
static size_t glink_smem_tx_avail(struct qcom_glink_pipe *np)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	u32 head;
	u32 tail;
	u32 avail;

	head = le32_to_cpu(*pipe->head);
	tail = le32_to_cpu(*pipe->tail);

	if (tail <= head)
		avail = pipe->native.length - head + tail;
	else
		avail = tail - head;

	if (avail < (FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE))
		avail = 0;
	else
		avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE;

	return avail;
}

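/*
 * Copy one buffer into the TX FIFO at @head, wrapping at the end of the
 * FIFO, and return the new head position. The shared head index is not
 * updated here.
 */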
static unsigned int glink_smem_tx_write_one(struct glink_smem_pipe *pipe,
					    unsigned int head,
					    const void *data, size_t count)
{
	size_t len;

	len = min_t(size_t, count, pipe->native.length - head);
	if (len)
		memcpy(pipe->fifo + head, data, len);

	if (len != count)
		memcpy(pipe->fifo, data + len, count - len);

	head += count;
	if (head >= pipe->native.length)
		head -= pipe->native.length;

	return head;
}

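/*
 * Write a message (header followed by payload) into the TX FIFO, align the
 * new head to 8 bytes and publish it only after the data itself is visible
 * to the remote side.
 */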
static void glink_smem_tx_write(struct qcom_glink_pipe *glink_pipe,
				const void *hdr, size_t hlen,
				const void *data, size_t dlen)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(glink_pipe);
	unsigned int head;

	head = le32_to_cpu(*pipe->head);

	head = glink_smem_tx_write_one(pipe, head, hdr, hlen);
	head = glink_smem_tx_write_one(pipe, head, data, dlen);

	/* Ensure head is always aligned to 8 bytes */
	head = ALIGN(head, 8);
	if (head >= pipe->native.length)
		head -= pipe->native.length;

	/* Ensure ordering of fifo and head update */
	wmb();

	*pipe->head = cpu_to_le32(head);
}

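/* Release callback for the dynamically allocated glink edge device. */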
static void qcom_glink_smem_release(struct device *dev)
{
	kfree(dev);
}

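/*
 * qcom_glink_smem_register() - create a glink edge on top of SMEM
 * @parent:	parent device of the edge
 * @node:	device tree node describing the edge
 *
 * Registers a child device for the edge, allocates the shared index
 * descriptor and the TX FIFO in SMEM, and hands both pipes to the common
 * glink implementation.
 *
 * Return: a qcom_glink handle on success, an ERR_PTR() otherwise.
 */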
struct qcom_glink *qcom_glink_smem_register(struct device *parent,
					    struct device_node *node)
{
	struct glink_smem_pipe *rx_pipe;
	struct glink_smem_pipe *tx_pipe;
	struct qcom_glink *glink;
	struct device *dev;
	u32 remote_pid;
	__le32 *descs;
	size_t size;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->parent = parent;
	dev->of_node = node;
	dev->release = qcom_glink_smem_release;
	dev_set_name(dev, "%s:%s", node->parent->name, node->name);
	ret = device_register(dev);
	if (ret) {
		pr_err("failed to register glink edge\n");
		put_device(dev);
		return ERR_PTR(ret);
	}

	ret = of_property_read_u32(dev->of_node, "qcom,remote-pid",
				   &remote_pid);
	if (ret) {
		dev_err(dev, "failed to parse qcom,remote-pid\n");
		goto err_put_dev;
	}

	rx_pipe = devm_kzalloc(dev, sizeof(*rx_pipe), GFP_KERNEL);
	tx_pipe = devm_kzalloc(dev, sizeof(*tx_pipe), GFP_KERNEL);
	if (!rx_pipe || !tx_pipe) {
		ret = -ENOMEM;
		goto err_put_dev;
	}

	ret = qcom_smem_alloc(remote_pid,
			      SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, 32);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "failed to allocate glink descriptors\n");
		goto err_put_dev;
	}

	descs = qcom_smem_get(remote_pid,
			      SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, &size);
	if (IS_ERR(descs)) {
		dev_err(dev, "failed to acquire xprt descriptor\n");
		ret = PTR_ERR(descs);
		goto err_put_dev;
	}

	if (size != 32) {
		dev_err(dev, "glink descriptor of invalid size\n");
		ret = -EINVAL;
		goto err_put_dev;
	}

	tx_pipe->tail = &descs[0];
	tx_pipe->head = &descs[1];
	rx_pipe->tail = &descs[2];
	rx_pipe->head = &descs[3];

	ret = qcom_smem_alloc(remote_pid, SMEM_GLINK_NATIVE_XPRT_FIFO_0,
			      SZ_16K);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "failed to allocate TX fifo\n");
		goto err_put_dev;
	}

	tx_pipe->fifo = qcom_smem_get(remote_pid, SMEM_GLINK_NATIVE_XPRT_FIFO_0,
				      &tx_pipe->native.length);
	if (IS_ERR(tx_pipe->fifo)) {
		dev_err(dev, "failed to acquire TX fifo\n");
		ret = PTR_ERR(tx_pipe->fifo);
		goto err_put_dev;
	}

	rx_pipe->native.avail = glink_smem_rx_avail;
	rx_pipe->native.peak = glink_smem_rx_peak;
	rx_pipe->native.advance = glink_smem_rx_advance;
	rx_pipe->remote_pid = remote_pid;

	tx_pipe->native.avail = glink_smem_tx_avail;
	tx_pipe->native.write = glink_smem_tx_write;
	tx_pipe->remote_pid = remote_pid;

	*rx_pipe->tail = 0;
	*tx_pipe->head = 0;

	glink = qcom_glink_native_probe(dev,
					GLINK_FEATURE_INTENT_REUSE,
					&rx_pipe->native, &tx_pipe->native,
					false);
	if (IS_ERR(glink)) {
		ret = PTR_ERR(glink);
		goto err_put_dev;
	}

	return glink;

err_put_dev:
	device_unregister(dev);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(qcom_glink_smem_register);

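/* Tear down a glink edge previously created by qcom_glink_smem_register(). */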
void qcom_glink_smem_unregister(struct qcom_glink *glink)
{
	qcom_glink_native_remove(glink);
	qcom_glink_native_unregister(glink);
}
EXPORT_SYMBOL_GPL(qcom_glink_smem_unregister);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@linaro.org>");
MODULE_DESCRIPTION("Qualcomm GLINK SMEM driver");
MODULE_LICENSE("GPL v2");