1 /*
2 * Copyright (C) 2020 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <interface/spi/spi.h>
18 #include <lib/spi/common/utils.h>
19 #include <lib/spi/srv/common/common.h>
20 #include <lib/spi/srv/tipc/tipc.h>
21 #include <lib/tipc/tipc_srv.h>
22 #include <stdlib.h>
23 #include <sys/mman.h>
24 #include <uapi/err.h>
25 #include <uapi/mm.h>
26
27 #define TLOG_TAG "spi-srv-tipc"
28 #include <trusty_log.h>
29
30 /**
31 * chan_ctx - per-connection SPI data
32 * @shm: state of memory region shared with SPI server
33 * @shm_handle: handle to shared memory region
34 * @cs: tracks CS state of the underlying SPI device
35 * true - asserted, false - deasserted
36 */
37 struct chan_ctx {
38 struct mem_buf shm;
39 handle_t shm_handle;
40 bool cs;
41 };
42
shm_is_mapped(struct chan_ctx * ctx)43 static inline bool shm_is_mapped(struct chan_ctx* ctx) {
44 return ctx->shm.buf && ctx->shm_handle != INVALID_IPC_HANDLE;
45 }
46
shm_unmap(struct chan_ctx * ctx)47 static inline void shm_unmap(struct chan_ctx* ctx) {
48 if (shm_is_mapped(ctx)) {
49 int rc = munmap(ctx->shm.buf, ctx->shm.capacity);
50 if (rc != NO_ERROR) {
51 TLOGW("munmap() failed: %d\n", rc);
52 }
53 mb_destroy(&ctx->shm);
54 close(ctx->shm_handle);
55 ctx->shm_handle = INVALID_IPC_HANDLE;
56 }
57 }
58
/*
 * Payload that may follow a struct spi_msg_req header. Which member is
 * valid depends on the SPI_CMD_OP_MASK bits of the request's cmd field.
 */
union spi_msg_req_args {
    struct spi_shm_map_req shm;
    struct spi_batch_req batch;
};
63
get_spi_msg_size(struct spi_msg_req * req)64 static size_t get_spi_msg_size(struct spi_msg_req* req) {
65 size_t msg_size = sizeof(struct spi_msg_req);
66 switch (req->cmd & SPI_CMD_OP_MASK) {
67 case SPI_CMD_MSG_OP_SHM_MAP:
68 msg_size += sizeof(struct spi_shm_map_req);
69 break;
70
71 case SPI_CMD_MSG_OP_BATCH_EXEC:
72 msg_size += sizeof(struct spi_batch_req);
73 break;
74 }
75 return msg_size;
76 }
77
/**
 * recv_msg() - receive one SPI request from @chan
 * @chan: channel to read from
 * @req:  filled in with the request header
 * @args: filled in with the command-specific arguments, if any
 * @h:    if non-NULL, receives up to one handle attached to the message
 *
 * Return: NO_ERROR on success, a negative error code otherwise. A message
 * whose size does not match the size implied by @req->cmd is rejected with
 * ERR_BAD_LEN.
 */
static int recv_msg(handle_t chan,
                    struct spi_msg_req* req,
                    union spi_msg_req_args* args,
                    handle_t* h) {
    int rc;
    struct ipc_msg_info msg_inf;
    size_t num_handles = h ? 1 : 0;

    rc = get_msg(chan, &msg_inf);
    if (rc != NO_ERROR) {
        TLOGE("failed (%d) to get_msg()\n", rc);
        return rc;
    }

    struct iovec iovs[2] = {
            {
                    .iov_base = req,
                    .iov_len = sizeof(*req),
            },
            {
                    .iov_base = args,
                    .iov_len = sizeof(*args),
            },
    };
    struct ipc_msg msg = {
            .iov = iovs,
            .num_iov = countof(iovs),
            .handles = h,
            .num_handles = num_handles,
    };
    rc = read_msg(chan, msg_inf.id, 0, &msg);

    /* The message is never re-read; retire it on every path. */
    put_msg(chan, msg_inf.id);

    if (rc < 0) {
        TLOGE("failed (%d) to read_msg()\n", rc);
        return rc;
    }
    if (rc != (int)get_spi_msg_size(req)) {
        /*
         * Size mismatch. Return a real error code instead of propagating
         * read_msg()'s positive byte count as if it were an error,
         * consistent with how the send paths report short transfers.
         */
        TLOGE("unexpected SPI message size (%d)\n", rc);
        return ERR_BAD_LEN;
    }

    return NO_ERROR;
}
118
/**
 * handle_msg_shm_map_req() - map a client-provided shared memory region
 * @chan:       channel to send the response on
 * @ctx:        per-connection state; any previously mapped region is replaced
 * @shm_req:    map request containing the region length
 * @shm_handle: memref handle backing the region
 *
 * On success, @ctx takes ownership of @shm_handle (released later by
 * shm_unmap()). On failure, the caller retains ownership of @shm_handle.
 *
 * Return: NO_ERROR on success, a negative error code otherwise.
 */
static int handle_msg_shm_map_req(handle_t chan,
                                  struct chan_ctx* ctx,
                                  struct spi_shm_map_req* shm_req,
                                  handle_t shm_handle) {
    int rc1, rc = NO_ERROR;
    void* shm_base;

    /* a new mapping replaces any existing one */
    shm_unmap(ctx);

    shm_base = mmap(0, shm_req->len, MMAP_FLAG_PROT_READ | MMAP_FLAG_PROT_WRITE,
                    0, shm_handle, 0);
    if (shm_base == MAP_FAILED) {
        TLOGE("failed to map shared memory\n");
        rc = ERR_GENERIC;
        goto err_mmap;
    }

    struct spi_msg_resp resp = {
            /*
             * Echo the opcode with the response bit set, matching the
             * batch-exec response. Previously .cmd was left zero.
             */
            .cmd = SPI_CMD_MSG_OP_SHM_MAP | SPI_CMD_RESP_BIT,
            .status = translate_lk_err(rc),
    };

    rc = tipc_send1(chan, &resp, sizeof(resp));
    if (rc < 0 || (size_t)rc != sizeof(resp)) {
        TLOGE("failed (%d) to send SPI response\n", rc);
        if (rc >= 0) {
            rc = ERR_BAD_LEN;
        }
        goto err_resp;
    }

    mb_init(&ctx->shm, shm_base, shm_req->len, SPI_CMD_SHM_ALIGN);
    ctx->shm_handle = shm_handle;
    return NO_ERROR;

err_resp:
    rc1 = munmap(shm_base, shm_req->len);
    if (rc1 != NO_ERROR) {
        /* log the munmap error, not the earlier send error */
        TLOGW("munmap() failed: %d\n", rc1);
    }
err_mmap:
    return rc;
}
161
/*
 * Execute a batch of SPI commands staged in the channel's shared memory and
 * report the outcome to the client. Requires shared memory to already be
 * mapped. The channel's CS state is committed only if the entire batch
 * succeeds.
 */
static int handle_msg_batch_req(handle_t chan,
                                struct spi_dev_ctx* spi,
                                struct chan_ctx* ctx,
                                struct spi_batch_req* batch_req) {
    int rc;
    int sent;

    if (!shm_is_mapped(ctx)) {
        return ERR_BAD_STATE;
    }

    struct spi_batch_state state = {
            .cs = ctx->cs,
            .num_cmds = 0,
    };

    rc = spi_srv_handle_batch(spi, &ctx->shm, batch_req, &state);
    if (rc == NO_ERROR) {
        ctx->cs = state.cs;
    }

    struct spi_msg_resp resp = {
            .cmd = SPI_CMD_MSG_OP_BATCH_EXEC | SPI_CMD_RESP_BIT,
            .status = translate_lk_err(rc),
    };
    struct spi_batch_resp batch_resp = {
            .len = mb_curr_pos(&ctx->shm),
            /* on failure, num_cmds presumably identifies how far the batch
             * got — confirm against spi_srv_handle_batch() */
            .failed = (rc != NO_ERROR) ? (uint32_t)state.num_cmds : 0,
    };

    sent = tipc_send2(chan, &resp, sizeof(resp), &batch_resp,
                      sizeof(batch_resp));
    if (sent < 0 || (size_t)sent != sizeof(resp) + sizeof(batch_resp)) {
        TLOGE("failed (%d) to send batch response\n", sent);
        return (sent < 0) ? sent : ERR_BAD_LEN;
    }

    return NO_ERROR;
}
201
/*
 * on_connect() - allocate per-connection state for a new SPI client.
 * The fresh context has no shared memory mapped and CS deasserted.
 */
static int on_connect(const struct tipc_port* port,
                      handle_t chan,
                      const struct uuid* peer,
                      void** ctx_p) {
    struct chan_ctx* ctx;

    ctx = calloc(1, sizeof(*ctx));
    if (ctx == NULL) {
        TLOGE("failed to allocate channel context\n");
        return ERR_NO_MEMORY;
    }

    /* calloc zeroed shm and cs; only the handle needs a non-zero sentinel */
    ctx->shm_handle = INVALID_IPC_HANDLE;

    *ctx_p = ctx;
    return NO_ERROR;
}
217
/**
 * on_message() - receive and dispatch one SPI request on @chan
 * @chan:     channel the request arrived on
 * @chan_ctx: struct chan_ctx for this connection
 *
 * Return: NO_ERROR on success; a non-zero error (which closes the
 * connection) otherwise.
 */
static int on_message(const struct tipc_port* port,
                      handle_t chan,
                      void* chan_ctx) {
    int rc;
    struct spi_msg_req req;
    union spi_msg_req_args args;
    struct spi_dev_ctx* spi = (struct spi_dev_ctx*)port->priv;
    struct chan_ctx* ctx = (struct chan_ctx*)chan_ctx;
    handle_t h = INVALID_IPC_HANDLE;
    bool h_owned = false;

    rc = recv_msg(chan, &req, &args, &h);
    if (rc != NO_ERROR) {
        TLOGE("failed (%d) to receive SPI message, closing connection\n", rc);
        goto out;
    }

    switch (req.cmd & SPI_CMD_OP_MASK) {
    case SPI_CMD_MSG_OP_SHM_MAP:
        rc = handle_msg_shm_map_req(chan, ctx, &args.shm, h);
        /* on success, @ctx took ownership of @h (closed by shm_unmap()) */
        h_owned = (rc == NO_ERROR);
        break;

    case SPI_CMD_MSG_OP_BATCH_EXEC:
        rc = handle_msg_batch_req(chan, spi, ctx, &args.batch);
        break;

    default:
        TLOGE("cmd 0x%x: unknown command\n", req.cmd);
        rc = ERR_CMD_UNKNOWN;
        break;
    }

    if (rc != NO_ERROR) {
        TLOGE("failed (%d) to handle SPI message, closing connection\n", rc);
    }

out:
    /*
     * Close any received handle that nothing took ownership of, e.g. one
     * attached to a failed SHM_MAP request or to a non-map command.
     * Previously such handles were leaked.
     */
    if (!h_owned && h != INVALID_IPC_HANDLE) {
        close(h);
    }
    return rc;
}
255
/*
 * on_disconnect() - recover SPI device state when a client goes away
 *
 * If the client disconnected while chip-select was still asserted, run a
 * one-command batch that deasserts CS so the device is not left wedged for
 * the next client. A deassert failure leaves CS permanently out of sync
 * and is treated as fatal.
 */
static void on_disconnect(const struct tipc_port* port,
                          handle_t chan,
                          void* _ctx) {
    int rc;
    struct spi_dev_ctx* spi = (struct spi_dev_ctx*)port->priv;
    struct chan_ctx* ctx = (struct chan_ctx*)_ctx;
    struct spi_shm_hdr hdr;
    struct mem_buf mb;
    struct spi_batch_req req;
    struct spi_batch_state state;

    /* nothing to recover if CS is already deasserted */
    if (!ctx->cs) {
        return;
    }

    /* Construct a batch with a single deassert command to recover CS state */
    hdr.cmd = SPI_CMD_SHM_OP_CS_DEASSERT;

    /* Deassert commands are header only */
    mb_init(&mb, &hdr, sizeof(hdr), SPI_CMD_SHM_ALIGN);
    mb_resize(&mb, sizeof(hdr));

    req.len = sizeof(hdr);
    req.num_cmds = 1;

    state.cs = true;
    state.num_cmds = 0;

    rc = spi_srv_handle_batch(spi, &mb, &req, &state);
    /* CS state will be out of sync. This is an unrecoverable error. */
    assert(rc == NO_ERROR);

    ctx->cs = false;
}
291
on_channel_cleanup(void * _ctx)292 static void on_channel_cleanup(void* _ctx) {
293 struct chan_ctx* ctx = (struct chan_ctx*)_ctx;
294 assert(!ctx->cs);
295 shm_unmap(ctx);
296 free(ctx);
297 }
298
/* Connection lifecycle callbacks shared by all SPI device ports. */
static const struct tipc_srv_ops spi_dev_ops = {
    .on_connect = on_connect,
    .on_message = on_message,
    .on_disconnect = on_disconnect,
    .on_channel_cleanup = on_channel_cleanup,
};
305
add_spi_service(struct tipc_hset * hset,const struct tipc_port * ports,size_t num_ports)306 int add_spi_service(struct tipc_hset* hset,
307 const struct tipc_port* ports,
308 size_t num_ports) {
309 int rc;
310
311 for (size_t i = 0; i < num_ports; i++) {
312 if (!ports[i].priv) {
313 return ERR_INVALID_ARGS;
314 }
315
316 #if TEST_BUILD
317 const uint32_t max_chan_cnt = 2;
318 #else
319 const uint32_t max_chan_cnt = 1;
320 #endif
321 rc = tipc_add_service(hset, &ports[i], 1, max_chan_cnt, &spi_dev_ops);
322 if (rc != NO_ERROR) {
323 return rc;
324 }
325 }
326
327 return NO_ERROR;
328 }
329