// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */

#include <linux/types.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/sched/signal.h>

#include "vchiq_arm.h"
#include "vchiq_core.h"

#define VCHIQ_SLOT_HANDLER_STACK 8192

#define VCHIQ_MSG_PADDING            0	/* -                                */
#define VCHIQ_MSG_CONNECT            1	/* -                                */
#define VCHIQ_MSG_OPEN               2	/* + (srcport, -), fourcc, client_id */
#define VCHIQ_MSG_OPENACK            3	/* + (srcport, dstport)             */
#define VCHIQ_MSG_CLOSE              4	/* + (srcport, dstport)             */
#define VCHIQ_MSG_DATA               5	/* + (srcport, dstport)             */
#define VCHIQ_MSG_BULK_RX            6	/* + (srcport, dstport), data, size */
#define VCHIQ_MSG_BULK_TX            7	/* + (srcport, dstport), data, size */
#define VCHIQ_MSG_BULK_RX_DONE       8	/* + (srcport, dstport), actual     */
#define VCHIQ_MSG_BULK_TX_DONE       9	/* + (srcport, dstport), actual     */
#define VCHIQ_MSG_PAUSE             10	/* -                                */
#define VCHIQ_MSG_RESUME            11	/* -                                */
#define VCHIQ_MSG_REMOTE_USE        12	/* -                                */
#define VCHIQ_MSG_REMOTE_RELEASE    13	/* -                                */
#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14	/* -                                */

#define TYPE_SHIFT 24

#define VCHIQ_PORT_MAX            (VCHIQ_MAX_SERVICES - 1)
#define VCHIQ_PORT_FREE           0x1000
#define VCHIQ_PORT_IS_VALID(port) ((port) < VCHIQ_PORT_FREE)
#define VCHIQ_MAKE_MSG(type, srcport, dstport) \
	(((type) << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
#define VCHIQ_MSG_TYPE(msgid) ((unsigned int)(msgid) >> TYPE_SHIFT)
#define VCHIQ_MSG_SRCPORT(msgid) \
	((unsigned short)(((unsigned int)(msgid) >> 12) & 0xfff))
#define VCHIQ_MSG_DSTPORT(msgid) \
	((unsigned short)(msgid) & 0xfff)
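
/*
 * Illustrative example (editorial addition, not from the original
 * source): a DATA message from local port 3 to remote port 7 is encoded
 * as
 *   VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA, 3, 7)
 *     = (5 << 24) | (3 << 12) | 7 = 0x05003007
 * and decodes back via VCHIQ_MSG_TYPE()/VCHIQ_MSG_SRCPORT()/
 * VCHIQ_MSG_DSTPORT(). The type occupies the top 8 bits and each port
 * occupies 12 bits, which is why VCHIQ_PORT_FREE (0x1000) sits just
 * outside the valid port range.
 */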

#define MAKE_CONNECT		(VCHIQ_MSG_CONNECT << TYPE_SHIFT)
#define MAKE_OPEN(srcport) \
	((VCHIQ_MSG_OPEN << TYPE_SHIFT) | ((srcport) << 12))
#define MAKE_OPENACK(srcport, dstport) \
	((VCHIQ_MSG_OPENACK << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
#define MAKE_CLOSE(srcport, dstport) \
	((VCHIQ_MSG_CLOSE << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
#define MAKE_DATA(srcport, dstport) \
	((VCHIQ_MSG_DATA << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
#define MAKE_PAUSE		(VCHIQ_MSG_PAUSE << TYPE_SHIFT)
#define MAKE_RESUME		(VCHIQ_MSG_RESUME << TYPE_SHIFT)
#define MAKE_REMOTE_USE		(VCHIQ_MSG_REMOTE_USE << TYPE_SHIFT)
#define MAKE_REMOTE_USE_ACTIVE	(VCHIQ_MSG_REMOTE_USE_ACTIVE << TYPE_SHIFT)

#define PAGELIST_WRITE			0
#define PAGELIST_READ			1
#define PAGELIST_READ_WITH_FRAGMENTS	2

#define BELL2	0x08

/* Ensure the fields are wide enough */
static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX)) == 0);
static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
static_assert((unsigned int)VCHIQ_PORT_MAX < (unsigned int)VCHIQ_PORT_FREE);

#define VCHIQ_MSGID_PADDING	VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
#define VCHIQ_MSGID_CLAIMED	0x40000000

#define VCHIQ_FOURCC_INVALID		0x00000000
#define VCHIQ_FOURCC_IS_LEGAL(fourcc)	((fourcc) != VCHIQ_FOURCC_INVALID)

#define VCHIQ_BULK_ACTUAL_ABORTED -1

#if VCHIQ_ENABLE_STATS
#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
	(service->stats. stat += addend)
#else
#define VCHIQ_STATS_INC(state, stat) ((void)0)
#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
#endif

#define HANDLE_STATE_SHIFT 12

#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
#define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
#define SLOT_INDEX_FROM_DATA(state, data) \
	(((unsigned int)((char *)data - (char *)state->slot_data)) / \
	VCHIQ_SLOT_SIZE)
#define SLOT_INDEX_FROM_INFO(state, info) \
	((unsigned int)(info - state->slot_info))
#define SLOT_QUEUE_INDEX_FROM_POS(pos) \
	((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
#define SLOT_QUEUE_INDEX_FROM_POS_MASKED(pos) \
	(SLOT_QUEUE_INDEX_FROM_POS(pos) & VCHIQ_SLOT_QUEUE_MASK)
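
/*
 * Worked example (editorial addition, assuming the usual VCHIQ_SLOT_SIZE
 * of 4096): a transmit position of 0x3008 lies 8 bytes into the fourth
 * slot, so SLOT_QUEUE_INDEX_FROM_POS(0x3008) == 3, and the MASKED
 * variant wraps that index into the circular slot queue.
 */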

#define BULK_INDEX(x) ((x) & (VCHIQ_NUM_SERVICE_BULKS - 1))

#define NO_CLOSE_RECVD	0
#define CLOSE_RECVD	1

#define NO_RETRY_POLL	0
#define RETRY_POLL	1

struct vchiq_open_payload {
	int fourcc;
	int client_id;
	short version;
	short version_min;
};

struct vchiq_openack_payload {
	short version;
};

enum {
	QMFLAGS_IS_BLOCKING     = BIT(0),
	QMFLAGS_NO_MUTEX_LOCK   = BIT(1),
	QMFLAGS_NO_MUTEX_UNLOCK = BIT(2)
};
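
/*
 * Editorial note on the flags above: queue_message() takes them as its
 * 'flags' argument. QMFLAGS_IS_BLOCKING makes reserve_space() wait for
 * a free slot instead of failing, while the NO_MUTEX_LOCK/UNLOCK pair
 * lets callers that already hold (or need to keep holding) slot_mutex
 * skip the corresponding lock operation.
 */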

enum {
	VCHIQ_POLL_TERMINATE,
	VCHIQ_POLL_REMOVE,
	VCHIQ_POLL_TXNOTIFY,
	VCHIQ_POLL_RXNOTIFY,
	VCHIQ_POLL_COUNT
};

/* we require this for consistency between endpoints */
static_assert(sizeof(struct vchiq_header) == 8);
static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);

static inline void check_sizes(void)
{
	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_SLOT_SIZE);
	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS);
	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS_PER_SIDE);
	BUILD_BUG_ON_NOT_POWER_OF_2(sizeof(struct vchiq_header));
	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_CURRENT_BULKS);
	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_SERVICE_BULKS);
	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SERVICES);
}

static unsigned int handle_seq;

static const char *const srvstate_names[] = {
	"FREE",
	"HIDDEN",
	"LISTENING",
	"OPENING",
	"OPEN",
	"OPENSYNC",
	"CLOSESENT",
	"CLOSERECVD",
	"CLOSEWAIT",
	"CLOSED"
};

static const char *const reason_names[] = {
	"SERVICE_OPENED",
	"SERVICE_CLOSED",
	"MESSAGE_AVAILABLE",
	"BULK_TRANSMIT_DONE",
	"BULK_RECEIVE_DONE",
	"BULK_TRANSMIT_ABORTED",
	"BULK_RECEIVE_ABORTED"
};

static const char *const conn_state_names[] = {
	"DISCONNECTED",
	"CONNECTING",
	"CONNECTED",
	"PAUSING",
	"PAUSE_SENT",
	"PAUSED",
	"RESUMING",
	"PAUSE_TIMEOUT",
	"RESUME_TIMEOUT"
};

static void
release_message_sync(struct vchiq_state *state, struct vchiq_header *header);

static const char *msg_type_str(unsigned int msg_type)
{
	switch (msg_type) {
	case VCHIQ_MSG_PADDING:           return "PADDING";
	case VCHIQ_MSG_CONNECT:           return "CONNECT";
	case VCHIQ_MSG_OPEN:              return "OPEN";
	case VCHIQ_MSG_OPENACK:           return "OPENACK";
	case VCHIQ_MSG_CLOSE:             return "CLOSE";
	case VCHIQ_MSG_DATA:              return "DATA";
	case VCHIQ_MSG_BULK_RX:           return "BULK_RX";
	case VCHIQ_MSG_BULK_TX:           return "BULK_TX";
	case VCHIQ_MSG_BULK_RX_DONE:      return "BULK_RX_DONE";
	case VCHIQ_MSG_BULK_TX_DONE:      return "BULK_TX_DONE";
	case VCHIQ_MSG_PAUSE:             return "PAUSE";
	case VCHIQ_MSG_RESUME:            return "RESUME";
	case VCHIQ_MSG_REMOTE_USE:        return "REMOTE_USE";
	case VCHIQ_MSG_REMOTE_RELEASE:    return "REMOTE_RELEASE";
	case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
	}
	return "???";
}

static inline void
set_service_state(struct vchiq_service *service, int newstate)
{
	dev_dbg(service->state->dev, "core: %d: srv:%d %s->%s\n",
		service->state->id, service->localport,
		srvstate_names[service->srvstate],
		srvstate_names[newstate]);
	service->srvstate = newstate;
}

struct vchiq_service *handle_to_service(struct vchiq_instance *instance, unsigned int handle)
{
	int idx = handle & (VCHIQ_MAX_SERVICES - 1);

	return rcu_dereference(instance->state->services[idx]);
}
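
/*
 * Editorial note: a service handle embeds the service index in its low
 * bits, with a sequence component (derived from handle_seq above) in
 * the remaining bits, so masking with (VCHIQ_MAX_SERVICES - 1) recovers
 * the index, while the 'service->handle == handle' checks in the
 * lookups below reject stale handles whose slot has been reused.
 *
 * The lookups that follow all use the same RCU pattern: dereference
 * under rcu_read_lock(), validate the service, then take a reference
 * with kref_get_unless_zero() before leaving the read-side critical
 * section, so the returned service cannot be freed under the caller.
 */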

struct vchiq_service *
find_service_by_handle(struct vchiq_instance *instance, unsigned int handle)
{
	struct vchiq_service *service;

	rcu_read_lock();
	service = handle_to_service(instance, handle);
	if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
	    service->handle == handle &&
	    kref_get_unless_zero(&service->ref_count)) {
		service = rcu_pointer_handoff(service);
		rcu_read_unlock();
		return service;
	}
	rcu_read_unlock();
	dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle);
	return NULL;
}

struct vchiq_service *
find_service_by_port(struct vchiq_state *state, unsigned int localport)
{
	if (localport <= VCHIQ_PORT_MAX) {
		struct vchiq_service *service;

		rcu_read_lock();
		service = rcu_dereference(state->services[localport]);
		if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
		    kref_get_unless_zero(&service->ref_count)) {
			service = rcu_pointer_handoff(service);
			rcu_read_unlock();
			return service;
		}
		rcu_read_unlock();
	}
	dev_dbg(state->dev, "core: Invalid port %u\n", localport);
	return NULL;
}

struct vchiq_service *
find_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
{
	struct vchiq_service *service;

	rcu_read_lock();
	service = handle_to_service(instance, handle);
	if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
	    service->handle == handle &&
	    service->instance == instance &&
	    kref_get_unless_zero(&service->ref_count)) {
		service = rcu_pointer_handoff(service);
		rcu_read_unlock();
		return service;
	}
	rcu_read_unlock();
	dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle);
	return NULL;
}

struct vchiq_service *
find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
{
	struct vchiq_service *service;

	rcu_read_lock();
	service = handle_to_service(instance, handle);
	if (service &&
	    (service->srvstate == VCHIQ_SRVSTATE_FREE ||
	     service->srvstate == VCHIQ_SRVSTATE_CLOSED) &&
	    service->handle == handle &&
	    service->instance == instance &&
	    kref_get_unless_zero(&service->ref_count)) {
		service = rcu_pointer_handoff(service);
		rcu_read_unlock();
		return service;
	}
	rcu_read_unlock();
	dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle);
	return service;
}

struct vchiq_service *
__next_service_by_instance(struct vchiq_state *state,
			   struct vchiq_instance *instance,
			   int *pidx)
{
	struct vchiq_service *service = NULL;
	int idx = *pidx;

	while (idx < state->unused_service) {
		struct vchiq_service *srv;

		srv = rcu_dereference(state->services[idx]);
		idx++;
		if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE &&
		    srv->instance == instance) {
			service = srv;
			break;
		}
	}

	*pidx = idx;
	return service;
}

struct vchiq_service *
next_service_by_instance(struct vchiq_state *state,
			 struct vchiq_instance *instance,
			 int *pidx)
{
	struct vchiq_service *service;

	rcu_read_lock();
	while (1) {
		service = __next_service_by_instance(state, instance, pidx);
		if (!service)
			break;
		if (kref_get_unless_zero(&service->ref_count)) {
			service = rcu_pointer_handoff(service);
			break;
		}
	}
	rcu_read_unlock();
	return service;
}

void
vchiq_service_get(struct vchiq_service *service)
{
	if (!service) {
		WARN(1, "%s service is NULL\n", __func__);
		return;
	}
	kref_get(&service->ref_count);
}

static void service_release(struct kref *kref)
{
	struct vchiq_service *service =
		container_of(kref, struct vchiq_service, ref_count);
	struct vchiq_state *state = service->state;

	WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
	rcu_assign_pointer(state->services[service->localport], NULL);
	if (service->userdata_term)
		service->userdata_term(service->base.userdata);
	kfree_rcu(service, rcu);
}

void
vchiq_service_put(struct vchiq_service *service)
{
	if (!service) {
		WARN(1, "%s: service is NULL\n", __func__);
		return;
	}
	kref_put(&service->ref_count, service_release);
}

int
vchiq_get_client_id(struct vchiq_instance *instance, unsigned int handle)
{
	struct vchiq_service *service;
	int id;

	rcu_read_lock();
	service = handle_to_service(instance, handle);
	id = service ? service->client_id : 0;
	rcu_read_unlock();
	return id;
}

void *
vchiq_get_service_userdata(struct vchiq_instance *instance, unsigned int handle)
{
	void *userdata;
	struct vchiq_service *service;

	rcu_read_lock();
	service = handle_to_service(instance, handle);
	userdata = service ? service->base.userdata : NULL;
	rcu_read_unlock();
	return userdata;
}
EXPORT_SYMBOL(vchiq_get_service_userdata);

static void
mark_service_closing_internal(struct vchiq_service *service, int sh_thread)
{
	struct vchiq_state *state = service->state;
	struct vchiq_service_quota *quota;

	service->closing = 1;

	/* Synchronise with other threads. */
	mutex_lock(&state->recycle_mutex);
	mutex_unlock(&state->recycle_mutex);
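
	/*
	 * Editorial note: the empty lock/unlock pairs here act as
	 * barriers rather than critical sections. Taking and releasing
	 * recycle_mutex (and slot_mutex below) simply waits out any
	 * thread currently holding the lock, guaranteeing that it will
	 * observe service->closing = 1 on its next pass.
	 */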
	if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
		/*
		 * If we're pausing then the slot_mutex is held until resume
		 * by the slot handler. Therefore don't try to acquire this
		 * mutex if we're the slot handler and in the pause sent state.
		 * We don't need to in this case anyway.
		 */
		mutex_lock(&state->slot_mutex);
		mutex_unlock(&state->slot_mutex);
	}

	/* Unblock any sending thread. */
	quota = &state->service_quotas[service->localport];
	complete(&quota->quota_event);
}

static void
mark_service_closing(struct vchiq_service *service)
{
	mark_service_closing_internal(service, 0);
}

static inline int
make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
		      struct vchiq_header *header, struct vchiq_bulk *bulk)
{
	void *cb_data = NULL;
	void __user *cb_userdata = NULL;
	int status;

	/*
	 * If a bulk transfer is in progress, pass bulk->cb_*data to the
	 * callback function.
	 */
	if (bulk) {
		cb_data = bulk->cb_data;
		cb_userdata = bulk->cb_userdata;
	}

	dev_dbg(service->state->dev, "core: %d: callback:%d (%s, %pK, %pK %pK)\n",
		service->state->id, service->localport, reason_names[reason],
		header, cb_data, cb_userdata);
	status = service->base.callback(service->instance, reason, header, service->handle,
					cb_data, cb_userdata);
	if (status && (status != -EAGAIN)) {
		dev_warn(service->state->dev,
			 "core: %d: ignoring ERROR from callback to service %x\n",
			 service->state->id, service->handle);
		status = 0;
	}

	if (reason != VCHIQ_MESSAGE_AVAILABLE)
		vchiq_release_message(service->instance, service->handle, header);

	return status;
}

inline void
vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
{
	enum vchiq_connstate oldstate = state->conn_state;

	dev_dbg(state->dev, "core: %d: %s->%s\n",
		state->id, conn_state_names[oldstate], conn_state_names[newstate]);
	state->conn_state = newstate;
	vchiq_platform_conn_state_changed(state, oldstate, newstate);
}

/* This initialises a single remote_event, and the associated wait_queue. */
static inline void
remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
{
	event->armed = 0;
	/*
	 * Don't clear the 'fired' flag because it may already have been set
	 * by the other side.
	 */
	init_waitqueue_head(wq);
}

/*
 * All the event waiting routines in VCHIQ used a custom semaphore
 * implementation that filtered most signals. This achieved a behaviour
 * similar to the "killable" family of functions. While cleaning up this
 * code, all the routines were switched to the "interruptible" family of
 * functions, as the former was deemed unjustified and the use of
 * "killable" put all VCHIQ's threads in the D state.
 *
 * Returns: 0 on success, a negative error code on failure
 */
static inline int
remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
{
	int ret = 0;

	if (!event->fired) {
		event->armed = 1;
		dsb(sy);
		ret = wait_event_interruptible(*wq, event->fired);
		if (ret) {
			event->armed = 0;
			return ret;
		}
		event->armed = 0;
		/* Ensure that the peer sees that we are not waiting (armed == 0). */
		wmb();
	}

	event->fired = 0;
	return ret;
}

static void
remote_event_signal(struct vchiq_state *state, struct remote_event *event)
{
	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(state->dev);

	/*
	 * Ensure that all writes to shared data structures have completed
	 * before signalling the peer.
	 */
	wmb();

	event->fired = 1;

	dsb(sy); /* data barrier operation */

	if (event->armed)
		writel(0, mgmt->regs + BELL2); /* trigger vc interrupt */
}
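
/*
 * Editorial note on the handshake above: each remote_event is a pair of
 * flags in shared memory. A waiter sets 'armed' before sleeping on
 * 'fired'; a signaller sets 'fired' and only rings the BELL2 doorbell
 * when it sees 'armed'. The dsb()/wmb() barriers order the flag updates
 * against each other and against the doorbell write, so a signal can be
 * missed neither by the waiter nor by a signaller skipping the
 * interrupt.
 */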

/*
 * Acknowledge that the event has been signalled, and wake any waiters. Usually
 * called as a result of the doorbell being rung.
 */
static inline void
remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
{
	event->fired = 1;
	event->armed = 0;
	wake_up_all(wq);
}

/* Check if a single event has been signalled, waking the waiters if it has. */
static inline void
remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
{
	if (event->fired && event->armed)
		remote_event_signal_local(wq, event);
}

/*
 * VCHIQ uses a small, fixed number of remote events. It is simplest to
 * enumerate them here for polling.
 */
void
remote_event_pollall(struct vchiq_state *state)
{
	remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger);
	remote_event_poll(&state->sync_release_event, &state->local->sync_release);
	remote_event_poll(&state->trigger_event, &state->local->trigger);
	remote_event_poll(&state->recycle_event, &state->local->recycle);
}

/*
 * Round up message sizes so that any space at the end of a slot is always big
 * enough for a header. This relies on header size being a power of two, which
 * has been verified earlier by a static assertion.
 */

static inline size_t
calc_stride(size_t size)
{
	/* Allow room for the header */
	size += sizeof(struct vchiq_header);

	/* Round up */
	return (size + sizeof(struct vchiq_header) - 1) &
	       ~(sizeof(struct vchiq_header) - 1);
}
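
/*
 * Worked example (editorial addition): with the 8-byte vchiq_header
 * asserted above, a 5-byte payload gives
 *   calc_stride(5) = (5 + 8 + 7) & ~7 = 16,
 * i.e. one header plus one 8-byte data block, keeping every message
 * 8-byte aligned within its slot.
 */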

/* Called by the slot handler thread */
static struct vchiq_service *
get_listening_service(struct vchiq_state *state, int fourcc)
{
	int i;

	WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);

	rcu_read_lock();
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;

		service = rcu_dereference(state->services[i]);
		if (service &&
		    service->public_fourcc == fourcc &&
		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING ||
		     (service->srvstate == VCHIQ_SRVSTATE_OPEN &&
		      service->remoteport == VCHIQ_PORT_FREE)) &&
		    kref_get_unless_zero(&service->ref_count)) {
			service = rcu_pointer_handoff(service);
			rcu_read_unlock();
			return service;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/* Called by the slot handler thread */
static struct vchiq_service *
get_connected_service(struct vchiq_state *state, unsigned int port)
{
	int i;

	rcu_read_lock();
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service =
			rcu_dereference(state->services[i]);

		if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN &&
		    service->remoteport == port &&
		    kref_get_unless_zero(&service->ref_count)) {
			service = rcu_pointer_handoff(service);
			rcu_read_unlock();
			return service;
		}
	}
	rcu_read_unlock();
	return NULL;
}

inline void
request_poll(struct vchiq_state *state, struct vchiq_service *service,
	     int poll_type)
{
	u32 value;
	int index;

	if (!service)
		goto skip_service;

	do {
		value = atomic_read(&service->poll_flags);
	} while (atomic_cmpxchg(&service->poll_flags, value,
		 value | BIT(poll_type)) != value);

	index = BITSET_WORD(service->localport);
	do {
		value = atomic_read(&state->poll_services[index]);
	} while (atomic_cmpxchg(&state->poll_services[index],
		 value, value | BIT(service->localport & 0x1f)) != value);

skip_service:
	state->poll_needed = 1;
	/* Ensure the slot handler thread sees the poll_needed flag. */
	wmb();

	/* ... and ensure the slot handler runs. */
	remote_event_signal_local(&state->trigger_event, &state->local->trigger);
}

/*
 * Called from queue_message, by the slot handler and application threads,
 * with slot_mutex held
 */
static struct vchiq_header *
reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
{
	struct vchiq_shared_state *local = state->local;
	int tx_pos = state->local_tx_pos;
	int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);

	if (space > slot_space) {
		struct vchiq_header *header;
		/* Fill the remaining space with padding */
		WARN_ON(!state->tx_data);
		header = (struct vchiq_header *)
			(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
		header->msgid = VCHIQ_MSGID_PADDING;
		header->size = slot_space - sizeof(struct vchiq_header);

		tx_pos += slot_space;
	}

	/* If necessary, get the next slot. */
	if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
		int slot_index;

		/* If there is no free slot... */

		if (!try_wait_for_completion(&state->slot_available_event)) {
			/* ...wait for one. */

			VCHIQ_STATS_INC(state, slot_stalls);

			/* But first, flush through the last slot. */
			state->local_tx_pos = tx_pos;
			local->tx_pos = tx_pos;
			remote_event_signal(state, &state->remote->trigger);

			if (!is_blocking ||
			    (wait_for_completion_interruptible(&state->slot_available_event)))
				return NULL; /* No space available */
		}

		if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
			complete(&state->slot_available_event);
			dev_warn(state->dev, "%s: invalid tx_pos: %d\n",
				 __func__, tx_pos);
			return NULL;
		}

		slot_index = local->slot_queue[SLOT_QUEUE_INDEX_FROM_POS_MASKED(tx_pos)];
		state->tx_data =
			(char *)SLOT_DATA_FROM_INDEX(state, slot_index);
	}

	state->local_tx_pos = tx_pos + space;

	return (struct vchiq_header *)(state->tx_data +
		(tx_pos & VCHIQ_SLOT_MASK));
}

static void
process_free_data_message(struct vchiq_state *state, u32 *service_found,
			  struct vchiq_header *header)
{
	int msgid = header->msgid;
	int port = VCHIQ_MSG_SRCPORT(msgid);
	struct vchiq_service_quota *quota = &state->service_quotas[port];
	int count;

	spin_lock(&state->quota_spinlock);
	count = quota->message_use_count;
	if (count > 0)
		quota->message_use_count = count - 1;
	spin_unlock(&state->quota_spinlock);

	if (count == quota->message_quota) {
		/*
		 * Signal the service that it
		 * has dropped below its quota
		 */
		complete(&quota->quota_event);
	} else if (count == 0) {
		dev_err(state->dev,
			"core: service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)\n",
			port, quota->message_use_count, header, msgid,
			header->msgid, header->size);
		WARN(1, "invalid message use count\n");
	}
	if (!BITSET_IS_SET(service_found, port)) {
		/* Set the found bit for this service */
		BITSET_SET(service_found, port);

		spin_lock(&state->quota_spinlock);
		count = quota->slot_use_count;
		if (count > 0)
			quota->slot_use_count = count - 1;
		spin_unlock(&state->quota_spinlock);

		if (count > 0) {
			/*
			 * Signal the service in case
			 * it has dropped below its quota
			 */
			complete(&quota->quota_event);
			dev_dbg(state->dev, "core: %d: pfq:%d %x@%pK - slot_use->%d\n",
				state->id, port, header->size, header, count - 1);
		} else {
			dev_err(state->dev,
				"core: service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)\n",
				port, count, header, msgid, header->msgid, header->size);
			WARN(1, "bad slot use count\n");
		}
	}
}

/* Called by the recycle thread. */
static void
process_free_queue(struct vchiq_state *state, u32 *service_found,
		   size_t length)
{
	struct vchiq_shared_state *local = state->local;
	int slot_queue_available;

	/*
	 * Find slots which have been freed by the other side, and return them
	 * to the available queue.
	 */
	slot_queue_available = state->slot_queue_available;

	/*
	 * Use a memory barrier to ensure that any state that may have been
	 * modified by another thread is not masked by stale prefetched
	 * values.
	 */
	mb();

	while (slot_queue_available != local->slot_queue_recycle) {
		unsigned int pos;
		int slot_index = local->slot_queue[slot_queue_available &
			VCHIQ_SLOT_QUEUE_MASK];
		char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
		int data_found = 0;

		slot_queue_available++;
		/*
		 * Beware of the address dependency - data is calculated
		 * using an index written by the other side.
		 */
		rmb();

		dev_dbg(state->dev, "core: %d: pfq %d=%pK %x %x\n",
			state->id, slot_index, data, local->slot_queue_recycle,
			slot_queue_available);

		/* Initialise the bitmask for services which have used this slot */
		memset(service_found, 0, length);

		pos = 0;

		while (pos < VCHIQ_SLOT_SIZE) {
			struct vchiq_header *header =
				(struct vchiq_header *)(data + pos);
			int msgid = header->msgid;

			if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
				process_free_data_message(state, service_found,
							  header);
				data_found = 1;
			}

			pos += calc_stride(header->size);
			if (pos > VCHIQ_SLOT_SIZE) {
				dev_err(state->dev,
					"core: pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x\n",
					pos, header, msgid, header->msgid, header->size);
				WARN(1, "invalid slot position\n");
			}
		}

		if (data_found) {
			int count;

			spin_lock(&state->quota_spinlock);
			count = state->data_use_count;
			if (count > 0)
				state->data_use_count = count - 1;
			spin_unlock(&state->quota_spinlock);
			if (count == state->data_quota)
				complete(&state->data_quota_event);
		}

		/*
		 * Don't allow the slot to be reused until we are no
		 * longer interested in it.
		 */
		mb();

		state->slot_queue_available = slot_queue_available;
		complete(&state->slot_available_event);
	}
}

static ssize_t
memcpy_copy_callback(void *context, void *dest, size_t offset, size_t maxsize)
{
	memcpy(dest + offset, context + offset, maxsize);
	return maxsize;
}
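
/*
 * Editorial note on the copy-callback contract used below: 'context' is
 * the caller's source cookie, 'offset' is how many bytes have already
 * been copied, and the callback returns the number of bytes it copied
 * this call (possibly fewer than 'maxsize'), or zero/negative on error.
 * memcpy_copy_callback() above is the trivial whole-buffer instance.
 */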

static ssize_t
copy_message_data(ssize_t (*copy_callback)(void *context, void *dest, size_t offset,
					   size_t maxsize),
		  void *context,
		  void *dest,
		  size_t size)
{
	size_t pos = 0;

	while (pos < size) {
		ssize_t callback_result;
		size_t max_bytes = size - pos;

		callback_result = copy_callback(context, dest + pos, pos,
						max_bytes);

		if (callback_result < 0)
			return callback_result;

		if (!callback_result)
			return -EIO;

		if (callback_result > max_bytes)
			return -EIO;

		pos += callback_result;
	}

	return size;
}

/* Called by the slot handler and application threads */
static int
queue_message(struct vchiq_state *state, struct vchiq_service *service,
	      int msgid,
	      ssize_t (*copy_callback)(void *context, void *dest,
				       size_t offset, size_t maxsize),
	      void *context, size_t size, int flags)
{
	struct vchiq_shared_state *local;
	struct vchiq_service_quota *quota = NULL;
	struct vchiq_header *header;
	int type = VCHIQ_MSG_TYPE(msgid);
	int svc_fourcc;

	size_t stride;

	local = state->local;

	stride = calc_stride(size);

	WARN_ON(stride > VCHIQ_SLOT_SIZE);

	if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
	    mutex_lock_killable(&state->slot_mutex))
		return -EINTR;

	if (type == VCHIQ_MSG_DATA) {
		int tx_end_index;

		if (!service) {
			WARN(1, "%s: service is NULL\n", __func__);
			mutex_unlock(&state->slot_mutex);
			return -EINVAL;
		}

		WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
				 QMFLAGS_NO_MUTEX_UNLOCK));

		if (service->closing) {
			/* The service has been closed */
			mutex_unlock(&state->slot_mutex);
			return -EHOSTDOWN;
		}

		quota = &state->service_quotas[service->localport];

		spin_lock(&state->quota_spinlock);

		/*
		 * Ensure this service doesn't use more than its quota of
		 * messages or slots
		 */
		tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);

		/*
		 * Ensure data messages don't use more than their quota of
		 * slots
		 */
		while ((tx_end_index != state->previous_data_index) &&
		       (state->data_use_count == state->data_quota)) {
			VCHIQ_STATS_INC(state, data_stalls);
			spin_unlock(&state->quota_spinlock);
			mutex_unlock(&state->slot_mutex);

			if (wait_for_completion_killable(&state->data_quota_event))
				return -EINTR;

			mutex_lock(&state->slot_mutex);
			spin_lock(&state->quota_spinlock);
			tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
			if ((tx_end_index == state->previous_data_index) ||
			    (state->data_use_count < state->data_quota)) {
				/* Pass the signal on to other waiters */
				complete(&state->data_quota_event);
				break;
			}
		}

		while ((quota->message_use_count == quota->message_quota) ||
		       ((tx_end_index != quota->previous_tx_index) &&
			(quota->slot_use_count == quota->slot_quota))) {
			spin_unlock(&state->quota_spinlock);
			dev_dbg(state->dev,
				"core: %d: qm:%d %s,%zx - quota stall (msg %d, slot %d)\n",
				state->id, service->localport, msg_type_str(type), size,
				quota->message_use_count, quota->slot_use_count);
			VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
			mutex_unlock(&state->slot_mutex);
			if (wait_for_completion_killable(&quota->quota_event))
				return -EINTR;
			if (service->closing)
				return -EHOSTDOWN;
			if (mutex_lock_killable(&state->slot_mutex))
				return -EINTR;
			if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
				/* The service has been closed */
				mutex_unlock(&state->slot_mutex);
				return -EHOSTDOWN;
			}
			spin_lock(&state->quota_spinlock);
			tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
		}

		spin_unlock(&state->quota_spinlock);
	}

	header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);

	if (!header) {
		if (service)
			VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
		/*
		 * In the event of a failure, return the mutex to the
		 * state it was in
		 */
		if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
			mutex_unlock(&state->slot_mutex);
		return -EAGAIN;
	}

	if (type == VCHIQ_MSG_DATA) {
		ssize_t callback_result;
		int tx_end_index;
		int slot_use_count;

		dev_dbg(state->dev, "core: %d: qm %s@%pK,%zx (%d->%d)\n",
			state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
			VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));

		WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
				 QMFLAGS_NO_MUTEX_UNLOCK));

		callback_result =
			copy_message_data(copy_callback, context,
					  header->data, size);

		if (callback_result < 0) {
			mutex_unlock(&state->slot_mutex);
			VCHIQ_SERVICE_STATS_INC(service, error_count);
			return -EINVAL;
		}

		vchiq_log_dump_mem(state->dev, "Sent", 0,
				   header->data,
				   min_t(size_t, 16, callback_result));

		spin_lock(&state->quota_spinlock);
		quota->message_use_count++;

		tx_end_index =
			SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);

		/*
		 * If this transmission can't fit in the last slot used by any
		 * service, the data_use_count must be increased.
		 */
		if (tx_end_index != state->previous_data_index) {
			state->previous_data_index = tx_end_index;
			state->data_use_count++;
		}

		/*
		 * If this isn't the same slot last used by this service,
		 * the service's slot_use_count must be increased.
		 */
		if (tx_end_index != quota->previous_tx_index) {
			quota->previous_tx_index = tx_end_index;
			slot_use_count = ++quota->slot_use_count;
		} else {
			slot_use_count = 0;
		}

		spin_unlock(&state->quota_spinlock);

		if (slot_use_count)
			dev_dbg(state->dev, "core: %d: qm:%d %s,%zx - slot_use->%d (hdr %p)\n",
				state->id, service->localport, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
				size, slot_use_count, header);

		VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
		VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
	} else {
		dev_dbg(state->dev, "core: %d: qm %s@%pK,%zx (%d->%d)\n",
			state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
			VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
		if (size != 0) {
			/*
			 * It is assumed for now that this code path
			 * only happens from calls inside this file.
			 *
			 * External callers go through the
			 * vchiq_queue_message path, which always sets
			 * the type to VCHIQ_MSG_DATA.
			 *
			 * At first glance this appears to be correct, but
			 * more review is needed.
			 */
			copy_message_data(copy_callback, context,
					  header->data, size);
		}
		VCHIQ_STATS_INC(state, ctrl_tx_count);
	}

	header->msgid = msgid;
	header->size = size;

	svc_fourcc = service ? service->base.fourcc
			     : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');

	dev_dbg(state->dev, "core_msg: Sent Msg %s(%u) to %p4cc s:%u d:%d len:%zu\n",
		msg_type_str(VCHIQ_MSG_TYPE(msgid)),
		VCHIQ_MSG_TYPE(msgid), &svc_fourcc,
		VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid), size);

	/* Make sure the new header is visible to the peer. */
	wmb();

	/* Make the new tx_pos visible to the peer. */
	local->tx_pos = state->local_tx_pos;
	wmb();

	if (service && (type == VCHIQ_MSG_CLOSE))
		set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);

	if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
		mutex_unlock(&state->slot_mutex);

	remote_event_signal(state, &state->remote->trigger);

	return 0;
}

/* Called by the slot handler and application threads */
static int
queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
		   int msgid,
		   ssize_t (*copy_callback)(void *context, void *dest,
					    size_t offset, size_t maxsize),
		   void *context, int size)
{
	struct vchiq_shared_state *local;
	struct vchiq_header *header;
	ssize_t callback_result;
	int svc_fourcc;
	int ret;

	local = state->local;

	if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME &&
	    mutex_lock_killable(&state->sync_mutex))
		return -EAGAIN;

	ret = remote_event_wait(&state->sync_release_event, &local->sync_release);
	if (ret)
		return ret;

	/* Ensure that reads don't overtake the remote_event_wait. */
	rmb();

	header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
							     local->slot_sync);

	{
		int oldmsgid = header->msgid;

		if (oldmsgid != VCHIQ_MSGID_PADDING)
			dev_err(state->dev, "core: %d: qms - msgid %x, not PADDING\n",
				state->id, oldmsgid);
	}

	dev_dbg(state->dev, "sync: %d: qms %s@%pK,%x (%d->%d)\n",
		state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
		VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));

	callback_result = copy_message_data(copy_callback, context,
					    header->data, size);

	if (callback_result < 0) {
		mutex_unlock(&state->sync_mutex);
		VCHIQ_SERVICE_STATS_INC(service, error_count);
		return -EINVAL;
	}

	if (service) {
		vchiq_log_dump_mem(state->dev, "Sent", 0,
				   header->data,
				   min_t(size_t, 16, callback_result));

		VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
		VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
	} else {
		VCHIQ_STATS_INC(state, ctrl_tx_count);
	}

	header->size = size;
	header->msgid = msgid;

	svc_fourcc = service ? service->base.fourcc
			     : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');

	dev_dbg(state->dev,
		"sync: Sent Sync Msg %s(%u) to %p4cc s:%u d:%d len:%d\n",
		msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
		&svc_fourcc, VCHIQ_MSG_SRCPORT(msgid),
		VCHIQ_MSG_DSTPORT(msgid), size);

	remote_event_signal(state, &state->remote->sync_trigger);

	if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
		mutex_unlock(&state->sync_mutex);

	return 0;
}

static inline void
claim_slot(struct vchiq_slot_info *slot)
{
	slot->use_count++;
}
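
/*
 * Editorial note: each message delivered out of a slot claims it
 * (use_count), and release_slot() below hands the slot back to the
 * remote's recycle queue once release_count catches up, i.e. when no
 * claimed message in the slot is still being referenced.
 */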

static void
release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
	     struct vchiq_header *header, struct vchiq_service *service)
{
	mutex_lock(&state->recycle_mutex);

	if (header) {
		int msgid = header->msgid;

		if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) || (service && service->closing)) {
			mutex_unlock(&state->recycle_mutex);
			return;
		}

		/* Rewrite the message header to prevent a double release */
		header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
	}

	slot_info->release_count++;

	if (slot_info->release_count == slot_info->use_count) {
		int slot_queue_recycle;
		/* Add to the freed queue */

		/*
		 * A read barrier is necessary here to prevent speculative
		 * fetches of remote->slot_queue_recycle from overtaking the
		 * mutex.
		 */
		rmb();

		slot_queue_recycle = state->remote->slot_queue_recycle;
		state->remote->slot_queue[slot_queue_recycle &
			VCHIQ_SLOT_QUEUE_MASK] =
			SLOT_INDEX_FROM_INFO(state, slot_info);
		state->remote->slot_queue_recycle = slot_queue_recycle + 1;
		dev_dbg(state->dev, "core: %d: %d - recycle->%x\n",
			state->id, SLOT_INDEX_FROM_INFO(state, slot_info),
			state->remote->slot_queue_recycle);

		/*
		 * A write barrier is necessary, but remote_event_signal
		 * contains one.
		 */
		remote_event_signal(state, &state->remote->recycle);
	}

	mutex_unlock(&state->recycle_mutex);
}

static inline enum vchiq_reason
get_bulk_reason(struct vchiq_bulk *bulk)
{
	if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
		if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
			return VCHIQ_BULK_TRANSMIT_ABORTED;

		return VCHIQ_BULK_TRANSMIT_DONE;
	}

	if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
		return VCHIQ_BULK_RECEIVE_ABORTED;

	return VCHIQ_BULK_RECEIVE_DONE;
}

static int service_notify_bulk(struct vchiq_service *service,
			       struct vchiq_bulk *bulk)
{
	if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
		if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
			VCHIQ_SERVICE_STATS_INC(service, bulk_tx_count);
			VCHIQ_SERVICE_STATS_ADD(service, bulk_tx_bytes,
						bulk->actual);
		} else {
			VCHIQ_SERVICE_STATS_INC(service, bulk_rx_count);
			VCHIQ_SERVICE_STATS_ADD(service, bulk_rx_bytes,
						bulk->actual);
		}
	} else {
		VCHIQ_SERVICE_STATS_INC(service, bulk_aborted_count);
	}

	if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
		struct bulk_waiter *waiter;

		spin_lock(&service->state->bulk_waiter_spinlock);
		waiter = bulk->waiter;
		if (waiter) {
			waiter->actual = bulk->actual;
			complete(&waiter->event);
		}
		spin_unlock(&service->state->bulk_waiter_spinlock);
	} else if (bulk->mode == VCHIQ_BULK_MODE_CALLBACK) {
		enum vchiq_reason reason = get_bulk_reason(bulk);

		return make_service_callback(service, reason, NULL, bulk);
	}

	return 0;
}

/* Called by the slot handler - don't hold the bulk mutex */
static int
notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
	     int retry_poll)
{
	int status = 0;

	dev_dbg(service->state->dev,
		"core: %d: nb:%d %cx - p=%x rn=%x r=%x\n",
		service->state->id, service->localport,
		(queue == &service->bulk_tx) ? 't' : 'r',
		queue->process, queue->remote_notify, queue->remove);

	queue->remote_notify = queue->process;

	while (queue->remove != queue->remote_notify) {
		struct vchiq_bulk *bulk =
			&queue->bulks[BULK_INDEX(queue->remove)];

		/*
		 * Only generate callbacks for non-dummy bulk
		 * requests, and non-terminated services
		 */
		if (bulk->dma_addr && service->instance) {
			status = service_notify_bulk(service, bulk);
			if (status == -EAGAIN)
				break;
		}

		queue->remove++;
		complete(&service->bulk_remove_event);
	}
	if (!retry_poll)
		status = 0;

	if (status == -EAGAIN)
		request_poll(service->state, service, (queue == &service->bulk_tx) ?
			     VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);

	return status;
}

static void
poll_services_of_group(struct vchiq_state *state, int group)
{
	u32 flags = atomic_xchg(&state->poll_services[group], 0);
	int i;

	for (i = 0; flags; i++) {
		struct vchiq_service *service;
		u32 service_flags;

		if ((flags & BIT(i)) == 0)
			continue;

		service = find_service_by_port(state, (group << 5) + i);
		flags &= ~BIT(i);

		if (!service)
			continue;

		service_flags = atomic_xchg(&service->poll_flags, 0);
		if (service_flags & BIT(VCHIQ_POLL_REMOVE)) {
			dev_dbg(state->dev, "core: %d: ps - remove %d<->%d\n",
				state->id, service->localport, service->remoteport);

			/*
			 * Make it look like a client, because
			 * it must be removed and not left in
			 * the LISTENING state.
			 */
			service->public_fourcc = VCHIQ_FOURCC_INVALID;

			if (vchiq_close_service_internal(service, NO_CLOSE_RECVD))
				request_poll(state, service, VCHIQ_POLL_REMOVE);
		} else if (service_flags & BIT(VCHIQ_POLL_TERMINATE)) {
			dev_dbg(state->dev, "core: %d: ps - terminate %d<->%d\n",
				state->id, service->localport, service->remoteport);
			if (vchiq_close_service_internal(service, NO_CLOSE_RECVD))
				request_poll(state, service, VCHIQ_POLL_TERMINATE);
		}
		if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
			notify_bulks(service, &service->bulk_tx, RETRY_POLL);
		if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY))
			notify_bulks(service, &service->bulk_rx, RETRY_POLL);
		vchiq_service_put(service);
	}
}

/* Called by the slot handler thread */
static void
poll_services(struct vchiq_state *state)
{
	int group;

	for (group = 0; group < BITSET_SIZE(state->unused_service); group++)
		poll_services_of_group(state, group);
}
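
/*
 * Editorial note: poll_services[] is a bitset with one bit per local
 * port, stored 32 ports to a word; 'group' indexes a word, which is why
 * poll_services_of_group() reconstructs the port as (group << 5) + i
 * and request_poll() masks the port with 0x1f when setting its bit.
 */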

static void
cleanup_pagelistinfo(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo)
{
	if (pagelistinfo->scatterlist_mapped) {
		dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
			     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	}

	if (pagelistinfo->pages_need_release)
		unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);

	dma_free_coherent(instance->state->dev, pagelistinfo->pagelist_buffer_size,
			  pagelistinfo->pagelist, pagelistinfo->dma_addr);
}

static inline bool
is_adjacent_block(u32 *addrs, dma_addr_t addr, unsigned int k)
{
	u32 tmp;

	if (!k)
		return false;

	tmp = (addrs[k - 1] & PAGE_MASK) +
	      (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT);

	return tmp == (addr & PAGE_MASK);
}
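
/*
 * Editorial note: each addrs[] entry is run-length encoded as
 * (block base & PAGE_MASK) | (page count - 1). Worked example, assuming
 * 4 KiB pages: addrs[k - 1] == 0x10002 describes a three-page block
 * starting at 0x10000, so the next block is adjacent exactly when it
 * starts at 0x10000 + 3 * 0x1000 = 0x13000, which is what the
 * arithmetic above computes.
 */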

/* There is a potential problem with partial cache lines (pages?)
 * at the ends of the block when reading. If the CPU accessed anything in
 * the same line (page?) then it may have pulled old data into the cache,
 * obscuring the new data underneath. We can solve this by transferring the
 * partial cache lines separately, and allowing the ARM to copy into the
 * cached area.
 */
static struct vchiq_pagelist_info *
create_pagelist(struct vchiq_instance *instance, struct vchiq_bulk *bulk)
{
	struct vchiq_drv_mgmt *drv_mgmt;
	struct pagelist *pagelist;
	struct vchiq_pagelist_info *pagelistinfo;
	struct page **pages;
	u32 *addrs;
	unsigned int num_pages, offset, i, k;
	int actual_pages;
	size_t pagelist_size;
	struct scatterlist *scatterlist, *sg;
	int dma_buffers;
	unsigned int cache_line_size;
	dma_addr_t dma_addr;
	size_t count = bulk->size;
	unsigned short type = (bulk->dir == VCHIQ_BULK_RECEIVE)
			      ? PAGELIST_READ : PAGELIST_WRITE;

	if (count >= INT_MAX - PAGE_SIZE)
		return NULL;

	drv_mgmt = dev_get_drvdata(instance->state->dev);

	if (bulk->offset)
		offset = (uintptr_t)bulk->offset & (PAGE_SIZE - 1);
	else
		offset = (uintptr_t)bulk->uoffset & (PAGE_SIZE - 1);
	num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);

	if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
				 sizeof(struct vchiq_pagelist_info)) /
				(sizeof(u32) + sizeof(pages[0]) +
				 sizeof(struct scatterlist)))
		return NULL;

	pagelist_size = sizeof(struct pagelist) +
			(num_pages * sizeof(u32)) +
			(num_pages * sizeof(pages[0]) +
			(num_pages * sizeof(struct scatterlist))) +
			sizeof(struct vchiq_pagelist_info);

	/* Allocate enough storage to hold the page pointers and the page
	 * list
	 */
	pagelist = dma_alloc_coherent(instance->state->dev, pagelist_size, &dma_addr,
				      GFP_KERNEL);

	dev_dbg(instance->state->dev, "arm: %pK\n", pagelist);

	if (!pagelist)
		return NULL;

	addrs = pagelist->addrs;
	pages = (struct page **)(addrs + num_pages);
	scatterlist = (struct scatterlist *)(pages + num_pages);
	pagelistinfo = (struct vchiq_pagelist_info *)
		       (scatterlist + num_pages);

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Populate the fields of the pagelistinfo structure */
	pagelistinfo->pagelist = pagelist;
	pagelistinfo->pagelist_buffer_size = pagelist_size;
	pagelistinfo->dma_addr = dma_addr;
	pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ?
				 DMA_TO_DEVICE : DMA_FROM_DEVICE;
	pagelistinfo->num_pages = num_pages;
	pagelistinfo->pages_need_release = 0;
	pagelistinfo->pages = pages;
	pagelistinfo->scatterlist = scatterlist;
	pagelistinfo->scatterlist_mapped = 0;

	if (bulk->offset) {
		unsigned long length = count;
		unsigned int off = offset;

		for (actual_pages = 0; actual_pages < num_pages;
		     actual_pages++) {
			struct page *pg =
				vmalloc_to_page((char *)bulk->offset +
						(actual_pages * PAGE_SIZE));
			size_t bytes = PAGE_SIZE - off;

			if (!pg) {
				cleanup_pagelistinfo(instance, pagelistinfo);
				return NULL;
			}

			if (bytes > length)
				bytes = length;
			pages[actual_pages] = pg;
			length -= bytes;
			off = 0;
		}
		/* do not try and release vmalloc pages */
	} else {
		actual_pages =
			pin_user_pages_fast((unsigned long)bulk->uoffset & PAGE_MASK, num_pages,
					    type == PAGELIST_READ, pages);

		if (actual_pages != num_pages) {
			dev_dbg(instance->state->dev, "arm: Only %d/%d pages locked\n",
				actual_pages, num_pages);

			/* This is probably due to the process being killed */
			if (actual_pages > 0)
				unpin_user_pages(pages, actual_pages);
			cleanup_pagelistinfo(instance, pagelistinfo);
			return NULL;
		}
		/* release user pages */
		pagelistinfo->pages_need_release = 1;
	}

	/*
	 * Initialize the scatterlist so that the magic cookie
	 * is filled if debugging is enabled
	 */
	sg_init_table(scatterlist, num_pages);
	/* Now set the pages for each scatterlist */
	for (i = 0; i < num_pages; i++) {
		unsigned int len = PAGE_SIZE - offset;

		if (len > count)
			len = count;
		sg_set_page(scatterlist + i, pages[i], len, offset);
		offset = 0;
		count -= len;
	}

	dma_buffers = dma_map_sg(instance->state->dev,
				 scatterlist,
				 num_pages,
				 pagelistinfo->dma_dir);

	if (dma_buffers == 0) {
		cleanup_pagelistinfo(instance, pagelistinfo);
		return NULL;
	}

	pagelistinfo->scatterlist_mapped = 1;

	/* Combine adjacent blocks for performance */
	k = 0;
	for_each_sg(scatterlist, sg, dma_buffers, i) {
		unsigned int len = sg_dma_len(sg);
		dma_addr_t addr = sg_dma_address(sg);

		/* Note: each addrs entry is the block address + (page count - 1).
		 * The firmware expects blocks after the first to be page-
		 * aligned and a multiple of the page size.
		 */
1649 WARN_ON(len == 0);
1650 WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
1651 WARN_ON(i && (addr & ~PAGE_MASK));
1652 if (is_adjacent_block(addrs, addr, k))
1653 addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
1654 else
1655 addrs[k++] = (addr & PAGE_MASK) |
1656 (((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
1657 }
1658
1659 /* Partial cache lines (fragments) require special measures */
1660 cache_line_size = drv_mgmt->info->cache_line_size;
1661 if ((type == PAGELIST_READ) &&
1662 ((pagelist->offset & (cache_line_size - 1)) ||
1663 ((pagelist->offset + pagelist->length) & (cache_line_size - 1)))) {
1664 char *fragments;
1665
1666 if (down_interruptible(&drv_mgmt->free_fragments_sema)) {
1667 cleanup_pagelistinfo(instance, pagelistinfo);
1668 return NULL;
1669 }
1670
1671 WARN_ON(!drv_mgmt->free_fragments);
1672
1673 down(&drv_mgmt->free_fragments_mutex);
1674 fragments = drv_mgmt->free_fragments;
1675 WARN_ON(!fragments);
1676 drv_mgmt->free_fragments = *(char **)drv_mgmt->free_fragments;
1677 up(&drv_mgmt->free_fragments_mutex);
1678 pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
1679 (fragments - drv_mgmt->fragments_base) / drv_mgmt->fragments_size;
1680 }
1681
1682 return pagelistinfo;
1683 }
1684
1685 static void
free_pagelist(struct vchiq_instance * instance,struct vchiq_pagelist_info * pagelistinfo,int actual)1686 free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo,
1687 int actual)
1688 {
1689 struct vchiq_drv_mgmt *drv_mgmt;
1690 struct pagelist *pagelist = pagelistinfo->pagelist;
1691 struct page **pages = pagelistinfo->pages;
1692 unsigned int num_pages = pagelistinfo->num_pages;
1693 unsigned int cache_line_size;
1694
1695 dev_dbg(instance->state->dev, "arm: %pK, %d\n", pagelistinfo->pagelist, actual);
1696
1697 drv_mgmt = dev_get_drvdata(instance->state->dev);
1698
1699 /*
1700 * NOTE: dma_unmap_sg must be called before the
1701 * cpu can touch any of the data/pages.
1702 */
1703 dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
1704 pagelistinfo->num_pages, pagelistinfo->dma_dir);
1705 pagelistinfo->scatterlist_mapped = 0;
1706
1707 /* Deal with any partial cache lines (fragments) */
1708 cache_line_size = drv_mgmt->info->cache_line_size;
1709 if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS && drv_mgmt->fragments_base) {
1710 char *fragments = drv_mgmt->fragments_base +
1711 (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
1712 drv_mgmt->fragments_size;
1713 int head_bytes, tail_bytes;
1714
1715 head_bytes = (cache_line_size - pagelist->offset) &
1716 (cache_line_size - 1);
1717 tail_bytes = (pagelist->offset + actual) &
1718 (cache_line_size - 1);
1719
1720 if ((actual >= 0) && (head_bytes != 0)) {
1721 if (head_bytes > actual)
1722 head_bytes = actual;
1723
1724 memcpy_to_page(pages[0], pagelist->offset,
1725 fragments, head_bytes);
1726 }
1727 if ((actual >= 0) && (head_bytes < actual) &&
1728 (tail_bytes != 0))
1729 memcpy_to_page(pages[num_pages - 1],
1730 (pagelist->offset + actual) &
1731 (PAGE_SIZE - 1) & ~(cache_line_size - 1),
1732 fragments + cache_line_size,
1733 tail_bytes);
1734
1735 down(&drv_mgmt->free_fragments_mutex);
1736 *(char **)fragments = drv_mgmt->free_fragments;
1737 drv_mgmt->free_fragments = fragments;
1738 up(&drv_mgmt->free_fragments_mutex);
1739 up(&drv_mgmt->free_fragments_sema);
1740 }
1741
1742 /* Need to mark all the pages dirty. */
1743 if (pagelist->type != PAGELIST_WRITE &&
1744 pagelistinfo->pages_need_release) {
1745 unsigned int i;
1746
1747 for (i = 0; i < num_pages; i++)
1748 set_page_dirty(pages[i]);
1749 }
1750
1751 cleanup_pagelistinfo(instance, pagelistinfo);
1752 }
1753
1754 static int
1755 vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk)
1756 {
1757 struct vchiq_pagelist_info *pagelistinfo;
1758
1759 pagelistinfo = create_pagelist(instance, bulk);
1760
1761 if (!pagelistinfo)
1762 return -ENOMEM;
1763
1764 bulk->dma_addr = pagelistinfo->dma_addr;
1765
1766 /*
1767 * Store the pagelistinfo address in remote_data,
1768 * which isn't used by the slave.
1769 */
1770 bulk->remote_data = pagelistinfo;
1771
1772 return 0;
1773 }
1774
1775 static void
1776 vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk)
1777 {
1778 if (bulk && bulk->remote_data && bulk->actual)
1779 free_pagelist(instance, (struct vchiq_pagelist_info *)bulk->remote_data,
1780 bulk->actual);
1781 }
1782
1783 /* Called with the bulk_mutex held */
1784 static void
1785 abort_outstanding_bulks(struct vchiq_service *service,
1786 struct vchiq_bulk_queue *queue)
1787 {
1788 int is_tx = (queue == &service->bulk_tx);
1789
1790 dev_dbg(service->state->dev,
1791 "core: %d: aob:%d %cx - li=%x ri=%x p=%x\n",
1792 service->state->id, service->localport,
1793 is_tx ? 't' : 'r', queue->local_insert,
1794 queue->remote_insert, queue->process);
1795
1796 WARN_ON((int)(queue->local_insert - queue->process) < 0);
1797 WARN_ON((int)(queue->remote_insert - queue->process) < 0);
1798
1799 while ((queue->process != queue->local_insert) ||
1800 (queue->process != queue->remote_insert)) {
1801 struct vchiq_bulk *bulk = &queue->bulks[BULK_INDEX(queue->process)];
1802
1803 if (queue->process == queue->remote_insert) {
1804 /* fabricate a matching dummy bulk */
1805 bulk->remote_data = NULL;
1806 bulk->remote_size = 0;
1807 queue->remote_insert++;
1808 }
1809
1810 if (queue->process != queue->local_insert) {
1811 vchiq_complete_bulk(service->instance, bulk);
1812
1813 dev_dbg(service->state->dev,
1814 "core_msg: %s %p4cc d:%d ABORTED - tx len:%d, rx len:%d\n",
1815 is_tx ? "Send Bulk to" : "Recv Bulk from",
1816 &service->base.fourcc,
1817 service->remoteport, bulk->size, bulk->remote_size);
1818 } else {
1819 /* fabricate a matching dummy bulk */
1820 bulk->dma_addr = 0;
1821 bulk->size = 0;
1822 bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
1823 bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
1824 VCHIQ_BULK_RECEIVE;
1825 queue->local_insert++;
1826 }
1827
1828 queue->process++;
1829 }
1830 }
1831
1832 static int
1833 parse_open(struct vchiq_state *state, struct vchiq_header *header)
1834 {
1835 const struct vchiq_open_payload *payload;
1836 struct vchiq_openack_payload ack_payload;
1837 struct vchiq_service *service = NULL;
1838 int msgid, size;
1839 int openack_id;
1840 unsigned int localport, remoteport, fourcc;
1841 short version, version_min;
1842
1843 msgid = header->msgid;
1844 size = header->size;
1845 localport = VCHIQ_MSG_DSTPORT(msgid);
1846 remoteport = VCHIQ_MSG_SRCPORT(msgid);
1847 if (size < sizeof(struct vchiq_open_payload))
1848 goto fail_open;
1849
1850 payload = (struct vchiq_open_payload *)header->data;
1851 fourcc = payload->fourcc;
1852 dev_dbg(state->dev, "core: %d: prs OPEN@%pK (%d->'%p4cc')\n",
1853 state->id, header, localport, &fourcc);
1854
1855 service = get_listening_service(state, fourcc);
1856 if (!service)
1857 goto fail_open;
1858
1859 /* A matching service exists */
1860 version = payload->version;
1861 version_min = payload->version_min;
1862
1863 if ((service->version < version_min) || (version < service->version_min)) {
1864 /* Version mismatch */
1865 dev_err(state->dev, "%d: service %d (%p4cc) version mismatch - local (%d, min %d) vs. remote (%d, min %d)",
1866 state->id, service->localport, &fourcc,
1867 service->version, service->version_min, version, version_min);
1868 vchiq_service_put(service);
1869 service = NULL;
1870 goto fail_open;
1871 }
1872 service->peer_version = version;
1873
1874 if (service->srvstate != VCHIQ_SRVSTATE_LISTENING)
1875 goto done;
1876
1877 ack_payload.version = service->version;
1878 openack_id = MAKE_OPENACK(service->localport, remoteport);
1879
1880 if (state->version_common < VCHIQ_VERSION_SYNCHRONOUS_MODE)
1881 service->sync = 0;
1882
1883 /* Acknowledge the OPEN */
1884 if (service->sync) {
1885 if (queue_message_sync(state, NULL, openack_id,
1886 memcpy_copy_callback,
1887 &ack_payload,
1888 sizeof(ack_payload)) == -EAGAIN)
1889 goto bail_not_ready;
1890
1891 /* The service is now open */
1892 set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC);
1893 } else {
1894 if (queue_message(state, NULL, openack_id,
1895 memcpy_copy_callback, &ack_payload,
1896 sizeof(ack_payload), 0) == -EINTR)
1897 goto bail_not_ready;
1898
1899 /* The service is now open */
1900 set_service_state(service, VCHIQ_SRVSTATE_OPEN);
1901 }
1902
1903 done:
1904 /* Success - the message has been dealt with */
1905 vchiq_service_put(service);
1906 return 1;
1907
1908 fail_open:
1909 /* No available service, or an invalid request - send a CLOSE */
1910 if (queue_message(state, NULL, MAKE_CLOSE(0, VCHIQ_MSG_SRCPORT(msgid)),
1911 NULL, NULL, 0, 0) == -EINTR)
1912 goto bail_not_ready;
1913
1914 return 1;
1915
1916 bail_not_ready:
1917 if (service)
1918 vchiq_service_put(service);
1919
1920 return 0;
1921 }
1922
1923 /**
1924 * parse_message() - parses a single message from the rx slot
1925 * @state: vchiq state struct
1926 * @header: message header
1927 *
1928 * Context: Process context
1929 *
1930 * Return:
1931 * * >= 0 - size of the parsed message payload (without header)
1932 * * -EINVAL - fatal error occurred, bailing out is required
1933 */
1934 static int
1935 parse_message(struct vchiq_state *state, struct vchiq_header *header)
1936 {
1937 struct vchiq_service *service = NULL;
1938 unsigned int localport, remoteport;
1939 int msgid, size, type, ret = -EINVAL;
1940 int svc_fourcc;
1941
1942 DEBUG_INITIALISE(state->local);
1943
1944 DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
1945 msgid = header->msgid;
1946 DEBUG_VALUE(PARSE_MSGID, msgid);
1947 size = header->size;
1948 type = VCHIQ_MSG_TYPE(msgid);
1949 localport = VCHIQ_MSG_DSTPORT(msgid);
1950 remoteport = VCHIQ_MSG_SRCPORT(msgid);
1951
1952 if (type != VCHIQ_MSG_DATA)
1953 VCHIQ_STATS_INC(state, ctrl_rx_count);
1954
1955 switch (type) {
1956 case VCHIQ_MSG_OPENACK:
1957 case VCHIQ_MSG_CLOSE:
1958 case VCHIQ_MSG_DATA:
1959 case VCHIQ_MSG_BULK_RX:
1960 case VCHIQ_MSG_BULK_TX:
1961 case VCHIQ_MSG_BULK_RX_DONE:
1962 case VCHIQ_MSG_BULK_TX_DONE:
1963 service = find_service_by_port(state, localport);
1964 if ((!service ||
1965 ((service->remoteport != remoteport) &&
1966 (service->remoteport != VCHIQ_PORT_FREE))) &&
1967 (localport == 0) &&
1968 (type == VCHIQ_MSG_CLOSE)) {
1969 /*
1970 * This could be a CLOSE from a client which
1971 * hadn't yet received the OPENACK - look for
1972 * the connected service
1973 */
1974 if (service)
1975 vchiq_service_put(service);
1976 service = get_connected_service(state, remoteport);
1977 if (service)
1978 dev_warn(state->dev,
1979 "core: %d: prs %s@%pK (%d->%d) - found connected service %d\n",
1980 state->id, msg_type_str(type), header,
1981 remoteport, localport, service->localport);
1982 }
1983
1984 if (!service) {
1985 dev_err(state->dev,
1986 "core: %d: prs %s@%pK (%d->%d) - invalid/closed service %d\n",
1987 state->id, msg_type_str(type), header, remoteport,
1988 localport, localport);
1989 goto skip_message;
1990 }
1991 break;
1992 default:
1993 break;
1994 }
1995
1996 svc_fourcc = service ? service->base.fourcc
1997 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1998
1999 dev_dbg(state->dev, "core_msg: Rcvd Msg %s(%u) from %p4cc s:%d d:%d len:%d\n",
2000 msg_type_str(type), type, &svc_fourcc, remoteport, localport, size);
2001 if (size > 0)
2002 vchiq_log_dump_mem(state->dev, "Rcvd", 0, header->data, min(16, size));
2003
2004 if (((unsigned long)header & VCHIQ_SLOT_MASK) +
2005 calc_stride(size) > VCHIQ_SLOT_SIZE) {
2006 dev_err(state->dev, "core: header %pK (msgid %x) - size %x too big for slot\n",
2007 header, (unsigned int)msgid, (unsigned int)size);
2008 WARN(1, "oversized for slot\n");
2009 }
2010
2011 switch (type) {
2012 case VCHIQ_MSG_OPEN:
2013 WARN_ON(VCHIQ_MSG_DSTPORT(msgid));
2014 if (!parse_open(state, header))
2015 goto bail_not_ready;
2016 break;
2017 case VCHIQ_MSG_OPENACK:
2018 if (size >= sizeof(struct vchiq_openack_payload)) {
2019 const struct vchiq_openack_payload *payload =
2020 (struct vchiq_openack_payload *)
2021 header->data;
2022 service->peer_version = payload->version;
2023 }
2024 dev_dbg(state->dev,
2025 "core: %d: prs OPENACK@%pK,%x (%d->%d) v:%d\n",
2026 state->id, header, size, remoteport, localport,
2027 service->peer_version);
2028 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
2029 service->remoteport = remoteport;
2030 set_service_state(service, VCHIQ_SRVSTATE_OPEN);
2031 complete(&service->remove_event);
2032 } else {
2033 dev_err(state->dev, "core: OPENACK received in state %s\n",
2034 srvstate_names[service->srvstate]);
2035 }
2036 break;
2037 case VCHIQ_MSG_CLOSE:
2038 WARN_ON(size); /* There should be no data */
2039
2040 dev_dbg(state->dev, "core: %d: prs CLOSE@%pK (%d->%d)\n",
2041 state->id, header, remoteport, localport);
2042
2043 mark_service_closing_internal(service, 1);
2044
2045 if (vchiq_close_service_internal(service, CLOSE_RECVD) == -EAGAIN)
2046 goto bail_not_ready;
2047
2048 dev_dbg(state->dev, "core: Close Service %p4cc s:%u d:%d\n",
2049 &service->base.fourcc, service->localport, service->remoteport);
2050 break;
2051 case VCHIQ_MSG_DATA:
2052 dev_dbg(state->dev, "core: %d: prs DATA@%pK,%x (%d->%d)\n",
2053 state->id, header, size, remoteport, localport);
2054
2055 if ((service->remoteport == remoteport) &&
2056 (service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
2057 header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
2058 claim_slot(state->rx_info);
2059 DEBUG_TRACE(PARSE_LINE);
2060 if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
2061 NULL) == -EAGAIN) {
2062 DEBUG_TRACE(PARSE_LINE);
2063 goto bail_not_ready;
2064 }
2065 VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
2066 VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes, size);
2067 } else {
2068 VCHIQ_STATS_INC(state, error_count);
2069 }
2070 break;
2071 case VCHIQ_MSG_CONNECT:
2072 dev_dbg(state->dev, "core: %d: prs CONNECT@%pK\n",
2073 state->id, header);
2074 state->version_common = ((struct vchiq_slot_zero *)
2075 state->slot_data)->version;
2076 complete(&state->connect);
2077 break;
2078 case VCHIQ_MSG_BULK_RX:
2079 case VCHIQ_MSG_BULK_TX:
2080 /*
2081 * We should never receive a bulk request from the
2082 * other side since we're not set up to perform as the
2083 * master.
2084 */
2085 WARN_ON(1);
2086 break;
2087 case VCHIQ_MSG_BULK_RX_DONE:
2088 case VCHIQ_MSG_BULK_TX_DONE:
2089 if ((service->remoteport == remoteport) &&
2090 (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
2091 struct vchiq_bulk_queue *queue;
2092 struct vchiq_bulk *bulk;
2093
2094 queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
2095 &service->bulk_rx : &service->bulk_tx;
2096
2097 DEBUG_TRACE(PARSE_LINE);
2098 if (mutex_lock_killable(&service->bulk_mutex)) {
2099 DEBUG_TRACE(PARSE_LINE);
2100 goto bail_not_ready;
2101 }
2102 if ((int)(queue->remote_insert -
2103 queue->local_insert) >= 0) {
2104 dev_err(state->dev,
2105 "core: %d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)\n",
2106 state->id, msg_type_str(type), header, remoteport,
2107 localport, queue->remote_insert, queue->local_insert);
2108 mutex_unlock(&service->bulk_mutex);
2109 break;
2110 }
2111 if (queue->process != queue->remote_insert) {
2112 dev_err(state->dev, "%s: p %x != ri %x\n",
2113 __func__, queue->process,
2114 queue->remote_insert);
2115 mutex_unlock(&service->bulk_mutex);
2116 goto bail_not_ready;
2117 }
2118
2119 bulk = &queue->bulks[BULK_INDEX(queue->remote_insert)];
2120 bulk->actual = *(int *)header->data;
2121 queue->remote_insert++;
2122
2123 dev_dbg(state->dev, "core: %d: prs %s@%pK (%d->%d) %x@%pad\n",
2124 state->id, msg_type_str(type), header, remoteport,
2125 localport, bulk->actual, &bulk->dma_addr);
2126
2127 dev_dbg(state->dev, "core: %d: prs:%d %cx li=%x ri=%x p=%x\n",
2128 state->id, localport,
2129 (type == VCHIQ_MSG_BULK_RX_DONE) ? 'r' : 't',
2130 queue->local_insert, queue->remote_insert, queue->process);
2131
2132 DEBUG_TRACE(PARSE_LINE);
2133 WARN_ON(queue->process == queue->local_insert);
2134 vchiq_complete_bulk(service->instance, bulk);
2135 queue->process++;
2136 mutex_unlock(&service->bulk_mutex);
2137 DEBUG_TRACE(PARSE_LINE);
2138 notify_bulks(service, queue, RETRY_POLL);
2139 DEBUG_TRACE(PARSE_LINE);
2140 }
2141 break;
2142 case VCHIQ_MSG_PADDING:
2143 dev_dbg(state->dev, "core: %d: prs PADDING@%pK,%x\n",
2144 state->id, header, size);
2145 break;
2146 case VCHIQ_MSG_PAUSE:
2147 /* If initiated, signal the application thread */
2148 dev_dbg(state->dev, "core: %d: prs PAUSE@%pK,%x\n",
2149 state->id, header, size);
2150 if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
2151 dev_err(state->dev, "core: %d: PAUSE received in state PAUSED\n",
2152 state->id);
2153 break;
2154 }
2155 if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
2156 /* Send a PAUSE in response */
2157 if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
2158 QMFLAGS_NO_MUTEX_UNLOCK) == -EINTR)
2159 goto bail_not_ready;
2160 }
2161 /* At this point slot_mutex is held */
2162 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
2163 break;
2164 case VCHIQ_MSG_RESUME:
2165 dev_dbg(state->dev, "core: %d: prs RESUME@%pK,%x\n",
2166 state->id, header, size);
2167 /* Release the slot mutex */
2168 mutex_unlock(&state->slot_mutex);
2169 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
2170 break;
2171
2172 case VCHIQ_MSG_REMOTE_USE:
2173 vchiq_on_remote_use(state);
2174 break;
2175 case VCHIQ_MSG_REMOTE_RELEASE:
2176 vchiq_on_remote_release(state);
2177 break;
2178 case VCHIQ_MSG_REMOTE_USE_ACTIVE:
2179 break;
2180
2181 default:
2182 dev_err(state->dev, "core: %d: prs invalid msgid %x@%pK,%x\n",
2183 state->id, msgid, header, size);
2184 WARN(1, "invalid message\n");
2185 break;
2186 }
2187
2188 skip_message:
2189 ret = size;
2190
2191 bail_not_ready:
2192 if (service)
2193 vchiq_service_put(service);
2194
2195 return ret;
2196 }
2197
2198 /* Called by the slot handler thread */
2199 static void
2200 parse_rx_slots(struct vchiq_state *state)
2201 {
2202 struct vchiq_shared_state *remote = state->remote;
2203 int tx_pos;
2204
2205 DEBUG_INITIALISE(state->local);
2206
2207 tx_pos = remote->tx_pos;
2208
2209 while (state->rx_pos != tx_pos) {
2210 struct vchiq_header *header;
2211 int size;
2212
2213 DEBUG_TRACE(PARSE_LINE);
2214 if (!state->rx_data) {
2215 int rx_index;
2216
2217 WARN_ON(state->rx_pos & VCHIQ_SLOT_MASK);
2218 rx_index = remote->slot_queue[
2219 SLOT_QUEUE_INDEX_FROM_POS_MASKED(state->rx_pos)];
2220 state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
2221 rx_index);
2222 state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
2223
2224 /*
2225 * Initialise use_count to one, and increment
2226 * release_count at the end of the slot to avoid
2227 * releasing the slot prematurely.
2228 */
2229 state->rx_info->use_count = 1;
2230 state->rx_info->release_count = 0;
2231 }
2232
2233 header = (struct vchiq_header *)(state->rx_data +
2234 (state->rx_pos & VCHIQ_SLOT_MASK));
2235 size = parse_message(state, header);
2236 if (size < 0)
2237 return;
2238
2239 state->rx_pos += calc_stride(size);
2240
2241 DEBUG_TRACE(PARSE_LINE);
2242 /*
2243 * Perform some housekeeping when the end of the slot is
2244 * reached.
2245 */
2246 if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
2247 /* Remove the extra reference count. */
2248 release_slot(state, state->rx_info, NULL, NULL);
2249 state->rx_data = NULL;
2250 }
2251 }
2252 }
2253
2254 /**
2255 * handle_poll() - handle service polling and other rare conditions
2256 * @state: vchiq state struct
2257 *
2258 * Context: Process context
2259 *
2260 * Return:
2261 * * 0 - poll handled successfully
2262 * * -EAGAIN - retry later
2263 */
2264 static int
2265 handle_poll(struct vchiq_state *state)
2266 {
2267 switch (state->conn_state) {
2268 case VCHIQ_CONNSTATE_CONNECTED:
2269 /* Poll the services as requested */
2270 poll_services(state);
2271 break;
2272
2273 case VCHIQ_CONNSTATE_PAUSING:
2274 if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
2275 QMFLAGS_NO_MUTEX_UNLOCK) != -EINTR) {
2276 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSE_SENT);
2277 } else {
2278 /* Retry later */
2279 return -EAGAIN;
2280 }
2281 break;
2282
2283 case VCHIQ_CONNSTATE_RESUMING:
2284 if (queue_message(state, NULL, MAKE_RESUME, NULL, NULL, 0,
2285 QMFLAGS_NO_MUTEX_LOCK) != -EINTR) {
2286 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
2287 } else {
2288 /*
2289 * This should really be impossible,
2290 * since the PAUSE should have flushed
2291 * through outstanding messages.
2292 */
2293 dev_err(state->dev, "core: Failed to send RESUME message\n");
2294 }
2295 break;
2296 default:
2297 break;
2298 }
2299
2300 return 0;
2301 }
2302
2303 /* Called by the slot handler thread */
2304 static int
2305 slot_handler_func(void *v)
2306 {
2307 struct vchiq_state *state = v;
2308 struct vchiq_shared_state *local = state->local;
2309 int ret;
2310
2311 DEBUG_INITIALISE(local);
2312
2313 while (!kthread_should_stop()) {
2314 DEBUG_COUNT(SLOT_HANDLER_COUNT);
2315 DEBUG_TRACE(SLOT_HANDLER_LINE);
2316 ret = remote_event_wait(&state->trigger_event, &local->trigger);
2317 if (ret)
2318 return ret;
2319
2320 /* Ensure that reads don't overtake the remote_event_wait. */
2321 rmb();
2322
2323 DEBUG_TRACE(SLOT_HANDLER_LINE);
2324 if (state->poll_needed) {
2325 state->poll_needed = 0;
2326
2327 /*
2328 * Handle service polling and other rare conditions here
2329 * out of the mainline code
2330 */
2331 if (handle_poll(state) == -EAGAIN)
2332 state->poll_needed = 1;
2333 }
2334
2335 DEBUG_TRACE(SLOT_HANDLER_LINE);
2336 parse_rx_slots(state);
2337 }
2338 return 0;
2339 }
2340
2341 /* Called by the recycle thread */
2342 static int
2343 recycle_func(void *v)
2344 {
2345 struct vchiq_state *state = v;
2346 struct vchiq_shared_state *local = state->local;
2347 u32 *found;
2348 size_t length;
2349 int ret;
2350
2351 length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
2352
2353 found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
2354 GFP_KERNEL);
2355 if (!found)
2356 return -ENOMEM;
2357
2358 while (!kthread_should_stop()) {
2359 ret = remote_event_wait(&state->recycle_event, &local->recycle);
2360 if (ret)
2361 return ret;
2362
2363 process_free_queue(state, found, length);
2364 }
2365 return 0;
2366 }
2367
2368 /* Called by the sync thread */
2369 static int
2370 sync_func(void *v)
2371 {
2372 struct vchiq_state *state = v;
2373 struct vchiq_shared_state *local = state->local;
2374 struct vchiq_header *header =
2375 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2376 state->remote->slot_sync);
2377 int svc_fourcc;
2378 int ret;
2379
2380 while (!kthread_should_stop()) {
2381 struct vchiq_service *service;
2382 int msgid, size;
2383 int type;
2384 unsigned int localport, remoteport;
2385
2386 ret = remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
2387 if (ret)
2388 return ret;
2389
2390 /* Ensure that reads don't overtake the remote_event_wait. */
2391 rmb();
2392
2393 msgid = header->msgid;
2394 size = header->size;
2395 type = VCHIQ_MSG_TYPE(msgid);
2396 localport = VCHIQ_MSG_DSTPORT(msgid);
2397 remoteport = VCHIQ_MSG_SRCPORT(msgid);
2398
2399 service = find_service_by_port(state, localport);
2400
2401 if (!service) {
2402 dev_err(state->dev,
2403 "sync: %d: sf %s@%pK (%d->%d) - invalid/closed service %d\n",
2404 state->id, msg_type_str(type), header, remoteport,
2405 localport, localport);
2406 release_message_sync(state, header);
2407 continue;
2408 }
2409
2410 svc_fourcc = service->base.fourcc;
2411
2412 dev_dbg(state->dev, "sync: Rcvd Msg %s from %p4cc s:%d d:%d len:%d\n",
2413 msg_type_str(type), &svc_fourcc, remoteport, localport, size);
2414 if (size > 0)
2415 vchiq_log_dump_mem(state->dev, "Rcvd", 0, header->data, min(16, size));
2416
2417 switch (type) {
2418 case VCHIQ_MSG_OPENACK:
2419 if (size >= sizeof(struct vchiq_openack_payload)) {
2420 const struct vchiq_openack_payload *payload =
2421 (struct vchiq_openack_payload *)
2422 header->data;
2423 service->peer_version = payload->version;
2424 }
2425 dev_err(state->dev, "sync: %d: sf OPENACK@%pK,%x (%d->%d) v:%d\n",
2426 state->id, header, size, remoteport, localport,
2427 service->peer_version);
2428 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
2429 service->remoteport = remoteport;
2430 set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC);
2431 service->sync = 1;
2432 complete(&service->remove_event);
2433 }
2434 release_message_sync(state, header);
2435 break;
2436
2437 case VCHIQ_MSG_DATA:
2438 dev_dbg(state->dev, "sync: %d: sf DATA@%pK,%x (%d->%d)\n",
2439 state->id, header, size, remoteport, localport);
2440
2441 if ((service->remoteport == remoteport) &&
2442 (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)) {
2443 if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
2444 NULL) == -EAGAIN)
2445 dev_err(state->dev,
2446 "sync: error: synchronous callback to service %d returns -EAGAIN\n",
2447 localport);
2448 }
2449 break;
2450
2451 default:
2452 dev_err(state->dev, "sync: error: %d: sf unexpected msgid %x@%pK,%x\n",
2453 state->id, msgid, header, size);
2454 release_message_sync(state, header);
2455 break;
2456 }
2457
2458 vchiq_service_put(service);
2459 }
2460
2461 return 0;
2462 }
2463
2464 inline const char *
2465 get_conn_state_name(enum vchiq_connstate conn_state)
2466 {
2467 return conn_state_names[conn_state];
2468 }
2469
2470 struct vchiq_slot_zero *
2471 vchiq_init_slots(struct device *dev, void *mem_base, int mem_size)
2472 {
2473 int mem_align =
2474 (int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
2475 struct vchiq_slot_zero *slot_zero =
2476 (struct vchiq_slot_zero *)(mem_base + mem_align);
2477 int num_slots = (mem_size - mem_align) / VCHIQ_SLOT_SIZE;
2478 int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
2479
2480 check_sizes();
2481
2482 /* Ensure there is enough memory to run an absolutely minimum system */
2483 num_slots -= first_data_slot;
2484
2485 if (num_slots < 4) {
2486 dev_err(dev, "core: %s: Insufficient memory %x bytes\n",
2487 __func__, mem_size);
2488 return NULL;
2489 }
2490
2491 memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));
2492
2493 slot_zero->magic = VCHIQ_MAGIC;
2494 slot_zero->version = VCHIQ_VERSION;
2495 slot_zero->version_min = VCHIQ_VERSION_MIN;
2496 slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
2497 slot_zero->slot_size = VCHIQ_SLOT_SIZE;
2498 slot_zero->max_slots = VCHIQ_MAX_SLOTS;
2499 slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
2500
2501 slot_zero->master.slot_sync = first_data_slot;
2502 slot_zero->master.slot_first = first_data_slot + 1;
2503 slot_zero->master.slot_last = first_data_slot + (num_slots / 2) - 1;
2504 slot_zero->slave.slot_sync = first_data_slot + (num_slots / 2);
2505 slot_zero->slave.slot_first = first_data_slot + (num_slots / 2) + 1;
2506 slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
2507
2508 return slot_zero;
2509 }
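/*
 * Worked example (illustrative only; assumes VCHIQ_SLOT_SIZE is 4 KiB
 * and VCHIQ_SLOT_ZERO_SLOTS is 2): a slot-aligned 1 MiB region gives
 * 256 slots, so num_slots = 256 - 2 = 254 after reserving slot zero.
 * The master then owns sync slot 2 and data slots 3..128, and the
 * slave owns sync slot 129 and data slots 130..255.
 */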
2510
2511 int
2512 vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, struct device *dev)
2513 {
2514 struct vchiq_shared_state *local;
2515 struct vchiq_shared_state *remote;
2516 char threadname[16];
2517 int i, ret;
2518
2519 local = &slot_zero->slave;
2520 remote = &slot_zero->master;
2521
2522 if (local->initialised) {
2523 if (remote->initialised)
2524 dev_err(dev, "local state has already been initialised\n");
2525 else
2526 dev_err(dev, "master/slave mismatch two slaves\n");
2527
2528 return -EINVAL;
2529 }
2530
2531 memset(state, 0, sizeof(struct vchiq_state));
2532
2533 state->dev = dev;
2534
2535 /*
2536 * initialize shared state pointers
2537 */
2538
2539 state->local = local;
2540 state->remote = remote;
2541 state->slot_data = (struct vchiq_slot *)slot_zero;
2542
2543 /*
2544 * initialize events and mutexes
2545 */
2546
2547 init_completion(&state->connect);
2548 mutex_init(&state->mutex);
2549 mutex_init(&state->slot_mutex);
2550 mutex_init(&state->recycle_mutex);
2551 mutex_init(&state->sync_mutex);
2552
2553 spin_lock_init(&state->msg_queue_spinlock);
2554 spin_lock_init(&state->bulk_waiter_spinlock);
2555 spin_lock_init(&state->quota_spinlock);
2556
2557 init_completion(&state->slot_available_event);
2558 init_completion(&state->data_quota_event);
2559
2560 state->slot_queue_available = 0;
2561
2562 for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
2563 struct vchiq_service_quota *quota = &state->service_quotas[i];
2564
2565 init_completion("a->quota_event);
2566 }
2567
2568 for (i = local->slot_first; i <= local->slot_last; i++) {
2569 local->slot_queue[state->slot_queue_available] = i;
2570 state->slot_queue_available++;
2571 complete(&state->slot_available_event);
2572 }
2573
2574 state->default_slot_quota = state->slot_queue_available / 2;
2575 state->default_message_quota =
2576 min_t(unsigned short, state->default_slot_quota * 256, ~0);
2577
2578 state->previous_data_index = -1;
2579 state->data_use_count = 0;
2580 state->data_quota = state->slot_queue_available - 1;
2581
2582 remote_event_create(&state->trigger_event, &local->trigger);
2583 local->tx_pos = 0;
2584 remote_event_create(&state->recycle_event, &local->recycle);
2585 local->slot_queue_recycle = state->slot_queue_available;
2586 remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
2587 remote_event_create(&state->sync_release_event, &local->sync_release);
2588
2589 /* At start-of-day, the slot is empty and available */
2590 ((struct vchiq_header *)
2591 SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
2592 VCHIQ_MSGID_PADDING;
2593 remote_event_signal_local(&state->sync_release_event, &local->sync_release);
2594
2595 local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
2596
2597 ret = vchiq_platform_init_state(state);
2598 if (ret)
2599 return ret;
2600
2601 /*
2602 * bring up slot handler thread
2603 */
2604 snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
2605 state->slot_handler_thread = kthread_create(&slot_handler_func, (void *)state, threadname);
2606
2607 if (IS_ERR(state->slot_handler_thread)) {
2608 dev_err(state->dev, "couldn't create thread %s\n", threadname);
2609 return PTR_ERR(state->slot_handler_thread);
2610 }
2611 set_user_nice(state->slot_handler_thread, -19);
2612
2613 snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
2614 state->recycle_thread = kthread_create(&recycle_func, (void *)state, threadname);
2615 if (IS_ERR(state->recycle_thread)) {
2616 dev_err(state->dev, "couldn't create thread %s\n", threadname);
2617 ret = PTR_ERR(state->recycle_thread);
2618 goto fail_free_handler_thread;
2619 }
2620 set_user_nice(state->recycle_thread, -19);
2621
2622 snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
2623 state->sync_thread = kthread_create(&sync_func, (void *)state, threadname);
2624 if (IS_ERR(state->sync_thread)) {
2625 dev_err(state->dev, "couldn't create thread %s\n", threadname);
2626 ret = PTR_ERR(state->sync_thread);
2627 goto fail_free_recycle_thread;
2628 }
2629 set_user_nice(state->sync_thread, -20);
2630
2631 wake_up_process(state->slot_handler_thread);
2632 wake_up_process(state->recycle_thread);
2633 wake_up_process(state->sync_thread);
2634
2635 /* Indicate readiness to the other side */
2636 local->initialised = 1;
2637
2638 return 0;
2639
2640 fail_free_recycle_thread:
2641 kthread_stop(state->recycle_thread);
2642 fail_free_handler_thread:
2643 kthread_stop(state->slot_handler_thread);
2644
2645 return ret;
2646 }
2647
2648 void vchiq_msg_queue_push(struct vchiq_instance *instance, unsigned int handle,
2649 struct vchiq_header *header)
2650 {
2651 struct vchiq_service *service = find_service_by_handle(instance, handle);
2652 int pos;
2653
2654 if (!service)
2655 return;
2656
2657 while (service->msg_queue_write == service->msg_queue_read +
2658 VCHIQ_MAX_SLOTS) {
2659 if (wait_for_completion_interruptible(&service->msg_queue_pop))
2660 flush_signals(current);
2661 }
2662
2663 pos = service->msg_queue_write & (VCHIQ_MAX_SLOTS - 1);
2664 service->msg_queue_write++;
2665 service->msg_queue[pos] = header;
2666
2667 complete(&service->msg_queue_push);
2668 }
2669 EXPORT_SYMBOL(vchiq_msg_queue_push);
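/*
 * A minimal usage sketch (illustrative, not an in-tree caller): a kernel
 * client's service callback can park an incoming message for a reader
 * thread, which later claims it with vchiq_msg_hold(). The callback
 * prototype is assumed to match vchiq_callback as declared in this
 * driver's headers:
 *
 *	static int my_callback(struct vchiq_instance *instance,
 *			       enum vchiq_reason reason,
 *			       struct vchiq_header *header,
 *			       unsigned int handle, void *cb_data,
 *			       void __user *cb_userdata)
 *	{
 *		if (reason == VCHIQ_MESSAGE_AVAILABLE)
 *			vchiq_msg_queue_push(instance, handle, header);
 *		return 0;
 *	}
 */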
2670
2671 struct vchiq_header *vchiq_msg_hold(struct vchiq_instance *instance, unsigned int handle)
2672 {
2673 struct vchiq_service *service = find_service_by_handle(instance, handle);
2674 struct vchiq_header *header;
2675 int pos;
2676
2677 if (!service)
2678 return NULL;
2679
2680 if (service->msg_queue_write == service->msg_queue_read)
2681 return NULL;
2682
2683 while (service->msg_queue_write == service->msg_queue_read) {
2684 if (wait_for_completion_interruptible(&service->msg_queue_push))
2685 flush_signals(current);
2686 }
2687
2688 pos = service->msg_queue_read & (VCHIQ_MAX_SLOTS - 1);
2689 service->msg_queue_read++;
2690 header = service->msg_queue[pos];
2691
2692 complete(&service->msg_queue_pop);
2693
2694 return header;
2695 }
2696 EXPORT_SYMBOL(vchiq_msg_hold);
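/*
 * Consumer-side sketch (illustrative): a header obtained from
 * vchiq_msg_hold() remains claimed until it is handed back, so it must
 * be released once processed. 'process_payload' is a hypothetical
 * helper:
 *
 *	struct vchiq_header *header = vchiq_msg_hold(instance, handle);
 *
 *	if (header) {
 *		process_payload(header->data, header->size);
 *		vchiq_release_message(instance, handle, header);
 *	}
 */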
2697
2698 static int vchiq_validate_params(struct vchiq_state *state,
2699 const struct vchiq_service_params_kernel *params)
2700 {
2701 if (!params->callback || !params->fourcc) {
2702 dev_err(state->dev, "Can't add service, invalid params\n");
2703 return -EINVAL;
2704 }
2705
2706 return 0;
2707 }
2708
2709 /* Called from application thread when a client or server service is created. */
2710 struct vchiq_service *
2711 vchiq_add_service_internal(struct vchiq_state *state,
2712 const struct vchiq_service_params_kernel *params,
2713 int srvstate, struct vchiq_instance *instance,
2714 void (*userdata_term)(void *userdata))
2715 {
2716 struct vchiq_service *service;
2717 struct vchiq_service __rcu **pservice = NULL;
2718 struct vchiq_service_quota *quota;
2719 int ret;
2720 int i;
2721
2722 ret = vchiq_validate_params(state, params);
2723 if (ret)
2724 return NULL;
2725
2726 service = kzalloc(sizeof(*service), GFP_KERNEL);
2727 if (!service)
2728 return service;
2729
2730 service->base.fourcc = params->fourcc;
2731 service->base.callback = params->callback;
2732 service->base.userdata = params->userdata;
2733 service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
2734 kref_init(&service->ref_count);
2735 service->srvstate = VCHIQ_SRVSTATE_FREE;
2736 service->userdata_term = userdata_term;
2737 service->localport = VCHIQ_PORT_FREE;
2738 service->remoteport = VCHIQ_PORT_FREE;
2739
2740 service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
2741 VCHIQ_FOURCC_INVALID : params->fourcc;
2742 service->auto_close = 1;
2743 atomic_set(&service->poll_flags, 0);
2744 service->version = params->version;
2745 service->version_min = params->version_min;
2746 service->state = state;
2747 service->instance = instance;
2748 init_completion(&service->remove_event);
2749 init_completion(&service->bulk_remove_event);
2750 init_completion(&service->msg_queue_pop);
2751 init_completion(&service->msg_queue_push);
2752 mutex_init(&service->bulk_mutex);
2753
2754 /*
2755 * Although it is perfectly possible to use a spinlock
2756 * to protect the creation of services, it is overkill as it
2757 * disables interrupts while the array is searched.
2758 * The only danger is of another thread trying to create a
2759 * service - service deletion is safe.
2760 * Therefore it is preferable to use state->mutex which,
2761 * although slower to claim, doesn't block interrupts while
2762 * it is held.
2763 */
2764
2765 mutex_lock(&state->mutex);
2766
2767 /* Prepare to use a previously unused service */
2768 if (state->unused_service < VCHIQ_MAX_SERVICES)
2769 pservice = &state->services[state->unused_service];
2770
2771 if (srvstate == VCHIQ_SRVSTATE_OPENING) {
2772 for (i = 0; i < state->unused_service; i++) {
2773 if (!rcu_access_pointer(state->services[i])) {
2774 pservice = &state->services[i];
2775 break;
2776 }
2777 }
2778 } else {
2779 rcu_read_lock();
2780 for (i = (state->unused_service - 1); i >= 0; i--) {
2781 struct vchiq_service *srv;
2782
2783 srv = rcu_dereference(state->services[i]);
2784 if (!srv) {
2785 pservice = &state->services[i];
2786 } else if ((srv->public_fourcc == params->fourcc) &&
2787 ((srv->instance != instance) ||
2788 (srv->base.callback != params->callback))) {
2789 /*
2790 * There is another server using this
2791 * fourcc which doesn't match.
2792 */
2793 pservice = NULL;
2794 break;
2795 }
2796 }
2797 rcu_read_unlock();
2798 }
2799
2800 if (pservice) {
2801 service->localport = (pservice - state->services);
2802 if (!handle_seq)
2803 handle_seq = VCHIQ_MAX_STATES *
2804 VCHIQ_MAX_SERVICES;
2805 service->handle = handle_seq |
2806 (state->id * VCHIQ_MAX_SERVICES) |
2807 service->localport;
2808 handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
2809 rcu_assign_pointer(*pservice, service);
2810 if (pservice == &state->services[state->unused_service])
2811 state->unused_service++;
2812 }
2813
2814 mutex_unlock(&state->mutex);
2815
2816 if (!pservice) {
2817 kfree(service);
2818 return NULL;
2819 }
2820
2821 quota = &state->service_quotas[service->localport];
2822 quota->slot_quota = state->default_slot_quota;
2823 quota->message_quota = state->default_message_quota;
2824 if (quota->slot_use_count == 0)
2825 quota->previous_tx_index =
2826 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
2827 - 1;
2828
2829 /* Bring this service online */
2830 set_service_state(service, srvstate);
2831
2832 dev_dbg(state->dev, "core_msg: %s Service %p4cc SrcPort:%d\n",
2833 (srvstate == VCHIQ_SRVSTATE_OPENING) ? "Open" : "Add",
2834 &params->fourcc, service->localport);
2835
2836 /* Don't unlock the service - leave it with a ref_count of 1. */
2837
2838 return service;
2839 }
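/*
 * Handle layout illustration (not a stable ABI): the handle combines a
 * monotonically advancing sequence with the state id and local port, so
 * a stale handle held after a service is freed will not match a service
 * that later reuses the same port. For example, taking VCHIQ_MAX_STATES
 * as 2 and VCHIQ_MAX_SERVICES as 4096, localport 7 on state 0 in the
 * first generation yields handle = 8192 | 0 | 7 = 0x2007.
 */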
2840
2841 int
2842 vchiq_open_service_internal(struct vchiq_service *service, int client_id)
2843 {
2844 struct vchiq_open_payload payload = {
2845 service->base.fourcc,
2846 client_id,
2847 service->version,
2848 service->version_min
2849 };
2850 int status = 0;
2851
2852 service->client_id = client_id;
2853 vchiq_use_service_internal(service);
2854 status = queue_message(service->state,
2855 NULL, MAKE_OPEN(service->localport),
2856 memcpy_copy_callback,
2857 &payload,
2858 sizeof(payload),
2859 QMFLAGS_IS_BLOCKING);
2860
2861 if (status)
2862 return status;
2863
2864 /* Wait for the ACK/NAK */
2865 if (wait_for_completion_interruptible(&service->remove_event)) {
2866 status = -EAGAIN;
2867 vchiq_release_service_internal(service);
2868 } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
2869 (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
2870 if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
2871 dev_err(service->state->dev,
2872 "core: %d: osi - srvstate = %s (ref %u)\n",
2873 service->state->id, srvstate_names[service->srvstate],
2874 kref_read(&service->ref_count));
2875 status = -EINVAL;
2876 VCHIQ_SERVICE_STATS_INC(service, error_count);
2877 vchiq_release_service_internal(service);
2878 }
2879
2880 return status;
2881 }
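/*
 * Handshake summary (derived from parse_open() above): the client sends
 * OPEN carrying { fourcc, client_id, version, version_min } and blocks
 * on remove_event. The peer replies with OPENACK, moving the service to
 * OPEN (or OPENSYNC), or - when no listening service matches or the
 * versions are incompatible - with CLOSE, which leaves the client in
 * CLOSEWAIT and makes this function return -EINVAL.
 */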
2882
2883 static void
2884 release_service_messages(struct vchiq_service *service)
2885 {
2886 struct vchiq_state *state = service->state;
2887 int slot_last = state->remote->slot_last;
2888 int i;
2889
2890 /* Release any claimed messages aimed at this service */
2891
2892 if (service->sync) {
2893 struct vchiq_header *header =
2894 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2895 state->remote->slot_sync);
2896 if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
2897 release_message_sync(state, header);
2898
2899 return;
2900 }
2901
2902 for (i = state->remote->slot_first; i <= slot_last; i++) {
2903 struct vchiq_slot_info *slot_info =
2904 SLOT_INFO_FROM_INDEX(state, i);
2905 unsigned int pos, end;
2906 char *data;
2907
2908 if (slot_info->release_count == slot_info->use_count)
2909 continue;
2910
2911 data = (char *)SLOT_DATA_FROM_INDEX(state, i);
2912 end = VCHIQ_SLOT_SIZE;
2913 if (data == state->rx_data)
2914 /*
2915 * This buffer is still being read from - stop
2916 * at the current read position
2917 */
2918 end = state->rx_pos & VCHIQ_SLOT_MASK;
2919
2920 pos = 0;
2921
2922 while (pos < end) {
2923 struct vchiq_header *header =
2924 (struct vchiq_header *)(data + pos);
2925 int msgid = header->msgid;
2926 int port = VCHIQ_MSG_DSTPORT(msgid);
2927
2928 if ((port == service->localport) && (msgid & VCHIQ_MSGID_CLAIMED)) {
2929 dev_dbg(state->dev, "core: fsi - hdr %pK\n", header);
2930 release_slot(state, slot_info, header, NULL);
2931 }
2932 pos += calc_stride(header->size);
2933 if (pos > VCHIQ_SLOT_SIZE) {
2934 dev_err(state->dev,
2935 "core: fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x\n",
2936 pos, header, msgid, header->msgid, header->size);
2937 WARN(1, "invalid slot position\n");
2938 }
2939 }
2940 }
2941 }
2942
2943 static int
2944 do_abort_bulks(struct vchiq_service *service)
2945 {
2946 int status;
2947
2948 /* Abort any outstanding bulk transfers */
2949 if (mutex_lock_killable(&service->bulk_mutex))
2950 return 0;
2951 abort_outstanding_bulks(service, &service->bulk_tx);
2952 abort_outstanding_bulks(service, &service->bulk_rx);
2953 mutex_unlock(&service->bulk_mutex);
2954
2955 status = notify_bulks(service, &service->bulk_tx, NO_RETRY_POLL);
2956 if (status)
2957 return 0;
2958
2959 status = notify_bulks(service, &service->bulk_rx, NO_RETRY_POLL);
2960 return !status;
2961 }
2962
2963 static int
2964 close_service_complete(struct vchiq_service *service, int failstate)
2965 {
2966 int status;
2967 int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2968 int newstate;
2969
2970 switch (service->srvstate) {
2971 case VCHIQ_SRVSTATE_OPEN:
2972 case VCHIQ_SRVSTATE_CLOSESENT:
2973 case VCHIQ_SRVSTATE_CLOSERECVD:
2974 if (is_server) {
2975 if (service->auto_close) {
2976 service->client_id = 0;
2977 service->remoteport = VCHIQ_PORT_FREE;
2978 newstate = VCHIQ_SRVSTATE_LISTENING;
2979 } else {
2980 newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
2981 }
2982 } else {
2983 newstate = VCHIQ_SRVSTATE_CLOSED;
2984 }
2985 set_service_state(service, newstate);
2986 break;
2987 case VCHIQ_SRVSTATE_LISTENING:
2988 break;
2989 default:
2990 dev_err(service->state->dev, "core: (%x) called in state %s\n",
2991 service->handle, srvstate_names[service->srvstate]);
2992 WARN(1, "%s in unexpected state\n", __func__);
2993 return -EINVAL;
2994 }
2995
2996 status = make_service_callback(service, VCHIQ_SERVICE_CLOSED, NULL, NULL);
2997
2998 if (status != -EAGAIN) {
2999 int uc = service->service_use_count;
3000 int i;
3001 /* Complete the close process */
3002 for (i = 0; i < uc; i++)
3003 /*
3004 * cater for cases where close is forced and the
3005 * client may not close all its handles
3006 */
3007 vchiq_release_service_internal(service);
3008
3009 service->client_id = 0;
3010 service->remoteport = VCHIQ_PORT_FREE;
3011
3012 if (service->srvstate == VCHIQ_SRVSTATE_CLOSED) {
3013 vchiq_free_service_internal(service);
3014 } else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
3015 if (is_server)
3016 service->closing = 0;
3017
3018 complete(&service->remove_event);
3019 }
3020 } else {
3021 set_service_state(service, failstate);
3022 }
3023
3024 return status;
3025 }
3026
3027 /*
3028 * Prepares a bulk transfer to be queued. The function is killable and is
3029 * intended to be called from user threads. It may return -EINTR to indicate
3030 * that a fatal signal has been received and the call should be retried after
3031 * being returned to user context.
3032 */
3033 static int
3034 vchiq_bulk_xfer_queue_msg_killable(struct vchiq_service *service,
3035 struct vchiq_bulk *bulk_params)
3036 {
3037 struct vchiq_bulk_queue *queue;
3038 struct bulk_waiter *bulk_waiter = NULL;
3039 struct vchiq_bulk *bulk;
3040 struct vchiq_state *state = service->state;
3041 const char dir_char = (bulk_params->dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
3042 const int dir_msgtype = (bulk_params->dir == VCHIQ_BULK_TRANSMIT) ?
3043 VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
3044 int status = -EINVAL;
3045 int payload[2];
3046
3047 if (bulk_params->mode == VCHIQ_BULK_MODE_BLOCKING) {
3048 bulk_waiter = bulk_params->waiter;
3049 init_completion(&bulk_waiter->event);
3050 bulk_waiter->actual = 0;
3051 bulk_waiter->bulk = NULL;
3052 }
3053
3054 queue = (bulk_params->dir == VCHIQ_BULK_TRANSMIT) ?
3055 &service->bulk_tx : &service->bulk_rx;
3056
3057 if (mutex_lock_killable(&service->bulk_mutex))
3058 return -EINTR;
3059
3060 if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
3061 VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
3062 do {
3063 mutex_unlock(&service->bulk_mutex);
3064 if (wait_for_completion_killable(&service->bulk_remove_event))
3065 return -EINTR;
3066 if (mutex_lock_killable(&service->bulk_mutex))
3067 return -EINTR;
3068 } while (queue->local_insert == queue->remove +
3069 VCHIQ_NUM_SERVICE_BULKS);
3070 }
3071
3072 bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
3073
3074 /* Initialize the 'bulk' slot with the bulk parameters passed in. */
3075 bulk->mode = bulk_params->mode;
3076 bulk->dir = bulk_params->dir;
3077 bulk->waiter = bulk_params->waiter;
3078 bulk->cb_data = bulk_params->cb_data;
3079 bulk->cb_userdata = bulk_params->cb_userdata;
3080 bulk->size = bulk_params->size;
3081 bulk->offset = bulk_params->offset;
3082 bulk->uoffset = bulk_params->uoffset;
3083 bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
3084
3085 if (vchiq_prepare_bulk_data(service->instance, bulk))
3086 goto unlock_error_exit;
3087
3088 /*
3089 * Ensure that the bulk data record is visible to the peer
3090 * before proceeding.
3091 */
3092 wmb();
3093
3094 dev_dbg(state->dev, "core: %d: bt (%d->%d) %cx %x@%pad %pK\n",
3095 state->id, service->localport, service->remoteport,
3096 dir_char, bulk->size, &bulk->dma_addr, bulk->cb_data);
3097
3098 /*
3099 * The slot mutex must be held when the service is being closed, so
3100 * claim it here to ensure that isn't happening
3101 */
3102 if (mutex_lock_killable(&state->slot_mutex)) {
3103 status = -EINTR;
3104 goto cancel_bulk_error_exit;
3105 }
3106
3107 if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3108 goto unlock_both_error_exit;
3109
3110 payload[0] = lower_32_bits(bulk->dma_addr);
3111 payload[1] = bulk->size;
3112 status = queue_message(state,
3113 NULL,
3114 VCHIQ_MAKE_MSG(dir_msgtype,
3115 service->localport,
3116 service->remoteport),
3117 memcpy_copy_callback,
3118 &payload,
3119 sizeof(payload),
3120 QMFLAGS_IS_BLOCKING |
3121 QMFLAGS_NO_MUTEX_LOCK |
3122 QMFLAGS_NO_MUTEX_UNLOCK);
3123 if (status)
3124 goto unlock_both_error_exit;
3125
3126 queue->local_insert++;
3127
3128 mutex_unlock(&state->slot_mutex);
3129 mutex_unlock(&service->bulk_mutex);
3130
3131 dev_dbg(state->dev, "core: %d: bt:%d %cx li=%x ri=%x p=%x\n",
3132 state->id, service->localport, dir_char, queue->local_insert,
3133 queue->remote_insert, queue->process);
3134
3135 if (bulk_waiter) {
3136 bulk_waiter->bulk = bulk;
3137 if (wait_for_completion_killable(&bulk_waiter->event))
3138 status = -EINTR;
3139 else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
3140 status = -EINVAL;
3141 }
3142
3143 return status;
3144
3145 unlock_both_error_exit:
3146 mutex_unlock(&state->slot_mutex);
3147 cancel_bulk_error_exit:
3148 vchiq_complete_bulk(service->instance, bulk);
3149 unlock_error_exit:
3150 mutex_unlock(&service->bulk_mutex);
3151
3152 return status;
3153 }
3154
3155 /* Called by the slot handler */
3156 int
3157 vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
3158 {
3159 struct vchiq_state *state = service->state;
3160 int status = 0;
3161 int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
3162 int close_id = MAKE_CLOSE(service->localport,
3163 VCHIQ_MSG_DSTPORT(service->remoteport));
3164
3165 dev_dbg(state->dev, "core: %d: csi:%d,%d (%s)\n",
3166 service->state->id, service->localport, close_recvd,
3167 srvstate_names[service->srvstate]);
3168
3169 switch (service->srvstate) {
3170 case VCHIQ_SRVSTATE_CLOSED:
3171 case VCHIQ_SRVSTATE_HIDDEN:
3172 case VCHIQ_SRVSTATE_LISTENING:
3173 case VCHIQ_SRVSTATE_CLOSEWAIT:
3174 if (close_recvd) {
3175 dev_err(state->dev, "core: (1) called in state %s\n",
3176 srvstate_names[service->srvstate]);
3177 break;
3178 } else if (!is_server) {
3179 vchiq_free_service_internal(service);
3180 break;
3181 }
3182
3183 if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
3184 status = -EINVAL;
3185 } else {
3186 service->client_id = 0;
3187 service->remoteport = VCHIQ_PORT_FREE;
3188 if (service->srvstate == VCHIQ_SRVSTATE_CLOSEWAIT)
3189 set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
3190 }
3191 complete(&service->remove_event);
3192 break;
3193 case VCHIQ_SRVSTATE_OPENING:
3194 if (close_recvd) {
3195 /* The open was rejected - tell the user */
3196 set_service_state(service, VCHIQ_SRVSTATE_CLOSEWAIT);
3197 complete(&service->remove_event);
3198 } else {
3199 /* Shutdown mid-open - let the other side know */
3200 status = queue_message(state, service, close_id, NULL, NULL, 0, 0);
3201 }
3202 break;
3203
3204 case VCHIQ_SRVSTATE_OPENSYNC:
3205 mutex_lock(&state->sync_mutex);
3206 fallthrough;
3207 case VCHIQ_SRVSTATE_OPEN:
3208 if (close_recvd) {
3209 if (!do_abort_bulks(service))
3210 status = -EAGAIN;
3211 }
3212
3213 release_service_messages(service);
3214
3215 if (!status)
3216 status = queue_message(state, service, close_id, NULL,
3217 NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
3218
3219 if (status) {
3220 if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)
3221 mutex_unlock(&state->sync_mutex);
3222 break;
3223 }
3224
3225 if (!close_recvd) {
3226 /* Change the state while the mutex is still held */
3227 set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
3228 mutex_unlock(&state->slot_mutex);
3229 if (service->sync)
3230 mutex_unlock(&state->sync_mutex);
3231 break;
3232 }
3233
3234 /* Change the state while the mutex is still held */
3235 set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
3236 mutex_unlock(&state->slot_mutex);
3237 if (service->sync)
3238 mutex_unlock(&state->sync_mutex);
3239
3240 status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
3241 break;
3242
3243 case VCHIQ_SRVSTATE_CLOSESENT:
3244 if (!close_recvd)
3245 /* This happens when a process is killed mid-close */
3246 break;
3247
3248 if (!do_abort_bulks(service)) {
3249 status = -EAGAIN;
3250 break;
3251 }
3252
3253 if (!status)
3254 status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
3255 break;
3256
3257 case VCHIQ_SRVSTATE_CLOSERECVD:
3258 if (!close_recvd && is_server)
3259 /* Force into LISTENING mode */
3260 set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
3261 status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
3262 break;
3263
3264 default:
3265 dev_err(state->dev, "core: (%d) called in state %s\n",
3266 close_recvd, srvstate_names[service->srvstate]);
3267 break;
3268 }
3269
3270 return status;
3271 }
3272
3273 /* Called from the application process upon process death */
3274 void
3275 vchiq_terminate_service_internal(struct vchiq_service *service)
3276 {
3277 struct vchiq_state *state = service->state;
3278
3279 dev_dbg(state->dev, "core: %d: tsi - (%d<->%d)\n",
3280 state->id, service->localport, service->remoteport);
3281
3282 mark_service_closing(service);
3283
3284 /* Mark the service for removal by the slot handler */
3285 request_poll(state, service, VCHIQ_POLL_REMOVE);
3286 }
3287
3288 /* Called from the slot handler */
3289 void
3290 vchiq_free_service_internal(struct vchiq_service *service)
3291 {
3292 struct vchiq_state *state = service->state;
3293
3294 dev_dbg(state->dev, "core: %d: fsi - (%d)\n", state->id, service->localport);
3295
3296 switch (service->srvstate) {
3297 case VCHIQ_SRVSTATE_OPENING:
3298 case VCHIQ_SRVSTATE_CLOSED:
3299 case VCHIQ_SRVSTATE_HIDDEN:
3300 case VCHIQ_SRVSTATE_LISTENING:
3301 case VCHIQ_SRVSTATE_CLOSEWAIT:
3302 break;
3303 default:
3304 dev_err(state->dev, "core: %d: fsi - (%d) in state %s\n",
3305 state->id, service->localport, srvstate_names[service->srvstate]);
3306 return;
3307 }
3308
3309 set_service_state(service, VCHIQ_SRVSTATE_FREE);
3310
3311 complete(&service->remove_event);
3312
3313 /* Release the initial lock */
3314 vchiq_service_put(service);
3315 }
3316
3317 int
3318 vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
3319 {
3320 struct vchiq_service *service;
3321 int status = 0;
3322 int i;
3323
3324 /* Find all services registered to this client and enable them. */
3325 i = 0;
3326 while ((service = next_service_by_instance(state, instance, &i)) != NULL) {
3327 if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
3328 set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
3329 vchiq_service_put(service);
3330 }
3331
3332 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
3333 status = queue_message(state, NULL, MAKE_CONNECT, NULL, NULL, 0,
3334 QMFLAGS_IS_BLOCKING);
3335 if (status)
3336 return status;
3337
3338 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
3339 }
3340
3341 if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
3342 if (wait_for_completion_interruptible(&state->connect))
3343 return -EAGAIN;
3344
3345 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
3346 complete(&state->connect);
3347 }
3348
3349 return status;
3350 }
3351
3352 void
3353 vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
3354 {
3355 struct vchiq_service *service;
3356 int i;
3357
3358 /* Find all services registered to this client and remove them. */
3359 i = 0;
3360 while ((service = next_service_by_instance(state, instance, &i)) != NULL) {
3361 (void)vchiq_remove_service(instance, service->handle);
3362 vchiq_service_put(service);
3363 }
3364 }
3365
3366 int
3367 vchiq_close_service(struct vchiq_instance *instance, unsigned int handle)
3368 {
3369 /* Unregister the service */
3370 struct vchiq_service *service = find_service_by_handle(instance, handle);
3371 int status = 0;
3372
3373 if (!service)
3374 return -EINVAL;
3375
3376 dev_dbg(service->state->dev, "core: %d: close_service:%d\n",
3377 service->state->id, service->localport);
3378
3379 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
3380 (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
3381 (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
3382 vchiq_service_put(service);
3383 return -EINVAL;
3384 }
3385
3386 mark_service_closing(service);
3387
3388 if (current == service->state->slot_handler_thread) {
3389 status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
3390 WARN_ON(status == -EAGAIN);
3391 } else {
3392 /* Mark the service for termination by the slot handler */
3393 request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
3394 }
3395
3396 while (1) {
3397 if (wait_for_completion_interruptible(&service->remove_event)) {
3398 status = -EAGAIN;
3399 break;
3400 }
3401
3402 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
3403 (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
3404 (service->srvstate == VCHIQ_SRVSTATE_OPEN))
3405 break;
3406
3407 dev_warn(service->state->dev,
3408 "core: %d: close_service:%d - waiting in state %s\n",
3409 service->state->id, service->localport,
3410 srvstate_names[service->srvstate]);
3411 }
3412
3413 if (!status &&
3414 (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
3415 (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
3416 status = -EINVAL;
3417
3418 vchiq_service_put(service);
3419
3420 return status;
3421 }
3422 EXPORT_SYMBOL(vchiq_close_service);
3423
3424 int
3425 vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle)
3426 {
3427 /* Unregister the service */
3428 struct vchiq_service *service = find_service_by_handle(instance, handle);
3429 int status = 0;
3430
3431 if (!service)
3432 return -EINVAL;
3433
3434 dev_dbg(service->state->dev, "core: %d: remove_service:%d\n",
3435 service->state->id, service->localport);
3436
3437 if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
3438 vchiq_service_put(service);
3439 return -EINVAL;
3440 }
3441
3442 mark_service_closing(service);
3443
3444 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3445 (current == service->state->slot_handler_thread)) {
3446 /*
3447 * Make it look like a client, because it must be removed and
3448 * not left in the LISTENING state.
3449 */
3450 service->public_fourcc = VCHIQ_FOURCC_INVALID;
3451
3452 status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
3453 WARN_ON(status == -EAGAIN);
3454 } else {
3455 /* Mark the service for removal by the slot handler */
3456 request_poll(service->state, service, VCHIQ_POLL_REMOVE);
3457 }
3458 while (1) {
3459 if (wait_for_completion_interruptible(&service->remove_event)) {
3460 status = -EAGAIN;
3461 break;
3462 }
3463
3464 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
3465 (service->srvstate == VCHIQ_SRVSTATE_OPEN))
3466 break;
3467
3468 dev_warn(service->state->dev,
3469 "core: %d: remove_service:%d - waiting in state %s\n",
3470 service->state->id, service->localport,
3471 srvstate_names[service->srvstate]);
3472 }
3473
3474 if (!status && (service->srvstate != VCHIQ_SRVSTATE_FREE))
3475 status = -EINVAL;
3476
3477 vchiq_service_put(service);
3478
3479 return status;
3480 }
3481
3482 int
3483 vchiq_bulk_xfer_blocking(struct vchiq_instance *instance, unsigned int handle,
3484 struct vchiq_bulk *bulk_params)
3485 {
3486 struct vchiq_service *service = find_service_by_handle(instance, handle);
3487 int status = -EINVAL;
3488
3489 if (!service)
3490 return -EINVAL;
3491
3492 if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3493 goto error_exit;
3494
3495 if (!bulk_params->offset && !bulk_params->uoffset)
3496 goto error_exit;
3497
3498 if (vchiq_check_service(service))
3499 goto error_exit;
3500
3501 status = vchiq_bulk_xfer_queue_msg_killable(service, bulk_params);
3502
3503 error_exit:
3504 vchiq_service_put(service);
3505
3506 return status;
3507 }
3508
3509 int
3510 vchiq_bulk_xfer_callback(struct vchiq_instance *instance, unsigned int handle,
3511 struct vchiq_bulk *bulk_params)
3512 {
3513 struct vchiq_service *service = find_service_by_handle(instance, handle);
3514 int status = -EINVAL;
3515
3516 if (!service)
3517 return -EINVAL;
3518
3519 if (bulk_params->mode != VCHIQ_BULK_MODE_CALLBACK &&
3520 bulk_params->mode != VCHIQ_BULK_MODE_NOCALLBACK)
3521 goto error_exit;
3522
3523 if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3524 goto error_exit;
3525
3526 if (!bulk_params->offset && !bulk_params->uoffset)
3527 goto error_exit;
3528
3529 if (vchiq_check_service(service))
3530 goto error_exit;
3531
3532 status = vchiq_bulk_xfer_queue_msg_killable(service, bulk_params);
3533
3534 error_exit:
3535 vchiq_service_put(service);
3536
3537 return status;
3538 }
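/*
 * The callback variant returns as soon as the transfer is queued;
 * completion is reported later to the service callback (reason
 * VCHIQ_BULK_TRANSMIT_DONE / VCHIQ_BULK_RECEIVE_DONE). Sketch under the
 * same struct vchiq_bulk assumptions as above:
 *
 *	struct vchiq_bulk params = {
 *		.mode   = VCHIQ_BULK_MODE_CALLBACK,
 *		.offset = kbuf,
 *		.size   = kbuf_len,
 *		.dir    = VCHIQ_BULK_TRANSMIT,
 *	};
 *
 *	ret = vchiq_bulk_xfer_callback(instance, handle, &params);
 */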
3539
3540 /*
3541 * This function is called from the VCHIQ ioctl interface and is
3542 * interruptible. If the wait is interrupted by a signal it returns
3543 * -EINTR, so that the call can be retried after returning to user context.
3544 */
3545 int
3546 vchiq_bulk_xfer_waiting(struct vchiq_instance *instance,
3547 unsigned int handle, struct bulk_waiter *waiter)
3548 {
3549 struct vchiq_service *service = find_service_by_handle(instance, handle);
3550 struct bulk_waiter *bulk_waiter;
3551 int status = -EINVAL;
3552
3553 if (!service)
3554 return -EINVAL;
3555
3556 if (!waiter)
3557 goto error_exit;
3558
3559 if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3560 goto error_exit;
3561
3562 if (vchiq_check_service(service))
3563 goto error_exit;
3564
3565 bulk_waiter = waiter;
3566
3567 vchiq_service_put(service);
3568
3569 status = 0;
3570
3571 if (wait_for_completion_killable(&bulk_waiter->event))
3572 return -EINTR;
3573 else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
3574 return -EINVAL;
3575
3576 return status;
3577
3578 error_exit:
3579 vchiq_service_put(service);
3580
3581 return status;
3582 }
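/*
 * Hypothetical resume path: a transfer queued in VCHIQ_BULK_MODE_WAITING
 * that was interrupted leaves its struct bulk_waiter behind ("saved"
 * below); when user space retries, the ioctl layer calls back in here
 * to resume the killable wait.
 *
 *	ret = vchiq_bulk_xfer_waiting(instance, handle, saved);
 *	if (ret == -EINTR)
 *		;	// signalled again - keep the waiter for another retry
 */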
3583
3584 int
3585 vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle,
3586 ssize_t (*copy_callback)(void *context, void *dest,
3587 size_t offset, size_t maxsize),
3588 void *context,
3589 size_t size)
3590 {
3591 struct vchiq_service *service = find_service_by_handle(instance, handle);
3592 int status = -EINVAL;
3593 int data_id;
3594
3595 if (!service)
3596 goto error_exit;
3597
3598 if (vchiq_check_service(service))
3599 goto error_exit;
3600
3601 if (!size) {
3602 VCHIQ_SERVICE_STATS_INC(service, error_count);
3603 goto error_exit;
3604 }
3605
3606 if (size > VCHIQ_MAX_MSG_SIZE) {
3607 VCHIQ_SERVICE_STATS_INC(service, error_count);
3608 goto error_exit;
3609 }
3610
3611 data_id = MAKE_DATA(service->localport, service->remoteport);
3612
3613 switch (service->srvstate) {
3614 case VCHIQ_SRVSTATE_OPEN:
3615 status = queue_message(service->state, service, data_id,
3616 copy_callback, context, size,
3617 QMFLAGS_IS_BLOCKING);
3618 break;
3619 case VCHIQ_SRVSTATE_OPENSYNC:
3620 status = queue_message_sync(service->state, service, data_id,
3621 copy_callback, context, size);
3622 break;
3623 default:
3624 status = -EINVAL;
3625 break;
3626 }
3627
3628 error_exit:
3629 if (service)
3630 vchiq_service_put(service);
3631
3632 return status;
3633 }
3634
3635 int vchiq_queue_kernel_message(struct vchiq_instance *instance, unsigned int handle, void *data,
3636 unsigned int size)
3637 {
3638 return vchiq_queue_message(instance, handle, memcpy_copy_callback,
3639 data, size);
3640 }
3641 EXPORT_SYMBOL(vchiq_queue_kernel_message);
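/*
 * Minimal example, assuming a connected client ("instance"/"handle"
 * hypothetical): control messages travel through the slot mechanism, so
 * any payload up to VCHIQ_MAX_MSG_SIZE can be sent directly.
 *
 *	static const u8 ping[] = { 'p', 'i', 'n', 'g' };
 *
 *	ret = vchiq_queue_kernel_message(instance, handle,
 *					 (void *)ping, sizeof(ping));
 *	if (ret)	// e.g. -EINVAL if the service is not OPEN
 *		pr_err("vchiq send failed: %d\n", ret);
 */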
3642
3643 void
3644 vchiq_release_message(struct vchiq_instance *instance, unsigned int handle,
3645 struct vchiq_header *header)
3646 {
3647 struct vchiq_service *service = find_service_by_handle(instance, handle);
3648 struct vchiq_shared_state *remote;
3649 struct vchiq_state *state;
3650 int slot_index;
3651
3652 if (!service)
3653 return;
3654
3655 state = service->state;
3656 remote = state->remote;
3657
3658 slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
3659
3660 if ((slot_index >= remote->slot_first) &&
3661 (slot_index <= remote->slot_last)) {
3662 int msgid = header->msgid;
3663
3664 if (msgid & VCHIQ_MSGID_CLAIMED) {
3665 struct vchiq_slot_info *slot_info =
3666 SLOT_INFO_FROM_INDEX(state, slot_index);
3667
3668 release_slot(state, slot_info, header, service);
3669 }
3670 } else if (slot_index == remote->slot_sync) {
3671 release_message_sync(state, header);
3672 }
3673
3674 vchiq_service_put(service);
3675 }
3676 EXPORT_SYMBOL(vchiq_release_message);
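/*
 * Typical pairing (illustrative): a service callback that consumes a
 * message in place must release it afterwards so the slot can be
 * recycled.
 *
 *	// inside a service callback, reason == VCHIQ_MESSAGE_AVAILABLE
 *	memcpy(local_buf, header->data, header->size);
 *	vchiq_release_message(instance, handle, header);
 */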
3677
3678 static void
3679 release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
3680 {
3681 header->msgid = VCHIQ_MSGID_PADDING;
3682 remote_event_signal(state, &state->remote->sync_release);
3683 }
3684
3685 int
3686 vchiq_get_peer_version(struct vchiq_instance *instance, unsigned int handle, short *peer_version)
3687 {
3688 int status = -EINVAL;
3689 struct vchiq_service *service = find_service_by_handle(instance, handle);
3690
3691 if (!service)
3692 goto exit;
3693
3694 if (vchiq_check_service(service))
3695 goto exit;
3696
3697 if (!peer_version)
3698 goto exit;
3699
3700 *peer_version = service->peer_version;
3701 status = 0;
3702
3703 exit:
3704 if (service)
3705 vchiq_service_put(service);
3706 return status;
3707 }
3708 EXPORT_SYMBOL(vchiq_get_peer_version);
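/*
 * Example feature negotiation (illustrative; the version threshold and
 * helper are hypothetical):
 *
 *	short peer_version;
 *
 *	if (!vchiq_get_peer_version(instance, handle, &peer_version) &&
 *	    peer_version >= 2)
 *		enable_extended_protocol();
 */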
3709
3710 void vchiq_get_config(struct vchiq_config *config)
3711 {
3712 config->max_msg_size = VCHIQ_MAX_MSG_SIZE;
3713 config->bulk_threshold = VCHIQ_MAX_MSG_SIZE;
3714 config->max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
3715 config->max_services = VCHIQ_MAX_SERVICES;
3716 config->version = VCHIQ_VERSION;
3717 config->version_min = VCHIQ_VERSION_MIN;
3718 }
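/*
 * Clients can use the reported limits to choose a transfer strategy,
 * e.g. (illustrative):
 *
 *	struct vchiq_config config;
 *
 *	vchiq_get_config(&config);
 *	if (len > config.max_msg_size)
 *		;	// too large for a slot message - use a bulk transfer
 */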
3719
3720 int
3721 vchiq_set_service_option(struct vchiq_instance *instance, unsigned int handle,
3722 enum vchiq_service_option option, int value)
3723 {
3724 struct vchiq_service *service = find_service_by_handle(instance, handle);
3725 struct vchiq_service_quota *quota;
3726 int ret = -EINVAL;
3727
3728 if (!service)
3729 return -EINVAL;
3730
3731 switch (option) {
3732 case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
3733 service->auto_close = value;
3734 ret = 0;
3735 break;
3736
3737 case VCHIQ_SERVICE_OPTION_SLOT_QUOTA:
3738 quota = &service->state->service_quotas[service->localport];
3739 if (value == 0)
3740 value = service->state->default_slot_quota;
3741 if ((value >= quota->slot_use_count) &&
3742 (value < (unsigned short)~0)) {
3743 quota->slot_quota = value;
3744 if ((value >= quota->slot_use_count) &&
3745 (quota->message_quota >= quota->message_use_count))
3746 /*
3747 * Signal the service that it may have
3748 * dropped below its quota
3749 */
3750 complete("a->quota_event);
3751 ret = 0;
3752 }
3753 break;
3754
3755 case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA:
3756 quota = &service->state->service_quotas[service->localport];
3757 if (value == 0)
3758 value = service->state->default_message_quota;
3759 if ((value >= quota->message_use_count) &&
3760 (value < (unsigned short)~0)) {
3761 quota->message_quota = value;
3762 if ((value >= quota->message_use_count) &&
3763 (quota->slot_quota >= quota->slot_use_count))
3764 /*
3765 * Signal the service that it may have
3766 * dropped below its quota
3767 */
3768 complete("a->quota_event);
3769 ret = 0;
3770 }
3771 break;
3772
3773 case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
3774 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3775 (service->srvstate == VCHIQ_SRVSTATE_LISTENING)) {
3776 service->sync = value;
3777 ret = 0;
3778 }
3779 break;
3780
3781 case VCHIQ_SERVICE_OPTION_TRACE:
3782 service->trace = value;
3783 ret = 0;
3784 break;
3785
3786 default:
3787 break;
3788 }
3789 vchiq_service_put(service);
3790
3791 return ret;
3792 }
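/*
 * Example (illustrative): raising the slot quota for a bursty service;
 * passing 0 restores the state's default, as handled above.
 *
 *	vchiq_set_service_option(instance, handle,
 *				 VCHIQ_SERVICE_OPTION_SLOT_QUOTA, 8);
 */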
3793
3794 static void
3795 vchiq_dump_shared_state(struct seq_file *f, struct vchiq_state *state,
3796 struct vchiq_shared_state *shared, const char *label)
3797 {
3798 static const char *const debug_names[] = {
3799 "<entries>",
3800 "SLOT_HANDLER_COUNT",
3801 "SLOT_HANDLER_LINE",
3802 "PARSE_LINE",
3803 "PARSE_HEADER",
3804 "PARSE_MSGID",
3805 "AWAIT_COMPLETION_LINE",
3806 "DEQUEUE_MESSAGE_LINE",
3807 "SERVICE_CALLBACK_LINE",
3808 "MSG_QUEUE_FULL_COUNT",
3809 "COMPLETION_QUEUE_FULL_COUNT"
3810 };
3811 int i;
3812
3813 seq_printf(f, " %s: slots %d-%d tx_pos=0x%x recycle=0x%x\n",
3814 label, shared->slot_first, shared->slot_last,
3815 shared->tx_pos, shared->slot_queue_recycle);
3816
3817 seq_puts(f, " Slots claimed:\n");
3818
3819 for (i = shared->slot_first; i <= shared->slot_last; i++) {
3820 struct vchiq_slot_info slot_info =
3821 *SLOT_INFO_FROM_INDEX(state, i);
3822 if (slot_info.use_count != slot_info.release_count) {
3823 seq_printf(f, " %d: %d/%d\n", i, slot_info.use_count,
3824 slot_info.release_count);
3825 }
3826 }
3827
3828 for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
3829 seq_printf(f, " DEBUG: %s = %d(0x%x)\n",
3830 debug_names[i], shared->debug[i], shared->debug[i]);
3831 }
3832 }
3833
3834 static void
3835 vchiq_dump_service_state(struct seq_file *f, struct vchiq_service *service)
3836 {
3837 unsigned int ref_count;
3838
3839 /* Don't include the lock just taken */
3840 ref_count = kref_read(&service->ref_count) - 1;
3841 seq_printf(f, "Service %u: %s (ref %u)", service->localport,
3842 srvstate_names[service->srvstate], ref_count);
3843
3844 if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
3845 char remoteport[30];
3846 struct vchiq_service_quota *quota =
3847 &service->state->service_quotas[service->localport];
3848 int fourcc = service->base.fourcc;
3849 int tx_pending, rx_pending, tx_size = 0, rx_size = 0;
3850
3851 if (service->remoteport != VCHIQ_PORT_FREE) {
3852 int len2 = scnprintf(remoteport, sizeof(remoteport),
3853 "%u", service->remoteport);
3854
3855 if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
3856 scnprintf(remoteport + len2, sizeof(remoteport) - len2,
3857 " (client 0x%x)", service->client_id);
3858 } else {
3859 strscpy(remoteport, "n/a", sizeof(remoteport));
3860 }
3861
3862 seq_printf(f, " '%p4cc' remote %s (msg use %d/%d, slot use %d/%d)\n",
3863 &fourcc, remoteport,
3864 quota->message_use_count, quota->message_quota,
3865 quota->slot_use_count, quota->slot_quota);
3866
3867 tx_pending = service->bulk_tx.local_insert -
3868 service->bulk_tx.remote_insert;
3869 if (tx_pending) {
3870 unsigned int i = BULK_INDEX(service->bulk_tx.remove);
3871
3872 tx_size = service->bulk_tx.bulks[i].size;
3873 }
3874
3875 rx_pending = service->bulk_rx.local_insert -
3876 service->bulk_rx.remote_insert;
3877 if (rx_pending) {
3878 unsigned int i = BULK_INDEX(service->bulk_rx.remove);
3879
3880 rx_size = service->bulk_rx.bulks[i].size;
3881 }
3882
3883 seq_printf(f, " Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)\n",
3884 tx_pending, tx_size, rx_pending, rx_size);
3885
3886 if (VCHIQ_ENABLE_STATS) {
3887 seq_printf(f, " Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu\n",
3888 service->stats.ctrl_tx_count,
3889 service->stats.ctrl_tx_bytes,
3890 service->stats.ctrl_rx_count,
3891 service->stats.ctrl_rx_bytes);
3892
3893 seq_printf(f, " Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu\n",
3894 service->stats.bulk_tx_count,
3895 service->stats.bulk_tx_bytes,
3896 service->stats.bulk_rx_count,
3897 service->stats.bulk_rx_bytes);
3898
3899 seq_printf(f, " %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors\n",
3900 service->stats.quota_stalls,
3901 service->stats.slot_stalls,
3902 service->stats.bulk_stalls,
3903 service->stats.bulk_aborted_count,
3904 service->stats.error_count);
3905 }
3906 }
3907
3908 vchiq_dump_platform_service_state(f, service);
3909 }
3910
3911 void vchiq_dump_state(struct seq_file *f, struct vchiq_state *state)
3912 {
3913 int i;
3914
3915 seq_printf(f, "State %d: %s\n", state->id,
3916 conn_state_names[state->conn_state]);
3917
3918 seq_printf(f, " tx_pos=0x%x(@%pK), rx_pos=0x%x(@%pK)\n",
3919 state->local->tx_pos,
3920 state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
3921 state->rx_pos,
3922 state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
3923
3924 seq_printf(f, " Version: %d (min %d)\n", VCHIQ_VERSION,
3925 VCHIQ_VERSION_MIN);
3926
3927 if (VCHIQ_ENABLE_STATS) {
3928 seq_printf(f, " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d\n",
3929 state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
3930 state->stats.error_count);
3931 }
3932
3933 seq_printf(f, " Slots: %d available (%d data), %d recyclable, %d stalls (%d data)\n",
3934 ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
3935 state->local_tx_pos) / VCHIQ_SLOT_SIZE,
3936 state->data_quota - state->data_use_count,
3937 state->local->slot_queue_recycle - state->slot_queue_available,
3938 state->stats.slot_stalls, state->stats.data_stalls);
3939
3940 vchiq_dump_platform_state(f);
3941
3942 vchiq_dump_shared_state(f, state, state->local, "Local");
3943
3944 vchiq_dump_shared_state(f, state, state->remote, "Remote");
3945
3946 vchiq_dump_platform_instances(state, f);
3947
3948 for (i = 0; i < state->unused_service; i++) {
3949 struct vchiq_service *service = find_service_by_port(state, i);
3950
3951 if (service) {
3952 vchiq_dump_service_state(f, service);
3953 vchiq_service_put(service);
3954 }
3955 }
3956 }
3957
3958 int vchiq_send_remote_use(struct vchiq_state *state)
3959 {
3960 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
3961 return -ENOTCONN;
3962
3963 return queue_message(state, NULL, MAKE_REMOTE_USE, NULL, NULL, 0, 0);
3964 }
3965
3966 int vchiq_send_remote_use_active(struct vchiq_state *state)
3967 {
3968 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
3969 return -ENOTCONN;
3970
3971 return queue_message(state, NULL, MAKE_REMOTE_USE_ACTIVE,
3972 NULL, NULL, 0, 0);
3973 }
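/*
 * Both helpers refuse to queue anything while disconnected, so callers
 * are expected to tolerate -ENOTCONN, e.g. (illustrative):
 *
 *	if (vchiq_send_remote_use(state) == -ENOTCONN)
 *		;	// VideoCore not connected yet - nothing to do
 */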
3974
3975 void vchiq_log_dump_mem(struct device *dev, const char *label, u32 addr,
3976 const void *void_mem, size_t num_bytes)
3977 {
3978 const u8 *mem = void_mem;
3979 size_t offset;
3980 char line_buf[100];
3981 char *s;
3982
3983 while (num_bytes > 0) {
3984 s = line_buf;
3985
3986 for (offset = 0; offset < 16; offset++) {
3987 if (offset < num_bytes)
3988 s += scnprintf(s, 4, "%02x ", mem[offset]);
3989 else
3990 s += scnprintf(s, 4, "   ");
3991 }
3992
3993 for (offset = 0; offset < 16; offset++) {
3994 if (offset < num_bytes) {
3995 u8 ch = mem[offset];
3996
3997 if ((ch < ' ') || (ch > '~'))
3998 ch = '.';
3999 *s++ = (char)ch;
4000 }
4001 }
4002 *s++ = '\0';
4003
4004 if (label && (*label != '\0'))
4005 dev_dbg(dev, "core: %s: %08x: %s\n", label, addr, line_buf);
4006 else
4007 dev_dbg(dev, "core: %08x: %s\n", addr, line_buf);
4008
4009 addr += 16;
4010 mem += 16;
4011 if (num_bytes > 16)
4012 num_bytes -= 16;
4013 else
4014 num_bytes = 0;
4015 }
4016 }
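/*
 * Example (illustrative): hex-dump the first 32 bytes of a received
 * header while debugging; "addr" only labels the output, so 0 is fine.
 *
 *	vchiq_log_dump_mem(state->dev, "rx slot", 0, header, 32);
 */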
4017