/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */

#ifndef VCHIQ_CORE_H
#define VCHIQ_CORE_H

#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dev_printk.h>
#include <linux/kthread.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/spinlock_types.h>
#include <linux/wait.h>

#include "../../include/linux/raspberrypi/vchiq.h"
#include "vchiq_cfg.h"

/* Do this so that we can test-build the code on non-rpi systems */
#if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE)

#else

#ifndef dsb
#define dsb(a)
#endif

#endif	/* IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE) */

#define VCHIQ_SERVICE_HANDLE_INVALID 0

#define VCHIQ_SLOT_SIZE     4096
#define VCHIQ_MAX_MSG_SIZE  (VCHIQ_SLOT_SIZE - sizeof(struct vchiq_header))

#define VCHIQ_SLOT_MASK        (VCHIQ_SLOT_SIZE - 1)
#define VCHIQ_SLOT_QUEUE_MASK  (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
#define VCHIQ_SLOT_ZERO_SLOTS  DIV_ROUND_UP(sizeof(struct vchiq_slot_zero), \
					    VCHIQ_SLOT_SIZE)

#define BITSET_SIZE(b)        (((b) + 31) >> 5)
#define BITSET_WORD(b)        ((b) >> 5)
#define BITSET_BIT(b)         (1 << ((b) & 31))
#define BITSET_IS_SET(bs, b)  ((bs)[BITSET_WORD(b)] & BITSET_BIT(b))
#define BITSET_SET(bs, b)     ((bs)[BITSET_WORD(b)] |= BITSET_BIT(b))
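
/*
 * Illustrative sketch (not part of the driver API): the BITSET_* macros
 * pack one flag per index into an array of 32-bit words. The array size
 * and the index used below are arbitrary example values.
 */
static inline int bitset_example(void)
{
	u32 flags[BITSET_SIZE(64)] = { 0 };

	BITSET_SET(flags, 40);			/* flags[1] |= (1 << 8) */
	return BITSET_IS_SET(flags, 40);	/* non-zero */
}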

enum {
	DEBUG_ENTRIES,
#if VCHIQ_ENABLE_DEBUG
	DEBUG_SLOT_HANDLER_COUNT,
	DEBUG_SLOT_HANDLER_LINE,
	DEBUG_PARSE_LINE,
	DEBUG_PARSE_HEADER,
	DEBUG_PARSE_MSGID,
	DEBUG_AWAIT_COMPLETION_LINE,
	DEBUG_DEQUEUE_MESSAGE_LINE,
	DEBUG_SERVICE_CALLBACK_LINE,
	DEBUG_MSG_QUEUE_FULL_COUNT,
	DEBUG_COMPLETION_QUEUE_FULL_COUNT,
#endif
	DEBUG_MAX
};

#if VCHIQ_ENABLE_DEBUG

#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug
#define DEBUG_TRACE(d) \
	do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(sy); } while (0)
#define DEBUG_VALUE(d, v) \
	do { debug_ptr[DEBUG_ ## d] = (v); dsb(sy); } while (0)
#define DEBUG_COUNT(d) \
	do { debug_ptr[DEBUG_ ## d]++; dsb(sy); } while (0)

#else /* VCHIQ_ENABLE_DEBUG */

#define DEBUG_INITIALISE(local)
#define DEBUG_TRACE(d)
#define DEBUG_VALUE(d, v)
#define DEBUG_COUNT(d)

#endif /* VCHIQ_ENABLE_DEBUG */
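
/*
 * Typical usage sketch (illustrative, loosely following vchiq_core.c): the
 * slot handler points debug_ptr at the shared debug array and then drops
 * breadcrumbs into it, so the values remain visible in shared memory if
 * either side needs to inspect them after a hang:
 *
 *	DEBUG_INITIALISE(state->local);
 *	DEBUG_TRACE(SLOT_HANDLER_LINE);
 *	DEBUG_COUNT(SLOT_HANDLER_COUNT);
 */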

enum vchiq_connstate {
	VCHIQ_CONNSTATE_DISCONNECTED,
	VCHIQ_CONNSTATE_CONNECTING,
	VCHIQ_CONNSTATE_CONNECTED,
	VCHIQ_CONNSTATE_PAUSING,
	VCHIQ_CONNSTATE_PAUSE_SENT,
	VCHIQ_CONNSTATE_PAUSED,
	VCHIQ_CONNSTATE_RESUMING,
	VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
	VCHIQ_CONNSTATE_RESUME_TIMEOUT
};

enum {
	VCHIQ_SRVSTATE_FREE,
	VCHIQ_SRVSTATE_HIDDEN,
	VCHIQ_SRVSTATE_LISTENING,
	VCHIQ_SRVSTATE_OPENING,
	VCHIQ_SRVSTATE_OPEN,
	VCHIQ_SRVSTATE_OPENSYNC,
	VCHIQ_SRVSTATE_CLOSESENT,
	VCHIQ_SRVSTATE_CLOSERECVD,
	VCHIQ_SRVSTATE_CLOSEWAIT,
	VCHIQ_SRVSTATE_CLOSED
};

enum vchiq_bulk_dir {
	VCHIQ_BULK_TRANSMIT,
	VCHIQ_BULK_RECEIVE
};

struct vchiq_bulk {
	short mode;
	short dir;
	void *cb_data;
	void __user *cb_userdata;
	struct bulk_waiter *waiter;
	dma_addr_t dma_addr;
	int size;
	void *remote_data;
	int remote_size;
	int actual;
	void *offset;
	void __user *uoffset;
};

struct vchiq_bulk_queue {
	int local_insert;  /* Where to insert the next local bulk */
	int remote_insert; /* Where to insert the next remote bulk (master) */
	int process;       /* Bulk to transfer next */
	int remote_notify; /* Bulk to notify the remote client of next (mstr) */
	int remove;        /* Bulk to notify the local client of, and remove, next */
	struct vchiq_bulk bulks[VCHIQ_NUM_SERVICE_BULKS];
};
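
/*
 * The queue indices above are free-running counters. A minimal sketch of
 * reaching an entry (the helper name is illustrative, not the driver's);
 * this relies on VCHIQ_NUM_SERVICE_BULKS being a power of two:
 */
static inline struct vchiq_bulk *
example_bulk_entry(struct vchiq_bulk_queue *queue, int index)
{
	return &queue->bulks[index & (VCHIQ_NUM_SERVICE_BULKS - 1)];
}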

/*
 * Remote events provide a way of presenting several virtual doorbells to a
 * peer (ARM host to VPU) using only one physical doorbell. They can be
 * thought of as a way for the peer to signal a semaphore, in this case
 * implemented as a workqueue.
 *
 * Remote events remain signalled until acknowledged by the receiver, and they
 * are non-counting. They are designed in such a way as to minimise the number
 * of interrupts and avoid unnecessary waiting.
 *
 * A remote_event is a small data structure that lives in shared memory. It
 * comprises two booleans - armed and fired:
 *
 * The sender sets fired when they signal the receiver.
 * If fired is set, the receiver has been signalled and need not wait.
 * The receiver sets the armed field before they begin to wait.
 * If armed is set, the receiver is waiting and wishes to be woken by interrupt.
 */
struct remote_event {
	int armed;
	int fired;
	u32 __unused;
};
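
/*
 * A minimal sketch of the protocol described above, with the memory
 * barriers and the physical doorbell of the real driver paths omitted
 * (the helper names are illustrative only):
 */
static inline void example_remote_event_signal(struct remote_event *event)
{
	event->fired = 1;
	if (event->armed) {
		/* here the real code rings the peer's physical doorbell */
	}
}

static inline void example_remote_event_wait(wait_queue_head_t *wq,
					     struct remote_event *event)
{
	event->armed = 1;
	wait_event(*wq, event->fired);	/* woken from the doorbell handler */
	event->fired = 0;
	event->armed = 0;
}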

struct opaque_platform_state;

struct vchiq_slot {
	char data[VCHIQ_SLOT_SIZE];
};

struct vchiq_slot_info {
	/*
	 * Use two counters rather than one to avoid the need for a mutex;
	 * the slot is free to recycle once release_count catches up with
	 * use_count.
	 */
	short use_count;
	short release_count;
};

struct vchiq_service {
	struct vchiq_service_base base;
	unsigned int handle;
	struct kref ref_count;
	struct rcu_head rcu;
	int srvstate;
	void (*userdata_term)(void *userdata);
	unsigned int localport;
	unsigned int remoteport;
	int public_fourcc;
	int client_id;
	char auto_close;
	char sync;
	char closing;
	char trace;
	atomic_t poll_flags;
	short version;
	short version_min;
	short peer_version;

	struct vchiq_state *state;
	struct vchiq_instance *instance;

	int service_use_count;

	struct vchiq_bulk_queue bulk_tx;
	struct vchiq_bulk_queue bulk_rx;

	struct completion remove_event;
	struct completion bulk_remove_event;
	struct mutex bulk_mutex;

	struct service_stats_struct {
		int quota_stalls;
		int slot_stalls;
		int bulk_stalls;
		int error_count;
		int ctrl_tx_count;
		int ctrl_rx_count;
		int bulk_tx_count;
		int bulk_rx_count;
		int bulk_aborted_count;
		u64 ctrl_tx_bytes;
		u64 ctrl_rx_bytes;
		u64 bulk_tx_bytes;
		u64 bulk_rx_bytes;
	} stats;

	int msg_queue_read;
	int msg_queue_write;
	struct completion msg_queue_pop;
	struct completion msg_queue_push;
	struct vchiq_header *msg_queue[VCHIQ_MAX_SLOTS];
};

/*
 * The quota information is outside struct vchiq_service so that it can
 * be statically allocated, since for accounting reasons a service's slot
 * usage is carried over between users of the same port number.
 */
struct vchiq_service_quota {
	unsigned short slot_quota;
	unsigned short slot_use_count;
	unsigned short message_quota;
	unsigned short message_use_count;
	struct completion quota_event;
	int previous_tx_index;
};

struct vchiq_shared_state {
	/* A non-zero value here indicates that the content is valid. */
	int initialised;

	/* The first and last (inclusive) slots allocated to the owner. */
	int slot_first;
	int slot_last;

	/* The slot allocated to synchronous messages from the owner. */
	int slot_sync;

	/*
	 * Signalling this event indicates that the owner's slot handler
	 * thread should run.
	 */
	struct remote_event trigger;

	/*
	 * Indicates the byte position within the stream where the next message
	 * will be written. The least significant bits are an index into the
	 * slot. The next bits are the index of the slot in slot_queue. (See
	 * the decoding sketch following this struct.)
	 */
	int tx_pos;

	/* This event should be signalled when a slot is recycled. */
	struct remote_event recycle;

	/* The slot_queue index where the next recycled slot will be written. */
	int slot_queue_recycle;

	/* This event should be signalled when a synchronous message is sent. */
	struct remote_event sync_trigger;

	/*
	 * This event should be signalled when a synchronous message has been
	 * released.
	 */
	struct remote_event sync_release;

	/* A circular buffer of slot indexes. */
	int slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE];

	/* Debugging state */
	int debug[DEBUG_MAX];
};
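
/*
 * Worked example of the tx_pos/rx_pos encoding described above (a sketch;
 * the helper names are not part of the driver). With the 4096-byte slots
 * defined here, a stream position of 9000 decodes to slot_queue index 2
 * and byte offset 808 within that slot.
 */
static inline int example_pos_to_slot_queue_index(int pos)
{
	return ((unsigned int)pos / VCHIQ_SLOT_SIZE) & VCHIQ_SLOT_QUEUE_MASK;
}

static inline int example_pos_to_slot_offset(int pos)
{
	return pos & VCHIQ_SLOT_MASK;
}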

struct vchiq_slot_zero {
	int magic;
	short version;
	short version_min;
	int slot_zero_size;
	int slot_size;
	int max_slots;
	int max_slots_per_side;
	int platform_data[2];
	struct vchiq_shared_state master;
	struct vchiq_shared_state slave;
	struct vchiq_slot_info slots[VCHIQ_MAX_SLOTS];
};

struct vchiq_state {
	struct device *dev;
	int id;
	int initialised;
	enum vchiq_connstate conn_state;
	short version_common;

	struct vchiq_shared_state *local;
	struct vchiq_shared_state *remote;
	struct vchiq_slot *slot_data;

	unsigned short default_slot_quota;
	unsigned short default_message_quota;

	/* Event indicating connect message received */
	struct completion connect;

	/* Mutex protecting services */
	struct mutex mutex;
	struct vchiq_instance **instance;

	/* Processes incoming messages */
	struct task_struct *slot_handler_thread;

	/* Processes recycled slots */
	struct task_struct *recycle_thread;

	/* Processes synchronous messages */
	struct task_struct *sync_thread;

	/* Local implementation of the trigger remote event */
	wait_queue_head_t trigger_event;

	/* Local implementation of the recycle remote event */
	wait_queue_head_t recycle_event;

	/* Local implementation of the sync trigger remote event */
	wait_queue_head_t sync_trigger_event;

	/* Local implementation of the sync release remote event */
	wait_queue_head_t sync_release_event;

	char *tx_data;
	char *rx_data;
	struct vchiq_slot_info *rx_info;

	struct mutex slot_mutex;

	struct mutex recycle_mutex;

	struct mutex sync_mutex;

	spinlock_t msg_queue_spinlock;

	spinlock_t bulk_waiter_spinlock;

	spinlock_t quota_spinlock;

	/*
	 * Indicates the byte position within the stream from where the next
	 * message will be read. The least significant bits are an index into
	 * the slot. The next bits are the index of the slot in
	 * remote->slot_queue.
	 */
	int rx_pos;

	/*
	 * A cached copy of local->tx_pos. Only write to local->tx_pos, and
	 * read from remote->tx_pos.
	 */
	int local_tx_pos;

	/* The slot_queue index of the slot to become available next. */
	int slot_queue_available;

	/* A flag to indicate if any poll has been requested */
	int poll_needed;

	/* The index of the previous slot used for data messages. */
	int previous_data_index;

	/* The number of slots occupied by data messages. */
	unsigned short data_use_count;

	/* The maximum number of slots to be occupied by data messages. */
	unsigned short data_quota;

	/* An array of bit sets indicating which services must be polled. */
	atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)];

	/* The number of the first unused service */
	int unused_service;

	/* Signalled when a free slot becomes available. */
	struct completion slot_available_event;

	/* Signalled when a free data slot becomes available. */
	struct completion data_quota_event;

	struct state_stats_struct {
		int slot_stalls;
		int data_stalls;
		int ctrl_tx_count;
		int ctrl_rx_count;
		int error_count;
	} stats;

	struct vchiq_service __rcu *services[VCHIQ_MAX_SERVICES];
	struct vchiq_service_quota service_quotas[VCHIQ_MAX_SERVICES];
	struct vchiq_slot_info slot_info[VCHIQ_MAX_SLOTS];

	struct opaque_platform_state *platform_state;
};

struct pagelist {
	u32 length;
	u16 type;
	u16 offset;
	u32 addrs[1];	/* N.B. 12 LSBs hold the number
			 * of following pages at consecutive
			 * addresses.
			 */
};
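
/*
 * Illustrative decode of one pagelist entry, following the comment above
 * (assumes 4 KiB pages; the helper names are not part of the driver):
 */
static inline u32 example_pagelist_entry_base(u32 entry)
{
	return entry & ~0xfffu;		/* bus address of the first page */
}

static inline u32 example_pagelist_following_pages(u32 entry)
{
	return entry & 0xfffu;		/* further pages at consecutive addresses */
}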

struct vchiq_pagelist_info {
	struct pagelist *pagelist;
	size_t pagelist_buffer_size;
	dma_addr_t dma_addr;
	enum dma_data_direction dma_dir;
	unsigned int num_pages;
	unsigned int pages_need_release;
	struct page **pages;
	struct scatterlist *scatterlist;
	unsigned int scatterlist_mapped;
};

static inline bool vchiq_remote_initialised(const struct vchiq_state *state)
{
	return state->remote && state->remote->initialised;
}

struct bulk_waiter {
	struct vchiq_bulk *bulk;
	struct completion event;
	int actual;
};

struct vchiq_config {
	unsigned int max_msg_size;
	unsigned int bulk_threshold;	/* The message size above which it
					 * is better to use a bulk transfer
					 * (<= max_msg_size)
					 */
	unsigned int max_outstanding_bulks;
	unsigned int max_services;
	short version;      /* The version of VCHIQ */
	short version_min;  /* The minimum compatible version of VCHIQ */
};
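
/*
 * Example use of the config (illustrative): a caller deciding between an
 * ordinary message and a bulk transfer can compare the payload size
 * against bulk_threshold, e.g.
 *
 *	struct vchiq_config config;
 *
 *	vchiq_get_config(&config);
 *	if (size > config.bulk_threshold)
 *		... use a bulk transfer ...
 *	else
 *		... use vchiq_queue_message() ...
 */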

extern spinlock_t bulk_waiter_spinlock;

extern const char *
get_conn_state_name(enum vchiq_connstate conn_state);

extern struct vchiq_slot_zero *
vchiq_init_slots(struct device *dev, void *mem_base, int mem_size);

extern int
vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, struct device *dev);

extern int
vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance);

struct vchiq_service *
vchiq_add_service_internal(struct vchiq_state *state,
			   const struct vchiq_service_params_kernel *params,
			   int srvstate, struct vchiq_instance *instance,
			   void (*userdata_term)(void *userdata));

extern int
vchiq_open_service_internal(struct vchiq_service *service, int client_id);

extern int
vchiq_close_service_internal(struct vchiq_service *service, int close_recvd);

extern void
vchiq_terminate_service_internal(struct vchiq_service *service);

extern void
vchiq_free_service_internal(struct vchiq_service *service);

extern void
vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance);

extern void
remote_event_pollall(struct vchiq_state *state);

extern int
vchiq_bulk_xfer_waiting(struct vchiq_instance *instance, unsigned int handle,
			struct bulk_waiter *userdata);

extern int
vchiq_bulk_xfer_blocking(struct vchiq_instance *instance, unsigned int handle,
			 struct vchiq_bulk *bulk);

extern int
vchiq_bulk_xfer_callback(struct vchiq_instance *instance, unsigned int handle,
			 struct vchiq_bulk *bulk);

extern void
vchiq_dump_state(struct seq_file *f, struct vchiq_state *state);

extern void
request_poll(struct vchiq_state *state, struct vchiq_service *service,
	     int poll_type);

struct vchiq_service *handle_to_service(struct vchiq_instance *instance, unsigned int handle);

extern struct vchiq_service *
find_service_by_handle(struct vchiq_instance *instance, unsigned int handle);

extern struct vchiq_service *
find_service_by_port(struct vchiq_state *state, unsigned int localport);

extern struct vchiq_service *
find_service_for_instance(struct vchiq_instance *instance, unsigned int handle);

extern struct vchiq_service *
find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int handle);

extern struct vchiq_service *
__next_service_by_instance(struct vchiq_state *state,
			   struct vchiq_instance *instance,
			   int *pidx);

extern struct vchiq_service *
next_service_by_instance(struct vchiq_state *state,
			 struct vchiq_instance *instance,
			 int *pidx);

extern void
vchiq_service_get(struct vchiq_service *service);

extern void
vchiq_service_put(struct vchiq_service *service);

extern int
vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle,
		    ssize_t (*copy_callback)(void *context, void *dest,
					     size_t offset, size_t maxsize),
		    void *context,
		    size_t size);

void vchiq_dump_platform_state(struct seq_file *f);

void vchiq_dump_platform_instances(struct vchiq_state *state, struct seq_file *f);

void vchiq_dump_platform_service_state(struct seq_file *f, struct vchiq_service *service);

int vchiq_use_service_internal(struct vchiq_service *service);

int vchiq_release_service_internal(struct vchiq_service *service);

void vchiq_on_remote_use(struct vchiq_state *state);

void vchiq_on_remote_release(struct vchiq_state *state);

int vchiq_platform_init_state(struct vchiq_state *state);

int vchiq_check_service(struct vchiq_service *service);

int vchiq_send_remote_use(struct vchiq_state *state);

int vchiq_send_remote_use_active(struct vchiq_state *state);

void vchiq_platform_conn_state_changed(struct vchiq_state *state,
				       enum vchiq_connstate oldstate,
				       enum vchiq_connstate newstate);

void vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate);

void vchiq_log_dump_mem(struct device *dev, const char *label, u32 addr,
			const void *void_mem, size_t num_bytes);

int vchiq_remove_service(struct vchiq_instance *instance, unsigned int service);

int vchiq_get_client_id(struct vchiq_instance *instance, unsigned int service);

void vchiq_get_config(struct vchiq_config *config);

int vchiq_set_service_option(struct vchiq_instance *instance, unsigned int service,
			     enum vchiq_service_option option, int value);

#endif /* VCHIQ_CORE_H */