// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/device/bus.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/compat.h>
#include <linux/dma-mapping.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <soc/bcm2835/raspberrypi-firmware.h>

#include "vchiq_core.h"
#include "vchiq_ioctl.h"
#include "vchiq_arm.h"
#include "vchiq_bus.h"
#include "vchiq_debugfs.h"

#define DEVICE_NAME "vchiq"

#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)

#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)

#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX  1

#define BELL0	0x00

#define ARM_DS_ACTIVE	BIT(2)

/* Override the default prefix, which would be vchiq_arm (from the filename) */
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX DEVICE_NAME "."

#define KEEPALIVE_VER 1
#define KEEPALIVE_VER_MIN KEEPALIVE_VER

/*
 * The devices implemented in the VCHIQ firmware are not discoverable,
 * so we need to maintain a list of them in order to register them with
 * the interface.
 */
static struct vchiq_device *bcm2835_audio;
static struct vchiq_device *bcm2835_camera;

static const struct vchiq_platform_info bcm2835_info = {
	.cache_line_size = 32,
};

static const struct vchiq_platform_info bcm2836_info = {
	.cache_line_size = 64,
};

struct vchiq_arm_state {
	/* Keepalive-related data */
	struct task_struct *ka_thread;
	struct completion ka_evt;
	atomic_t ka_use_count;
	atomic_t ka_use_ack_count;
	atomic_t ka_release_count;

	rwlock_t susp_res_lock;

	struct vchiq_state *state;

	/*
	 * Global use count for videocore.
	 * This is equal to the sum of the use counts for all services.  When
	 * this hits zero the videocore suspend procedure will be initiated.
	 */
	int videocore_use_count;

	/*
	 * Use count to track requests from videocore peer.
	 * This use count is not associated with a service, so needs to be
	 * tracked separately with the state.
	 */
	int peer_use_count;

	/*
	 * Flag to indicate that the first vchiq connect has made it through.
	 * This means that both sides should be fully ready, and we should
	 * be able to suspend after this point.
	 */
	int first_connect;
};

static int
vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
			     struct vchiq_bulk *bulk_params);

static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id)
{
	struct vchiq_state *state = dev_id;
	struct vchiq_drv_mgmt *mgmt;
	irqreturn_t ret = IRQ_NONE;
	unsigned int status;

	mgmt = dev_get_drvdata(state->dev);

	/* Read (and clear) the doorbell */
	status = readl(mgmt->regs + BELL0);

	if (status & ARM_DS_ACTIVE) {  /* Was the doorbell rung? */
		remote_event_pollall(state);
		ret = IRQ_HANDLED;
	}

	return ret;
}

/*
 * This function is called by the vchiq stack once it has been connected to
 * the videocore and clients can start to use the stack.
 */
static void vchiq_call_connected_callbacks(struct vchiq_drv_mgmt *drv_mgmt)
{
	int i;

	if (mutex_lock_killable(&drv_mgmt->connected_mutex))
		return;

	for (i = 0; i < drv_mgmt->num_deferred_callbacks; i++)
		drv_mgmt->deferred_callback[i]();

	drv_mgmt->num_deferred_callbacks = 0;
	drv_mgmt->connected = true;
	mutex_unlock(&drv_mgmt->connected_mutex);
}

/*
 * This function is used to defer initialization until the vchiq stack is
 * initialized. If the stack is already initialized, then the callback will
 * be made immediately, otherwise it will be deferred until
 * vchiq_call_connected_callbacks is called.
 */
void vchiq_add_connected_callback(struct vchiq_device *device, void (*callback)(void))
{
	struct vchiq_drv_mgmt *drv_mgmt = device->drv_mgmt;

	if (mutex_lock_killable(&drv_mgmt->connected_mutex))
		return;

	if (drv_mgmt->connected) {
		/* We're already connected. Call the callback immediately. */
		callback();
	} else {
		if (drv_mgmt->num_deferred_callbacks >= VCHIQ_DRV_MAX_CALLBACKS) {
			dev_err(&device->dev,
				"core: deferred callbacks(%d) exceeded the maximum limit(%d)\n",
				drv_mgmt->num_deferred_callbacks, VCHIQ_DRV_MAX_CALLBACKS);
		} else {
			drv_mgmt->deferred_callback[drv_mgmt->num_deferred_callbacks] =
				callback;
			drv_mgmt->num_deferred_callbacks++;
		}
	}
	mutex_unlock(&drv_mgmt->connected_mutex);
}
EXPORT_SYMBOL(vchiq_add_connected_callback);
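
/*
 * Illustrative sketch, not part of this driver: how a hypothetical VCHIQ
 * client module might use the deferred-callback API above. The names
 * my_client_connected() and my_client_probe() are assumptions made for
 * the example.
 *
 *	static void my_client_connected(void)
 *	{
 *		// VCHIQ is up; it is now safe to open services.
 *	}
 *
 *	static int my_client_probe(struct vchiq_device *device)
 *	{
 *		// Runs the callback immediately if already connected,
 *		// otherwise defers it until the first vchiq connect.
 *		vchiq_add_connected_callback(device, my_client_connected);
 *		return 0;
 *	}
 */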

static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
{
	struct device *dev = &pdev->dev;
	struct vchiq_drv_mgmt *drv_mgmt = platform_get_drvdata(pdev);
	struct rpi_firmware *fw = drv_mgmt->fw;
	struct vchiq_slot_zero *vchiq_slot_zero;
	void *slot_mem;
	dma_addr_t slot_phys;
	u32 channelbase;
	int slot_mem_size, frag_mem_size;
	int err, irq, i;

	/*
	 * VCHI messages between the CPU and firmware use
	 * 32-bit bus addresses.
	 */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (err < 0)
		return err;

	drv_mgmt->fragments_size = 2 * drv_mgmt->info->cache_line_size;

	/* Allocate space for the channels in coherent memory */
	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(drv_mgmt->fragments_size * MAX_FRAGMENTS);

	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
				       &slot_phys, GFP_KERNEL);
	if (!slot_mem) {
		dev_err(dev, "could not allocate DMA memory\n");
		return -ENOMEM;
	}

	WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(dev, slot_mem, slot_mem_size);
	if (!vchiq_slot_zero)
		return -ENOMEM;

	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)slot_phys + slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	drv_mgmt->fragments_base = (char *)slot_mem + slot_mem_size;

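	/*
	 * Build a singly linked free list of fragment buffers inside the
	 * coherent region itself: the first pointer-sized bytes of each free
	 * fragment hold the address of the next free fragment, and the last
	 * entry is terminated with NULL.
	 */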
	drv_mgmt->free_fragments = drv_mgmt->fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(char **)&drv_mgmt->fragments_base[i * drv_mgmt->fragments_size] =
			&drv_mgmt->fragments_base[(i + 1) * drv_mgmt->fragments_size];
	}
	*(char **)&drv_mgmt->fragments_base[i * drv_mgmt->fragments_size] = NULL;
	sema_init(&drv_mgmt->free_fragments_sema, MAX_FRAGMENTS);
	sema_init(&drv_mgmt->free_fragments_mutex, 1);

	err = vchiq_init_state(state, vchiq_slot_zero, dev);
	if (err)
		return err;

	drv_mgmt->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(drv_mgmt->regs))
		return PTR_ERR(drv_mgmt->regs);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
			       "VCHIQ doorbell", state);
	if (err) {
		dev_err(dev, "failed to register irq=%d\n", irq);
		return err;
	}

	/* Send the base address of the slots to VideoCore */
	channelbase = slot_phys;
	err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
				    &channelbase, sizeof(channelbase));
	if (err) {
		dev_err(dev, "failed to send firmware property: %d\n", err);
		return err;
	}

	if (channelbase) {
		dev_err(dev, "failed to set channelbase (response: %x)\n",
			channelbase);
		return -ENXIO;
	}

	dev_dbg(&pdev->dev, "arm: vchiq_init - done (slots %pK, phys %pad)\n",
		vchiq_slot_zero, &slot_phys);

	mutex_init(&drv_mgmt->connected_mutex);
	vchiq_call_connected_callbacks(drv_mgmt);

	return 0;
}

int
vchiq_platform_init_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *platform_state;

	platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
	if (!platform_state)
		return -ENOMEM;

	rwlock_init(&platform_state->susp_res_lock);

	init_completion(&platform_state->ka_evt);
	atomic_set(&platform_state->ka_use_count, 0);
	atomic_set(&platform_state->ka_use_ack_count, 0);
	atomic_set(&platform_state->ka_release_count, 0);

	platform_state->state = state;

	state->platform_state = (struct opaque_platform_state *)platform_state;

	return 0;
}

static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
{
	return (struct vchiq_arm_state *)state->platform_state;
}

static void
vchiq_platform_uninit(struct vchiq_drv_mgmt *mgmt)
{
	struct vchiq_arm_state *arm_state;

	kthread_stop(mgmt->state.sync_thread);
	kthread_stop(mgmt->state.recycle_thread);
	kthread_stop(mgmt->state.slot_handler_thread);

	arm_state = vchiq_platform_get_arm_state(&mgmt->state);
	if (!IS_ERR_OR_NULL(arm_state->ka_thread))
		kthread_stop(arm_state->ka_thread);
}

void vchiq_dump_platform_state(struct seq_file *f)
{
	seq_puts(f, "  Platform: 2835 (VC master)\n");
}

#define VCHIQ_INIT_RETRIES 10
int vchiq_initialise(struct vchiq_state *state, struct vchiq_instance **instance_out)
{
	struct vchiq_instance *instance = NULL;
	int i, ret;

	/*
	 * VideoCore may not be ready due to boot-up timing.
	 * It may never be ready if kernel and firmware are mismatched, so
	 * don't block forever.
	 */
	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
		if (vchiq_remote_initialised(state))
			break;
		usleep_range(500, 600);
	}
	if (i == VCHIQ_INIT_RETRIES) {
		dev_err(state->dev, "core: %s: Videocore not initialized\n", __func__);
		ret = -ENOTCONN;
		goto failed;
	} else if (i > 0) {
		dev_warn(state->dev, "core: %s: videocore initialized after %d retries\n",
			 __func__, i);
	}

	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance) {
		ret = -ENOMEM;
		goto failed;
	}

	instance->connected = 0;
	instance->state = state;
	mutex_init(&instance->bulk_waiter_list_mutex);
	INIT_LIST_HEAD(&instance->bulk_waiter_list);

	*instance_out = instance;

	ret = 0;

failed:
	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);

	return ret;
}
EXPORT_SYMBOL(vchiq_initialise);

void free_bulk_waiter(struct vchiq_instance *instance)
{
	struct bulk_waiter_node *waiter, *next;

	list_for_each_entry_safe(waiter, next,
				 &instance->bulk_waiter_list, list) {
		list_del(&waiter->list);
		dev_dbg(instance->state->dev,
			"arm: bulk_waiter - cleaned up %pK for pid %d\n",
			waiter, waiter->pid);
		kfree(waiter);
	}
}

int vchiq_shutdown(struct vchiq_instance *instance)
{
	struct vchiq_state *state = instance->state;
	int ret = 0;

	if (mutex_lock_killable(&state->mutex))
		return -EAGAIN;

	/* Remove all services */
	vchiq_shutdown_internal(state, instance);

	mutex_unlock(&state->mutex);

	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);

	free_bulk_waiter(instance);
	kfree(instance);

	return ret;
}
EXPORT_SYMBOL(vchiq_shutdown);

static int vchiq_is_connected(struct vchiq_instance *instance)
{
	return instance->connected;
}

int vchiq_connect(struct vchiq_instance *instance)
{
	struct vchiq_state *state = instance->state;
	int ret;

	if (mutex_lock_killable(&state->mutex)) {
		dev_dbg(state->dev,
			"core: call to mutex_lock failed\n");
		ret = -EAGAIN;
		goto failed;
	}
	ret = vchiq_connect_internal(state, instance);

	if (!ret)
		instance->connected = 1;

	mutex_unlock(&state->mutex);

failed:
	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);

	return ret;
}
EXPORT_SYMBOL(vchiq_connect);

static int
vchiq_add_service(struct vchiq_instance *instance,
		  const struct vchiq_service_params_kernel *params,
		  unsigned int *phandle)
{
	struct vchiq_state *state = instance->state;
	struct vchiq_service *service = NULL;
	int srvstate, ret;

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	srvstate = vchiq_is_connected(instance)
		? VCHIQ_SRVSTATE_LISTENING
		: VCHIQ_SRVSTATE_HIDDEN;

	service = vchiq_add_service_internal(state, params, srvstate, instance, NULL);

	if (service) {
		*phandle = service->handle;
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);

	return ret;
}

int
vchiq_open_service(struct vchiq_instance *instance,
		   const struct vchiq_service_params_kernel *params,
		   unsigned int *phandle)
{
	struct vchiq_state   *state = instance->state;
	struct vchiq_service *service = NULL;
	int ret = -EINVAL;

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	if (!vchiq_is_connected(instance))
		goto failed;

	service = vchiq_add_service_internal(state, params, VCHIQ_SRVSTATE_OPENING, instance, NULL);

	if (service) {
		*phandle = service->handle;
		ret = vchiq_open_service_internal(service, current->pid);
		if (ret) {
			vchiq_remove_service(instance, service->handle);
			*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
		}
	}

failed:
	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);

	return ret;
}
EXPORT_SYMBOL(vchiq_open_service);
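
/*
 * Illustrative sketch, not part of this driver: the typical kernel-client
 * call sequence for the API above, mirroring what the keepalive thread
 * below does. The fourcc 'd','e','m','o' and my_service_callback are
 * assumptions made for the example; error handling is elided.
 *
 *	struct vchiq_instance *instance;
 *	unsigned int handle;
 *	struct vchiq_service_params_kernel params = {
 *		.fourcc      = VCHIQ_MAKE_FOURCC('d', 'e', 'm', 'o'),
 *		.callback    = my_service_callback,
 *		.version     = 1,
 *		.version_min = 1,
 *	};
 *
 *	vchiq_initialise(state, &instance);
 *	vchiq_connect(instance);
 *	vchiq_open_service(instance, &params, &handle);
 *	...
 *	vchiq_shutdown(instance);
 */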

int
vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const void *data,
		    unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
{
	struct vchiq_bulk bulk_params = {};
	int ret;

	switch (mode) {
	case VCHIQ_BULK_MODE_NOCALLBACK:
	case VCHIQ_BULK_MODE_CALLBACK:

		bulk_params.offset = (void *)data;
		bulk_params.mode = mode;
		bulk_params.size = size;
		bulk_params.cb_data = userdata;
		bulk_params.dir = VCHIQ_BULK_TRANSMIT;

		ret = vchiq_bulk_xfer_callback(instance, handle, &bulk_params);
		break;
	case VCHIQ_BULK_MODE_BLOCKING:
		bulk_params.offset = (void *)data;
		bulk_params.mode = mode;
		bulk_params.size = size;
		bulk_params.dir = VCHIQ_BULK_TRANSMIT;

		ret = vchiq_blocking_bulk_transfer(instance, handle, &bulk_params);
		break;
	default:
		return -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(vchiq_bulk_transmit);

int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle,
		       void *data, unsigned int size, void *userdata,
		       enum vchiq_bulk_mode mode)
{
	struct vchiq_bulk bulk_params = {};
	int ret;

	switch (mode) {
	case VCHIQ_BULK_MODE_NOCALLBACK:
	case VCHIQ_BULK_MODE_CALLBACK:

		bulk_params.offset = (void *)data;
		bulk_params.mode = mode;
		bulk_params.size = size;
		bulk_params.cb_data = userdata;
		bulk_params.dir = VCHIQ_BULK_RECEIVE;

		ret = vchiq_bulk_xfer_callback(instance, handle, &bulk_params);
		break;
	case VCHIQ_BULK_MODE_BLOCKING:
		bulk_params.offset = (void *)data;
		bulk_params.mode = mode;
		bulk_params.size = size;
		bulk_params.dir = VCHIQ_BULK_RECEIVE;

		ret = vchiq_blocking_bulk_transfer(instance, handle, &bulk_params);
		break;
	default:
		return -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(vchiq_bulk_receive);
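
/*
 * Illustrative sketch, not part of this driver: a blocking bulk receive
 * into a kernel buffer, assuming instance and handle were obtained as in
 * the sketch above. With VCHIQ_BULK_MODE_BLOCKING the call only returns
 * once the transfer has completed (or failed), so no callback cookie is
 * passed.
 *
 *	void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 *	int ret;
 *
 *	if (buf)
 *		ret = vchiq_bulk_receive(instance, handle, buf, PAGE_SIZE,
 *					 NULL, VCHIQ_BULK_MODE_BLOCKING);
 */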

static int
vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
			     struct vchiq_bulk *bulk_params)
{
	struct vchiq_service *service;
	struct bulk_waiter_node *waiter = NULL, *iter;
	int ret;

	service = find_service_by_handle(instance, handle);
	if (!service)
		return -EINVAL;

	vchiq_service_put(service);

	mutex_lock(&instance->bulk_waiter_list_mutex);
	list_for_each_entry(iter, &instance->bulk_waiter_list, list) {
		if (iter->pid == current->pid) {
			list_del(&iter->list);
			waiter = iter;
			break;
		}
	}
	mutex_unlock(&instance->bulk_waiter_list_mutex);

	if (waiter) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* This thread has an outstanding bulk transfer. */
			/* FIXME: why compare a dma address to a pointer? */
			if ((bulk->dma_addr != (dma_addr_t)(uintptr_t)bulk_params->dma_addr) ||
			    (bulk->size != bulk_params->size)) {
				/*
				 * This is not a retry of the previous one.
				 * Cancel the signal when the transfer completes.
				 */
				spin_lock(&service->state->bulk_waiter_spinlock);
				bulk->waiter = NULL;
				spin_unlock(&service->state->bulk_waiter_spinlock);
			}
		}
	} else {
		waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
		if (!waiter)
			return -ENOMEM;
	}

	bulk_params->waiter = &waiter->bulk_waiter;

	ret = vchiq_bulk_xfer_blocking(instance, handle, bulk_params);
	if ((ret != -EAGAIN) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* Cancel the signal when the transfer completes. */
			spin_lock(&service->state->bulk_waiter_spinlock);
			bulk->waiter = NULL;
			spin_unlock(&service->state->bulk_waiter_spinlock);
		}
		kfree(waiter);
	} else {
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		dev_dbg(instance->state->dev, "arm: saved bulk_waiter %pK for pid %d\n",
			waiter, current->pid);
	}

	return ret;
}

static int
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
	       struct vchiq_header *header, struct user_service *user_service,
	       void *cb_data, void __user *cb_userdata)
{
	struct vchiq_completion_data_kernel *completion;
	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(instance->state->dev);
	int insert;

	DEBUG_INITIALISE(mgmt->state.local);

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		dev_dbg(instance->state->dev, "core: completion queue full\n");
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (wait_for_completion_interruptible(&instance->remove_event)) {
			dev_dbg(instance->state->dev, "arm: service_callback interrupted\n");
			return -EAGAIN;
		} else if (instance->closing) {
			dev_dbg(instance->state->dev, "arm: service_callback closing\n");
			return 0;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->cb_data = cb_data;
	completion->cb_userdata = cb_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/*
		 * Take an extra reference, to be held until
		 * this CLOSED notification is delivered.
		 */
		vchiq_service_get(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/*
	 * A write barrier is needed here to ensure that the entire completion
	 * record is written out before the insert point.
	 */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	complete(&instance->insert_event);

	return 0;
}
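
/*
 * Note on the index arithmetic above: completion_insert and
 * completion_remove are free-running counters; only the array index is
 * reduced, via "insert & (MAX_COMPLETIONS - 1)", which assumes
 * MAX_COMPLETIONS is a power of two. The difference of the two counters
 * therefore gives the number of outstanding completions directly.
 */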

static int
service_single_message(struct vchiq_instance *instance,
		       enum vchiq_reason reason, struct vchiq_service *service,
		       void *cb_data, void __user *cb_userdata)
{
	struct user_service *user_service;

	user_service = (struct user_service *)service->base.userdata;

	dev_dbg(service->state->dev, "arm: msg queue full\n");
	/*
	 * If there is no MESSAGE_AVAILABLE in the completion
	 * queue, add one
	 */
	if ((user_service->message_available_pos -
	     instance->completion_remove) < 0) {
		int ret;

		dev_dbg(instance->state->dev,
			"arm: Inserting extra MESSAGE_AVAILABLE\n");
		ret = add_completion(instance, reason, NULL, user_service,
				     cb_data, cb_userdata);
		if (ret)
			return ret;
	}

	if (wait_for_completion_interruptible(&user_service->remove_event)) {
		dev_dbg(instance->state->dev, "arm: interrupted\n");
		return -EAGAIN;
	} else if (instance->closing) {
		dev_dbg(instance->state->dev, "arm: closing\n");
		return -EINVAL;
	}

	return 0;
}

int
service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
		 struct vchiq_header *header, unsigned int handle,
		 void *cb_data, void __user *cb_userdata)
{
	/*
	 * How do we ensure the callback goes to the right client?
	 * The service_user data points to a user_service record
	 * containing the original callback and the user state structure, which
	 * contains a circular buffer for completion records.
	 */
	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(instance->state->dev);
	struct user_service *user_service;
	struct vchiq_service *service;
	bool skip_completion = false;

	DEBUG_INITIALISE(mgmt->state.local);

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	rcu_read_lock();
	service = handle_to_service(instance, handle);
	if (WARN_ON(!service)) {
		rcu_read_unlock();
		return 0;
	}

	user_service = (struct user_service *)service->base.userdata;

	if (instance->closing) {
		rcu_read_unlock();
		return 0;
	}

	/*
	 * Since we hop between different synchronization mechanisms,
	 * taking an extra reference keeps the implementation simpler.
	 */
	vchiq_service_get(service);
	rcu_read_unlock();

	dev_dbg(service->state->dev,
		"arm: service %p(%d,%p), reason %d, header %p, instance %p, cb_data %p, cb_userdata %p\n",
		user_service, service->localport, user_service->userdata,
		reason, header, instance, cb_data, cb_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&service->state->msg_queue_spinlock);
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			int ret;

			spin_unlock(&service->state->msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);

			ret = service_single_message(instance, reason, service,
						     cb_data, cb_userdata);
			if (ret) {
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				vchiq_service_put(service);
				return ret;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&service->state->msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/*
		 * If there is a thread waiting in DEQUEUE_MESSAGE, or if
		 * there is a MESSAGE_AVAILABLE in the completion queue then
		 * bypass the completion queue.
		 */
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&service->state->msg_queue_spinlock);
		complete(&user_service->insert_event);

		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	vchiq_service_put(service);

	if (skip_completion)
		return 0;

	return add_completion(instance, reason, header, user_service,
			      cb_data, cb_userdata);
}

void vchiq_dump_platform_instances(struct vchiq_state *state, struct seq_file *f)
{
	int i;

	if (!vchiq_remote_initialised(state))
		return;

	/*
	 * There is no list of instances, so instead scan all services,
	 * marking those that have been dumped.
	 */

	rcu_read_lock();
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;

		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback)
			continue;

		instance = service->instance;
		if (instance)
			instance->mark = 0;
	}
	rcu_read_unlock();

	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;

		rcu_read_lock();
		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback) {
			rcu_read_unlock();
			continue;
		}

		instance = service->instance;
		if (!instance || instance->mark) {
			rcu_read_unlock();
			continue;
		}
		rcu_read_unlock();

		seq_printf(f, "Instance %pK: pid %d,%s completions %d/%d\n",
			   instance, instance->pid,
			   instance->connected ? " connected, " :
			   "",
			   instance->completion_insert -
			   instance->completion_remove,
			   MAX_COMPLETIONS);
		instance->mark = 1;
	}
}

void vchiq_dump_platform_service_state(struct seq_file *f,
				       struct vchiq_service *service)
{
	struct user_service *user_service =
			(struct user_service *)service->base.userdata;

	seq_printf(f, "  instance %pK", service->instance);

	if ((service->base.callback == service_callback) && user_service->is_vchi) {
		seq_printf(f, ", %d/%d messages",
			   user_service->msg_insert - user_service->msg_remove,
			   MSG_QUEUE_SIZE);

		if (user_service->dequeue_pending)
			seq_puts(f, " (dequeue pending)");
	}

	seq_puts(f, "\n");
}

/*
 * Autosuspend related functionality
 */

static int
vchiq_keepalive_vchiq_callback(struct vchiq_instance *instance,
			       enum vchiq_reason reason,
			       struct vchiq_header *header,
			       unsigned int service_user,
			       void *cb_data, void __user *cb_userdata)
{
	dev_err(instance->state->dev, "suspend: %s: callback reason %d\n",
		__func__, reason);
	return 0;
}

static int
vchiq_keepalive_thread_func(void *v)
{
	struct vchiq_state *state = (struct vchiq_state *)v;
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct vchiq_instance *instance;
	unsigned int ka_handle;
	int ret;

	struct vchiq_service_params_kernel params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback    = vchiq_keepalive_vchiq_callback,
		.version     = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	ret = vchiq_initialise(state, &instance);
	if (ret) {
		dev_err(state->dev, "suspend: %s: vchiq_initialise failed %d\n", __func__, ret);
		goto exit;
	}

	ret = vchiq_connect(instance);
	if (ret) {
		dev_err(state->dev, "suspend: %s: vchiq_connect failed %d\n", __func__, ret);
		goto shutdown;
	}

	ret = vchiq_add_service(instance, &params, &ka_handle);
	if (ret) {
		dev_err(state->dev, "suspend: %s: vchiq_add_service failed %d\n",
			__func__, ret);
		goto shutdown;
	}

	while (!kthread_should_stop()) {
		long rc = 0, uc = 0;

		if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
			dev_dbg(state->dev, "suspend: %s: interrupted\n", __func__);
			flush_signals(current);
			continue;
		}

		/*
		 * Read and clear the counters. Do release_count then
		 * use_count to prevent getting more releases than uses.
		 */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/*
		 * Call the use/release service the requisite number of times.
		 * Process uses before releases so use counts don't go negative.
		 */
		while (uc--) {
			atomic_inc(&arm_state->ka_use_ack_count);
			ret = vchiq_use_service(instance, ka_handle);
			if (ret) {
				dev_err(state->dev, "suspend: %s: vchiq_use_service error %d\n",
					__func__, ret);
			}
		}
		while (rc--) {
			ret = vchiq_release_service(instance, ka_handle);
			if (ret) {
				dev_err(state->dev, "suspend: %s: vchiq_release_service error %d\n",
					__func__, ret);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}
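
/*
 * Keepalive flow, for orientation: the VideoCore peer requests a use or
 * release via vchiq_on_remote_use()/vchiq_on_remote_release() below. Those
 * helpers bump ka_use_count/ka_release_count and complete ka_evt; the
 * thread above then wakes, drains both counters, and replays them as
 * vchiq_use_service()/vchiq_release_service() calls on the "KEEP" service,
 * keeping the global videocore use count in step with the peer.
 */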

int
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
		   enum USE_TYPE_E use_type)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	int ret = 0;
	char entity[64];
	int *entity_uc;
	int local_uc;

	if (!arm_state) {
		ret = -EINVAL;
		goto out;
	}

	if (use_type == USE_TYPE_VCHIQ) {
		snprintf(entity, sizeof(entity), "VCHIQ:   ");
		entity_uc = &arm_state->peer_use_count;
	} else if (service) {
		snprintf(entity, sizeof(entity), "%p4cc:%03d",
			 &service->base.fourcc,
			 service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		dev_err(state->dev, "suspend: %s: null service ptr\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	local_uc = ++arm_state->videocore_use_count;
	++(*entity_uc);

	dev_dbg(state->dev, "suspend: %s count %d, state count %d\n",
		entity, *entity_uc, local_uc);

	write_unlock_bh(&arm_state->susp_res_lock);

	if (!ret) {
		int ret = 0;
		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);

		while (ack_cnt && !ret) {
			/* Send the use notify to videocore */
			ret = vchiq_send_remote_use_active(state);
			if (!ret)
				ack_cnt--;
			else
				atomic_add(ack_cnt, &arm_state->ka_use_ack_count);
		}
	}

out:
	dev_dbg(state->dev, "suspend: exit %d\n", ret);
	return ret;
}

int
vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	int ret = 0;
	char entity[64];
	int *entity_uc;

	if (!arm_state) {
		ret = -EINVAL;
		goto out;
	}

	if (service) {
		snprintf(entity, sizeof(entity), "%p4cc:%03d",
			 &service->base.fourcc,
			 service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		snprintf(entity, sizeof(entity), "PEER:   ");
		entity_uc = &arm_state->peer_use_count;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	if (!arm_state->videocore_use_count || !(*entity_uc)) {
		WARN_ON(!arm_state->videocore_use_count);
		WARN_ON(!(*entity_uc));
		ret = -EINVAL;
		goto unlock;
	}
	--arm_state->videocore_use_count;
	--(*entity_uc);

	dev_dbg(state->dev, "suspend: %s count %d, state count %d\n",
		entity, *entity_uc, arm_state->videocore_use_count);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	dev_dbg(state->dev, "suspend: exit %d\n", ret);
	return ret;
}

void
vchiq_on_remote_use(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	atomic_inc(&arm_state->ka_use_count);
	complete(&arm_state->ka_evt);
}

void
vchiq_on_remote_release(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	atomic_inc(&arm_state->ka_release_count);
	complete(&arm_state->ka_evt);
}

int
vchiq_use_service_internal(struct vchiq_service *service)
{
	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}

int
vchiq_release_service_internal(struct vchiq_service *service)
{
	return vchiq_release_internal(service->state, service);
}

struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
{
	return &instance->debugfs_node;
}

int
vchiq_instance_get_use_count(struct vchiq_instance *instance)
{
	struct vchiq_service *service;
	int use_count = 0, i;

	i = 0;
	rcu_read_lock();
	while ((service = __next_service_by_instance(instance->state,
						     instance, &i)))
		use_count += service->service_use_count;
	rcu_read_unlock();
	return use_count;
}

int
vchiq_instance_get_pid(struct vchiq_instance *instance)
{
	return instance->pid;
}

int
vchiq_instance_get_trace(struct vchiq_instance *instance)
{
	return instance->trace;
}

void
vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
{
	struct vchiq_service *service;
	int i;

	i = 0;
	rcu_read_lock();
	while ((service = __next_service_by_instance(instance->state,
						     instance, &i)))
		service->trace = trace;
	rcu_read_unlock();
	instance->trace = (trace != 0);
}

int
vchiq_use_service(struct vchiq_instance *instance, unsigned int handle)
{
	int ret = -EINVAL;
	struct vchiq_service *service = find_service_by_handle(instance, handle);

	if (service) {
		ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
		vchiq_service_put(service);
	}
	return ret;
}
EXPORT_SYMBOL(vchiq_use_service);

int
vchiq_release_service(struct vchiq_instance *instance, unsigned int handle)
{
	int ret = -EINVAL;
	struct vchiq_service *service = find_service_by_handle(instance, handle);

	if (service) {
		ret = vchiq_release_internal(service->state, service);
		vchiq_service_put(service);
	}
	return ret;
}
EXPORT_SYMBOL(vchiq_release_service);
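
/*
 * Illustrative sketch, not part of this driver: use/release calls are
 * expected to be balanced per service, like a refcount that keeps
 * VideoCore from suspending while work is in flight.
 *
 *	vchiq_use_service(instance, handle);
 *	// ... perform message or bulk traffic on the service ...
 *	vchiq_release_service(instance, handle);
 */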

struct service_data_struct {
	int fourcc;
	int clientid;
	int use_count;
};

void
vchiq_dump_service_use_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct service_data_struct *service_data;
	int i, found = 0;
	/*
	 * If there's more than 64 services, only dump ones with
	 * non-zero counts
	 */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";

	int peer_count;
	int vc_use_count;
	int active_services;

	if (!arm_state)
		return;

	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
				     GFP_KERNEL);
	if (!service_data)
		return;

	read_lock_bh(&arm_state->susp_res_lock);
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > MAX_SERVICES)
		only_nonzero = 1;

	rcu_read_lock();
	for (i = 0; i < active_services; i++) {
		struct vchiq_service *service_ptr =
			rcu_dereference(state->services[i]);

		if (!service_ptr)
			continue;

		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
			continue;

		service_data[found].fourcc = service_ptr->base.fourcc;
		service_data[found].clientid = service_ptr->client_id;
		service_data[found].use_count = service_ptr->service_use_count;
		found++;
		if (found >= MAX_SERVICES)
			break;
	}
	rcu_read_unlock();

	read_unlock_bh(&arm_state->susp_res_lock);

	if (only_nonzero)
		dev_warn(state->dev,
			 "suspend: Too many active services (%d). Only dumping up to first %d services with non-zero use-count\n",
			 active_services, found);

	for (i = 0; i < found; i++) {
		dev_warn(state->dev,
			 "suspend: %p4cc:%d service count %d %s\n",
			 &service_data[i].fourcc,
			 service_data[i].clientid, service_data[i].use_count,
			 service_data[i].use_count ? nz : "");
	}
	dev_warn(state->dev, "suspend: VCHIQ use count %d\n", peer_count);
	dev_warn(state->dev, "suspend: Overall vchiq instance use count %d\n", vc_use_count);

	kfree(service_data);
}

int
vchiq_check_service(struct vchiq_service *service)
{
	struct vchiq_arm_state *arm_state;
	int ret = -EINVAL;

	if (!service || !service->state)
		goto out;

	arm_state = vchiq_platform_get_arm_state(service->state);

	read_lock_bh(&arm_state->susp_res_lock);
	if (service->service_use_count)
		ret = 0;
	read_unlock_bh(&arm_state->susp_res_lock);

	if (ret) {
		dev_err(service->state->dev,
			"suspend: %s:  %p4cc:%d service count %d, state count %d\n",
			__func__, &service->base.fourcc, service->client_id,
			service->service_use_count, arm_state->videocore_use_count);
		vchiq_dump_service_use_state(service->state);
	}
out:
	return ret;
}

void vchiq_platform_conn_state_changed(struct vchiq_state *state,
				       enum vchiq_connstate oldstate,
				       enum vchiq_connstate newstate)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	char threadname[16];

	dev_dbg(state->dev, "suspend: %d: %s->%s\n",
		state->id, get_conn_state_name(oldstate), get_conn_state_name(newstate));
	if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
		return;

	write_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->first_connect) {
		write_unlock_bh(&arm_state->susp_res_lock);
		return;
	}

	arm_state->first_connect = 1;
	write_unlock_bh(&arm_state->susp_res_lock);
	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
		 state->id);
	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
					      (void *)state,
					      threadname);
	if (IS_ERR(arm_state->ka_thread)) {
		dev_err(state->dev, "suspend: Couldn't create thread %s\n",
			threadname);
	} else {
		wake_up_process(arm_state->ka_thread);
	}
}

static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_info },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_info },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);

static int vchiq_probe(struct platform_device *pdev)
{
	const struct vchiq_platform_info *info;
	struct vchiq_drv_mgmt *mgmt;
	int ret;

	info = of_device_get_match_data(&pdev->dev);
	if (!info)
		return -EINVAL;

	struct device_node *fw_node __free(device_node) =
		of_find_compatible_node(NULL, NULL, "raspberrypi,bcm2835-firmware");
	if (!fw_node) {
		dev_err(&pdev->dev, "Missing firmware node\n");
		return -ENOENT;
	}

	mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
	if (!mgmt)
		return -ENOMEM;

	mgmt->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
	if (!mgmt->fw)
		return -EPROBE_DEFER;

	mgmt->info = info;
	platform_set_drvdata(pdev, mgmt);

	ret = vchiq_platform_init(pdev, &mgmt->state);
	if (ret) {
		dev_err(&pdev->dev, "arm: Could not initialize vchiq platform\n");
		return ret;
	}

	dev_dbg(&pdev->dev, "arm: platform initialised - version %d (min %d)\n",
		VCHIQ_VERSION, VCHIQ_VERSION_MIN);

	/*
	 * Simply exit on error since the function handles cleanup in
	 * cases of failure.
	 */
	ret = vchiq_register_chrdev(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "arm: Failed to initialize vchiq cdev\n");
		vchiq_platform_uninit(mgmt);
		return ret;
	}

	vchiq_debugfs_init(&mgmt->state);

	bcm2835_audio = vchiq_device_register(&pdev->dev, "bcm2835-audio");
	bcm2835_camera = vchiq_device_register(&pdev->dev, "bcm2835-camera");

	return 0;
}

static void vchiq_remove(struct platform_device *pdev)
{
	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(&pdev->dev);

	vchiq_device_unregister(bcm2835_audio);
	vchiq_device_unregister(bcm2835_camera);
	vchiq_debugfs_deinit();
	vchiq_deregister_chrdev();
	vchiq_platform_uninit(mgmt);
}

static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove = vchiq_remove,
};

static int __init vchiq_driver_init(void)
{
	int ret;

	ret = bus_register(&vchiq_bus_type);
	if (ret) {
		pr_err("Failed to register %s\n", vchiq_bus_type.name);
		return ret;
	}

	ret = platform_driver_register(&vchiq_driver);
	if (ret) {
		pr_err("Failed to register vchiq driver\n");
		bus_unregister(&vchiq_bus_type);
	}

	return ret;
}
module_init(vchiq_driver_init);

static void __exit vchiq_driver_exit(void)
{
	bus_unregister(&vchiq_bus_type);
	platform_driver_unregister(&vchiq_driver);
}
module_exit(vchiq_driver_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");