1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 // Copyright(c) 2015-17 Intel Corporation.
3 
4 #include <linux/acpi.h>
5 #include <linux/delay.h>
6 #include <linux/mod_devicetable.h>
7 #include <linux/pm_runtime.h>
8 #include <linux/soundwire/sdw_registers.h>
9 #include <linux/soundwire/sdw.h>
10 #include <linux/soundwire/sdw_type.h>
11 #include "bus.h"
12 #include "irq.h"
13 #include "sysfs_local.h"
14 
15 static DEFINE_IDA(sdw_bus_ida);
16 
17 static int sdw_get_id(struct sdw_bus *bus)
18 {
19 	int rc = ida_alloc(&sdw_bus_ida, GFP_KERNEL);
20 
21 	if (rc < 0)
22 		return rc;
23 
24 	bus->id = rc;
25 
26 	if (bus->controller_id == -1)
27 		bus->controller_id = rc;
28 
29 	return 0;
30 }
31 
32 /**
33  * sdw_bus_master_add() - add a bus Master instance
34  * @bus: bus instance
35  * @parent: parent device
36  * @fwnode: firmware node handle
37  *
38  * Initializes the bus instance, reads properties and creates child
39  * devices.
40  */
41 int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
42 		       struct fwnode_handle *fwnode)
43 {
44 	struct sdw_master_prop *prop = NULL;
45 	int ret;
46 
47 	if (!parent) {
48 		pr_err("SoundWire parent device is not set\n");
49 		return -ENODEV;
50 	}
51 
52 	ret = sdw_get_id(bus);
53 	if (ret < 0) {
54 		dev_err(parent, "Failed to get bus id\n");
55 		return ret;
56 	}
57 
58 	ret = sdw_master_device_add(bus, parent, fwnode);
59 	if (ret < 0) {
60 		dev_err(parent, "Failed to add master device at link %d\n",
61 			bus->link_id);
62 		return ret;
63 	}
64 
65 	if (!bus->ops) {
66 		dev_err(bus->dev, "SoundWire Bus ops are not set\n");
67 		return -EINVAL;
68 	}
69 
70 	if (!bus->compute_params) {
71 		dev_err(bus->dev,
72 			"Bandwidth allocation not configured, compute_params not set\n");
73 		return -EINVAL;
74 	}
75 
76 	/*
77 	 * Give each bus_lock and msg_lock a unique key so that lockdep won't
78 	 * trigger a deadlock warning when the locks of several buses are
79 	 * grabbed during configuration of a multi-bus stream.
80 	 */
81 	lockdep_register_key(&bus->msg_lock_key);
82 	__mutex_init(&bus->msg_lock, "msg_lock", &bus->msg_lock_key);
83 
84 	lockdep_register_key(&bus->bus_lock_key);
85 	__mutex_init(&bus->bus_lock, "bus_lock", &bus->bus_lock_key);
86 
87 	INIT_LIST_HEAD(&bus->slaves);
88 	INIT_LIST_HEAD(&bus->m_rt_list);
89 
90 	/*
91 	 * Initialize multi_link flag
92 	 */
93 	bus->multi_link = false;
94 	if (bus->ops->read_prop) {
95 		ret = bus->ops->read_prop(bus);
96 		if (ret < 0) {
97 			dev_err(bus->dev,
98 				"Bus read properties failed:%d\n", ret);
99 			return ret;
100 		}
101 	}
102 
103 	sdw_bus_debugfs_init(bus);
104 
105 	/*
106 	 * Device numbers in SoundWire are 0 through 15. Enumeration device
107 	 * number (0), Broadcast device number (15), Group numbers (12 and
108 	 * 13) and Master device number (14) are not used for assignment so
109 	 * mask these and other higher bits.
110 	 */
111 
112 	/* Set higher order bits */
113 	*bus->assigned = ~GENMASK(SDW_BROADCAST_DEV_NUM, SDW_ENUM_DEV_NUM);
114 
115 	/* Set enumeration device number and broadcast device number */
116 	set_bit(SDW_ENUM_DEV_NUM, bus->assigned);
117 	set_bit(SDW_BROADCAST_DEV_NUM, bus->assigned);
118 
119 	/* Set group device numbers and master device number */
120 	set_bit(SDW_GROUP12_DEV_NUM, bus->assigned);
121 	set_bit(SDW_GROUP13_DEV_NUM, bus->assigned);
122 	set_bit(SDW_MASTER_DEV_NUM, bus->assigned);
123 
124 	/*
125 	 * SDW is an enumerable bus, but devices can be powered off, in
126 	 * which case they cannot report themselves as present.
127 	 *
128 	 * Create Slave devices based on Slaves described in
129 	 * the respective firmware (ACPI/DT)
130 	 */
131 	if (IS_ENABLED(CONFIG_ACPI) && ACPI_HANDLE(bus->dev))
132 		ret = sdw_acpi_find_slaves(bus);
133 	else if (IS_ENABLED(CONFIG_OF) && bus->dev->of_node)
134 		ret = sdw_of_find_slaves(bus);
135 	else
136 		ret = -ENOTSUPP; /* No ACPI/DT so error out */
137 
138 	if (ret < 0) {
139 		dev_err(bus->dev, "Finding slaves failed:%d\n", ret);
140 		return ret;
141 	}
142 
143 	/*
144 	 * Initialize clock values based on Master properties. The max
145 	 * frequency is read from max_clk_freq property. Current assumption
146 	 * is that the bus will start at highest clock frequency when
147 	 * powered on.
148 	 *
149 	 * Default active bank will be 0 as out of reset the Slaves have
150 	 * to start with bank 0 (Table 40 of Spec)
151 	 */
152 	prop = &bus->prop;
153 	bus->params.max_dr_freq = prop->max_clk_freq * SDW_DOUBLE_RATE_FACTOR;
154 	bus->params.curr_dr_freq = bus->params.max_dr_freq;
155 	bus->params.curr_bank = SDW_BANK0;
156 	bus->params.next_bank = SDW_BANK1;
157 
158 	ret = sdw_irq_create(bus, fwnode);
159 	if (ret)
160 		return ret;
161 
162 	return 0;
163 }
164 EXPORT_SYMBOL(sdw_bus_master_add);
165 
166 static int sdw_delete_slave(struct device *dev, void *data)
167 {
168 	struct sdw_slave *slave = dev_to_sdw_dev(dev);
169 	struct sdw_bus *bus = slave->bus;
170 
171 	pm_runtime_disable(dev);
172 
173 	sdw_slave_debugfs_exit(slave);
174 
175 	mutex_lock(&bus->bus_lock);
176 
177 	if (slave->dev_num) { /* clear dev_num if assigned */
178 		clear_bit(slave->dev_num, bus->assigned);
179 		if (bus->ops && bus->ops->put_device_num)
180 			bus->ops->put_device_num(bus, slave);
181 	}
182 	list_del_init(&slave->node);
183 	mutex_unlock(&bus->bus_lock);
184 
185 	device_unregister(dev);
186 	return 0;
187 }
188 
189 /**
190  * sdw_bus_master_delete() - delete the bus master instance
191  * @bus: bus to be deleted
192  *
193  * Remove the instance, delete the child devices.
194  */
195 void sdw_bus_master_delete(struct sdw_bus *bus)
196 {
197 	device_for_each_child(bus->dev, NULL, sdw_delete_slave);
198 
199 	sdw_irq_delete(bus);
200 
201 	sdw_master_device_del(bus);
202 
203 	sdw_bus_debugfs_exit(bus);
204 	lockdep_unregister_key(&bus->bus_lock_key);
205 	lockdep_unregister_key(&bus->msg_lock_key);
206 	ida_free(&sdw_bus_ida, bus->id);
207 }
208 EXPORT_SYMBOL(sdw_bus_master_delete);
209 
210 /*
211  * SDW IO Calls
212  */
213 
214 static inline int find_response_code(enum sdw_command_response resp)
215 {
216 	switch (resp) {
217 	case SDW_CMD_OK:
218 		return 0;
219 
220 	case SDW_CMD_IGNORED:
221 		return -ENODATA;
222 
223 	case SDW_CMD_TIMEOUT:
224 		return -ETIMEDOUT;
225 
226 	default:
227 		return -EIO;
228 	}
229 }
230 
231 static inline int do_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
232 {
233 	int retry = bus->prop.err_threshold;
234 	enum sdw_command_response resp;
235 	int ret = 0, i;
236 
237 	for (i = 0; i <= retry; i++) {
238 		resp = bus->ops->xfer_msg(bus, msg);
239 		ret = find_response_code(resp);
240 
241 		/* if cmd is ok or ignored return */
242 		if (ret == 0 || ret == -ENODATA)
243 			return ret;
244 	}
245 
246 	return ret;
247 }
248 
249 static inline int do_transfer_defer(struct sdw_bus *bus,
250 				    struct sdw_msg *msg)
251 {
252 	struct sdw_defer *defer = &bus->defer_msg;
253 	int retry = bus->prop.err_threshold;
254 	enum sdw_command_response resp;
255 	int ret = 0, i;
256 
257 	defer->msg = msg;
258 	defer->length = msg->len;
259 	init_completion(&defer->complete);
260 
261 	for (i = 0; i <= retry; i++) {
262 		resp = bus->ops->xfer_msg_defer(bus);
263 		ret = find_response_code(resp);
264 		/* if cmd is ok or ignored return */
265 		if (ret == 0 || ret == -ENODATA)
266 			return ret;
267 	}
268 
269 	return ret;
270 }
271 
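/*
 * sdw_transfer_unlocked() - Synchronous transfer of a message to a SDW Slave
 * device without taking the bus msg_lock. The caller is expected to hold
 * that lock; errors other than Command Ignored (-ENODATA) are logged.
 */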
272 static int sdw_transfer_unlocked(struct sdw_bus *bus, struct sdw_msg *msg)
273 {
274 	int ret;
275 
276 	ret = do_transfer(bus, msg);
277 	if (ret != 0 && ret != -ENODATA)
278 		dev_err(bus->dev, "trf on Slave %d failed:%d %s addr %x count %d\n",
279 			msg->dev_num, ret,
280 			(msg->flags & SDW_MSG_FLAG_WRITE) ? "write" : "read",
281 			msg->addr, msg->len);
282 
283 	return ret;
284 }
285 
286 /**
287  * sdw_transfer() - Synchronous transfer message to a SDW Slave device
288  * @bus: SDW bus
289  * @msg: SDW message to be xfered
290  */
291 int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
292 {
293 	int ret;
294 
295 	mutex_lock(&bus->msg_lock);
296 
297 	ret = sdw_transfer_unlocked(bus, msg);
298 
299 	mutex_unlock(&bus->msg_lock);
300 
301 	return ret;
302 }
303 
304 /**
305  * sdw_show_ping_status() - Direct report of PING status, to be used by Peripheral drivers
306  * @bus: SDW bus
307  * @sync_delay: Delay before reading status
308  */
309 void sdw_show_ping_status(struct sdw_bus *bus, bool sync_delay)
310 {
311 	u32 status;
312 
313 	if (!bus->ops->read_ping_status)
314 		return;
315 
316 	/*
317 	 * wait for peripheral to sync if desired. 10-15ms should be more than
318 	 * enough in most cases.
319 	 */
320 	if (sync_delay)
321 		usleep_range(10000, 15000);
322 
323 	mutex_lock(&bus->msg_lock);
324 
325 	status = bus->ops->read_ping_status(bus);
326 
327 	mutex_unlock(&bus->msg_lock);
328 
329 	if (!status)
330 		dev_warn(bus->dev, "%s: no peripherals attached\n", __func__);
331 	else
332 		dev_dbg(bus->dev, "PING status: %#x\n", status);
333 }
334 EXPORT_SYMBOL(sdw_show_ping_status);
335 
336 /**
337  * sdw_transfer_defer() - Asynchronously transfer message to a SDW Slave device
338  * @bus: SDW bus
339  * @msg: SDW message to be xfered
340  *
341  * Caller needs to hold the msg_lock mutex while calling this
342  */
343 int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg)
344 {
345 	int ret;
346 
347 	if (!bus->ops->xfer_msg_defer)
348 		return -ENOTSUPP;
349 
350 	ret = do_transfer_defer(bus, msg);
351 	if (ret != 0 && ret != -ENODATA)
352 		dev_err(bus->dev, "Defer trf on Slave %d failed:%d\n",
353 			msg->dev_num, ret);
354 
355 	return ret;
356 }
357 
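/*
 * sdw_fill_msg() - Fill a sdw_msg structure for a register transfer.
 * Addresses below SDW_REG_NO_PAGE need no paging; addresses in the optional
 * or mandatory paging ranges require paging support on the Slave, and the
 * SCP AddrPage1/AddrPage2 fields are derived from the upper address bits.
 * Paging cannot be used with device 0 or the broadcast device number.
 */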
358 int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
359 		 u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf)
360 {
361 	memset(msg, 0, sizeof(*msg));
362 	msg->addr = addr; /* addr is 16 bit and truncated here */
363 	msg->len = count;
364 	msg->dev_num = dev_num;
365 	msg->flags = flags;
366 	msg->buf = buf;
367 
368 	if (addr < SDW_REG_NO_PAGE) /* no paging area */
369 		return 0;
370 
371 	if (addr >= SDW_REG_MAX) { /* illegal addr */
372 		pr_err("SDW: Invalid address %x passed\n", addr);
373 		return -EINVAL;
374 	}
375 
376 	if (addr < SDW_REG_OPTIONAL_PAGE) { /* 32k but no page */
377 		if (slave && !slave->prop.paging_support)
378 			return 0;
379 		/* no need for else as that will fall-through to paging */
380 	}
381 
382 	/* paging mandatory */
383 	if (dev_num == SDW_ENUM_DEV_NUM || dev_num == SDW_BROADCAST_DEV_NUM) {
384 		pr_err("SDW: Invalid device for paging :%d\n", dev_num);
385 		return -EINVAL;
386 	}
387 
388 	if (!slave) {
389 		pr_err("SDW: No slave for paging addr\n");
390 		return -EINVAL;
391 	}
392 
393 	if (!slave->prop.paging_support) {
394 		dev_err(&slave->dev,
395 			"address %x needs paging but no support\n", addr);
396 		return -EINVAL;
397 	}
398 
399 	msg->addr_page1 = FIELD_GET(SDW_SCP_ADDRPAGE1_MASK, addr);
400 	msg->addr_page2 = FIELD_GET(SDW_SCP_ADDRPAGE2_MASK, addr);
401 	msg->addr |= BIT(15);
402 	msg->page = true;
403 
404 	return 0;
405 }
406 
407 /*
408  * Read/Write IO functions.
409  */
410 
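/*
 * Transfer 'count' bytes starting at 'addr', splitting the request so that
 * no single message crosses a register page boundary. Each chunk is sent
 * with sdw_transfer() and therefore under its own acquisition of msg_lock.
 */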
411 static int sdw_ntransfer_no_pm(struct sdw_slave *slave, u32 addr, u8 flags,
412 			       size_t count, u8 *val)
413 {
414 	struct sdw_msg msg;
415 	size_t size;
416 	int ret;
417 
418 	while (count) {
419 		// Only handle bytes up to next page boundary
420 		size = min_t(size_t, count, (SDW_REGADDR + 1) - (addr & SDW_REGADDR));
421 
422 		ret = sdw_fill_msg(&msg, slave, addr, size, slave->dev_num, flags, val);
423 		if (ret < 0)
424 			return ret;
425 
426 		ret = sdw_transfer(slave->bus, &msg);
427 		if (ret < 0 && !slave->is_mockup_device)
428 			return ret;
429 
430 		addr += size;
431 		val += size;
432 		count -= size;
433 	}
434 
435 	return 0;
436 }
437 
438 /**
439  * sdw_nread_no_pm() - Read "n" contiguous SDW Slave registers with no PM
440  * @slave: SDW Slave
441  * @addr: Register address
442  * @count: length
443  * @val: Buffer for values to be read
444  *
445  * Note that if the message crosses a page boundary each page will be
446  * transferred under a separate invocation of the msg_lock.
447  */
448 int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
449 {
450 	return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_READ, count, val);
451 }
452 EXPORT_SYMBOL(sdw_nread_no_pm);
453 
454 /**
455  * sdw_nwrite_no_pm() - Write "n" contiguous SDW Slave registers with no PM
456  * @slave: SDW Slave
457  * @addr: Register address
458  * @count: length
459  * @val: Buffer for values to be written
460  *
461  * Note that if the message crosses a page boundary each page will be
462  * transferred under a separate invocation of the msg_lock.
463  */
464 int sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
465 {
466 	return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_WRITE, count, (u8 *)val);
467 }
468 EXPORT_SYMBOL(sdw_nwrite_no_pm);
469 
470 /**
471  * sdw_write_no_pm() - Write a SDW Slave register with no PM
472  * @slave: SDW Slave
473  * @addr: Register address
474  * @value: Register value
475  */
476 int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
477 {
478 	return sdw_nwrite_no_pm(slave, addr, 1, &value);
479 }
480 EXPORT_SYMBOL(sdw_write_no_pm);
481 
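/*
 * Raw single-byte read addressed by device number, without a Slave handle
 * and without any PM handling. Returns the register value on success or a
 * negative error code.
 */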
482 static int
483 sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr)
484 {
485 	struct sdw_msg msg;
486 	u8 buf;
487 	int ret;
488 
489 	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
490 			   SDW_MSG_FLAG_READ, &buf);
491 	if (ret < 0)
492 		return ret;
493 
494 	ret = sdw_transfer(bus, &msg);
495 	if (ret < 0)
496 		return ret;
497 
498 	return buf;
499 }
500 
501 static int
502 sdw_bwrite_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
503 {
504 	struct sdw_msg msg;
505 	int ret;
506 
507 	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
508 			   SDW_MSG_FLAG_WRITE, &value);
509 	if (ret < 0)
510 		return ret;
511 
512 	return sdw_transfer(bus, &msg);
513 }
514 
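/**
 * sdw_bread_no_pm_unlocked() - Read a byte from a device number with no PM and no locking
 * @bus: SDW bus
 * @dev_num: SoundWire device number to address
 * @addr: Register address
 *
 * The caller must hold the bus msg_lock. Returns the register value on
 * success or a negative error code.
 */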
515 int sdw_bread_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr)
516 {
517 	struct sdw_msg msg;
518 	u8 buf;
519 	int ret;
520 
521 	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
522 			   SDW_MSG_FLAG_READ, &buf);
523 	if (ret < 0)
524 		return ret;
525 
526 	ret = sdw_transfer_unlocked(bus, &msg);
527 	if (ret < 0)
528 		return ret;
529 
530 	return buf;
531 }
532 EXPORT_SYMBOL(sdw_bread_no_pm_unlocked);
533 
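/**
 * sdw_bwrite_no_pm_unlocked() - Write a byte to a device number with no PM and no locking
 * @bus: SDW bus
 * @dev_num: SoundWire device number to address
 * @addr: Register address
 * @value: Register value
 *
 * The caller must hold the bus msg_lock.
 */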
534 int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
535 {
536 	struct sdw_msg msg;
537 	int ret;
538 
539 	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
540 			   SDW_MSG_FLAG_WRITE, &value);
541 	if (ret < 0)
542 		return ret;
543 
544 	return sdw_transfer_unlocked(bus, &msg);
545 }
546 EXPORT_SYMBOL(sdw_bwrite_no_pm_unlocked);
547 
548 /**
549  * sdw_read_no_pm() - Read a SDW Slave register with no PM
550  * @slave: SDW Slave
551  * @addr: Register address
552  */
553 int sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
554 {
555 	u8 buf;
556 	int ret;
557 
558 	ret = sdw_nread_no_pm(slave, addr, 1, &buf);
559 	if (ret < 0)
560 		return ret;
561 	else
562 		return buf;
563 }
564 EXPORT_SYMBOL(sdw_read_no_pm);
565 
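/**
 * sdw_update_no_pm() - Read-Modify-Write a SDW Slave register with no PM
 * @slave: SDW Slave
 * @addr: Register address
 * @mask: bits to update
 * @val: new value for the bits selected by @mask
 *
 * Typical use (register and bit names below are illustrative only):
 *	sdw_update_no_pm(slave, MY_CTRL_REG, MY_CTRL_EN, MY_CTRL_EN);
 */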
566 int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
567 {
568 	int tmp;
569 
570 	tmp = sdw_read_no_pm(slave, addr);
571 	if (tmp < 0)
572 		return tmp;
573 
574 	tmp = (tmp & ~mask) | val;
575 	return sdw_write_no_pm(slave, addr, tmp);
576 }
577 EXPORT_SYMBOL(sdw_update_no_pm);
578 
579 /* Read-Modify-Write Slave register */
580 int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
581 {
582 	int tmp;
583 
584 	tmp = sdw_read(slave, addr);
585 	if (tmp < 0)
586 		return tmp;
587 
588 	tmp = (tmp & ~mask) | val;
589 	return sdw_write(slave, addr, tmp);
590 }
591 EXPORT_SYMBOL(sdw_update);
592 
593 /**
594  * sdw_nread() - Read "n" contiguous SDW Slave registers
595  * @slave: SDW Slave
596  * @addr: Register address
597  * @count: length
598  * @val: Buffer for values to be read
599  *
600  * This version of the function will take a PM reference to the slave
601  * device.
602  * Note that if the message crosses a page boundary each page will be
603  * transferred under a separate invocation of the msg_lock.
604  */
605 int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
606 {
607 	int ret;
608 
609 	ret = pm_runtime_get_sync(&slave->dev);
610 	if (ret < 0 && ret != -EACCES) {
611 		pm_runtime_put_noidle(&slave->dev);
612 		return ret;
613 	}
614 
615 	ret = sdw_nread_no_pm(slave, addr, count, val);
616 
617 	pm_runtime_mark_last_busy(&slave->dev);
618 	pm_runtime_put(&slave->dev);
619 
620 	return ret;
621 }
622 EXPORT_SYMBOL(sdw_nread);
623 
624 /**
625  * sdw_nwrite() - Write "n" contiguous SDW Slave registers
626  * @slave: SDW Slave
627  * @addr: Register address
628  * @count: length
629  * @val: Buffer for values to be written
630  *
631  * This version of the function will take a PM reference to the slave
632  * device.
633  * Note that if the message crosses a page boundary each page will be
634  * transferred under a separate invocation of the msg_lock.
635  */
636 int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
637 {
638 	int ret;
639 
640 	ret = pm_runtime_get_sync(&slave->dev);
641 	if (ret < 0 && ret != -EACCES) {
642 		pm_runtime_put_noidle(&slave->dev);
643 		return ret;
644 	}
645 
646 	ret = sdw_nwrite_no_pm(slave, addr, count, val);
647 
648 	pm_runtime_mark_last_busy(&slave->dev);
649 	pm_runtime_put(&slave->dev);
650 
651 	return ret;
652 }
653 EXPORT_SYMBOL(sdw_nwrite);
654 
655 /**
656  * sdw_read() - Read a SDW Slave register
657  * @slave: SDW Slave
658  * @addr: Register address
659  *
660  * This version of the function will take a PM reference to the slave
661  * device.
662  */
663 int sdw_read(struct sdw_slave *slave, u32 addr)
664 {
665 	u8 buf;
666 	int ret;
667 
668 	ret = sdw_nread(slave, addr, 1, &buf);
669 	if (ret < 0)
670 		return ret;
671 
672 	return buf;
673 }
674 EXPORT_SYMBOL(sdw_read);
675 
676 /**
677  * sdw_write() - Write a SDW Slave register
678  * @slave: SDW Slave
679  * @addr: Register address
680  * @value: Register value
681  *
682  * This version of the function will take a PM reference to the slave
683  * device.
684  */
685 int sdw_write(struct sdw_slave *slave, u32 addr, u8 value)
686 {
687 	return sdw_nwrite(slave, addr, 1, &value);
688 }
689 EXPORT_SYMBOL(sdw_write);
690 
691 /*
692  * SDW alert handling
693  */
694 
695 /* called with bus_lock held */
696 static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
697 {
698 	struct sdw_slave *slave;
699 
700 	list_for_each_entry(slave, &bus->slaves, node) {
701 		if (slave->dev_num == i)
702 			return slave;
703 	}
704 
705 	return NULL;
706 }
707 
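/**
 * sdw_compare_devid() - Compare an enumerated ID against a Slave's expected ID
 * @slave: SDW Slave
 * @id: ID extracted from the SCP_DevId registers
 *
 * The unique_id is not compared when the Slave uses SDW_IGNORED_UNIQUE_ID.
 * Returns 0 on match, -ENODEV otherwise.
 */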
708 int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
709 {
710 	if (slave->id.mfg_id != id.mfg_id ||
711 	    slave->id.part_id != id.part_id ||
712 	    slave->id.class_id != id.class_id ||
713 	    (slave->id.unique_id != SDW_IGNORED_UNIQUE_ID &&
714 	     slave->id.unique_id != id.unique_id))
715 		return -ENODEV;
716 
717 	return 0;
718 }
719 EXPORT_SYMBOL(sdw_compare_devid);
720 
721 /* called with bus_lock held */
722 static int sdw_get_device_num(struct sdw_slave *slave)
723 {
724 	struct sdw_bus *bus = slave->bus;
725 	int bit;
726 
727 	if (bus->ops && bus->ops->get_device_num) {
728 		bit = bus->ops->get_device_num(bus, slave);
729 		if (bit < 0)
730 			goto err;
731 	} else {
732 		bit = find_first_zero_bit(bus->assigned, SDW_MAX_DEVICES);
733 		if (bit == SDW_MAX_DEVICES) {
734 			bit = -ENODEV;
735 			goto err;
736 		}
737 	}
738 
739 	/*
740 	 * Do not update dev_num in the Slave data structure here;
741 	 * update it only after programming the dev_num has succeeded.
742 	 */
743 	set_bit(bit, bus->assigned);
744 
745 err:
746 	return bit;
747 }
748 
749 static int sdw_assign_device_num(struct sdw_slave *slave)
750 {
751 	struct sdw_bus *bus = slave->bus;
752 	int ret, dev_num;
753 	bool new_device = false;
754 
755 	/* check first if device number is assigned, if so reuse that */
756 	if (!slave->dev_num) {
757 		if (!slave->dev_num_sticky) {
758 			mutex_lock(&slave->bus->bus_lock);
759 			dev_num = sdw_get_device_num(slave);
760 			mutex_unlock(&slave->bus->bus_lock);
761 			if (dev_num < 0) {
762 				dev_err(bus->dev, "Get dev_num failed: %d\n",
763 					dev_num);
764 				return dev_num;
765 			}
766 			slave->dev_num = dev_num;
767 			slave->dev_num_sticky = dev_num;
768 			new_device = true;
769 		} else {
770 			slave->dev_num = slave->dev_num_sticky;
771 		}
772 	}
773 
774 	if (!new_device)
775 		dev_dbg(bus->dev,
776 			"Slave already registered, reusing dev_num:%d\n",
777 			slave->dev_num);
778 
779 	/* Clear the slave->dev_num to transfer message on device 0 */
780 	dev_num = slave->dev_num;
781 	slave->dev_num = 0;
782 
783 	ret = sdw_write_no_pm(slave, SDW_SCP_DEVNUMBER, dev_num);
784 	if (ret < 0) {
785 		dev_err(bus->dev, "Program device_num %d failed: %d\n",
786 			dev_num, ret);
787 		return ret;
788 	}
789 
790 	/* After xfer of msg, restore dev_num */
791 	slave->dev_num = slave->dev_num_sticky;
792 
793 	if (bus->ops && bus->ops->new_peripheral_assigned)
794 		bus->ops->new_peripheral_assigned(bus, slave, dev_num);
795 
796 	return 0;
797 }
798 
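/**
 * sdw_extract_slave_id() - Unpack a 48-bit DevId value into a sdw_slave_id
 * @bus: SDW bus
 * @addr: concatenated SCP_DevId_0..SCP_DevId_5 register values
 * @id: filled with the SoundWire version, unique_id, mfg_id, part_id and class_id
 */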
799 void sdw_extract_slave_id(struct sdw_bus *bus,
800 			  u64 addr, struct sdw_slave_id *id)
801 {
802 	dev_dbg(bus->dev, "SDW Slave Addr: %llx\n", addr);
803 
804 	id->sdw_version = SDW_VERSION(addr);
805 	id->unique_id = SDW_UNIQUE_ID(addr);
806 	id->mfg_id = SDW_MFG_ID(addr);
807 	id->part_id = SDW_PART_ID(addr);
808 	id->class_id = SDW_CLASS_ID(addr);
809 
810 	dev_dbg(bus->dev,
811 		"SDW Slave class_id 0x%02x, mfg_id 0x%04x, part_id 0x%04x, unique_id 0x%x, version 0x%x\n",
812 		id->class_id, id->mfg_id, id->part_id, id->unique_id, id->sdw_version);
813 }
814 EXPORT_SYMBOL(sdw_extract_slave_id);
815 
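/**
 * is_clock_scaling_supported_by_slave() - check support for dynamic clock scaling
 * @slave: SDW Slave
 *
 * Returns true for SDCA devices, i.e. devices reporting a non-zero class_id.
 */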
816 bool is_clock_scaling_supported_by_slave(struct sdw_slave *slave)
817 {
818 	/*
819 	 * Dynamic scaling is defined by SDCA. However, some devices expose the class ID but
820 	 * can't support dynamic scaling. We might need a quirk to handle such devices.
821 	 */
822 	return slave->id.class_id;
823 }
824 EXPORT_SYMBOL(is_clock_scaling_supported_by_slave);
825 
826 static int sdw_program_device_num(struct sdw_bus *bus, bool *programmed)
827 {
828 	u8 buf[SDW_NUM_DEV_ID_REGISTERS] = {0};
829 	struct sdw_slave *slave, *_s;
830 	struct sdw_slave_id id;
831 	struct sdw_msg msg;
832 	bool found;
833 	int count = 0, ret;
834 	u64 addr;
835 
836 	*programmed = false;
837 
838 	/* No Slave, so use raw xfer api */
839 	ret = sdw_fill_msg(&msg, NULL, SDW_SCP_DEVID_0,
840 			   SDW_NUM_DEV_ID_REGISTERS, 0, SDW_MSG_FLAG_READ, buf);
841 	if (ret < 0)
842 		return ret;
843 
844 	do {
845 		ret = sdw_transfer(bus, &msg);
846 		if (ret == -ENODATA) { /* end of device id reads */
847 			dev_dbg(bus->dev, "No more devices to enumerate\n");
848 			ret = 0;
849 			break;
850 		}
851 		if (ret < 0) {
852 			dev_err(bus->dev, "DEVID read fail:%d\n", ret);
853 			break;
854 		}
855 
856 		/*
857 		 * Construct the 48-bit addr and extract the ID. Cast the
858 		 * bytes shifted beyond 32 bits to u64 to avoid truncation.
859 		 */
860 		addr = buf[5] | (buf[4] << 8) | (buf[3] << 16) |
861 			((u64)buf[2] << 24) | ((u64)buf[1] << 32) |
862 			((u64)buf[0] << 40);
863 
864 		sdw_extract_slave_id(bus, addr, &id);
865 
866 		found = false;
867 		/* Now compare with entries */
868 		list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
869 			if (sdw_compare_devid(slave, id) == 0) {
870 				found = true;
871 
872 				/*
873 				 * To prevent skipping state-machine stages don't
874 				 * program a device until we've seen it UNATTACH.
875 				 * Must return here because no other device on #0
876 				 * can be detected until this one has been
877 				 * assigned a device ID.
878 				 */
879 				if (slave->status != SDW_SLAVE_UNATTACHED)
880 					return 0;
881 
882 				/*
883 				 * Assign a new dev_num to this Slave but do
884 				 * not mark it present yet. It will be marked
885 				 * present once it reports ATTACHED on its new
886 				 * dev_num.
887 				 */
888 				ret = sdw_assign_device_num(slave);
889 				if (ret < 0) {
890 					dev_err(bus->dev,
891 						"Assign dev_num failed:%d\n",
892 						ret);
893 					return ret;
894 				}
895 
896 				*programmed = true;
897 
898 				break;
899 			}
900 		}
901 
902 		if (!found) {
903 			/* TODO: Park this device in Group 13 */
904 
905 			/*
906 			 * add Slave device even if there is no platform
907 			 * firmware description. There will be no driver probe
908 			 * but the user/integration will be able to see the
909 			 * device, enumeration status and device number in sysfs
910 			 */
911 			sdw_slave_add(bus, &id, NULL);
912 
913 			dev_err(bus->dev, "Slave Entry not found\n");
914 		}
915 
916 		count++;
917 
918 		/*
919 		 * Loop until an error occurs or the retry count is exhausted.
920 		 * A device can drop off and rejoin during enumeration,
921 		 * so allow up to twice the maximum number of devices.
922 		 */
923 
924 	} while (ret == 0 && count < (SDW_MAX_DEVICES * 2));
925 
926 	return ret;
927 }
928 
929 static void sdw_modify_slave_status(struct sdw_slave *slave,
930 				    enum sdw_slave_status status)
931 {
932 	struct sdw_bus *bus = slave->bus;
933 
934 	mutex_lock(&bus->bus_lock);
935 
936 	dev_vdbg(bus->dev,
937 		 "changing status slave %d status %d new status %d\n",
938 		 slave->dev_num, slave->status, status);
939 
940 	if (status == SDW_SLAVE_UNATTACHED) {
941 		dev_dbg(&slave->dev,
942 			"initializing enumeration and init completion for Slave %d\n",
943 			slave->dev_num);
944 
945 		reinit_completion(&slave->enumeration_complete);
946 		reinit_completion(&slave->initialization_complete);
947 
948 	} else if ((status == SDW_SLAVE_ATTACHED) &&
949 		   (slave->status == SDW_SLAVE_UNATTACHED)) {
950 		dev_dbg(&slave->dev,
951 			"signaling enumeration completion for Slave %d\n",
952 			slave->dev_num);
953 
954 		complete_all(&slave->enumeration_complete);
955 	}
956 	slave->status = status;
957 	mutex_unlock(&bus->bus_lock);
958 }
959 
960 static int sdw_slave_clk_stop_callback(struct sdw_slave *slave,
961 				       enum sdw_clk_stop_mode mode,
962 				       enum sdw_clk_stop_type type)
963 {
964 	int ret = 0;
965 
966 	mutex_lock(&slave->sdw_dev_lock);
967 
968 	if (slave->probed)  {
969 		struct device *dev = &slave->dev;
970 		struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
971 
972 		if (drv->ops && drv->ops->clk_stop)
973 			ret = drv->ops->clk_stop(slave, mode, type);
974 	}
975 
976 	mutex_unlock(&slave->sdw_dev_lock);
977 
978 	return ret;
979 }
980 
981 static int sdw_slave_clk_stop_prepare(struct sdw_slave *slave,
982 				      enum sdw_clk_stop_mode mode,
983 				      bool prepare)
984 {
985 	bool wake_en;
986 	u32 val = 0;
987 	int ret;
988 
989 	wake_en = slave->prop.wake_capable;
990 
991 	if (prepare) {
992 		val = SDW_SCP_SYSTEMCTRL_CLK_STP_PREP;
993 
994 		if (mode == SDW_CLK_STOP_MODE1)
995 			val |= SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1;
996 
997 		if (wake_en)
998 			val |= SDW_SCP_SYSTEMCTRL_WAKE_UP_EN;
999 	} else {
1000 		ret = sdw_read_no_pm(slave, SDW_SCP_SYSTEMCTRL);
1001 		if (ret < 0) {
1002 			if (ret != -ENODATA)
1003 				dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL read failed:%d\n", ret);
1004 			return ret;
1005 		}
1006 		val = ret;
1007 		val &= ~(SDW_SCP_SYSTEMCTRL_CLK_STP_PREP);
1008 	}
1009 
1010 	ret = sdw_write_no_pm(slave, SDW_SCP_SYSTEMCTRL, val);
1011 
1012 	if (ret < 0 && ret != -ENODATA)
1013 		dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL write failed:%d\n", ret);
1014 
1015 	return ret;
1016 }
1017 
1018 static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num, bool prepare)
1019 {
1020 	int retry = bus->clk_stop_timeout;
1021 	int val;
1022 
1023 	do {
1024 		val = sdw_bread_no_pm(bus, dev_num, SDW_SCP_STAT);
1025 		if (val < 0) {
1026 			if (val != -ENODATA)
1027 				dev_err(bus->dev, "SDW_SCP_STAT bread failed:%d\n", val);
1028 			return val;
1029 		}
1030 		val &= SDW_SCP_STAT_CLK_STP_NF;
1031 		if (!val) {
1032 			dev_dbg(bus->dev, "clock stop %s done slave:%d\n",
1033 				prepare ? "prepare" : "deprepare",
1034 				dev_num);
1035 			return 0;
1036 		}
1037 
1038 		usleep_range(1000, 1500);
1039 		retry--;
1040 	} while (retry);
1041 
1042 	dev_dbg(bus->dev, "clock stop %s did not complete for slave:%d\n",
1043 		prepare ? "prepare" : "deprepare",
1044 		dev_num);
1045 
1046 	return -ETIMEDOUT;
1047 }
1048 
1049 /**
1050  * sdw_bus_prep_clk_stop() - prepare Slave(s) for clock stop
1051  *
1052  * @bus: SDW bus instance
1053  *
1054  * Query Slave for clock stop mode and prepare for that mode.
1055  */
1056 int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
1057 {
1058 	bool simple_clk_stop = true;
1059 	struct sdw_slave *slave;
1060 	bool is_slave = false;
1061 	int ret = 0;
1062 
1063 	/*
1064 	 * In order to save on transition time, prepare
1065 	 * each Slave and then wait for all Slave(s) to be
1066 	 * prepared for clock stop.
1067 	 * If one of the Slave devices has lost sync and
1068 	 * replies with Command Ignored/-ENODATA, we continue
1069 	 * the loop
1070 	 */
1071 	list_for_each_entry(slave, &bus->slaves, node) {
1072 		if (!slave->dev_num)
1073 			continue;
1074 
1075 		if (slave->status != SDW_SLAVE_ATTACHED &&
1076 		    slave->status != SDW_SLAVE_ALERT)
1077 			continue;
1078 
1079 		/* Identify if Slave(s) are available on Bus */
1080 		is_slave = true;
1081 
1082 		ret = sdw_slave_clk_stop_callback(slave,
1083 						  SDW_CLK_STOP_MODE0,
1084 						  SDW_CLK_PRE_PREPARE);
1085 		if (ret < 0 && ret != -ENODATA) {
1086 			dev_err(&slave->dev, "clock stop pre-prepare cb failed:%d\n", ret);
1087 			return ret;
1088 		}
1089 
1090 		/* Only prepare a Slave device if needed */
1091 		if (!slave->prop.simple_clk_stop_capable) {
1092 			simple_clk_stop = false;
1093 
1094 			ret = sdw_slave_clk_stop_prepare(slave,
1095 							 SDW_CLK_STOP_MODE0,
1096 							 true);
1097 			if (ret < 0 && ret != -ENODATA) {
1098 				dev_err(&slave->dev, "clock stop prepare failed:%d\n", ret);
1099 				return ret;
1100 			}
1101 		}
1102 	}
1103 
1104 	/* Skip remaining clock stop preparation if no Slave is attached */
1105 	if (!is_slave)
1106 		return 0;
1107 
1108 	/*
1109 	 * Don't wait for all Slaves to be ready if they follow the simple
1110 	 * state machine
1111 	 */
1112 	if (!simple_clk_stop) {
1113 		ret = sdw_bus_wait_for_clk_prep_deprep(bus,
1114 						       SDW_BROADCAST_DEV_NUM, true);
1115 		/*
1116 		 * if there are no Slave devices present and the reply is
1117 		 * Command_Ignored/-ENODATA, we don't need to continue with the
1118 		 * flow and can just return here. The error code is not modified
1119 		 * and its handling left as an exercise for the caller.
1120 		 */
1121 		if (ret < 0)
1122 			return ret;
1123 	}
1124 
1125 	/* Inform slaves that prep is done */
1126 	list_for_each_entry(slave, &bus->slaves, node) {
1127 		if (!slave->dev_num)
1128 			continue;
1129 
1130 		if (slave->status != SDW_SLAVE_ATTACHED &&
1131 		    slave->status != SDW_SLAVE_ALERT)
1132 			continue;
1133 
1134 		ret = sdw_slave_clk_stop_callback(slave,
1135 						  SDW_CLK_STOP_MODE0,
1136 						  SDW_CLK_POST_PREPARE);
1137 
1138 		if (ret < 0 && ret != -ENODATA) {
1139 			dev_err(&slave->dev, "clock stop post-prepare cb failed:%d\n", ret);
1140 			return ret;
1141 		}
1142 	}
1143 
1144 	return 0;
1145 }
1146 EXPORT_SYMBOL(sdw_bus_prep_clk_stop);
1147 
1148 /**
1149  * sdw_bus_clk_stop() - stop bus clock
1150  *
1151  * @bus: SDW bus instance
1152  *
1153  * After preparing the Slaves for clock stop, stop the clock by broadcasting
1154  * write to SCP_CTRL register.
1155  */
1156 int sdw_bus_clk_stop(struct sdw_bus *bus)
1157 {
1158 	int ret;
1159 
1160 	/*
1161 	 * broadcast clock stop now, attached Slaves will ACK this,
1162 	 * unattached will ignore
1163 	 */
1164 	ret = sdw_bwrite_no_pm(bus, SDW_BROADCAST_DEV_NUM,
1165 			       SDW_SCP_CTRL, SDW_SCP_CTRL_CLK_STP_NOW);
1166 	if (ret < 0) {
1167 		if (ret != -ENODATA)
1168 			dev_err(bus->dev, "ClockStopNow Broadcast msg failed %d\n", ret);
1169 		return ret;
1170 	}
1171 
1172 	return 0;
1173 }
1174 EXPORT_SYMBOL(sdw_bus_clk_stop);
1175 
1176 /**
1177  * sdw_bus_exit_clk_stop() - Exit clock stop mode
1178  *
1179  * @bus: SDW bus instance
1180  *
1181  * This De-prepares the Slaves by exiting Clock Stop Mode 0. For the Slaves
1182  * exiting Clock Stop Mode 1, they will be de-prepared after they enumerate
1183  * back.
1184  */
1185 int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
1186 {
1187 	bool simple_clk_stop = true;
1188 	struct sdw_slave *slave;
1189 	bool is_slave = false;
1190 	int ret;
1191 
1192 	/*
1193 	 * In order to save on transition time, de-prepare
1194 	 * each Slave and then wait for all Slave(s) to be
1195 	 * de-prepared after clock resume.
1196 	 */
1197 	list_for_each_entry(slave, &bus->slaves, node) {
1198 		if (!slave->dev_num)
1199 			continue;
1200 
1201 		if (slave->status != SDW_SLAVE_ATTACHED &&
1202 		    slave->status != SDW_SLAVE_ALERT)
1203 			continue;
1204 
1205 		/* Identify if Slave(s) are available on Bus */
1206 		is_slave = true;
1207 
1208 		ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
1209 						  SDW_CLK_PRE_DEPREPARE);
1210 		if (ret < 0)
1211 			dev_warn(&slave->dev, "clock stop pre-deprepare cb failed:%d\n", ret);
1212 
1213 		/* Only de-prepare a Slave device if needed */
1214 		if (!slave->prop.simple_clk_stop_capable) {
1215 			simple_clk_stop = false;
1216 
1217 			ret = sdw_slave_clk_stop_prepare(slave, SDW_CLK_STOP_MODE0,
1218 							 false);
1219 
1220 			if (ret < 0)
1221 				dev_warn(&slave->dev, "clock stop deprepare failed:%d\n", ret);
1222 		}
1223 	}
1224 
1225 	/* Skip remaining clock stop de-preparation if no Slave is attached */
1226 	if (!is_slave)
1227 		return 0;
1228 
1229 	/*
1230 	 * Don't wait for all Slaves to be ready if they follow the simple
1231 	 * state machine
1232 	 */
1233 	if (!simple_clk_stop) {
1234 		ret = sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM, false);
1235 		if (ret < 0)
1236 			dev_warn(bus->dev, "clock stop deprepare wait failed:%d\n", ret);
1237 	}
1238 
1239 	list_for_each_entry(slave, &bus->slaves, node) {
1240 		if (!slave->dev_num)
1241 			continue;
1242 
1243 		if (slave->status != SDW_SLAVE_ATTACHED &&
1244 		    slave->status != SDW_SLAVE_ALERT)
1245 			continue;
1246 
1247 		ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
1248 						  SDW_CLK_POST_DEPREPARE);
1249 		if (ret < 0)
1250 			dev_warn(&slave->dev, "clock stop post-deprepare cb failed:%d\n", ret);
1251 	}
1252 
1253 	return 0;
1254 }
1255 EXPORT_SYMBOL(sdw_bus_exit_clk_stop);
1256 
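/*
 * sdw_configure_dpn_intr() - Enable or disable the interrupt mask of a data
 * port: the implementation-defined bits passed in 'mask' plus PORT_READY,
 * with TEST_FAIL added automatically when the bus uses a non-NORMAL port
 * data mode.
 */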
1257 int sdw_configure_dpn_intr(struct sdw_slave *slave,
1258 			   int port, bool enable, int mask)
1259 {
1260 	u32 addr;
1261 	int ret;
1262 	u8 val = 0;
1263 
1264 	if (slave->bus->params.s_data_mode != SDW_PORT_DATA_MODE_NORMAL) {
1265 		dev_dbg(&slave->dev, "TEST FAIL interrupt %s\n",
1266 			enable ? "on" : "off");
1267 		mask |= SDW_DPN_INT_TEST_FAIL;
1268 	}
1269 
1270 	addr = SDW_DPN_INTMASK(port);
1271 
1272 	/* Set/Clear port ready interrupt mask */
1273 	if (enable) {
1274 		val |= mask;
1275 		val |= SDW_DPN_INT_PORT_READY;
1276 	} else {
1277 		val &= ~(mask);
1278 		val &= ~SDW_DPN_INT_PORT_READY;
1279 	}
1280 
1281 	ret = sdw_update_no_pm(slave, addr, (mask | SDW_DPN_INT_PORT_READY), val);
1282 	if (ret < 0)
1283 		dev_err(&slave->dev,
1284 			"SDW_DPN_INTMASK write failed:%d\n", ret);
1285 
1286 	return ret;
1287 }
1288 
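/**
 * sdw_slave_get_scale_index() - compute bus clock base and scale values
 * @slave: SDW Slave
 * @base: returned SDW_SCP_BUS_CLOCK_BASE encoding for the Master mclk
 *
 * Maps the Master mclk_freq to one of the base clocks listed in the
 * SoundWire 1.2 specification and derives the divider from the current
 * bus frequency. Returns the scale index to program in the
 * SCP_BusClock_Scale registers, or a negative error code.
 */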
1289 int sdw_slave_get_scale_index(struct sdw_slave *slave, u8 *base)
1290 {
1291 	u32 mclk_freq = slave->bus->prop.mclk_freq;
1292 	u32 curr_freq = slave->bus->params.curr_dr_freq >> 1;
1293 	unsigned int scale;
1294 	u8 scale_index;
1295 
1296 	if (!mclk_freq) {
1297 		dev_err(&slave->dev,
1298 			"no bus MCLK, cannot set SDW_SCP_BUS_CLOCK_BASE\n");
1299 		return -EINVAL;
1300 	}
1301 
1302 	/*
1303 	 * map base frequency using Table 89 of SoundWire 1.2 spec.
1304 	 * The order of the tests just follows the specification, this
1305 	 * is not a selection between possible values or a search for
1306 	 * the best value but just a mapping.  Only one case per platform
1307 	 * is relevant.
1308 	 * Some BIOS have inconsistent values for mclk_freq but a
1309 	 * correct root so we force the mclk_freq to avoid variations.
1310 	 */
1311 	if (!(19200000 % mclk_freq)) {
1312 		mclk_freq = 19200000;
1313 		*base = SDW_SCP_BASE_CLOCK_19200000_HZ;
1314 	} else if (!(22579200 % mclk_freq)) {
1315 		mclk_freq = 22579200;
1316 		*base = SDW_SCP_BASE_CLOCK_22579200_HZ;
1317 	} else if (!(24576000 % mclk_freq)) {
1318 		mclk_freq = 24576000;
1319 		*base = SDW_SCP_BASE_CLOCK_24576000_HZ;
1320 	} else if (!(32000000 % mclk_freq)) {
1321 		mclk_freq = 32000000;
1322 		*base = SDW_SCP_BASE_CLOCK_32000000_HZ;
1323 	} else if (!(96000000 % mclk_freq)) {
1324 		mclk_freq = 24000000;
1325 		*base = SDW_SCP_BASE_CLOCK_24000000_HZ;
1326 	} else {
1327 		dev_err(&slave->dev,
1328 			"Unsupported clock base, mclk %d\n",
1329 			mclk_freq);
1330 		return -EINVAL;
1331 	}
1332 
1333 	if (mclk_freq % curr_freq) {
1334 		dev_err(&slave->dev,
1335 			"mclk %d is not multiple of bus curr_freq %d\n",
1336 			mclk_freq, curr_freq);
1337 		return -EINVAL;
1338 	}
1339 
1340 	scale = mclk_freq / curr_freq;
1341 
1342 	/*
1343 	 * map scale to Table 90 of SoundWire 1.2 spec - and check
1344 	 * that the scale is a power of two and maximum 64
1345 	 */
1346 	scale_index = ilog2(scale);
1347 
1348 	if (BIT(scale_index) != scale || scale_index > 6) {
1349 		dev_err(&slave->dev,
1350 			"No match found for scale %d, bus mclk %d curr_freq %d\n",
1351 			scale, mclk_freq, curr_freq);
1352 		return -EINVAL;
1353 	}
1354 	scale_index++;
1355 
1356 	dev_dbg(&slave->dev,
1357 		"Configured bus base %d, scale %d, mclk %d, curr_freq %d\n",
1358 		*base, scale_index, mclk_freq, curr_freq);
1359 
1360 	return scale_index;
1361 }
1362 EXPORT_SYMBOL(sdw_slave_get_scale_index);
1363 
1364 static int sdw_slave_set_frequency(struct sdw_slave *slave)
1365 {
1366 	int scale_index;
1367 	u8 base;
1368 	int ret;
1369 
1370 	/*
1371 	 * frequency base and scale registers are required for SDCA
1372 	 * devices. They may also be used for 1.2+/non-SDCA devices.
1373 	 * Driver can set the property directly, for now there's no
1374 	 * DisCo property to discover support for the scaling registers
1375 	 * from platform firmware.
1376 	 */
1377 	if (!slave->id.class_id && !slave->prop.clock_reg_supported)
1378 		return 0;
1379 
1380 	scale_index = sdw_slave_get_scale_index(slave, &base);
1381 	if (scale_index < 0)
1382 		return scale_index;
1383 
1384 	ret = sdw_write_no_pm(slave, SDW_SCP_BUS_CLOCK_BASE, base);
1385 	if (ret < 0) {
1386 		dev_err(&slave->dev,
1387 			"SDW_SCP_BUS_CLOCK_BASE write failed:%d\n", ret);
1388 		return ret;
1389 	}
1390 
1391 	/* initialize scale for both banks */
1392 	ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B0, scale_index);
1393 	if (ret < 0) {
1394 		dev_err(&slave->dev,
1395 			"SDW_SCP_BUSCLOCK_SCALE_B0 write failed:%d\n", ret);
1396 		return ret;
1397 	}
1398 	ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B1, scale_index);
1399 	if (ret < 0)
1400 		dev_err(&slave->dev,
1401 			"SDW_SCP_BUSCLOCK_SCALE_B1 write failed:%d\n", ret);
1402 
1403 	return ret;
1404 }
1405 
1406 static int sdw_initialize_slave(struct sdw_slave *slave)
1407 {
1408 	struct sdw_slave_prop *prop = &slave->prop;
1409 	int status;
1410 	int ret;
1411 	u8 val;
1412 
1413 	ret = sdw_slave_set_frequency(slave);
1414 	if (ret < 0)
1415 		return ret;
1416 
1417 	if (slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH) {
1418 		/* Clear bus clash interrupt before enabling interrupt mask */
1419 		status = sdw_read_no_pm(slave, SDW_SCP_INT1);
1420 		if (status < 0) {
1421 			dev_err(&slave->dev,
1422 				"SDW_SCP_INT1 (BUS_CLASH) read failed:%d\n", status);
1423 			return status;
1424 		}
1425 		if (status & SDW_SCP_INT1_BUS_CLASH) {
1426 			dev_warn(&slave->dev, "Bus clash detected before INT mask is enabled\n");
1427 			ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_BUS_CLASH);
1428 			if (ret < 0) {
1429 				dev_err(&slave->dev,
1430 					"SDW_SCP_INT1 (BUS_CLASH) write failed:%d\n", ret);
1431 				return ret;
1432 			}
1433 		}
1434 	}
1435 	if ((slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY) &&
1436 	    !(prop->quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY)) {
1437 		/* Clear parity interrupt before enabling interrupt mask */
1438 		status = sdw_read_no_pm(slave, SDW_SCP_INT1);
1439 		if (status < 0) {
1440 			dev_err(&slave->dev,
1441 				"SDW_SCP_INT1 (PARITY) read failed:%d\n", status);
1442 			return status;
1443 		}
1444 		if (status & SDW_SCP_INT1_PARITY) {
1445 			dev_warn(&slave->dev, "PARITY error detected before INT mask is enabled\n");
1446 			ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_PARITY);
1447 			if (ret < 0) {
1448 				dev_err(&slave->dev,
1449 					"SDW_SCP_INT1 (PARITY) write failed:%d\n", ret);
1450 				return ret;
1451 			}
1452 		}
1453 	}
1454 
1455 	/*
1456 	 * Set SCP_INT1_MASK register, typically bus clash and
1457 	 * implementation-defined interrupt mask. The Parity detection
1458 	 * may not always be correct on startup so its use is
1459 	 * device-dependent, it might e.g. only be enabled in
1460 	 * steady-state after a couple of frames.
1461 	 */
1462 	val = prop->scp_int1_mask;
1463 
1464 	/* Enable SCP interrupts */
1465 	ret = sdw_update_no_pm(slave, SDW_SCP_INTMASK1, val, val);
1466 	if (ret < 0) {
1467 		dev_err(&slave->dev,
1468 			"SDW_SCP_INTMASK1 write failed:%d\n", ret);
1469 		return ret;
1470 	}
1471 
1472 	/* No need to continue if DP0 is not present */
1473 	if (!prop->dp0_prop)
1474 		return 0;
1475 
1476 	/* Enable DP0 interrupts */
1477 	val = prop->dp0_prop->imp_def_interrupts;
1478 	val |= SDW_DP0_INT_PORT_READY | SDW_DP0_INT_BRA_FAILURE;
1479 
1480 	ret = sdw_update_no_pm(slave, SDW_DP0_INTMASK, val, val);
1481 	if (ret < 0)
1482 		dev_err(&slave->dev,
1483 			"SDW_DP0_INTMASK write failed:%d\n", ret);
1484 	return ret;
1485 }
1486 
1487 static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
1488 {
1489 	u8 clear, impl_int_mask;
1490 	int status, status2, ret, count = 0;
1491 
1492 	status = sdw_read_no_pm(slave, SDW_DP0_INT);
1493 	if (status < 0) {
1494 		dev_err(&slave->dev,
1495 			"SDW_DP0_INT read failed:%d\n", status);
1496 		return status;
1497 	}
1498 
1499 	do {
1500 		clear = status & ~(SDW_DP0_INTERRUPTS | SDW_DP0_SDCA_CASCADE);
1501 
1502 		if (status & SDW_DP0_INT_TEST_FAIL) {
1503 			dev_err(&slave->dev, "Test fail for port 0\n");
1504 			clear |= SDW_DP0_INT_TEST_FAIL;
1505 		}
1506 
1507 		/*
1508 		 * Assumption: PORT_READY interrupt will be received only for
1509 		 * ports implementing Channel Prepare state machine (CP_SM)
1510 		 */
1511 
1512 		if (status & SDW_DP0_INT_PORT_READY) {
1513 			complete(&slave->port_ready[0]);
1514 			clear |= SDW_DP0_INT_PORT_READY;
1515 		}
1516 
1517 		if (status & SDW_DP0_INT_BRA_FAILURE) {
1518 			dev_err(&slave->dev, "BRA failed\n");
1519 			clear |= SDW_DP0_INT_BRA_FAILURE;
1520 		}
1521 
1522 		impl_int_mask = SDW_DP0_INT_IMPDEF1 |
1523 			SDW_DP0_INT_IMPDEF2 | SDW_DP0_INT_IMPDEF3;
1524 
1525 		if (status & impl_int_mask) {
1526 			clear |= impl_int_mask;
1527 			*slave_status = clear;
1528 		}
1529 
1530 		/* clear the interrupts but don't touch reserved and SDCA_CASCADE fields */
1531 		ret = sdw_write_no_pm(slave, SDW_DP0_INT, clear);
1532 		if (ret < 0) {
1533 			dev_err(&slave->dev,
1534 				"SDW_DP0_INT write failed:%d\n", ret);
1535 			return ret;
1536 		}
1537 
1538 		/* Read DP0 interrupt again */
1539 		status2 = sdw_read_no_pm(slave, SDW_DP0_INT);
1540 		if (status2 < 0) {
1541 			dev_err(&slave->dev,
1542 				"SDW_DP0_INT read failed:%d\n", status2);
1543 			return status2;
1544 		}
1545 		/* filter to limit loop to interrupts identified in the first status read */
1546 		status &= status2;
1547 
1548 		count++;
1549 
1550 		/* we can get alerts while processing so keep retrying */
1551 	} while ((status & SDW_DP0_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));
1552 
1553 	if (count == SDW_READ_INTR_CLEAR_RETRY)
1554 		dev_warn(&slave->dev, "Reached MAX_RETRY on DP0 read\n");
1555 
1556 	return ret;
1557 }
1558 
1559 static int sdw_handle_port_interrupt(struct sdw_slave *slave,
1560 				     int port, u8 *slave_status)
1561 {
1562 	u8 clear, impl_int_mask;
1563 	int status, status2, ret, count = 0;
1564 	u32 addr;
1565 
1566 	if (port == 0)
1567 		return sdw_handle_dp0_interrupt(slave, slave_status);
1568 
1569 	addr = SDW_DPN_INT(port);
1570 	status = sdw_read_no_pm(slave, addr);
1571 	if (status < 0) {
1572 		dev_err(&slave->dev,
1573 			"SDW_DPN_INT read failed:%d\n", status);
1574 
1575 		return status;
1576 	}
1577 
1578 	do {
1579 		clear = status & ~SDW_DPN_INTERRUPTS;
1580 
1581 		if (status & SDW_DPN_INT_TEST_FAIL) {
1582 			dev_err(&slave->dev, "Test fail for port:%d\n", port);
1583 			clear |= SDW_DPN_INT_TEST_FAIL;
1584 		}
1585 
1586 		/*
1587 		 * Assumption: PORT_READY interrupt will be received only
1588 		 * for ports implementing CP_SM.
1589 		 */
1590 		if (status & SDW_DPN_INT_PORT_READY) {
1591 			complete(&slave->port_ready[port]);
1592 			clear |= SDW_DPN_INT_PORT_READY;
1593 		}
1594 
1595 		impl_int_mask = SDW_DPN_INT_IMPDEF1 |
1596 			SDW_DPN_INT_IMPDEF2 | SDW_DPN_INT_IMPDEF3;
1597 
1598 		if (status & impl_int_mask) {
1599 			clear |= impl_int_mask;
1600 			*slave_status = clear;
1601 		}
1602 
1603 		/* clear the interrupt but don't touch reserved fields */
1604 		ret = sdw_write_no_pm(slave, addr, clear);
1605 		if (ret < 0) {
1606 			dev_err(&slave->dev,
1607 				"SDW_DPN_INT write failed:%d\n", ret);
1608 			return ret;
1609 		}
1610 
1611 		/* Read DPN interrupt again */
1612 		status2 = sdw_read_no_pm(slave, addr);
1613 		if (status2 < 0) {
1614 			dev_err(&slave->dev,
1615 				"SDW_DPN_INT read failed:%d\n", status2);
1616 			return status2;
1617 		}
1618 		/* filter to limit loop to interrupts identified in the first status read */
1619 		status &= status2;
1620 
1621 		count++;
1622 
1623 		/* we can get alerts while processing so keep retrying */
1624 	} while ((status & SDW_DPN_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));
1625 
1626 	if (count == SDW_READ_INTR_CLEAR_RETRY)
1627 		dev_warn(&slave->dev, "Reached MAX_RETRY on port read\n");
1628 
1629 	return ret;
1630 }
1631 
1632 static int sdw_handle_slave_alerts(struct sdw_slave *slave)
1633 {
1634 	struct sdw_slave_intr_status slave_intr;
1635 	u8 clear = 0, bit, port_status[15] = {0};
1636 	int port_num, stat, ret, count = 0;
1637 	unsigned long port;
1638 	bool slave_notify;
1639 	u8 sdca_cascade = 0;
1640 	u8 buf, buf2[2];
1641 	bool parity_check;
1642 	bool parity_quirk;
1643 
1644 	sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);
1645 
1646 	ret = pm_runtime_get_sync(&slave->dev);
1647 	if (ret < 0 && ret != -EACCES) {
1648 		dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
1649 		pm_runtime_put_noidle(&slave->dev);
1650 		return ret;
1651 	}
1652 
1653 	/* Read Intstat 1, Intstat 2 and Intstat 3 registers */
1654 	ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
1655 	if (ret < 0) {
1656 		dev_err(&slave->dev,
1657 			"SDW_SCP_INT1 read failed:%d\n", ret);
1658 		goto io_err;
1659 	}
1660 	buf = ret;
1661 
1662 	ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
1663 	if (ret < 0) {
1664 		dev_err(&slave->dev,
1665 			"SDW_SCP_INT2/3 read failed:%d\n", ret);
1666 		goto io_err;
1667 	}
1668 
1669 	if (slave->id.class_id) {
1670 		ret = sdw_read_no_pm(slave, SDW_DP0_INT);
1671 		if (ret < 0) {
1672 			dev_err(&slave->dev,
1673 				"SDW_DP0_INT read failed:%d\n", ret);
1674 			goto io_err;
1675 		}
1676 		sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
1677 	}
1678 
1679 	do {
1680 		slave_notify = false;
1681 
1682 		/*
1683 		 * Check parity, bus clash and Slave (impl defined)
1684 		 * interrupt
1685 		 */
1686 		if (buf & SDW_SCP_INT1_PARITY) {
1687 			parity_check = slave->prop.scp_int1_mask & SDW_SCP_INT1_PARITY;
1688 			parity_quirk = !slave->first_interrupt_done &&
1689 				(slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY);
1690 
1691 			if (parity_check && !parity_quirk)
1692 				dev_err(&slave->dev, "Parity error detected\n");
1693 			clear |= SDW_SCP_INT1_PARITY;
1694 		}
1695 
1696 		if (buf & SDW_SCP_INT1_BUS_CLASH) {
1697 			if (slave->prop.scp_int1_mask & SDW_SCP_INT1_BUS_CLASH)
1698 				dev_err(&slave->dev, "Bus clash detected\n");
1699 			clear |= SDW_SCP_INT1_BUS_CLASH;
1700 		}
1701 
1702 		/*
1703 		 * When bus clash or parity errors are detected, such errors
1704 		 * are unlikely to be recoverable errors.
1705 		 * TODO: In such scenario, reset bus. Make this configurable
1706 		 * via sysfs property with bus reset being the default.
1707 		 */
1708 
1709 		if (buf & SDW_SCP_INT1_IMPL_DEF) {
1710 			if (slave->prop.scp_int1_mask & SDW_SCP_INT1_IMPL_DEF) {
1711 				dev_dbg(&slave->dev, "Slave impl defined interrupt\n");
1712 				slave_notify = true;
1713 			}
1714 			clear |= SDW_SCP_INT1_IMPL_DEF;
1715 		}
1716 
1717 		/* the SDCA interrupts are cleared in the codec driver .interrupt_callback() */
1718 		if (sdca_cascade)
1719 			slave_notify = true;
1720 
1721 		/* Check port 0 - 3 interrupts */
1722 		port = buf & SDW_SCP_INT1_PORT0_3;
1723 
1724 		/* To get port number corresponding to bits, shift it */
1725 		port = FIELD_GET(SDW_SCP_INT1_PORT0_3, port);
1726 		for_each_set_bit(bit, &port, 8) {
1727 			sdw_handle_port_interrupt(slave, bit,
1728 						  &port_status[bit]);
1729 		}
1730 
1731 		/* Check if cascade 2 interrupt is present */
1732 		if (buf & SDW_SCP_INT1_SCP2_CASCADE) {
1733 			port = buf2[0] & SDW_SCP_INTSTAT2_PORT4_10;
1734 			for_each_set_bit(bit, &port, 8) {
1735 				/* scp2 ports start from 4 */
1736 				port_num = bit + 4;
1737 				sdw_handle_port_interrupt(slave,
1738 						port_num,
1739 						&port_status[port_num]);
1740 			}
1741 		}
1742 
1743 		/* now check last cascade */
1744 		if (buf2[0] & SDW_SCP_INTSTAT2_SCP3_CASCADE) {
1745 			port = buf2[1] & SDW_SCP_INTSTAT3_PORT11_14;
1746 			for_each_set_bit(bit, &port, 8) {
1747 				/* scp3 ports start from 11 */
1748 				port_num = bit + 11;
1749 				sdw_handle_port_interrupt(slave,
1750 						port_num,
1751 						&port_status[port_num]);
1752 			}
1753 		}
1754 
1755 		/* Update the Slave driver */
1756 		if (slave_notify) {
1757 			mutex_lock(&slave->sdw_dev_lock);
1758 
1759 			if (slave->probed) {
1760 				struct device *dev = &slave->dev;
1761 				struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
1762 
1763 				if (slave->prop.use_domain_irq && slave->irq)
1764 					handle_nested_irq(slave->irq);
1765 
1766 				if (drv->ops && drv->ops->interrupt_callback) {
1767 					slave_intr.sdca_cascade = sdca_cascade;
1768 					slave_intr.control_port = clear;
1769 					memcpy(slave_intr.port, &port_status,
1770 					       sizeof(slave_intr.port));
1771 
1772 					drv->ops->interrupt_callback(slave, &slave_intr);
1773 				}
1774 			}
1775 
1776 			mutex_unlock(&slave->sdw_dev_lock);
1777 		}
1778 
1779 		/* Ack interrupt */
1780 		ret = sdw_write_no_pm(slave, SDW_SCP_INT1, clear);
1781 		if (ret < 0) {
1782 			dev_err(&slave->dev,
1783 				"SDW_SCP_INT1 write failed:%d\n", ret);
1784 			goto io_err;
1785 		}
1786 
1787 		/* at this point all initial interrupt sources were handled */
1788 		slave->first_interrupt_done = true;
1789 
1790 		/*
1791 		 * Read status again to ensure no new interrupts arrived
1792 		 * while servicing interrupts.
1793 		 */
1794 		ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
1795 		if (ret < 0) {
1796 			dev_err(&slave->dev,
1797 				"SDW_SCP_INT1 recheck read failed:%d\n", ret);
1798 			goto io_err;
1799 		}
1800 		buf = ret;
1801 
1802 		ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
1803 		if (ret < 0) {
1804 			dev_err(&slave->dev,
1805 				"SDW_SCP_INT2/3 recheck read failed:%d\n", ret);
1806 			goto io_err;
1807 		}
1808 
1809 		if (slave->id.class_id) {
1810 			ret = sdw_read_no_pm(slave, SDW_DP0_INT);
1811 			if (ret < 0) {
1812 				dev_err(&slave->dev,
1813 					"SDW_DP0_INT recheck read failed:%d\n", ret);
1814 				goto io_err;
1815 			}
1816 			sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
1817 		}
1818 
1819 		/*
1820 		 * Make sure no interrupts are pending
1821 		 */
1822 		stat = buf || buf2[0] || buf2[1] || sdca_cascade;
1823 
1824 		/*
1825 		 * Exit loop if Slave is continuously in ALERT state even
1826 		 * after servicing the interrupt multiple times.
1827 		 */
1828 		count++;
1829 
1830 		/* we can get alerts while processing so keep retrying */
1831 	} while (stat != 0 && count < SDW_READ_INTR_CLEAR_RETRY);
1832 
1833 	if (count == SDW_READ_INTR_CLEAR_RETRY)
1834 		dev_warn(&slave->dev, "Reached MAX_RETRY on alert read\n");
1835 
1836 io_err:
1837 	pm_runtime_mark_last_busy(&slave->dev);
1838 	pm_runtime_put_autosuspend(&slave->dev);
1839 
1840 	return ret;
1841 }
1842 
1843 static int sdw_update_slave_status(struct sdw_slave *slave,
1844 				   enum sdw_slave_status status)
1845 {
1846 	int ret = 0;
1847 
1848 	mutex_lock(&slave->sdw_dev_lock);
1849 
1850 	if (slave->probed) {
1851 		struct device *dev = &slave->dev;
1852 		struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);
1853 
1854 		if (drv->ops && drv->ops->update_status)
1855 			ret = drv->ops->update_status(slave, status);
1856 	}
1857 
1858 	mutex_unlock(&slave->sdw_dev_lock);
1859 
1860 	return ret;
1861 }
1862 
1863 /**
1864  * sdw_handle_slave_status() - Handle Slave status
1865  * @bus: SDW bus instance
1866  * @status: Status for all Slave(s)
1867  */
1868 int sdw_handle_slave_status(struct sdw_bus *bus,
1869 			    enum sdw_slave_status status[])
1870 {
1871 	enum sdw_slave_status prev_status;
1872 	struct sdw_slave *slave;
1873 	bool attached_initializing, id_programmed;
1874 	int i, ret = 0;
1875 
1876 	/* first check if any Slaves fell off the bus */
1877 	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
1878 		mutex_lock(&bus->bus_lock);
1879 		if (test_bit(i, bus->assigned) == false) {
1880 			mutex_unlock(&bus->bus_lock);
1881 			continue;
1882 		}
1883 		mutex_unlock(&bus->bus_lock);
1884 
1885 		slave = sdw_get_slave(bus, i);
1886 		if (!slave)
1887 			continue;
1888 
1889 		if (status[i] == SDW_SLAVE_UNATTACHED &&
1890 		    slave->status != SDW_SLAVE_UNATTACHED) {
1891 			dev_warn(&slave->dev, "Slave %d state check1: UNATTACHED, status was %d\n",
1892 				 i, slave->status);
1893 			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
1894 
1895 			/* Ensure the driver knows that the peripheral is unattached */
1896 			ret = sdw_update_slave_status(slave, status[i]);
1897 			if (ret < 0)
1898 				dev_warn(&slave->dev, "Update Slave status failed:%d\n", ret);
1899 		}
1900 	}
1901 
1902 	if (status[0] == SDW_SLAVE_ATTACHED) {
1903 		dev_dbg(bus->dev, "Slave attached, programming device number\n");
1904 
1905 		/*
1906 		 * Programming a device number will have side effects,
1907 		 * so we deal with other devices at a later time.
1908 		 * This relies on those devices reporting ATTACHED, which will
1909 		 * trigger another call to this function. This will only
1910 		 * happen if at least one device ID was programmed.
1911 		 * Error returns from sdw_program_device_num() are currently
1912 		 * ignored because there's no useful recovery that can be done.
1913 		 * Returning the error here could result in the current status
1914 		 * of other devices not being handled, because if no device IDs
1915 		 * were programmed there's nothing to guarantee a status change
1916 		 * to trigger another call to this function.
1917 		 */
1918 		sdw_program_device_num(bus, &id_programmed);
1919 		if (id_programmed)
1920 			return 0;
1921 	}
1922 
1923 	/* Continue to check other slave statuses */
1924 	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
1925 		mutex_lock(&bus->bus_lock);
1926 		if (test_bit(i, bus->assigned) == false) {
1927 			mutex_unlock(&bus->bus_lock);
1928 			continue;
1929 		}
1930 		mutex_unlock(&bus->bus_lock);
1931 
1932 		slave = sdw_get_slave(bus, i);
1933 		if (!slave)
1934 			continue;
1935 
1936 		attached_initializing = false;
1937 
1938 		switch (status[i]) {
1939 		case SDW_SLAVE_UNATTACHED:
1940 			if (slave->status == SDW_SLAVE_UNATTACHED)
1941 				break;
1942 
1943 			dev_warn(&slave->dev, "Slave %d state check2: UNATTACHED, status was %d\n",
1944 				 i, slave->status);
1945 
1946 			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
1947 			break;
1948 
1949 		case SDW_SLAVE_ALERT:
1950 			ret = sdw_handle_slave_alerts(slave);
1951 			if (ret < 0)
1952 				dev_err(&slave->dev,
1953 					"Slave %d alert handling failed: %d\n",
1954 					i, ret);
1955 			break;
1956 
1957 		case SDW_SLAVE_ATTACHED:
1958 			if (slave->status == SDW_SLAVE_ATTACHED)
1959 				break;
1960 
1961 			prev_status = slave->status;
1962 			sdw_modify_slave_status(slave, SDW_SLAVE_ATTACHED);
1963 
1964 			if (prev_status == SDW_SLAVE_ALERT)
1965 				break;
1966 
1967 			attached_initializing = true;
1968 
1969 			ret = sdw_initialize_slave(slave);
1970 			if (ret < 0)
1971 				dev_err(&slave->dev,
1972 					"Slave %d initialization failed: %d\n",
1973 					i, ret);
1974 
1975 			break;
1976 
1977 		default:
1978 			dev_err(&slave->dev, "Invalid slave %d status:%d\n",
1979 				i, status[i]);
1980 			break;
1981 		}
1982 
1983 		ret = sdw_update_slave_status(slave, status[i]);
1984 		if (ret < 0)
1985 			dev_err(&slave->dev,
1986 				"Update Slave status failed:%d\n", ret);
1987 		if (attached_initializing) {
1988 			dev_dbg(&slave->dev,
1989 				"signaling initialization completion for Slave %d\n",
1990 				slave->dev_num);
1991 
1992 			complete_all(&slave->initialization_complete);
1993 
1994 			/*
1995 			 * If the manager became pm_runtime active, the peripherals will be
1996 			 * restarted and attach, but their pm_runtime status may remain
1997 			 * suspended. If the 'update_slave_status' callback initiates
1998 			 * any sort of deferred processing, this processing would not be
1999 			 * cancelled on pm_runtime suspend.
2000 			 * To avoid such zombie states, we queue a request to resume.
2001 			 * This would be a no-op in case the peripheral was being resumed
2002 			 * by e.g. the ALSA/ASoC framework.
2003 			 */
2004 			pm_request_resume(&slave->dev);
2005 		}
2006 	}
2007 
2008 	return ret;
2009 }
2010 EXPORT_SYMBOL(sdw_handle_slave_status);
2011 
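/**
 * sdw_clear_slave_status() - Mark all enumerated Slaves as UNATTACHED
 * @bus: SDW bus instance
 * @request: unattach request type, stored in each Slave and used on
 * pm_runtime resume
 */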
2012 void sdw_clear_slave_status(struct sdw_bus *bus, u32 request)
2013 {
2014 	struct sdw_slave *slave;
2015 	int i;
2016 
2017 	/* Check all non-zero devices */
2018 	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
2019 		mutex_lock(&bus->bus_lock);
2020 		if (test_bit(i, bus->assigned) == false) {
2021 			mutex_unlock(&bus->bus_lock);
2022 			continue;
2023 		}
2024 		mutex_unlock(&bus->bus_lock);
2025 
2026 		slave = sdw_get_slave(bus, i);
2027 		if (!slave)
2028 			continue;
2029 
2030 		if (slave->status != SDW_SLAVE_UNATTACHED) {
2031 			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
2032 			slave->first_interrupt_done = false;
2033 			sdw_update_slave_status(slave, SDW_SLAVE_UNATTACHED);
2034 		}
2035 
2036 		/* keep track of request, used in pm_runtime resume */
2037 		slave->unattach_request = request;
2038 	}
2039 }
2040 EXPORT_SYMBOL(sdw_clear_slave_status);
2041