Lines Matching +full:poll +full:- +full:retry +full:- +full:count

1 // SPDX-License-Identifier: GPL-2.0
13 * little-endian format.
27 * originated from a test-suite and the normal operations of the SCMI drivers,
40 * `-- 0
41 * |-- atomic_threshold_us
42 * |-- instance_name
43 * |-- raw
44 * | |-- channels
45 * | | |-- 0x10
46 * | | | |-- message
47 * | | | `-- message_async
48 * | | `-- 0x13
49 * | | |-- message
50 * | | `-- message_async
51 * | |-- errors
52 * | |-- message
53 * | |-- message_async
54 * | |-- notification
55 * | `-- reset
56 * `-- transport
57 * |-- is_atomic
58 * |-- max_msg_size
59 * |-- max_rx_timeout_ms
60 * |-- rx_max_msg
61 * |-- tx_max_msg
62 * `-- type
66 * - errors: used to read back timed-out and unexpected replies
67 * - message*: used to send sync/async commands and read back immediate and
69 * - notification: used to read any notification being emitted by the system
71 * - reset: used to flush the queues of messages (of any kind) still pending
72 * to be read; this is useful at test-suite start/stop to get
75 * with the per-channel entries rooted at /channels being present only on a
78 * Such per-channel entries can be used to explicitly choose a specific channel
96 * Injection of multiple in-flight requests is supported as long as the user
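
To make the ABI described above concrete, here is a minimal user-space sketch that injects one synchronous command through the message entry and reads back its reply. The debugfs mount point, the "0" instance directory and the use of the SCMI Base protocol's PROTOCOL_VERSION command are illustrative assumptions; only the 4-byte little-endian header prefix is mandated by this interface.

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/*
		 * SCMI header per the spec: msg_id[7:0] type[9:8] prot[17:10]
		 * token[27:18]. Base protocol (0x10), PROTOCOL_VERSION (0x0),
		 * token 0x1 - all illustrative values.
		 */
		uint32_t hdr = (0x10 << 10) | (0x1 << 18);
		uint8_t reply[132];
		uint32_t rhdr;
		int32_t status;
		ssize_t n;
		int fd;

		fd = open("/sys/kernel/debug/scmi/0/raw/message", O_RDWR);
		if (fd < 0)
			return 1;

		/* One write carrying the whole message: header only, no payload */
		if (write(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
			return 1;

		/* Blocks until the reply is queued back on this same entry */
		n = read(fd, reply, sizeof(reply));
		if (n >= 8) {
			memcpy(&rhdr, reply, 4);	/* little-endian header */
			memcpy(&status, reply + 4, 4);	/* SCMI status word */
			/* printed values assume a little-endian host for brevity */
			printf("HDR:0x%08X status:%d payload:%zd bytes\n",
			       rhdr, status, n - 8);
		}

		close(fd);
		return 0;
	}
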
119 #include <linux/poll.h>
133 * struct scmi_raw_queue - Generic Raw queue descriptor
139 * @wq: A waitqueue used to wait and poll on related @msg_q
152 * struct scmi_raw_mode_info - Structure holding SCMI Raw instance data
157 * @tx_max_msg: Maximum number of concurrent TX in-flight messages
159 * @chans_q: An XArray mapping optional additional per-channel queues
193 * struct scmi_xfer_raw_waiter - Structure to describe an xfer to be waited for
200 * pointed at by xfer->async_done.
212 * struct scmi_raw_buffer - Structure to hold a full SCMI message
226 * struct scmi_dbg_raw_data - Structure holding data needed by the debugfs
253 return raw->q[idx]; in scmi_raw_queue_select()
255 return xa_load(&raw->chans_q, chan_id); in scmi_raw_queue_select()
262 struct list_head *head = &q->free_bufs; in scmi_raw_buffer_get()
264 spin_lock_irqsave(&q->free_bufs_lock, flags); in scmi_raw_buffer_get()
267 list_del_init(&rb->node); in scmi_raw_buffer_get()
269 spin_unlock_irqrestore(&q->free_bufs_lock, flags); in scmi_raw_buffer_get()
280 rb->msg.len = rb->max_len; in scmi_raw_buffer_put()
282 spin_lock_irqsave(&q->free_bufs_lock, flags); in scmi_raw_buffer_put()
283 list_add_tail(&rb->node, &q->free_bufs); in scmi_raw_buffer_put()
284 spin_unlock_irqrestore(&q->free_bufs_lock, flags); in scmi_raw_buffer_put()
292 spin_lock_irqsave(&q->msg_q_lock, flags); in scmi_raw_buffer_enqueue()
293 list_add_tail(&rb->node, &q->msg_q); in scmi_raw_buffer_enqueue()
294 spin_unlock_irqrestore(&q->msg_q_lock, flags); in scmi_raw_buffer_enqueue()
296 wake_up_interruptible(&q->wq); in scmi_raw_buffer_enqueue()
304 if (!list_empty(&q->msg_q)) { in scmi_raw_buffer_dequeue_unlocked()
305 rb = list_first_entry(&q->msg_q, struct scmi_raw_buffer, node); in scmi_raw_buffer_dequeue_unlocked()
306 list_del_init(&rb->node); in scmi_raw_buffer_dequeue_unlocked()
317 spin_lock_irqsave(&q->msg_q_lock, flags); in scmi_raw_buffer_dequeue()
319 spin_unlock_irqrestore(&q->msg_q_lock, flags); in scmi_raw_buffer_dequeue()
341 mutex_lock(&raw->free_mtx); in scmi_xfer_raw_waiter_get()
342 if (!list_empty(&raw->free_waiters)) { in scmi_xfer_raw_waiter_get()
343 rw = list_first_entry(&raw->free_waiters, in scmi_xfer_raw_waiter_get()
345 list_del_init(&rw->node); in scmi_xfer_raw_waiter_get()
348 reinit_completion(&rw->async_response); in scmi_xfer_raw_waiter_get()
349 xfer->async_done = &rw->async_response; in scmi_xfer_raw_waiter_get()
352 rw->cinfo = cinfo; in scmi_xfer_raw_waiter_get()
353 rw->xfer = xfer; in scmi_xfer_raw_waiter_get()
355 mutex_unlock(&raw->free_mtx); in scmi_xfer_raw_waiter_get()
363 if (rw->xfer) { in scmi_xfer_raw_waiter_put()
364 rw->xfer->async_done = NULL; in scmi_xfer_raw_waiter_put()
365 rw->xfer = NULL; in scmi_xfer_raw_waiter_put()
368 mutex_lock(&raw->free_mtx); in scmi_xfer_raw_waiter_put()
369 list_add_tail(&rw->node, &raw->free_waiters); in scmi_xfer_raw_waiter_put()
370 mutex_unlock(&raw->free_mtx); in scmi_xfer_raw_waiter_put()
377 rw->start_jiffies = jiffies; in scmi_xfer_raw_waiter_enqueue()
379 trace_scmi_xfer_response_wait(rw->xfer->transfer_id, rw->xfer->hdr.id, in scmi_xfer_raw_waiter_enqueue()
380 rw->xfer->hdr.protocol_id, in scmi_xfer_raw_waiter_enqueue()
381 rw->xfer->hdr.seq, in scmi_xfer_raw_waiter_enqueue()
382 raw->desc->max_rx_timeout_ms, in scmi_xfer_raw_waiter_enqueue()
383 rw->xfer->hdr.poll_completion); in scmi_xfer_raw_waiter_enqueue()
385 mutex_lock(&raw->active_mtx); in scmi_xfer_raw_waiter_enqueue()
386 list_add_tail(&rw->node, &raw->active_waiters); in scmi_xfer_raw_waiter_enqueue()
387 mutex_unlock(&raw->active_mtx); in scmi_xfer_raw_waiter_enqueue()
390 queue_work(raw->wait_wq, &raw->waiters_work); in scmi_xfer_raw_waiter_enqueue()
398 mutex_lock(&raw->active_mtx); in scmi_xfer_raw_waiter_dequeue()
399 if (!list_empty(&raw->active_waiters)) { in scmi_xfer_raw_waiter_dequeue()
400 rw = list_first_entry(&raw->active_waiters, in scmi_xfer_raw_waiter_dequeue()
402 list_del_init(&rw->node); in scmi_xfer_raw_waiter_dequeue()
404 mutex_unlock(&raw->active_mtx); in scmi_xfer_raw_waiter_dequeue()
410 * scmi_xfer_raw_worker - Work function to wait for Raw xfers completions
414 * In SCMI Raw mode, once a user-injected SCMI message is sent, we in scmi_xfer_raw_worker()
418 * Userspace should and will poll/wait instead on the read syscalls which will
439 dev = raw->handle->dev; in scmi_xfer_raw_worker()
440 max_tmo = msecs_to_jiffies(raw->desc->max_rx_timeout_ms); in scmi_xfer_raw_worker()
454 cinfo = rw->cinfo; in scmi_xfer_raw_worker()
455 xfer = rw->xfer; in scmi_xfer_raw_worker()
457 * Waiters are queued by wait-deadline at the end, so some of in scmi_xfer_raw_worker()
465 aging = jiffies - rw->start_jiffies; in scmi_xfer_raw_worker()
467 jiffies_to_msecs(max_tmo - aging) : 1; in scmi_xfer_raw_worker()
471 if (!ret && xfer->hdr.status) in scmi_xfer_raw_worker()
472 ret = scmi_to_linux_errno(xfer->hdr.status); in scmi_xfer_raw_worker()
474 if (raw->desc->ops->mark_txdone) in scmi_xfer_raw_worker()
475 raw->desc->ops->mark_txdone(rw->cinfo, ret, xfer); in scmi_xfer_raw_worker()
477 trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id, in scmi_xfer_raw_worker()
478 xfer->hdr.protocol_id, xfer->hdr.seq, ret); in scmi_xfer_raw_worker()
481 if (!ret && xfer->async_done) { in scmi_xfer_raw_worker()
484 if (!wait_for_completion_timeout(xfer->async_done, tmo)) in scmi_xfer_raw_worker()
486 "timed out in RAW delayed resp - HDR:%08X\n", in scmi_xfer_raw_worker()
487 pack_scmi_header(&xfer->hdr)); in scmi_xfer_raw_worker()
491 scmi_xfer_raw_put(raw->handle, xfer); in scmi_xfer_raw_worker()
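
Since this worker waits for completions out of line, a command whose reply never arrives does not surface on the message entry at all: the timed-out or mismatching reply is funnelled to the errors queue instead (see scmi_raw_error_report() further down). A non-blocking drain of that queue might look like the following sketch; the paths are the same illustrative assumptions as above.

	#include <errno.h>
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static void drain_errors(void)
	{
		uint8_t msg[132];
		uint32_t hdr;
		ssize_t n;
		int fd;

		/* O_NONBLOCK makes an empty queue report EAGAIN, not block */
		fd = open("/sys/kernel/debug/scmi/0/raw/errors",
			  O_RDONLY | O_NONBLOCK);
		if (fd < 0)
			return;

		n = read(fd, msg, sizeof(msg));
		if (n > 0) {
			memcpy(&hdr, msg, sizeof(hdr));
			fprintf(stderr, "stale reply HDR:0x%08X (%zd bytes)\n",
				hdr, n);
		} else if (n < 0 && errno == EAGAIN) {
			/* nothing pending */
		}

		close(fd);
	}
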
500 dev_info(raw->handle->dev, "Resetting SCMI Raw stack.\n"); in scmi_xfer_raw_reset()
503 scmi_raw_buffer_queue_flush(raw->q[i]); in scmi_xfer_raw_reset()
507 * scmi_xfer_raw_get_init - A helper to build a valid xfer from the provided
512 * header) in little-endian binary format.
521 * sequence-numbers between successive SCMI messages, such registration could
523 * had still not released; in such a case we just wait and retry.
533 int ret, retry = SCMI_XFER_RAW_MAX_RETRIES; in scmi_xfer_raw_get_init() local
534 struct device *dev = raw->handle->dev; in scmi_xfer_raw_get_init()
537 return -EINVAL; in scmi_xfer_raw_get_init()
539 tx_size = len - sizeof(u32); in scmi_xfer_raw_get_init()
541 if (tx_size > raw->desc->max_msg_size) in scmi_xfer_raw_get_init()
542 return -ERANGE; in scmi_xfer_raw_get_init()
544 xfer = scmi_xfer_raw_get(raw->handle); in scmi_xfer_raw_get_init()
546 dev_warn(dev, "RAW - Cannot get a free RAW xfer!\n"); in scmi_xfer_raw_get_init()
552 unpack_scmi_header(msg_hdr, &xfer->hdr); in scmi_xfer_raw_get_init()
553 xfer->hdr.seq = (u16)MSG_XTRACT_TOKEN(msg_hdr); in scmi_xfer_raw_get_init()
555 xfer->hdr.poll_completion = false; in scmi_xfer_raw_get_init()
556 xfer->hdr.status = SCMI_SUCCESS; in scmi_xfer_raw_get_init()
557 xfer->tx.len = tx_size; in scmi_xfer_raw_get_init()
558 xfer->rx.len = raw->desc->max_msg_size; in scmi_xfer_raw_get_init()
560 memset(xfer->tx.buf, 0x00, raw->desc->max_msg_size); in scmi_xfer_raw_get_init()
561 if (xfer->tx.len) in scmi_xfer_raw_get_init()
562 memcpy(xfer->tx.buf, (u8 *)buf + sizeof(msg_hdr), xfer->tx.len); in scmi_xfer_raw_get_init()
569 * finally released later by a deferred worker. Just retry for a while. in scmi_xfer_raw_get_init()
572 ret = scmi_xfer_raw_inflight_register(raw->handle, xfer); in scmi_xfer_raw_get_init()
576 retry); in scmi_xfer_raw_get_init()
577 msleep(raw->desc->max_rx_timeout_ms / in scmi_xfer_raw_get_init()
580 } while (ret && --retry); in scmi_xfer_raw_get_init()
584 "RAW - Could NOT register xfer %d in-flight HDR:0x%08X\n", in scmi_xfer_raw_get_init()
585 xfer->hdr.seq, msg_hdr); in scmi_xfer_raw_get_init()
586 scmi_xfer_raw_put(raw->handle, xfer); in scmi_xfer_raw_get_init()
593 * scmi_do_xfer_raw_start - A helper to send a valid raw xfer
607 * NOT a common header-flag stating whether the command is asynchronous or not)
618 struct device *dev = raw->handle->dev; in scmi_do_xfer_raw_start()
621 chan_id = xfer->hdr.protocol_id; in scmi_do_xfer_raw_start()
623 xfer->flags |= SCMI_XFER_FLAG_CHAN_SET; in scmi_do_xfer_raw_start()
625 cinfo = scmi_xfer_raw_channel_get(raw->handle, chan_id); in scmi_do_xfer_raw_start()
631 dev_warn(dev, "RAW - Cannot get a free waiter!\n"); in scmi_do_xfer_raw_start()
632 return -ENOMEM; in scmi_do_xfer_raw_start()
636 if (is_polling_enabled(cinfo, raw->desc)) in scmi_do_xfer_raw_start()
637 xfer->hdr.poll_completion = true; in scmi_do_xfer_raw_start()
639 reinit_completion(&xfer->done); in scmi_do_xfer_raw_start()
641 smp_store_mb(xfer->state, SCMI_XFER_SENT_OK); in scmi_do_xfer_raw_start()
643 trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id, in scmi_do_xfer_raw_start()
644 xfer->hdr.protocol_id, xfer->hdr.seq, in scmi_do_xfer_raw_start()
645 xfer->hdr.poll_completion); in scmi_do_xfer_raw_start()
647 ret = raw->desc->ops->send_message(rw->cinfo, xfer); in scmi_do_xfer_raw_start()
654 trace_scmi_msg_dump(raw->id, cinfo->id, xfer->hdr.protocol_id, in scmi_do_xfer_raw_start()
655 xfer->hdr.id, "cmnd", xfer->hdr.seq, in scmi_do_xfer_raw_start()
656 xfer->hdr.status, in scmi_do_xfer_raw_start()
657 xfer->tx.buf, xfer->tx.len); in scmi_do_xfer_raw_start()
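
Note above how, absent an explicit choice, the transfer is routed on the channel matching the protocol number carried in the injected header. The per-channel entries from the tree at the top of this file exist precisely to override that: writing through, say, channels/0x13/message pins the command to that channel regardless of the header contents. A sketch, with the 0x13 entry taken from the example tree:

	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	static int send_on_channel_0x13(uint32_t hdr)
	{
		/* Bypass protocol-based routing via the dedicated channel entry */
		int fd = open("/sys/kernel/debug/scmi/0/raw/channels/0x13/message",
			      O_RDWR);

		if (fd < 0)
			return -1;

		if (write(fd, &hdr, sizeof(hdr)) != sizeof(hdr)) {
			close(fd);
			return -1;
		}

		return fd;	/* the reply can be read back from this same fd */
	}
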
665 * scmi_raw_message_send - A helper to build and send an SCMI command using
670 * header) in little-endian binary format.
689 scmi_xfer_raw_put(raw->handle, xfer); in scmi_raw_message_send()
700 spin_lock_irqsave(&q->msg_q_lock, flags); in scmi_raw_message_dequeue()
701 while (list_empty(&q->msg_q)) { in scmi_raw_message_dequeue()
702 spin_unlock_irqrestore(&q->msg_q_lock, flags); in scmi_raw_message_dequeue()
705 return ERR_PTR(-EAGAIN); in scmi_raw_message_dequeue()
707 if (wait_event_interruptible(q->wq, !list_empty(&q->msg_q))) in scmi_raw_message_dequeue()
708 return ERR_PTR(-ERESTARTSYS); in scmi_raw_message_dequeue()
710 spin_lock_irqsave(&q->msg_q_lock, flags); in scmi_raw_message_dequeue()
715 spin_unlock_irqrestore(&q->msg_q_lock, flags); in scmi_raw_message_dequeue()
721 * scmi_raw_message_receive - A helper to dequeue and report the next
726 * in little-endian binary format.
731 * @o_nonblock: A flag to request a non-blocking message dequeue.
746 return -ENODEV; in scmi_raw_message_receive()
750 dev_dbg(raw->handle->dev, "RAW - No message available!\n"); in scmi_raw_message_receive()
754 if (rb->msg.len <= len) { in scmi_raw_message_receive()
755 memcpy(buf, rb->msg.buf, rb->msg.len); in scmi_raw_message_receive()
756 *size = rb->msg.len; in scmi_raw_message_receive()
758 ret = -ENOSPC; in scmi_raw_message_receive()
770 size_t count, loff_t *ppos, in scmi_dbg_raw_mode_common_read() argument
774 struct scmi_dbg_raw_data *rd = filp->private_data; in scmi_dbg_raw_mode_common_read()
776 if (!rd->rx_size) { in scmi_dbg_raw_mode_common_read()
779 ret = scmi_raw_message_receive(rd->raw, rd->rx.buf, rd->rx.len, in scmi_dbg_raw_mode_common_read()
780 &rd->rx_size, idx, rd->chan_id, in scmi_dbg_raw_mode_common_read()
781 filp->f_flags & O_NONBLOCK); in scmi_dbg_raw_mode_common_read()
783 rd->rx_size = 0; in scmi_dbg_raw_mode_common_read()
789 } else if (*ppos == rd->rx_size) { in scmi_dbg_raw_mode_common_read()
790 /* Return EOF once the whole message has been read out */ in scmi_dbg_raw_mode_common_read()
791 rd->rx_size = 0; in scmi_dbg_raw_mode_common_read()
795 cnt = simple_read_from_buffer(buf, count, ppos, in scmi_dbg_raw_mode_common_read()
796 rd->rx.buf, rd->rx_size); in scmi_dbg_raw_mode_common_read()
803 size_t count, loff_t *ppos, in scmi_dbg_raw_mode_common_write() argument
807 struct scmi_dbg_raw_data *rd = filp->private_data; in scmi_dbg_raw_mode_common_write()
809 if (count > rd->tx.len - rd->tx_size) in scmi_dbg_raw_mode_common_write()
810 return -ENOSPC; in scmi_dbg_raw_mode_common_write()
812 /* On first write attempt @count carries the total message size. */ in scmi_dbg_raw_mode_common_write()
813 if (!rd->tx_size) in scmi_dbg_raw_mode_common_write()
814 rd->tx_req_size = count; in scmi_dbg_raw_mode_common_write()
820 if (rd->tx_size < rd->tx_req_size) { in scmi_dbg_raw_mode_common_write()
823 cnt = simple_write_to_buffer(rd->tx.buf, rd->tx.len, ppos, in scmi_dbg_raw_mode_common_write()
824 buf, count); in scmi_dbg_raw_mode_common_write()
828 rd->tx_size += cnt; in scmi_dbg_raw_mode_common_write()
829 if (cnt < count) in scmi_dbg_raw_mode_common_write()
833 ret = scmi_raw_message_send(rd->raw, rd->tx.buf, rd->tx_size, in scmi_dbg_raw_mode_common_write()
834 rd->chan_id, async); in scmi_dbg_raw_mode_common_write()
837 rd->tx_size = 0; in scmi_dbg_raw_mode_common_write()
840 return ret ?: count; in scmi_dbg_raw_mode_common_write()
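
The helper above lets a message arrive in pieces: the first write() fixes the total size in tx_req_size, later writes accumulate into tx.buf, and the send fires only once tx_size catches up. A user-space writer can therefore resume after a short write, as in this sketch (a header plus one u32 parameter, both illustrative):

	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>

	static int send_all(int fd, uint32_t hdr, uint32_t param)
	{
		uint8_t buf[8];
		size_t off = 0;

		memcpy(buf, &hdr, 4);		/* little-endian header first */
		memcpy(buf + 4, &param, 4);	/* then the raw payload */

		/* First write() length declares the full size; resume if short */
		while (off < sizeof(buf)) {
			ssize_t w = write(fd, buf + off, sizeof(buf) - off);

			if (w < 0)
				return -1;
			off += w;
		}

		return 0;
	}
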
848 struct scmi_dbg_raw_data *rd = filp->private_data; in scmi_test_dbg_raw_common_poll()
852 q = scmi_raw_queue_select(rd->raw, idx, rd->chan_id); in scmi_test_dbg_raw_common_poll()
856 poll_wait(filp, &q->wq, wait); in scmi_test_dbg_raw_common_poll()
858 spin_lock_irqsave(&q->msg_q_lock, flags); in scmi_test_dbg_raw_common_poll()
859 if (!list_empty(&q->msg_q)) in scmi_test_dbg_raw_common_poll()
861 spin_unlock_irqrestore(&q->msg_q_lock, flags); in scmi_test_dbg_raw_common_poll()
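
On the user side, the poll hook above means the usual poll()/select() idioms work on every raw entry. A blocking wait for the next notification could look like this sketch (same illustrative paths as in the earlier examples):

	#include <fcntl.h>
	#include <poll.h>
	#include <stdint.h>
	#include <unistd.h>

	static ssize_t wait_notification(uint8_t *buf, size_t len)
	{
		struct pollfd pfd = {
			.fd = open("/sys/kernel/debug/scmi/0/raw/notification",
				   O_RDONLY | O_NONBLOCK),
			.events = POLLIN,
		};
		ssize_t n = -1;

		if (pfd.fd < 0)
			return -1;

		/* Sleep until at least one notification sits in the queue */
		if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
			n = read(pfd.fd, buf, len);

		close(pfd.fd);
		return n;
	}
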
868 size_t count, loff_t *ppos) in scmi_dbg_raw_mode_message_read() argument
870 return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos, in scmi_dbg_raw_mode_message_read()
876 size_t count, loff_t *ppos) in scmi_dbg_raw_mode_message_write() argument
878 return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, false); in scmi_dbg_raw_mode_message_write()
892 if (!inode->i_private) in scmi_dbg_raw_mode_open()
893 return -ENODEV; in scmi_dbg_raw_mode_open()
895 raw = inode->i_private; in scmi_dbg_raw_mode_open()
898 return -ENOMEM; in scmi_dbg_raw_mode_open()
900 rd->rx.len = raw->desc->max_msg_size + sizeof(u32); in scmi_dbg_raw_mode_open()
901 rd->rx.buf = kzalloc(rd->rx.len, GFP_KERNEL); in scmi_dbg_raw_mode_open()
902 if (!rd->rx.buf) { in scmi_dbg_raw_mode_open()
904 return -ENOMEM; in scmi_dbg_raw_mode_open()
907 rd->tx.len = raw->desc->max_msg_size + sizeof(u32); in scmi_dbg_raw_mode_open()
908 rd->tx.buf = kzalloc(rd->tx.len, GFP_KERNEL); in scmi_dbg_raw_mode_open()
909 if (!rd->tx.buf) { in scmi_dbg_raw_mode_open()
910 kfree(rd->rx.buf); in scmi_dbg_raw_mode_open()
912 return -ENOMEM; in scmi_dbg_raw_mode_open()
916 /* not set - reassign the 0 we already had after kzalloc() */ in scmi_dbg_raw_mode_open()
917 rd->chan_id = debugfs_get_aux_num(filp); in scmi_dbg_raw_mode_open()
919 rd->raw = raw; in scmi_dbg_raw_mode_open()
920 filp->private_data = rd; in scmi_dbg_raw_mode_open()
927 struct scmi_dbg_raw_data *rd = filp->private_data; in scmi_dbg_raw_mode_release()
929 kfree(rd->rx.buf); in scmi_dbg_raw_mode_release()
930 kfree(rd->tx.buf); in scmi_dbg_raw_mode_release()
938 size_t count, loff_t *ppos) in scmi_dbg_raw_mode_reset_write() argument
940 struct scmi_dbg_raw_data *rd = filp->private_data; in scmi_dbg_raw_mode_reset_write()
942 scmi_xfer_raw_reset(rd->raw); in scmi_dbg_raw_mode_reset_write()
944 return count; in scmi_dbg_raw_mode_reset_write()
959 .poll = scmi_dbg_raw_mode_message_poll,
965 size_t count, loff_t *ppos) in scmi_dbg_raw_mode_message_async_write() argument
967 return scmi_dbg_raw_mode_common_write(filp, buf, count, ppos, true); in scmi_dbg_raw_mode_message_async_write()
975 .poll = scmi_dbg_raw_mode_message_poll,
981 size_t count, loff_t *ppos) in scmi_test_dbg_raw_mode_notif_read() argument
983 return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos, in scmi_test_dbg_raw_mode_notif_read()
998 .poll = scmi_test_dbg_raw_mode_notif_poll,
1004 size_t count, loff_t *ppos) in scmi_test_dbg_raw_mode_errors_read() argument
1006 return scmi_dbg_raw_mode_common_read(filp, buf, count, ppos, in scmi_test_dbg_raw_mode_errors_read()
1021 .poll = scmi_test_dbg_raw_mode_errors_poll,
1030 struct device *dev = raw->handle->dev; in scmi_raw_queue_init()
1035 return ERR_PTR(-ENOMEM); in scmi_raw_queue_init()
1037 rb = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rb), GFP_KERNEL); in scmi_raw_queue_init()
1039 return ERR_PTR(-ENOMEM); in scmi_raw_queue_init()
1041 spin_lock_init(&q->free_bufs_lock); in scmi_raw_queue_init()
1042 INIT_LIST_HEAD(&q->free_bufs); in scmi_raw_queue_init()
1043 for (i = 0; i < raw->tx_max_msg; i++, rb++) { in scmi_raw_queue_init()
1044 rb->max_len = raw->desc->max_msg_size + sizeof(u32); in scmi_raw_queue_init()
1045 rb->msg.buf = devm_kzalloc(dev, rb->max_len, GFP_KERNEL); in scmi_raw_queue_init()
1046 if (!rb->msg.buf) in scmi_raw_queue_init()
1047 return ERR_PTR(-ENOMEM); in scmi_raw_queue_init()
1051 spin_lock_init(&q->msg_q_lock); in scmi_raw_queue_init()
1052 INIT_LIST_HEAD(&q->msg_q); in scmi_raw_queue_init()
1053 init_waitqueue_head(&q->wq); in scmi_raw_queue_init()
1062 struct device *dev = raw->handle->dev; in scmi_xfer_raw_worker_init()
1064 rw = devm_kcalloc(dev, raw->tx_max_msg, sizeof(*rw), GFP_KERNEL); in scmi_xfer_raw_worker_init()
1066 return -ENOMEM; in scmi_xfer_raw_worker_init()
1068 raw->wait_wq = alloc_workqueue("scmi-raw-wait-wq-%d", in scmi_xfer_raw_worker_init()
1070 WQ_HIGHPRI | WQ_SYSFS, 0, raw->id); in scmi_xfer_raw_worker_init()
1071 if (!raw->wait_wq) in scmi_xfer_raw_worker_init()
1072 return -ENOMEM; in scmi_xfer_raw_worker_init()
1074 mutex_init(&raw->free_mtx); in scmi_xfer_raw_worker_init()
1075 INIT_LIST_HEAD(&raw->free_waiters); in scmi_xfer_raw_worker_init()
1076 mutex_init(&raw->active_mtx); in scmi_xfer_raw_worker_init()
1077 INIT_LIST_HEAD(&raw->active_waiters); in scmi_xfer_raw_worker_init()
1079 for (i = 0; i < raw->tx_max_msg; i++, rw++) { in scmi_xfer_raw_worker_init()
1080 init_completion(&rw->async_response); in scmi_xfer_raw_worker_init()
1083 INIT_WORK(&raw->waiters_work, scmi_xfer_raw_worker); in scmi_xfer_raw_worker_init()
1093 struct device *dev = raw->handle->dev; in scmi_raw_mode_setup()
1097 return -ENOMEM; in scmi_raw_mode_setup()
1100 raw->q[idx] = scmi_raw_queue_init(raw); in scmi_raw_mode_setup()
1101 if (IS_ERR(raw->q[idx])) { in scmi_raw_mode_setup()
1102 ret = PTR_ERR(raw->q[idx]); in scmi_raw_mode_setup()
1107 xa_init(&raw->chans_q); in scmi_raw_mode_setup()
1120 ret = xa_insert(&raw->chans_q, channels[i], q, in scmi_raw_mode_setup()
1136 raw->gid = gid; in scmi_raw_mode_setup()
1141 xa_destroy(&raw->chans_q); in scmi_raw_mode_setup()
1148 * scmi_raw_mode_init - Function to initialize the SCMI Raw stack
1157 * @tx_max_msg: Max number of in-flight messages allowed by the transport
1173 return ERR_PTR(-EINVAL); in scmi_raw_mode_init()
1175 dev = handle->dev; in scmi_raw_mode_init()
1178 return ERR_PTR(-ENOMEM); in scmi_raw_mode_init()
1180 raw->handle = handle; in scmi_raw_mode_init()
1181 raw->desc = desc; in scmi_raw_mode_init()
1182 raw->tx_max_msg = tx_max_msg; in scmi_raw_mode_init()
1183 raw->id = instance_id; in scmi_raw_mode_init()
1191 raw->dentry = debugfs_create_dir("raw", top_dentry); in scmi_raw_mode_init()
1193 debugfs_create_file("reset", 0200, raw->dentry, raw, in scmi_raw_mode_init()
1196 debugfs_create_file("message", 0600, raw->dentry, raw, in scmi_raw_mode_init()
1199 debugfs_create_file("message_async", 0600, raw->dentry, raw, in scmi_raw_mode_init()
1202 debugfs_create_file("notification", 0400, raw->dentry, raw, in scmi_raw_mode_init()
1205 debugfs_create_file("errors", 0400, raw->dentry, raw, in scmi_raw_mode_init()
1209 * Expose per-channel entries if multiple channels are available. in scmi_raw_mode_init()
1217 top_chans = debugfs_create_dir("channels", raw->dentry); in scmi_raw_mode_init()
1236 dev_info(dev, "SCMI RAW Mode initialized for instance %d\n", raw->id); in scmi_raw_mode_init()
1242 * scmi_raw_mode_cleanup - Function to cleanup the SCMI Raw stack
1253 debugfs_remove_recursive(raw->dentry); in scmi_raw_mode_cleanup()
1255 cancel_work_sync(&raw->waiters_work); in scmi_raw_mode_cleanup()
1256 destroy_workqueue(raw->wait_wq); in scmi_raw_mode_cleanup()
1257 xa_destroy(&raw->chans_q); in scmi_raw_mode_cleanup()
1267 return -EINVAL; in scmi_xfer_raw_collect()
1270 msg_size = xfer->rx.len + sizeof(u32); in scmi_xfer_raw_collect()
1272 if (xfer->hdr.type != MSG_TYPE_NOTIFICATION) in scmi_xfer_raw_collect()
1276 return -ENOSPC; in scmi_xfer_raw_collect()
1279 *m = cpu_to_le32(pack_scmi_header(&xfer->hdr)); in scmi_xfer_raw_collect()
1280 if (xfer->hdr.type != MSG_TYPE_NOTIFICATION) in scmi_xfer_raw_collect()
1281 *++m = cpu_to_le32(xfer->hdr.status); in scmi_xfer_raw_collect()
1283 memcpy(++m, xfer->rx.buf, xfer->rx.len); in scmi_xfer_raw_collect()
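
Given the layout produced above - a little-endian header, a status word for anything that is not a notification, then the raw payload - a reader can decode any message pulled from the queues as in this sketch (the type-field decoding follows the SCMI spec, where type 3 marks a notification):

	#include <endian.h>	/* le32toh(): assumes a glibc-style endian.h */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static void decode(const uint8_t *buf, size_t len)
	{
		uint32_t hdr, status = 0;
		size_t off = sizeof(hdr);

		memcpy(&hdr, buf, sizeof(hdr));
		hdr = le32toh(hdr);

		/* Replies carry a status word; notifications (type 3) do not */
		if (((hdr >> 8) & 0x3) != 3) {
			memcpy(&status, buf + off, sizeof(status));
			off += sizeof(status);
		}

		printf("HDR:0x%08X status:%d payload:%zu bytes\n",
		       hdr, (int32_t)le32toh(status), len - off);
	}
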
1291 * scmi_raw_message_report - Helper to report back valid responses/notifications
1320 dev = raw->handle->dev; in scmi_raw_message_report()
1325 "RAW[%d] - NO queue for chan 0x%X. Dropping report.\n", in scmi_raw_message_report()
1339 spin_lock_irqsave(&q->msg_q_lock, flags); in scmi_raw_message_report()
1349 spin_unlock_irqrestore(&q->msg_q_lock, flags); in scmi_raw_message_report()
1351 "RAW[%d] - Buffers exhausted. Dropping report.\n", in scmi_raw_message_report()
1369 spin_unlock_irqrestore(&q->msg_q_lock, flags); in scmi_raw_message_report()
1374 rb->msg.len = rb->max_len; in scmi_raw_message_report()
1377 "RAW[%d] - Buffers exhausted. Re-using oldest.\n", in scmi_raw_message_report()
1380 spin_unlock_irqrestore(&q->msg_q_lock, flags); in scmi_raw_message_report()
1382 ret = scmi_xfer_raw_collect(rb->msg.buf, &rb->msg.len, xfer); in scmi_raw_message_report()
1384 dev_warn(dev, "RAW - Cannot collect xfer into buffer!\n"); in scmi_raw_message_report()
1397 unpack_scmi_header(msg_hdr, &xfer->hdr); in scmi_xfer_raw_fill()
1398 xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr); in scmi_xfer_raw_fill()
1400 memset(xfer->rx.buf, 0x00, xfer->rx.len); in scmi_xfer_raw_fill()
1402 raw->desc->ops->fetch_response(cinfo, xfer); in scmi_xfer_raw_fill()
1406 * scmi_raw_error_report - Helper to report back timed-out or generally
1432 xfer.rx.len = raw->desc->max_msg_size; in scmi_raw_error_report()
1435 dev_info(raw->handle->dev, in scmi_raw_error_report()
1436 "Cannot report Raw error for HDR:0x%X - ENOMEM\n", in scmi_raw_error_report()
1441 /* Any transport-provided priv must be passed back down to transport */ in scmi_raw_error_report()