xref: /aosp_15_r20/external/wmediumd/wmediumd/lib/schedctrl.c (revision 621120a22a0cd8ba80b131fe8bcb37c86ff453e3)
1 /*
2  * Copyright (C) 2020 Intel Corporation
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 #include <usfstl/uds.h>
7 #include <usfstl/schedctrl.h>
8 #include <linux/um_timetravel.h>
9 #include "internal.h"
10 #include <stdio.h>
11 #include <stdlib.h>
12 
_usfstl_sched_ctrl_send_msg(struct usfstl_sched_ctrl * ctrl,enum um_timetravel_ops op,uint64_t time,uint32_t seq)13 static void _usfstl_sched_ctrl_send_msg(struct usfstl_sched_ctrl *ctrl,
14 					enum um_timetravel_ops op,
15 					uint64_t time, uint32_t seq)
16 {
17 	struct um_timetravel_msg msg = {
18 		.op = op,
19 		.seq = seq,
20 		.time = time,
21 	};
22 
23 	USFSTL_ASSERT_EQ((int)write(ctrl->fd, &msg, sizeof(msg)),
24 			 (int)sizeof(msg), "%d");
25 }
26 
/*
 * Read handler for the time-travel control socket.
 *
 * Invoked by the event loop when the control fd is readable (and also
 * called directly while waiting for a WAIT ACK, see the race note in
 * usfstl_sched_ctrl_send_msg). Reads exactly one message and dispatches
 * on its opcode:
 *  - ACK: record the acknowledgement if the sequence matches the one
 *    we are currently waiting for (stale ACKs are ignored); no reply.
 *  - RUN: the controller grants us run time; convert the absolute
 *    nanosecond time to local ticks and clear the waiting flag.
 *  - FREE_UNTIL: update the horizon we may free-run to.
 * RUN and FREE_UNTIL are ACKed back to the controller; the remaining
 * opcodes are requests only a client sends, so receiving one is a
 * protocol violation.
 */
static void usfstl_sched_ctrl_sock_read(int fd, void *data)
{
	struct usfstl_sched_ctrl *ctrl = data;
	struct um_timetravel_msg msg;
	int sz = read(fd, &msg, sizeof(msg));
	uint64_t time;

	/* messages must arrive whole - a short read is fatal */
	USFSTL_ASSERT_EQ(sz, (int)sizeof(msg), "%d");

	switch (msg.op) {
	case UM_TIMETRAVEL_ACK:
		if (msg.seq == ctrl->expected_ack_seq) {
			ctrl->acked = 1;
			ctrl->ack_time = msg.time;
		}
		return;
	case UM_TIMETRAVEL_RUN:
		/* round up so we never start before the granted time */
		time = DIV_ROUND_UP(msg.time - ctrl->offset,
				    ctrl->nsec_per_tick);
		usfstl_sched_set_time(ctrl->sched, time);
		ctrl->waiting = 0;
		break;
	case UM_TIMETRAVEL_FREE_UNTIL:
		/* round down here, so we don't overshoot */
		time = (msg.time - ctrl->offset) / ctrl->nsec_per_tick;
		usfstl_sched_set_sync_time(ctrl->sched, time);
		break;
	case UM_TIMETRAVEL_START:
	case UM_TIMETRAVEL_REQUEST:
	case UM_TIMETRAVEL_WAIT:
	case UM_TIMETRAVEL_GET:
	case UM_TIMETRAVEL_UPDATE:
	case UM_TIMETRAVEL_GET_TOD:
		/* client-to-controller opcodes; we must never receive them */
		USFSTL_ASSERT(0);
		return;
	}

	_usfstl_sched_ctrl_send_msg(ctrl, UM_TIMETRAVEL_ACK, 0, msg.seq);
}
66 
/*
 * Send a message to the controller and synchronously wait for its ACK.
 *
 * Each message is tagged with a file-global sequence number; zero is
 * skipped so a zeroed message can never match. The previously expected
 * sequence is saved and restored around the wait, which supports the
 * nested sends that can occur while usfstl_loop_wait_and_handle()
 * dispatches unrelated events.
 * NOTE(review): seq and old_expected are function-static, so this code
 * assumes single-threaded use - confirm there are no concurrent callers.
 *
 * For UM_TIMETRAVEL_GET the ACK carries the controller's current time:
 * when frozen we only re-derive our offset (local time stays put),
 * otherwise we adopt the controller's time rounded up to a full tick.
 */
static void usfstl_sched_ctrl_send_msg(struct usfstl_sched_ctrl *ctrl,
				       enum um_timetravel_ops op,
				       uint64_t time)
{
	static uint32_t seq, old_expected;

	/* never use seq == 0, it could not be told apart from a zeroed msg */
	do {
		seq++;
	} while (seq == 0);

	_usfstl_sched_ctrl_send_msg(ctrl, op, time, seq);
	old_expected = ctrl->expected_ack_seq;
	ctrl->expected_ack_seq = seq;

	USFSTL_ASSERT_EQ((int)ctrl->acked, 0, "%d");

	/*
	 * Race alert!
	 *
	 * UM_TIMETRAVEL_WAIT basically passes the run "token" to the
	 * controller, which passes it to another participant of the
	 * simulation. This other participant might immediately send
	 * us another message on a different channel, e.g. if this
	 * code is used in a vhost-user device.
	 *
	 * If here we were to use use usfstl_loop_wait_and_handle(),
	 * we could actually get and process the vhost-user message
	 * before the ACK for the WAIT message here, depending on the
	 * (host) kernel's message ordering and select() handling etc.
	 *
	 * To avoid this, directly read the ACK message for the WAIT,
	 * without handling any other sockets (first).
	 */
	if (op == UM_TIMETRAVEL_WAIT) {
		usfstl_sched_ctrl_sock_read(ctrl->fd, ctrl);
		USFSTL_ASSERT(ctrl->acked);
	}

	/* for all other ops, let the event loop deliver the ACK */
	while (!ctrl->acked)
		usfstl_loop_wait_and_handle();
	ctrl->acked = 0;
	ctrl->expected_ack_seq = old_expected;

	if (op == UM_TIMETRAVEL_GET) {
		if (ctrl->frozen) {
			uint64_t local;

			/* keep local time fixed; fold the delta into offset */
			local = ctrl->sched->current_time * ctrl->nsec_per_tick;
			ctrl->offset = ctrl->ack_time - local;
		} else {
			uint64_t time;

			/* adopt controller time, rounded up to a whole tick */
			time = DIV_ROUND_UP(ctrl->ack_time - ctrl->offset,
					    ctrl->nsec_per_tick);
			usfstl_sched_set_time(ctrl->sched, time);
		}
	}
}
125 
usfstl_sched_ctrl_request(struct usfstl_scheduler * sched,uint64_t time)126 static void usfstl_sched_ctrl_request(struct usfstl_scheduler *sched, uint64_t time)
127 {
128 	struct usfstl_sched_ctrl *ctrl = sched->ext.ctrl;
129 
130 	if (!ctrl->started)
131 		return;
132 
133 	usfstl_sched_ctrl_send_msg(ctrl, UM_TIMETRAVEL_REQUEST,
134 				   time * ctrl->nsec_per_tick + ctrl->offset);
135 }
136 
usfstl_sched_ctrl_wait(struct usfstl_scheduler * sched)137 static void usfstl_sched_ctrl_wait(struct usfstl_scheduler *sched)
138 {
139 	struct usfstl_sched_ctrl *ctrl = sched->ext.ctrl;
140 
141 	ctrl->waiting = 1;
142 	usfstl_sched_ctrl_send_msg(ctrl, UM_TIMETRAVEL_WAIT, -1);
143 
144 	while (ctrl->waiting)
145 		usfstl_loop_wait_and_handle();
146 }
147 
148 #define JOB_ASSERT_VAL(j) (j) ? (j)->name : "<NULL>"
149 
/*
 * Connect to the time-travel controller socket and attach the control
 * state to the given scheduler.
 *
 * Preconditions (asserted): ctrl is fresh, the scheduler has no ctrl
 * attached, no pending jobs, and no external hooks installed yet.
 * Installs the request/wait hooks, connects the UDS socket, performs
 * the START handshake with client_id, and - since the handshake runs
 * the event loop, during which handlers may queue work - requests run
 * time for any job that is pending by then.
 */
void usfstl_sched_ctrl_start(struct usfstl_sched_ctrl *ctrl,
			     const char *socket,
			     uint32_t nsec_per_tick,
			     uint64_t client_id,
			     struct usfstl_scheduler *sched)
{
	struct usfstl_job *job;

	USFSTL_ASSERT_EQ(ctrl->sched, NULL, "%p");
	USFSTL_ASSERT_EQ(sched->ext.ctrl, NULL, "%p");

	memset(ctrl, 0, sizeof(*ctrl));

	/*
	 * The remote side assumes we start at 0, so if we don't have 0 right
	 * now keep the difference in our own offset (in nsec).
	 */
	ctrl->offset = -sched->current_time * nsec_per_tick;

	ctrl->nsec_per_tick = nsec_per_tick;
	ctrl->sched = sched;
	sched->ext.ctrl = ctrl;

	USFSTL_ASSERT_EQ(usfstl_sched_next_pending(sched, NULL),
			 (struct usfstl_job *)NULL, "%s", JOB_ASSERT_VAL);
	USFSTL_ASSERT_EQ(sched->external_request, NULL, "%p");
	USFSTL_ASSERT_EQ(sched->external_wait, NULL, "%p");

	sched->external_request = usfstl_sched_ctrl_request;
	sched->external_wait = usfstl_sched_ctrl_wait;

	ctrl->fd = usfstl_uds_connect(socket, usfstl_sched_ctrl_sock_read,
				      ctrl);

	/* tell the other side we're starting  */
	usfstl_sched_ctrl_send_msg(ctrl, UM_TIMETRAVEL_START, client_id);
	ctrl->started = 1;

	/* if we have a job already, request it */
	job = usfstl_sched_next_pending(sched, NULL);
	if (job)
		/*
		 * NOTE(review): unlike usfstl_sched_ctrl_request() this does
		 * not add ctrl->offset - presumably fine because offset was
		 * derived from current_time just above, but worth confirming.
		 */
		usfstl_sched_ctrl_send_msg(ctrl, UM_TIMETRAVEL_REQUEST,
					   job->start * nsec_per_tick);

	/*
	 * At this point, we're allowed to do further setup work and can
	 * request schedule time etc. but must eventually start scheduling
	 * the linked scheduler - the remote side is blocked until we do.
	 */
}
200 
usfstl_sched_ctrl_sync_to(struct usfstl_sched_ctrl * ctrl)201 void usfstl_sched_ctrl_sync_to(struct usfstl_sched_ctrl *ctrl)
202 {
203 	uint64_t time;
204 
205 	USFSTL_ASSERT(ctrl->started, "cannot sync to scheduler until started");
206 
207 	time = usfstl_sched_current_time(ctrl->sched) * ctrl->nsec_per_tick;
208 	time += ctrl->offset;
209 
210 	usfstl_sched_ctrl_send_msg(ctrl, UM_TIMETRAVEL_UPDATE, time);
211 }
212 
usfstl_sched_ctrl_sync_from(struct usfstl_sched_ctrl * ctrl)213 void usfstl_sched_ctrl_sync_from(struct usfstl_sched_ctrl *ctrl)
214 {
215 	if (!ctrl->started)
216 		return;
217 	usfstl_sched_ctrl_send_msg(ctrl, UM_TIMETRAVEL_GET, -1);
218 }
219 
usfstl_sched_ctrl_stop(struct usfstl_sched_ctrl * ctrl)220 void usfstl_sched_ctrl_stop(struct usfstl_sched_ctrl *ctrl)
221 {
222 	USFSTL_ASSERT_EQ(ctrl, ctrl->sched->ext.ctrl, "%p");
223 	usfstl_sched_ctrl_send_msg(ctrl, UM_TIMETRAVEL_WAIT, -1);
224 	usfstl_uds_disconnect(ctrl->fd);
225 	ctrl->sched->ext.ctrl = NULL;
226 	ctrl->sched->external_request = NULL;
227 	ctrl->sched->external_wait = NULL;
228 	ctrl->sched = NULL;
229 }
230 
usfstl_sched_ctrl_set_frozen(struct usfstl_sched_ctrl * ctrl,bool frozen)231 void usfstl_sched_ctrl_set_frozen(struct usfstl_sched_ctrl *ctrl, bool frozen)
232 {
233 	ctrl->frozen = frozen;
234 }
235