/*
 * Copyright 2020 Google
 * SPDX-License-Identifier: MIT
 */
#include "AddressSpaceStream.h"

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "VirtGpu.h"
#include "util/log.h"
#include "util/perf/cpu_trace.h"
#include "virtgpu_gfxstream_protocol.h"

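// Staging buffer size for host-to-guest reads; readFully() and speculativeRead()
// pull at most this many bytes at a time into m_readBuf. kWriteOffset mirrors it
// but is not referenced elsewhere in this file.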
static const size_t kReadSize = 512 * 1024;
static const size_t kWriteOffset = kReadSize;

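// Guest-side IOStream backed by an address space graphics (ASG) ring.
// `context` provides the shared ring state (the to_host ring, the
// to_host/from_host large-transfer rings, host_state, and ring_config) plus a
// write buffer that is handed out in flush_interval-sized steps starting at
// m_writeStart. Note that m_writeBufferMask (buffer_size - 1) is only a valid
// wrap mask when buffer_size is a power of two.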
AddressSpaceStream::AddressSpaceStream(address_space_handle_t handle, uint32_t version,
                                       struct asg_context context, uint64_t ringOffset,
                                       uint64_t writeBufferOffset, struct address_space_ops ops)
    : IOStream(context.ring_config->flush_interval),
      m_ops(ops),
      m_tmpBuf(0),
      m_tmpBufSize(0),
      m_tmpBufXferSize(0),
      m_usingTmpBuf(0),
      m_readBuf(0),
      m_read(0),
      m_readLeft(0),
      m_handle(handle),
      m_version(version),
      m_context(context),
      m_ringOffset(ringOffset),
      m_writeBufferOffset(writeBufferOffset),
      m_writeBufferSize(context.ring_config->buffer_size),
      m_writeBufferMask(m_writeBufferSize - 1),
      m_buf((unsigned char*)context.buffer),
      m_writeStart(m_buf),
      m_writeStep(context.ring_config->flush_interval),
      m_notifs(0),
      m_written(0),
      m_backoffIters(0),
      m_backoffFactor(1),
      m_ringStorageSize(sizeof(struct asg_ring_storage) + m_writeBufferSize) {
    // We'll use this in the future, but at the moment,
    // it's a potential compile Werror.
    (void)m_ringStorageSize;
    (void)m_version;
}

AddressSpaceStream::~AddressSpaceStream() {
    flush();
    ensureType3Finished();
    ensureType1Finished();

    if (!m_mapping) {
        m_ops.unmap(m_context.to_host, sizeof(struct asg_ring_storage));
        m_ops.unmap(m_context.buffer, m_writeBufferSize);
        m_ops.unclaim_shared(m_handle, m_ringOffset);
        m_ops.unclaim_shared(m_handle, m_writeBufferOffset);
    }

    m_ops.close(m_handle);
    if (m_readBuf) free(m_readBuf);
    if (m_tmpBuf) free(m_tmpBuf);
}

size_t AddressSpaceStream::idealAllocSize(size_t len) {
    if (len > m_writeStep) return len;
    return m_writeStep;
}

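// Returns a buffer for the encoder to fill. Requests that fit within one
// flush-interval step are served directly from the shared write buffer at
// m_writeStart; larger requests fall back to a heap-allocated temporary buffer
// that is later sent with writeFully() as a type 3 (large) transfer. Also
// lazily allocates the read staging buffer.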
void* AddressSpaceStream::allocBuffer(size_t minSize) {
    MESA_TRACE_SCOPE("allocBuffer");
    ensureType3Finished();

    if (!m_readBuf) {
        m_readBuf = (unsigned char*)malloc(kReadSize);
    }

    size_t allocSize =
        (m_writeStep < minSize ? minSize : m_writeStep);

    if (m_writeStep < allocSize) {
        if (!m_tmpBuf) {
            m_tmpBufSize = allocSize * 2;
            m_tmpBuf = (unsigned char*)malloc(m_tmpBufSize);
        }

        if (m_tmpBufSize < allocSize) {
            m_tmpBufSize = allocSize * 2;
            m_tmpBuf = (unsigned char*)realloc(m_tmpBuf, m_tmpBufSize);
        }

        if (!m_usingTmpBuf) {
            flush();
        }

        m_usingTmpBuf = true;
        m_tmpBufXferSize = allocSize;
        return m_tmpBuf;
    } else {
        if (m_usingTmpBuf) {
            writeFully(m_tmpBuf, m_tmpBufXferSize);
            m_usingTmpBuf = false;
            m_tmpBufXferSize = 0;
        }

        return m_writeStart;
    }
}

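// Commits `size` bytes previously staged by allocBuffer(): temporary-buffer
// commits go through writeFully(), while in-ring commits post a type 1
// descriptor for the current step and advance the write cursor.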
int AddressSpaceStream::commitBuffer(size_t size)
{
    if (size == 0) return 0;

    if (m_usingTmpBuf) {
        writeFully(m_tmpBuf, size);
        m_tmpBufXferSize = 0;
        m_usingTmpBuf = false;
        return 0;
    } else {
        int res = type1Write(m_writeStart - m_buf, size);
        advanceWrite();
        return res;
    }
}

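// Blocking read of exactly `totalReadSize` bytes into `ptr`. Any data left over
// in m_readBuf from a previous read is consumed first; the buffer is then
// refilled from the host via speculativeRead() until the request is satisfied.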
const unsigned char *AddressSpaceStream::readFully(void *ptr, size_t totalReadSize)
{

    unsigned char* userReadBuf = static_cast<unsigned char*>(ptr);

    if (!userReadBuf) {
        if (totalReadSize > 0) {
            mesa_loge(
                "AddressSpaceStream::commitBufferAndReadFully failed, userReadBuf=NULL, "
                "totalReadSize %zu, lethal"
                " error, exiting.",
                totalReadSize);
            abort();
        }
        return nullptr;
    }

    // Advance buffered read if not yet consumed.
    size_t remaining = totalReadSize;
    size_t bufferedReadSize =
        m_readLeft < remaining ? m_readLeft : remaining;

    if (bufferedReadSize) {
        memcpy(userReadBuf,
               m_readBuf + (m_read - m_readLeft),
               bufferedReadSize);
        remaining -= bufferedReadSize;
        m_readLeft -= bufferedReadSize;
    }

    if (!remaining) return userReadBuf;

    // Read up to kReadSize bytes if all buffered read has been consumed.
    size_t maxRead = m_readLeft ? 0 : kReadSize;
    ssize_t actual = 0;

    if (maxRead) {
        actual = speculativeRead(m_readBuf, maxRead);

        // Update the buffered read size.
        if (actual > 0) {
            m_read = m_readLeft = actual;
        }

        if (actual == 0) {
            mesa_logd("%s: end of pipe", __FUNCTION__);
            return NULL;
        }
    }

    // Consume buffered read and read more if necessary.
    while (remaining) {
        bufferedReadSize = m_readLeft < remaining ? m_readLeft : remaining;
        if (bufferedReadSize) {
            memcpy(userReadBuf + (totalReadSize - remaining),
                   m_readBuf + (m_read - m_readLeft),
                   bufferedReadSize);
            remaining -= bufferedReadSize;
            m_readLeft -= bufferedReadSize;
            continue;
        }

        actual = speculativeRead(m_readBuf, kReadSize);

        if (actual == 0) {
            mesa_logd("%s: Failed reading from pipe: %d", __FUNCTION__, errno);
            return NULL;
        }

        if (actual > 0) {
            m_read = m_readLeft = actual;
            continue;
        }
    }

    resetBackoff();
    return userReadBuf;
}

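// Single read of up to *inout_len bytes; the number of bytes actually read is
// stored back into *inout_len.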
const unsigned char *AddressSpaceStream::read(void *buf, size_t *inout_len) {
    unsigned char* dst = (unsigned char*)buf;
    size_t wanted = *inout_len;
    ssize_t actual = speculativeRead(dst, wanted);

    if (actual >= 0) {
        *inout_len = actual;
    } else {
        return nullptr;
    }

    return (const unsigned char*)dst;
}

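// Sends `size` bytes as a type 3 (large) transfer: switches ring_config to
// transfer mode 3, streams the payload through the to_host large-transfer ring
// in chunks, pings the host whenever it does not appear to be consuming or
// rendering, then waits for the ring to drain before restoring transfer mode 1.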
int AddressSpaceStream::writeFully(const void* buf, size_t size) {
    MESA_TRACE_SCOPE("writeFully");
    ensureType3Finished();
    ensureType1Finished();

    m_context.ring_config->transfer_size = size;
    m_context.ring_config->transfer_mode = 3;

    size_t sent = 0;
    size_t preferredChunkSize = m_writeBufferSize / 4;
    size_t chunkSize = size < preferredChunkSize ? size : preferredChunkSize;
    const uint8_t* bufferBytes = (const uint8_t*)buf;

    bool hostPinged = false;
    while (sent < size) {
        size_t remaining = size - sent;
        size_t sendThisTime = remaining < chunkSize ? remaining : chunkSize;

        long sentChunks =
            ring_buffer_view_write(
                m_context.to_host_large_xfer.ring,
                &m_context.to_host_large_xfer.view,
                bufferBytes + sent, sendThisTime, 1);

        if (!hostPinged && *(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
            notifyAvailable();
            hostPinged = true;
        }

        if (sentChunks == 0) {
            ring_buffer_yield();
            backoff();
        }

        sent += sentChunks * sendThisTime;

        if (isInError()) {
            return -1;
        }
    }

    bool isRenderingAfter =
        ASG_HOST_STATE_RENDERING == __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);

    if (!isRenderingAfter) {
        notifyAvailable();
    }

    ensureType3Finished();

    resetBackoff();
    m_context.ring_config->transfer_mode = 1;
    m_written += size;

    float mb = (float)m_written / 1048576.0f;
    if (mb > 100.0f) {
        mesa_logd("%s: %f mb in %d notifs. %f mb/notif\n", __func__, mb, m_notifs,
                  m_notifs ? mb / (float)m_notifs : 0.0f);
        m_notifs = 0;
        m_written = 0;
    }
    return 0;
}

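// Same type 3 path as writeFully(), but uses larger chunks and does not wait
// for the host to drain the large-transfer ring before returning.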
int AddressSpaceStream::writeFullyAsync(const void* buf, size_t size) {
    MESA_TRACE_SCOPE("writeFullyAsync");
    ensureType3Finished();
    ensureType1Finished();

    __atomic_store_n(&m_context.ring_config->transfer_size, size, __ATOMIC_RELEASE);
    m_context.ring_config->transfer_mode = 3;

    size_t sent = 0;
    size_t preferredChunkSize = m_writeBufferSize / 2;
    size_t chunkSize = size < preferredChunkSize ? size : preferredChunkSize;
    const uint8_t* bufferBytes = (const uint8_t*)buf;

    bool pingedHost = false;

    while (sent < size) {
        size_t remaining = size - sent;
        size_t sendThisTime = remaining < chunkSize ? remaining : chunkSize;

        long sentChunks =
            ring_buffer_view_write(
                m_context.to_host_large_xfer.ring,
                &m_context.to_host_large_xfer.view,
                bufferBytes + sent, sendThisTime, 1);

        uint32_t hostState = __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);

        if (!pingedHost &&
            hostState != ASG_HOST_STATE_CAN_CONSUME &&
            hostState != ASG_HOST_STATE_RENDERING) {
            pingedHost = true;
            notifyAvailable();
        }

        if (sentChunks == 0) {
            ring_buffer_yield();
            backoff();
        }

        sent += sentChunks * sendThisTime;

        if (isInError()) {
            return -1;
        }
    }

    bool isRenderingAfter =
        ASG_HOST_STATE_RENDERING == __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);

    if (!isRenderingAfter) {
        notifyAvailable();
    }

    resetBackoff();
    m_context.ring_config->transfer_mode = 1;
    m_written += size;

    float mb = (float)m_written / 1048576.0f;
    if (mb > 100.0f) {
        mesa_logd("%s: %f mb in %d notifs. %f mb/notif\n", __func__, mb, m_notifs,
                  m_notifs ? mb / (float)m_notifs : 0.0f);
        m_notifs = 0;
        m_written = 0;
    }
    return 0;
}

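// Combined flush-and-read used for calls that return data: commits the pending
// write (temporary buffer or in-ring step), then blocks in readFully() for the
// reply.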
const unsigned char *AddressSpaceStream::commitBufferAndReadFully(
    size_t writeSize, void *userReadBufPtr, size_t totalReadSize) {

    if (m_usingTmpBuf) {
        writeFully(m_tmpBuf, writeSize);
        m_usingTmpBuf = false;
        m_tmpBufXferSize = 0;
        return readFully(userReadBufPtr, totalReadSize);
    } else {
        commitBuffer(writeSize);
        return readFully(userReadBufPtr, totalReadSize);
    }
}

bool AddressSpaceStream::isInError() const {
    return 1 == m_context.ring_config->in_error;
}

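// Blocks (yielding and backing off) until the from_host large-transfer ring has
// data, then copies at most `trySize` bytes into `readBuffer`. Returns the
// number of bytes read, or -1 if the ring is in an error state.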
ssize_t AddressSpaceStream::speculativeRead(unsigned char* readBuffer, size_t trySize) {
    ensureType3Finished();
    ensureType1Finished();

    size_t actuallyRead = 0;

    while (!actuallyRead) {

        uint32_t readAvail =
            ring_buffer_available_read(
                m_context.from_host_large_xfer.ring,
                &m_context.from_host_large_xfer.view);

        if (!readAvail) {
            ring_buffer_yield();
            backoff();
            continue;
        }

        uint32_t toRead = readAvail > trySize ? trySize : readAvail;

        long stepsRead = ring_buffer_view_read(
            m_context.from_host_large_xfer.ring,
            &m_context.from_host_large_xfer.view,
            readBuffer, toRead, 1);

        actuallyRead += stepsRead * toRead;

        if (isInError()) {
            return -1;
        }
    }

    return actuallyRead;
}

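// Pings the host through the address space device to signal that data is
// available on the guest side; m_notifs feeds the periodic throughput log.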
void AddressSpaceStream::notifyAvailable() {
    MESA_TRACE_SCOPE("PING");
    struct address_space_ping request;
    request.metadata = ASG_NOTIFY_AVAILABLE;
    request.resourceId = m_resourceId;
    m_ops.ping(m_handle, &request);
    ++m_notifs;
}

uint32_t AddressSpaceStream::getRelativeBufferPos(uint32_t pos) {
    return pos & m_writeBufferMask;
}

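// Moves the in-ring write cursor to the next flush-interval step, wrapping back
// to the start of the write buffer at the end.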
void AddressSpaceStream::advanceWrite() {
    m_writeStart += m_context.ring_config->flush_interval;

    if (m_writeStart == m_buf + m_context.ring_config->buffer_size) {
        m_writeStart = m_buf;
    }
}

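// Watches the to_host ring while it still holds unread type 1 data: returns as
// soon as the unread count changes, and pings the host once if it does not
// appear to be consuming or rendering in the meantime.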
void AddressSpaceStream::ensureConsumerFinishing() {
    uint32_t currAvailRead = ring_buffer_available_read(m_context.to_host, 0);

    while (currAvailRead) {
        ring_buffer_yield();
        uint32_t nextAvailRead = ring_buffer_available_read(m_context.to_host, 0);

        if (nextAvailRead != currAvailRead) {
            break;
        }

        if (*(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
            notifyAvailable();
            break;
        }

        backoff();
    }
}

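// Spins (with backoff) until the host has consumed everything in the to_host
// ring, i.e. all outstanding type 1 transfer descriptors have been read.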
void AddressSpaceStream::ensureType1Finished() {
    MESA_TRACE_SCOPE("ensureType1Finished");

    uint32_t currAvailRead =
        ring_buffer_available_read(m_context.to_host, 0);

    while (currAvailRead) {
        backoff();
        ring_buffer_yield();
        currAvailRead = ring_buffer_available_read(m_context.to_host, 0);
        if (isInError()) {
            return;
        }
    }
}

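// Spins (with backoff) until the to_host large-transfer ring is empty, pinging
// the host whenever it does not appear to be consuming or rendering.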
void AddressSpaceStream::ensureType3Finished() {
    MESA_TRACE_SCOPE("ensureType3Finished");
    uint32_t availReadLarge =
        ring_buffer_available_read(
            m_context.to_host_large_xfer.ring,
            &m_context.to_host_large_xfer.view);
    while (availReadLarge) {
        ring_buffer_yield();
        backoff();
        availReadLarge =
            ring_buffer_available_read(
                m_context.to_host_large_xfer.ring,
                &m_context.to_host_large_xfer.view);
        if (*(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
            notifyAvailable();
        }
        if (isInError()) {
            return;
        }
    }
}

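// Posts a type 1 transfer descriptor (offset into the shared write buffer plus
// length) onto the to_host ring. Throttles so that at most
// (buffer_size / flush_interval) - 1 descriptors are outstanding, pings the
// host if it does not appear to be consuming or rendering, and tracks bytes
// written for the periodic throughput log.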
int AddressSpaceStream::type1Write(uint32_t bufferOffset, size_t size) {
    MESA_TRACE_SCOPE("type1Write");

    ensureType3Finished();

    size_t sent = 0;
    size_t sizeForRing = sizeof(struct asg_type1_xfer);

    struct asg_type1_xfer xfer = {
        bufferOffset,
        (uint32_t)size,
    };

    uint8_t* writeBufferBytes = (uint8_t*)(&xfer);

    uint32_t maxOutstanding = 1;
    uint32_t maxSteps = m_context.ring_config->buffer_size /
            m_context.ring_config->flush_interval;

    if (maxSteps > 1) maxOutstanding = maxSteps - 1;

    uint32_t ringAvailReadNow = ring_buffer_available_read(m_context.to_host, 0);

    while (ringAvailReadNow >= maxOutstanding * sizeForRing) {
        ringAvailReadNow = ring_buffer_available_read(m_context.to_host, 0);
    }

    bool hostPinged = false;
    while (sent < sizeForRing) {

        long sentChunks = ring_buffer_write(
            m_context.to_host,
            writeBufferBytes + sent,
            sizeForRing - sent, 1);

        if (!hostPinged &&
            *(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
            notifyAvailable();
            hostPinged = true;
        }

        if (sentChunks == 0) {
            ring_buffer_yield();
            backoff();
        }

        sent += sentChunks * (sizeForRing - sent);

        if (isInError()) {
            return -1;
        }
    }

    bool isRenderingAfter =
        ASG_HOST_STATE_RENDERING == __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);

    if (!isRenderingAfter) {
        notifyAvailable();
    }

    m_written += size;

    float mb = (float)m_written / 1048576.0f;
    if (mb > 100.0f) {
        mesa_logd("%s: %f mb in %d notifs. %f mb/notif\n", __func__, mb, m_notifs,
                  m_notifs ? mb / (float)m_notifs : 0.0f);
        m_notifs = 0;
        m_written = 0;
    }

    resetBackoff();
    return 0;
}

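// Called from the wait loops above. After roughly 50 million spin iterations,
// each call sleeps for m_backoffFactor microseconds, doubling the factor (up to
// a 1000 us cap) for every further 50 million iterations without progress.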
void AddressSpaceStream::backoff() {
    static const uint32_t kBackoffItersThreshold = 50000000;
    static const uint32_t kBackoffFactorDoublingIncrement = 50000000;
    ++m_backoffIters;

    if (m_backoffIters > kBackoffItersThreshold) {
        usleep(m_backoffFactor);
        uint32_t itersSoFarAfterThreshold = m_backoffIters - kBackoffItersThreshold;
        if (itersSoFarAfterThreshold > kBackoffFactorDoublingIncrement) {
            m_backoffFactor = m_backoffFactor << 1;
            if (m_backoffFactor > 1000) m_backoffFactor = 1000;
            m_backoffIters = kBackoffItersThreshold;
        }
    }
}

void AddressSpaceStream::resetBackoff() {
    m_backoffIters = 0;
    m_backoffFactor = 1;
}