// Copyright 2019 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "host-common/address_space_graphics.h"

#include <memory>
#include <optional>

#include "aemu/base/AlignedBuf.h"
#include "aemu/base/SubAllocator.h"
#include "aemu/base/synchronization/Lock.h"
#include "host-common/GfxstreamFatalError.h"
#include "host-common/address_space_device.h"
#include "host-common/address_space_device.hpp"
#include "host-common/crash-handler.h"
#include "host-common/crash_reporter.h"
#include "host-common/globals.h"
#include "host-common/vm_operations.h"

#define ASGFX_DEBUG 0

#if ASGFX_DEBUG
#define ASGFX_LOG(fmt,...) printf("%s:%d " fmt "\n", __func__, __LINE__, ##__VA_ARGS__);
#else
#define ASGFX_LOG(fmt,...)
#endif

using android::base::AutoLock;
using android::base::Lock;
using android::base::SubAllocator;
using emugl::ABORT_REASON_OTHER;
using emugl::FatalError;

namespace android {
namespace emulation {
namespace asg {

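// Parameters for carving a new Allocation out of a Block, either from free
// space in an existing block or from a freshly created one.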
struct AllocationCreateInfo {
    bool virtioGpu;
    bool hostmemRegisterFixed;
    bool fromLoad;
    uint64_t size;
    uint64_t hostmemId;
    void* externalAddr;
    std::optional<uint32_t> dedicatedContextHandle;
};

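// A contiguous region of host memory that is sub-allocated to back rings and
// write buffers. A block is either carved out of the shared physical address
// space or wraps externally provided memory (the dedicated virtio-gpu path).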
struct Block {
    char* buffer = nullptr;
    uint64_t bufferSize = 0;
    SubAllocator* subAlloc = nullptr;
    uint64_t offsetIntoPhys = 0; // guest claimShared/mmap uses this
    bool isEmpty = true;
    std::optional<uint32_t> dedicatedContextHandle;
    bool usesVirtioGpuHostmem = false;
    uint64_t hostmemId = 0;
    bool external = false;
};

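// Process-wide state for address space graphics: tracks the blocks backing
// ring storage, write buffers, and combined (virtio-gpu dedicated)
// allocations, and dispenses/reclaims Allocations from them under one lock.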
class Globals {
public:
    Globals() :
        mPerContextBufferSize(
            aemu_get_android_hw()->hw_gltransport_asg_writeBufferSize) { }

    ~Globals() { clear(); }

    void initialize(const address_space_device_control_ops* ops) {
        AutoLock lock(mLock);

        if (mInitialized) return;

        mControlOps = ops;
        mInitialized = true;
    }

    void setConsumer(ConsumerInterface iface) {
        mConsumerInterface = iface;
    }

    ConsumerInterface getConsumerInterface() {
        if (!mConsumerInterface.create ||
            !mConsumerInterface.destroy ||
            !mConsumerInterface.preSave ||
            !mConsumerInterface.globalPreSave ||
            !mConsumerInterface.save ||
            !mConsumerInterface.globalPostSave ||
            !mConsumerInterface.postSave) {
            crashhandler_die("Consumer interface has not been set\n");
        }
        return mConsumerInterface;
    }

    const address_space_device_control_ops* controlOps() {
        return mControlOps;
    }

    void clear() {
        for (auto& block: mRingBlocks) {
            if (block.isEmpty) continue;
            destroyBlockLocked(block);
        }

        for (auto& block: mBufferBlocks) {
            if (block.isEmpty) continue;
            destroyBlockLocked(block);
        }

        for (auto& block: mCombinedBlocks) {
            if (block.isEmpty) continue;
            destroyBlockLocked(block);
        }

        mRingBlocks.clear();
        mBufferBlocks.clear();
        mCombinedBlocks.clear();
    }

    uint64_t perContextBufferSize() const {
        return mPerContextBufferSize;
    }

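    // Sub-allocates `create.size` bytes from the first existing block that
    // has room and a matching dedicated context handle; otherwise creates a
    // new block and allocates from it. Dies if the request exceeds the block
    // size or the fresh block cannot satisfy it.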
    Allocation newAllocation(struct AllocationCreateInfo& create,
                             std::vector<Block>& existingBlocks) {
        AutoLock lock(mLock);

        if (create.size > ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE) {
            crashhandler_die(
                "wanted size 0x%llx which is "
                "greater than block size 0x%llx",
                (unsigned long long)create.size,
                (unsigned long long)ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE);
        }

        Allocation res;

        size_t index = 0;
        for (index = 0; index < existingBlocks.size(); index++) {
            auto& block = existingBlocks[index];

            if (block.isEmpty) {
                fillBlockLocked(block, create);
            }

            if (block.dedicatedContextHandle != create.dedicatedContextHandle) {
                continue;
            }

            auto buf = block.subAlloc->alloc(create.size);
            if (buf) {
                res.buffer = (char*)buf;
                res.blockIndex = index;
                res.offsetIntoPhys =
                    block.offsetIntoPhys +
                    block.subAlloc->getOffset(buf);
                res.size = create.size;
                res.dedicatedContextHandle = create.dedicatedContextHandle;
                res.hostmemId = create.hostmemId;
                return res;
            } else {
                // block full
            }
        }

        Block newBlock;
        fillBlockLocked(newBlock, create);

        auto buf = newBlock.subAlloc->alloc(create.size);

        if (!buf) {
            crashhandler_die(
                "failed to allocate size 0x%llx "
                "(no free slots or out of host memory)",
                (unsigned long long)create.size);
        }

        existingBlocks.push_back(newBlock);

        res.buffer = (char*)buf;
        res.blockIndex = index;
        res.offsetIntoPhys =
            newBlock.offsetIntoPhys +
            newBlock.subAlloc->getOffset(buf);
        res.size = create.size;
        res.dedicatedContextHandle = create.dedicatedContextHandle;
        res.hostmemId = create.hostmemId;

        return res;
    }

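    // Returns an allocation's bytes to its block's sub-allocator; external
    // blocks are torn down immediately, others once they become empty.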
    void deleteAllocation(const Allocation& alloc, std::vector<Block>& existingBlocks) {
        if (!alloc.buffer) return;

        AutoLock lock(mLock);

        if (existingBlocks.size() <= alloc.blockIndex) {
            crashhandler_die(
                "should be a block at index %zu "
                "but it is not found", alloc.blockIndex);
        }

        auto& block = existingBlocks[alloc.blockIndex];

        if (block.external) {
            destroyBlockLocked(block);
            return;
        }

        if (!block.subAlloc->free(alloc.buffer)) {
            crashhandler_die(
                "failed to free %p (block start: %p)",
                alloc.buffer,
                block.buffer);
        }

        if (shouldDestroyBlockLocked(block)) {
            destroyBlockLocked(block);
        }
    }

    Allocation allocRingStorage() {
        struct AllocationCreateInfo create = {0};
        create.size = sizeof(struct asg_ring_storage);
        return newAllocation(create, mRingBlocks);
    }

    void freeRingStorage(const Allocation& alloc) {
        if (alloc.isView) return;
        deleteAllocation(alloc, mRingBlocks);
    }

    Allocation allocBuffer() {
        struct AllocationCreateInfo create = {0};
        create.size = mPerContextBufferSize;
        return newAllocation(create, mBufferBlocks);
    }

    void freeBuffer(const Allocation& alloc) {
        if (alloc.isView) return;
        deleteAllocation(alloc, mBufferBlocks);
    }

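    // Allocates one combined region holding both the ring storage and the
    // per-context write buffer, dedicated to a single virtio-gpu context.
    // If the caller supplies external memory, the block wraps it instead of
    // allocating new backing storage.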
    Allocation allocRingAndBufferStorageDedicated(const struct AddressSpaceCreateInfo& asgCreate) {
        if (!asgCreate.handle) {
            crashhandler_die("Dedicated ASG allocation requested without dedicated handle.\n");
        }

        struct AllocationCreateInfo create = {0};
        create.size = sizeof(struct asg_ring_storage) + mPerContextBufferSize;
        create.dedicatedContextHandle = asgCreate.handle;
        create.virtioGpu = true;
        if (asgCreate.externalAddr) {
            create.externalAddr = asgCreate.externalAddr;
            if (asgCreate.externalAddrSize < static_cast<uint64_t>(create.size)) {
                crashhandler_die("External address size too small\n");
            }
            create.size = asgCreate.externalAddrSize;
        }

        return newAllocation(create, mCombinedBlocks);
    }

    Allocation allocRingViewIntoCombined(const Allocation& alloc) {
        Allocation res = alloc;
        res.buffer = alloc.buffer;
        res.size = sizeof(struct asg_ring_storage);
        res.isView = true;
        return res;
    }

    Allocation allocBufferViewIntoCombined(const Allocation& alloc) {
        Allocation res = alloc;
        res.buffer = alloc.buffer + sizeof(asg_ring_storage);
        res.size = mPerContextBufferSize;
        res.isView = true;
        return res;
    }

    void freeRingAndBuffer(const Allocation& alloc) {
        deleteAllocation(alloc, mCombinedBlocks);
    }

    void preSave() {
        // mConsumerInterface.globalPreSave();
    }

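    // Global snapshot format: the three block counts (ring, buffer,
    // combined), followed by each block serialized via saveBlockLocked().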
    void save(base::Stream* stream) {
        stream->putBe64(mRingBlocks.size());
        stream->putBe64(mBufferBlocks.size());
        stream->putBe64(mCombinedBlocks.size());

        for (const auto& block: mRingBlocks) {
            saveBlockLocked(stream, block);
        }

        for (const auto& block: mBufferBlocks) {
            saveBlockLocked(stream, block);
        }

        for (const auto& block: mCombinedBlocks) {
            saveBlockLocked(stream, block);
        }
    }

    void postSave() {
        // mConsumerInterface.globalPostSave();
    }

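    // Rebuilds all blocks from a snapshot stream. Virtio GPU backed blocks
    // require their external memory to be re-provided via `resources`.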
    bool load(base::Stream* stream,
              const std::optional<AddressSpaceDeviceLoadResources>& resources) {
        clear();
        mConsumerInterface.globalPreLoad();

        uint64_t ringBlockCount = stream->getBe64();
        uint64_t bufferBlockCount = stream->getBe64();
        uint64_t combinedBlockCount = stream->getBe64();

        mRingBlocks.resize(ringBlockCount);
        mBufferBlocks.resize(bufferBlockCount);
        mCombinedBlocks.resize(combinedBlockCount);

        for (auto& block: mRingBlocks) {
            loadBlockLocked(stream, resources, block);
        }

        for (auto& block: mBufferBlocks) {
            loadBlockLocked(stream, resources, block);
        }

        for (auto& block: mCombinedBlocks) {
            loadBlockLocked(stream, resources, block);
        }

        return true;
    }

    // Assumes that blocks have been loaded,
    // and that alloc has its blockIndex/offsetIntoPhys fields filled already
    void fillAllocFromLoad(Allocation& alloc, AddressSpaceGraphicsContext::AllocType allocType) {
        switch (allocType) {
            case AddressSpaceGraphicsContext::AllocType::AllocTypeRing:
                if (mRingBlocks.size() <= alloc.blockIndex) return;
                fillAllocFromLoad(mRingBlocks[alloc.blockIndex], alloc);
                break;
            case AddressSpaceGraphicsContext::AllocType::AllocTypeBuffer:
                if (mBufferBlocks.size() <= alloc.blockIndex) return;
                fillAllocFromLoad(mBufferBlocks[alloc.blockIndex], alloc);
                break;
            case AddressSpaceGraphicsContext::AllocType::AllocTypeCombined:
                if (mCombinedBlocks.size() <= alloc.blockIndex) return;
                fillAllocFromLoad(mCombinedBlocks[alloc.blockIndex], alloc);
                break;
            default:
                GFXSTREAM_ABORT(FatalError(ABORT_REASON_OTHER));
                break;
        }
    }

private:

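    // Per-block serialization layout: a filled flag (0 terminates the entry),
    // then bufferSize, offsetIntoPhys, an optional dedicated context handle
    // (presence flag + value), the virtio-gpu hostmem flag, the hostmem id,
    // the sub-allocator state, and finally the raw buffer contents for
    // non-external blocks.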
    void saveBlockLocked(
        base::Stream* stream,
        const Block& block) {

        if (block.isEmpty) {
            stream->putBe32(0);
            return;
        } else {
            stream->putBe32(1);
        }

        stream->putBe64(block.bufferSize);
        stream->putBe64(block.offsetIntoPhys);
        if (block.dedicatedContextHandle) {
            stream->putBe32(1);
            stream->putBe32(*block.dedicatedContextHandle);
        } else {
            stream->putBe32(0);
        }
        stream->putBe32(block.usesVirtioGpuHostmem);
        stream->putBe64(block.hostmemId);
        block.subAlloc->save(stream);
        if (!block.external) {
            stream->write(block.buffer, block.bufferSize);
        }
    }

    void loadBlockLocked(base::Stream* stream,
                         const std::optional<AddressSpaceDeviceLoadResources>& resources,
                         Block& block) {
        uint32_t filled = stream->getBe32();
        struct AllocationCreateInfo create = {0};

        if (!filled) {
            block.isEmpty = true;
            return;
        } else {
            block.isEmpty = false;
        }

        create.size = stream->getBe64(); // `bufferSize`
        block.offsetIntoPhys = stream->getBe64();
        if (stream->getBe32() == 1) {
            create.dedicatedContextHandle = stream->getBe32();
        }
        create.virtioGpu = stream->getBe32();

        if (create.virtioGpu) {
            if (!create.dedicatedContextHandle) {
                crashhandler_die(
                    "Failed to load ASG context global block: "
                    "Virtio GPU backed blocks are expected to have a dedicated context.\n");
            }

            // Blocks whose memory is backed by a Virtio GPU resource do not own
            // the external memory. The external memory must be re-loaded outside
            // of ASG and provided via `resources`.
            if (!resources) {
                crashhandler_die(
                    "Failed to load ASG context global block: "
                    "Virtio GPU backed blocks need external memory resources for loading.\n");
            }

            const auto externalMemoryIt =
                resources->contextExternalMemoryMap.find(*create.dedicatedContextHandle);
            if (externalMemoryIt == resources->contextExternalMemoryMap.end()) {
                crashhandler_die(
                    "Failed to load ASG context global block: "
                    "Virtio GPU backed blocks need an external memory replacement.\n");
            }
            const auto& externalMemory = externalMemoryIt->second;
            create.externalAddr = externalMemory.externalAddress;
        }

        create.hostmemRegisterFixed = true;
        create.fromLoad = true;
        create.hostmemId = stream->getBe64();

        fillBlockLocked(block, create);

        block.subAlloc->load(stream);

        if (!block.external) {
            stream->read(block.buffer, block.bufferSize);
        }
    }

    void fillAllocFromLoad(const Block& block, Allocation& alloc) {
        alloc.buffer = block.buffer + (alloc.offsetIntoPhys - block.offsetIntoPhys);
        alloc.dedicatedContextHandle = block.dedicatedContextHandle;
        alloc.hostmemId = block.hostmemId;
    }

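    // Initializes a block either by wrapping externally provided memory
    // (the dedicated virtio-gpu path) or by reserving a region of the shared
    // physical address space and mapping freshly allocated host memory into
    // it.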
    void fillBlockLocked(Block& block, struct AllocationCreateInfo& create) {
        if (create.dedicatedContextHandle) {
            if (!create.virtioGpu) {
                crashhandler_die("Cannot use dedicated allocation without virtio-gpu hostmem id");
            }

            if (!create.externalAddr) {
                crashhandler_die(
                    "Cannot use dedicated allocation without an external address");
            }

            block.external = true;
            block.buffer = (char*)create.externalAddr;
            block.bufferSize = create.size;
            block.subAlloc =
                new SubAllocator(block.buffer, block.bufferSize, ADDRESS_SPACE_GRAPHICS_PAGE_SIZE);
            block.offsetIntoPhys = 0;
            block.isEmpty = false;
            block.usesVirtioGpuHostmem = create.virtioGpu;
            block.hostmemId = create.hostmemId;
            block.dedicatedContextHandle = create.dedicatedContextHandle;
        } else {
            if (create.virtioGpu) {
                crashhandler_die(
                    "Only dedicated allocation allowed in virtio-gpu hostmem id path");
            } else {
                uint64_t offsetIntoPhys;
                int allocRes = 0;

                if (create.fromLoad) {
                    offsetIntoPhys = block.offsetIntoPhys;
                    allocRes = get_address_space_device_hw_funcs()->
                        allocSharedHostRegionFixedLocked(
                            ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE, offsetIntoPhys);
                    if (allocRes) {
                        // Disregard alloc failures for now. This is because when it fails,
                        // we can assume the correct allocation already exists there (tested)
                    }
                } else {
                    allocRes = get_address_space_device_hw_funcs()->
                        allocSharedHostRegionLocked(
                            ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE, &offsetIntoPhys);

                    if (allocRes) {
                        crashhandler_die(
                            "Failed to allocate physical address graphics backing memory.");
                    }
                }

                void* buf =
                    aligned_buf_alloc(
                        ADDRESS_SPACE_GRAPHICS_PAGE_SIZE,
                        ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE);

                mControlOps->add_memory_mapping(
                    get_address_space_device_hw_funcs()->getPhysAddrStartLocked() +
                        offsetIntoPhys, buf,
                    ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE);

                block.buffer = (char*)buf;
                block.bufferSize = ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE;
                block.subAlloc =
                    new SubAllocator(
                        buf, ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE,
                        ADDRESS_SPACE_GRAPHICS_PAGE_SIZE);
                block.offsetIntoPhys = offsetIntoPhys;
                block.isEmpty = false;
            }
        }
    }

    void destroyBlockLocked(Block& block) {

        if (block.usesVirtioGpuHostmem && !block.external) {
            mControlOps->hostmem_unregister(block.hostmemId);
        } else if (!block.external) {
            mControlOps->remove_memory_mapping(
                get_address_space_device_hw_funcs()->getPhysAddrStartLocked() +
                    block.offsetIntoPhys,
                block.buffer,
                ADDRESS_SPACE_GRAPHICS_BLOCK_SIZE);

            get_address_space_device_hw_funcs()->freeSharedHostRegionLocked(
                block.offsetIntoPhys);
        }

        delete block.subAlloc;
        if (!block.external) {
            aligned_buf_free(block.buffer);
        }

        block.isEmpty = true;
    }

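    // A block can be reclaimed once its sub-allocator has no live allocations.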
    bool shouldDestroyBlockLocked(const Block& block) const {
        return block.subAlloc->empty();
    }

    Lock mLock;
    uint64_t mPerContextBufferSize;
    bool mInitialized = false;
    const address_space_device_control_ops* mControlOps = nullptr;
    ConsumerInterface mConsumerInterface;
    std::vector<Block> mRingBlocks;
    std::vector<Block> mBufferBlocks;
    std::vector<Block> mCombinedBlocks;
};

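// Leaked singleton: the Globals instance is never deleted, presumably so the
// global allocator state stays valid regardless of static destruction order.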
static Globals* sGlobals() {
    static Globals* g = new Globals;
    return g;
}

// static
void AddressSpaceGraphicsContext::init(const address_space_device_control_ops* ops) {
    sGlobals()->initialize(ops);
}

// static
void AddressSpaceGraphicsContext::clear() {
    sGlobals()->clear();
}

// static
void AddressSpaceGraphicsContext::setConsumer(
    ConsumerInterface iface) {
    sGlobals()->setConsumer(iface);
}

AddressSpaceGraphicsContext::AddressSpaceGraphicsContext(
    const struct AddressSpaceCreateInfo& create)
    : mConsumerCallbacks((ConsumerCallbacks){
          [this] { return onUnavailableRead(); },
          [](uint64_t physAddr) { return (char*)sGlobals()->controlOps()->get_host_ptr(physAddr); },
      }),
      mConsumerInterface(sGlobals()->getConsumerInterface()) {
    if (create.fromSnapshot) {
        // Use load() instead to initialize
        return;
    }

    const bool isVirtio = (create.type == AddressSpaceDeviceType::VirtioGpuGraphics);
    if (isVirtio) {
        VirtioGpuInfo& info = mVirtioGpuInfo.emplace();
        info.contextId = create.virtioGpuContextId;
        info.capsetId = create.virtioGpuCapsetId;
        if (create.contextNameSize) {
            info.name = std::string(create.contextName, create.contextNameSize);
        }

        mCombinedAllocation = sGlobals()->allocRingAndBufferStorageDedicated(create);
        mRingAllocation = sGlobals()->allocRingViewIntoCombined(mCombinedAllocation);
        mBufferAllocation = sGlobals()->allocBufferViewIntoCombined(mCombinedAllocation);
    } else {
        mRingAllocation = sGlobals()->allocRingStorage();
        mBufferAllocation = sGlobals()->allocBuffer();
    }

    if (!mRingAllocation.buffer) {
        crashhandler_die(
            "Failed to allocate ring for ASG context");
    }

    if (!mBufferAllocation.buffer) {
        crashhandler_die(
            "Failed to allocate buffer for ASG context");
    }

    mHostContext = asg_context_create(
        mRingAllocation.buffer,
        mBufferAllocation.buffer,
        sGlobals()->perContextBufferSize());
    mHostContext.ring_config->buffer_size =
        sGlobals()->perContextBufferSize();
    mHostContext.ring_config->flush_interval =
        aemu_get_android_hw()->hw_gltransport_asg_writeStepSize;
    mHostContext.ring_config->host_consumed_pos = 0;
    mHostContext.ring_config->guest_write_pos = 0;
    mHostContext.ring_config->transfer_mode = 1;
    mHostContext.ring_config->transfer_size = 0;
    mHostContext.ring_config->in_error = 0;

    mSavedConfig = *mHostContext.ring_config;

    if (create.createRenderThread) {
        mCurrentConsumer =
            mConsumerInterface.create(mHostContext, nullptr, mConsumerCallbacks,
                                      mVirtioGpuInfo ? mVirtioGpuInfo->contextId : 0,
                                      mVirtioGpuInfo ? mVirtioGpuInfo->capsetId : 0,
                                      mVirtioGpuInfo ? mVirtioGpuInfo->name : std::nullopt);
    }
}

AddressSpaceGraphicsContext::~AddressSpaceGraphicsContext() {
    if (mCurrentConsumer) {
        mExiting = 1;
        *(mHostContext.host_state) = ASG_HOST_STATE_EXIT;
        mConsumerMessages.send(ConsumerCommand::Exit);
        mConsumerInterface.destroy(mCurrentConsumer);
    }

    sGlobals()->freeBuffer(mBufferAllocation);
    sGlobals()->freeRingStorage(mRingAllocation);
    sGlobals()->freeRingAndBuffer(mCombinedAllocation);
}

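// Handles guest pings: reports where the ring and buffer live in the shared
// physical address space, negotiates the transport version, wakes the
// consumer, and restores the saved ring config on request.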
void AddressSpaceGraphicsContext::perform(AddressSpaceDevicePingInfo* info) {
    switch (static_cast<asg_command>(info->metadata)) {
        case ASG_GET_RING:
            info->metadata = mRingAllocation.offsetIntoPhys;
            info->size = mRingAllocation.size;
            break;
        case ASG_GET_BUFFER:
            info->metadata = mBufferAllocation.offsetIntoPhys;
            info->size = mBufferAllocation.size;
            break;
        case ASG_SET_VERSION: {
            auto guestVersion = (uint32_t)info->size;
            info->size = (uint64_t)(mVersion > guestVersion ? guestVersion : mVersion);
            mVersion = (uint32_t)info->size;
            mCurrentConsumer = mConsumerInterface.create(
                mHostContext, nullptr /* no load stream */, mConsumerCallbacks, 0, 0,
                std::nullopt);

            if (mVirtioGpuInfo) {
                info->metadata = mCombinedAllocation.hostmemId;
            }
            break;
        }
        case ASG_NOTIFY_AVAILABLE:
            mConsumerMessages.trySend(ConsumerCommand::Wakeup);
            info->metadata = 0;
            break;
        case ASG_GET_CONFIG:
            *mHostContext.ring_config = mSavedConfig;
            info->metadata = 0;
            break;
    }
}

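// Called by the consumer when the ring has no data. Spins (yielding) for up
// to kMaxUnavailableReads attempts, then parks the host state on NEED_NOTIFY
// and blocks until a command arrives. Returns 0 to keep spinning, 1 after a
// wakeup, and negative values to exit (-1) or to pause/resume around
// snapshots (-2/-3).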
int AddressSpaceGraphicsContext::onUnavailableRead() {
    static const uint32_t kMaxUnavailableReads = 8;

    ++mUnavailableReadCount;
    ring_buffer_yield();

    ConsumerCommand cmd;

    if (mExiting) {
        mUnavailableReadCount = kMaxUnavailableReads;
    }

    if (mUnavailableReadCount >= kMaxUnavailableReads) {
        mUnavailableReadCount = 0;

    sleep:
        *(mHostContext.host_state) = ASG_HOST_STATE_NEED_NOTIFY;
        mConsumerMessages.receive(&cmd);

        switch (cmd) {
            case ConsumerCommand::Wakeup:
                *(mHostContext.host_state) = ASG_HOST_STATE_CAN_CONSUME;
                break;
            case ConsumerCommand::Exit:
                *(mHostContext.host_state) = ASG_HOST_STATE_EXIT;
                return -1;
            case ConsumerCommand::Sleep:
                goto sleep;
            case ConsumerCommand::PausePreSnapshot:
                return -2;
            case ConsumerCommand::ResumePostSnapshot:
                return -3;
            default:
                crashhandler_die(
                    "AddressSpaceGraphicsContext::onUnavailableRead: "
                    "Unknown command: 0x%x\n",
                    (uint32_t)cmd);
        }

        return 1;
    }
    return 0;
}

AddressSpaceDeviceType AddressSpaceGraphicsContext::getDeviceType() const {
    return AddressSpaceDeviceType::Graphics;
}

void AddressSpaceGraphicsContext::preSave() const {
    if (mCurrentConsumer) {
        mConsumerInterface.preSave(mCurrentConsumer);
        mConsumerMessages.send(ConsumerCommand::PausePreSnapshot);
    }
}

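// Per-context snapshot format: optional virtio-gpu info (context id, capset
// id, optional name), the version/exiting/unavailable-read state, the three
// allocations, the saved ring config, and finally the consumer state if a
// consumer exists.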
void AddressSpaceGraphicsContext::save(base::Stream* stream) const {
    if (mVirtioGpuInfo) {
        const VirtioGpuInfo& info = *mVirtioGpuInfo;
        stream->putBe32(1);
        stream->putBe32(info.contextId);
        stream->putBe32(info.capsetId);
        if (info.name) {
            stream->putBe32(1);
            stream->putString(*info.name);
        } else {
            stream->putBe32(0);
        }
    } else {
        stream->putBe32(0);
    }

    stream->putBe32(mVersion);
    stream->putBe32(mExiting);
    stream->putBe32(mUnavailableReadCount);

    saveAllocation(stream, mRingAllocation);
    saveAllocation(stream, mBufferAllocation);
    saveAllocation(stream, mCombinedAllocation);

    saveRingConfig(stream, mSavedConfig);

    if (mCurrentConsumer) {
        stream->putBe32(1);
        mConsumerInterface.save(mCurrentConsumer, stream);
    } else {
        stream->putBe32(0);
    }
}

void AddressSpaceGraphicsContext::postSave() const {
    if (mCurrentConsumer) {
        mConsumerMessages.send(ConsumerCommand::ResumePostSnapshot);
        mConsumerInterface.postSave(mCurrentConsumer);
    }
}

bool AddressSpaceGraphicsContext::load(base::Stream* stream) {
    const bool hasVirtioGpuInfo = (stream->getBe32() == 1);
    if (hasVirtioGpuInfo) {
        VirtioGpuInfo& info = mVirtioGpuInfo.emplace();
        info.contextId = stream->getBe32();
        info.capsetId = stream->getBe32();
        const bool hasName = (stream->getBe32() == 1);
        if (hasName) {
            info.name = stream->getString();
        }
    }

    mVersion = stream->getBe32();
    mExiting = stream->getBe32();
    mUnavailableReadCount = stream->getBe32();

    loadAllocation(stream, mRingAllocation);
    loadAllocation(stream, mBufferAllocation);
    loadAllocation(stream, mCombinedAllocation);

    if (mVirtioGpuInfo) {
        sGlobals()->fillAllocFromLoad(mCombinedAllocation, AllocType::AllocTypeCombined);
        mRingAllocation = sGlobals()->allocRingViewIntoCombined(mCombinedAllocation);
        mBufferAllocation = sGlobals()->allocBufferViewIntoCombined(mCombinedAllocation);
    } else {
        sGlobals()->fillAllocFromLoad(mRingAllocation, AllocType::AllocTypeRing);
        sGlobals()->fillAllocFromLoad(mBufferAllocation, AllocType::AllocTypeBuffer);
    }

    mHostContext = asg_context_create(
        mRingAllocation.buffer,
        mBufferAllocation.buffer,
        sGlobals()->perContextBufferSize());
    mHostContext.ring_config->buffer_size =
        sGlobals()->perContextBufferSize();
    mHostContext.ring_config->flush_interval =
        aemu_get_android_hw()->hw_gltransport_asg_writeStepSize;

    // In load, the live ring config state is in shared host/guest ram.
    //
    // mHostContext.ring_config->host_consumed_pos = 0;
    // mHostContext.ring_config->transfer_mode = 1;
    // mHostContext.ring_config->transfer_size = 0;
    // mHostContext.ring_config->in_error = 0;

    loadRingConfig(stream, mSavedConfig);

    const bool hasConsumer = stream->getBe32() == 1;
    if (hasConsumer) {
        mCurrentConsumer =
            mConsumerInterface.create(mHostContext, stream, mConsumerCallbacks,
                                      mVirtioGpuInfo ? mVirtioGpuInfo->contextId : 0,
                                      mVirtioGpuInfo ? mVirtioGpuInfo->capsetId : 0,
                                      mVirtioGpuInfo ? mVirtioGpuInfo->name : std::nullopt);
        mConsumerInterface.postLoad(mCurrentConsumer);
    }

    return true;
}

void AddressSpaceGraphicsContext::globalStatePreSave() {
    sGlobals()->preSave();
}

void AddressSpaceGraphicsContext::globalStateSave(base::Stream* stream) {
    sGlobals()->save(stream);
}

void AddressSpaceGraphicsContext::globalStatePostSave() {
    sGlobals()->postSave();
}

bool AddressSpaceGraphicsContext::globalStateLoad(
    base::Stream* stream, const std::optional<AddressSpaceDeviceLoadResources>& resources) {
    return sGlobals()->load(stream, resources);
}

void AddressSpaceGraphicsContext::saveRingConfig(base::Stream* stream, const struct asg_ring_config& config) const {
    stream->putBe32(config.buffer_size);
    stream->putBe32(config.flush_interval);
    stream->putBe32(config.host_consumed_pos);
    stream->putBe32(config.guest_write_pos);
    stream->putBe32(config.transfer_mode);
    stream->putBe32(config.transfer_size);
    stream->putBe32(config.in_error);
}

void AddressSpaceGraphicsContext::saveAllocation(base::Stream* stream, const Allocation& alloc) const {
    stream->putBe64(alloc.blockIndex);
    stream->putBe64(alloc.offsetIntoPhys);
    stream->putBe64(alloc.size);
    stream->putBe32(alloc.isView);
}

void AddressSpaceGraphicsContext::loadRingConfig(base::Stream* stream, struct asg_ring_config& config) {
    config.buffer_size = stream->getBe32();
    config.flush_interval = stream->getBe32();
    config.host_consumed_pos = stream->getBe32();
    config.guest_write_pos = stream->getBe32();
    config.transfer_mode = stream->getBe32();
    config.transfer_size = stream->getBe32();
    config.in_error = stream->getBe32();
}

void AddressSpaceGraphicsContext::loadAllocation(base::Stream* stream, Allocation& alloc) {
    alloc.blockIndex = stream->getBe64();
    alloc.offsetIntoPhys = stream->getBe64();
    alloc.size = stream->getBe64();
    alloc.isView = stream->getBe32();
}

} // namespace asg
} // namespace emulation
} // namespace android