/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "FreeRTOS.h"
#include "encoding.h"
#include "task.h"

#include "chre/core/event_loop_manager.h"
#include "chre/core/host_comms_manager.h"
#include "chre/platform/host_link.h"
#include "chre/platform/log.h"
#include "chre/platform/shared/host_protocol_chre.h"
#include "chre/platform/shared/log_buffer_manager.h"
#include "chre/platform/shared/nanoapp_load_manager.h"
#include "chre/platform/system_time.h"
#include "chre/platform/system_timer.h"
#include "chre/util/flatbuffers/helpers.h"
#include "chre/util/nested_data_ptr.h"
#include "chre_api/chre.h"

#include "dma_api.h"
#include "ipi.h"
#include "ipi_id.h"
#include "resource_req.h"
#include "scp_dram_region.h"

// Because the LOGx macros are redirected to logcat through
// HostLink::sendLogMessageV2 and HostLink::send, calling them from inside the
// HostLink implementation could result in endless recursion. So redefine them
// here to print directly to the SCP console instead.
#if CHRE_MINIMUM_LOG_LEVEL >= CHRE_LOG_LEVEL_ERROR
#undef LOGE
#define LOGE(fmt, arg...) PRINTF_E("[CHRE]" fmt "\n", ##arg)
#endif

#if CHRE_MINIMUM_LOG_LEVEL >= CHRE_LOG_LEVEL_WARN
#undef LOGW
#define LOGW(fmt, arg...) PRINTF_W("[CHRE]" fmt "\n", ##arg)
#endif

#if CHRE_MINIMUM_LOG_LEVEL >= CHRE_LOG_LEVEL_INFO
#undef LOGI
#define LOGI(fmt, arg...) PRINTF_I("[CHRE]" fmt "\n", ##arg)
#endif

#if CHRE_MINIMUM_LOG_LEVEL >= CHRE_LOG_LEVEL_DEBUG
#undef LOGD
#define LOGD(fmt, arg...) PRINTF_D("[CHRE]" fmt "\n", ##arg)
#endif

#if CHRE_MINIMUM_LOG_LEVEL >= CHRE_LOG_LEVEL_VERBOSE
#undef LOGV
#define LOGV(fmt, arg...) PRINTF_D("[CHRE]" fmt "\n", ##arg)
#endif

namespace chre {
namespace {

struct UnloadNanoappCallbackData {
  uint64_t appId;
  uint32_t transactionId;
  uint16_t hostClientId;
  bool allowSystemNanoappUnload;
};

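// Buffer the IPI framework fills with the inbound message header from the AP;
// registered below in initializeIpi() as the receive buffer for
// IPI_IN_C_HOST_SCP_CHRE.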
SRAM_REGION_BSS uint32_t gChreIpiRecvData[2];

// Ack data the SCP replies with for AP-to-SCP messages
SRAM_REGION_BSS uint32_t gChreIpiAckToHost[2];

// Ack data the SCP reads back from the AP for SCP-to-AP messages
SRAM_REGION_BSS int gChreIpiAckFromHost[2];

#ifdef SCP_CHRE_USE_DMA
// The minimum total size of a message that triggers DMA for sending/receiving.
constexpr uint32_t kMinMessageSizeForDma = 0x1000;  // 4k
#endif

// The buffer used to receive messages from AP.
// The size should be consistent with the max sending size on the host side.
constexpr uint32_t kChreIpiRecvBufferSize = 0x8000;  // 32k
DRAM_REGION_VARIABLE uint8_t gChreRecvBuffer[kChreIpiRecvBufferSize]
    __attribute__((aligned(CACHE_LINE_SIZE)));

#ifdef SCP_CHRE_USE_DMA
// Rounds up the value to be aligned with CACHE_LINE_SIZE.
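// For example, assuming an illustrative CACHE_LINE_SIZE of 64 bytes,
// alignToCacheLine(100) returns 128 and alignToCacheLine(128) returns 128.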
static inline uint32_t alignToCacheLine(uint32_t value) {
  // alignment must be a power of 2.
  static_assert(CACHE_LINE_SIZE > 0 &&
                (CACHE_LINE_SIZE & (CACHE_LINE_SIZE - 1)) == 0);
  return (value + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
}
#endif

void *gChreSubregionRecvAddr;
size_t gChreSubregionRecvSize;
void *gChreSubregionSendAddr;
size_t gChreSubregionSendSize;

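// Header exchanged over the IPI channel for each CHRE message in either
// direction: |magic| guards against stray IPI traffic, and |size| is the number
// of payload bytes staged in the shared DRAM subregion for this message.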
#define SCP_CHRE_MAGIC 0x67728269
struct ScpChreIpiMsg {
  uint32_t magic;
  uint32_t size;
};

struct NanoappListData {
  ChreFlatBufferBuilder *builder;
  DynamicVector<NanoappListEntryOffset> nanoappEntries;
  uint16_t hostClientId;
};

enum class PendingMessageType {
  Shutdown,
  NanoappMessageToHost,
  HubInfoResponse,
  NanoappListResponse,
  LoadNanoappResponse,
  UnloadNanoappResponse,
  DebugDumpData,
  DebugDumpResponse,
  TimeSyncRequest,
  LowPowerMicAccessRequest,
  LowPowerMicAccessRelease,
  EncodedLogMessage,
  SelfTestResponse,
  MetricLog,
  NanConfigurationRequest,
  PulseRequest,
  PulseResponse,
  NanoappTokenDatabaseInfo,
  MessageDeliveryStatus,
};

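// A message waiting in the outbound queue. |type| determines which member of
// the |data| union is valid for a given entry.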
struct PendingMessage {
  PendingMessage(PendingMessageType msgType, uint16_t hostClientId) {
    type = msgType;
    data.hostClientId = hostClientId;
  }

  PendingMessage(PendingMessageType msgType,
                 const HostMessage *msgToHost = nullptr) {
    type = msgType;
    data.msgToHost = msgToHost;
  }

  PendingMessage(PendingMessageType msgType, ChreFlatBufferBuilder *builder) {
    type = msgType;
    data.builder = builder;
  }

  PendingMessageType type;
  union {
    const HostMessage *msgToHost;
    uint16_t hostClientId;
    ChreFlatBufferBuilder *builder;
  } data;
};

constexpr size_t kOutboundQueueSize = 100;
SRAM_REGION_VARIABLE FixedSizeBlockingQueue<PendingMessage, kOutboundQueueSize>
    gOutboundQueue;
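// Producers push onto gOutboundQueue through enqueueMessage(); the queue is
// drained by HostLinkBase::vChreSendTask(), which blocks while it is empty.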

typedef void(MessageBuilderFunction)(ChreFlatBufferBuilder &builder,
                                     void *cookie);

inline HostCommsManager &getHostCommsManager() {
  return EventLoopManagerSingleton::get()->getHostCommsManager();
}

DRAM_REGION_FUNCTION bool generateMessageFromBuilder(
    ChreFlatBufferBuilder *builder) {
  CHRE_ASSERT(builder != nullptr);
  LOGV("%s: message size %d", __func__, builder->GetSize());
  bool result =
      HostLinkBase::send(builder->GetBufferPointer(), builder->GetSize());

  // clean up
  builder->~ChreFlatBufferBuilder();
  memoryFree(builder);
  return result;
}

DRAM_REGION_FUNCTION bool generateMessageToHost(const HostMessage *message) {
  LOGV("%s: message size %zu", __func__, message->message.size());
  // TODO(b/285219398): ideally we'd construct our flatbuffer directly in the
  // host-supplied buffer
  constexpr size_t kFixedReserveSize = 88;
  ChreFlatBufferBuilder builder(message->message.size() + kFixedReserveSize);
  HostProtocolChre::encodeNanoappMessage(
      builder, message->appId, message->toHostData.messageType,
      message->toHostData.hostEndpoint, message->message.data(),
      message->message.size(), message->toHostData.appPermissions,
      message->toHostData.messagePermissions, message->toHostData.wokeHost);
  bool result =
      HostLinkBase::send(builder.GetBufferPointer(), builder.GetSize());

  // clean up
  getHostCommsManager().onMessageToHostComplete(message);
  return result;
}

DRAM_REGION_FUNCTION int generateHubInfoResponse(uint16_t hostClientId) {
  constexpr size_t kInitialBufferSize = 192;

  constexpr char kHubName[] = "CHRE on Tinysys";
  constexpr char kVendor[] = "Google";
  constexpr char kToolchain[] =
      "Clang " STRINGIFY(__clang_major__) "." STRINGIFY(
          __clang_minor__) "." STRINGIFY(__clang_patchlevel__);
  constexpr uint32_t kLegacyPlatformVersion = 0;
  constexpr uint32_t kLegacyToolchainVersion =
      ((__clang_major__ & 0xFF) << 24) | ((__clang_minor__ & 0xFF) << 16) |
      (__clang_patchlevel__ & 0xFFFF);
  constexpr float kPeakMips = 350;
  constexpr float kStoppedPower = 0;
  constexpr float kSleepPower = 1;
  constexpr float kPeakPower = 15;
  bool supportsReliableMessages =
      IS_BIT_SET(chreGetCapabilities(), CHRE_CAPABILITIES_RELIABLE_MESSAGES);

  // Note that this may execute prior to EventLoopManager::lateInit() completing
  ChreFlatBufferBuilder builder(kInitialBufferSize);
  HostProtocolChre::encodeHubInfoResponse(
      builder, kHubName, kVendor, kToolchain, kLegacyPlatformVersion,
      kLegacyToolchainVersion, kPeakMips, kStoppedPower, kSleepPower,
      kPeakPower, chreGetMessageToHostMaxSize(), chreGetPlatformId(),
      chreGetVersion(), hostClientId, supportsReliableMessages);

  return HostLinkBase::send(builder.GetBufferPointer(), builder.GetSize());
}

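// Encodes and sends a single message that was popped from gOutboundQueue,
// dispatching on its type: nanoapp messages and hub info responses are encoded
// here, while everything else reuses the ChreFlatBufferBuilder that was
// prepared when the message was enqueued.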
DRAM_REGION_FUNCTION bool dequeueMessage(PendingMessage pendingMsg) {
  LOGV("%s: message type %d", __func__, pendingMsg.type);
  bool result = false;
  switch (pendingMsg.type) {
    case PendingMessageType::NanoappMessageToHost:
      result = generateMessageToHost(pendingMsg.data.msgToHost);
      break;

    case PendingMessageType::HubInfoResponse:
      result = generateHubInfoResponse(pendingMsg.data.hostClientId);
      break;
    default:
      result = generateMessageFromBuilder(pendingMsg.data.builder);
      break;
  }
  return result;
}

/**
 * Wrapper function to enqueue a message on the outbound message queue. All
 * outgoing messages to the host must be sent through this function.
 *
 * @param message The message to send to the host.
 *
 * @return true if the message was successfully added to the queue.
 */
DRAM_REGION_FUNCTION bool enqueueMessage(PendingMessage pendingMsg) {
  return gOutboundQueue.push(pendingMsg);
}

/**
 * Helper function that takes care of the boilerplate for allocating a
 * ChreFlatBufferBuilder on the heap and adding it to the outbound message
 * queue.
 *
 * @param msgType Identifies the message while in the outbound queue
 * @param initialBufferSize Number of bytes to reserve when first allocating the
 *        ChreFlatBufferBuilder
 * @param msgBuilder Synchronous callback used to encode the FlatBuffer
 *        message. Will not be invoked if allocation fails.
 * @param cookie Opaque pointer that will be passed through to msgBuilder
 *
 * @return true if the message was successfully added to the queue
 */
DRAM_REGION_FUNCTION bool buildAndEnqueueMessage(
    PendingMessageType msgType, size_t initialBufferSize,
    MessageBuilderFunction *msgBuilder, void *cookie) {
  LOGV("%s: message type %d, size %zu", __func__, msgType, initialBufferSize);
  bool pushed = false;

  auto builder = MakeUnique<ChreFlatBufferBuilder>(initialBufferSize);
  if (builder.isNull()) {
    LOGE("Couldn't allocate memory for message type %d",
         static_cast<int>(msgType));
  } else {
    msgBuilder(*builder, cookie);

    if (!enqueueMessage(PendingMessage(msgType, builder.get()))) {
      LOGE("Couldn't push message type %d to outbound queue",
           static_cast<int>(msgType));
    } else {
      builder.release();
      pushed = true;
    }
  }

  return pushed;
}

/**
 * FlatBuffer message builder callback used with handlePulseRequest()
 */
DRAM_REGION_FUNCTION void buildPulseResponse(ChreFlatBufferBuilder &builder,
                                             void * /*cookie*/) {
  HostProtocolChre::encodePulseResponse(builder);
}

/**
 * FlatBuffer message builder callback used with handleNanoappListRequest()
 */
DRAM_REGION_FUNCTION void buildNanoappListResponse(
    ChreFlatBufferBuilder &builder, void *cookie) {
  LOGV("%s", __func__);
  auto nanoappAdderCallback = [](const Nanoapp *nanoapp, void *data) {
    auto *cbData = static_cast<NanoappListData *>(data);
    HostProtocolChre::addNanoappListEntry(
        *(cbData->builder), cbData->nanoappEntries, nanoapp->getAppId(),
        nanoapp->getAppVersion(), true /*enabled*/, nanoapp->isSystemNanoapp(),
        nanoapp->getAppPermissions(), nanoapp->getRpcServices());
  };

  // Add a NanoappListEntry to the FlatBuffer for each nanoapp
  auto *cbData = static_cast<NanoappListData *>(cookie);
  cbData->builder = &builder;
  EventLoop &eventLoop = EventLoopManagerSingleton::get()->getEventLoop();
  eventLoop.forEachNanoapp(nanoappAdderCallback, cbData);
  HostProtocolChre::finishNanoappListResponse(builder, cbData->nanoappEntries,
                                              cbData->hostClientId);
}

DRAM_REGION_FUNCTION void handleUnloadNanoappCallback(uint16_t /*type*/,
                                                      void *data,
                                                      void * /*extraData*/) {
  auto *cbData = static_cast<UnloadNanoappCallbackData *>(data);
  bool success = false;
  uint16_t instanceId;
  EventLoop &eventLoop = EventLoopManagerSingleton::get()->getEventLoop();
  if (!eventLoop.findNanoappInstanceIdByAppId(cbData->appId, &instanceId)) {
    LOGE("Couldn't unload app ID 0x%016" PRIx64 ": not found", cbData->appId);
  } else {
    success =
        eventLoop.unloadNanoapp(instanceId, cbData->allowSystemNanoappUnload);
  }

  constexpr size_t kInitialBufferSize = 52;
  auto builder = MakeUnique<ChreFlatBufferBuilder>(kInitialBufferSize);
  HostProtocolChre::encodeUnloadNanoappResponse(*builder, cbData->hostClientId,
                                                cbData->transactionId, success);

  if (!enqueueMessage(PendingMessage(PendingMessageType::UnloadNanoappResponse,
                                     builder.get()))) {
    LOGE("Failed to send unload response to host: %x transactionID: 0x%x",
         cbData->hostClientId, cbData->transactionId);
  } else {
    builder.release();
  }

  memoryFree(data);
}

DRAM_REGION_FUNCTION void sendDebugDumpData(uint16_t hostClientId,
                                            const char *debugStr,
                                            size_t debugStrSize) {
  struct DebugDumpMessageData {
    uint16_t hostClientId;
    const char *debugStr;
    size_t debugStrSize;
  };

  auto msgBuilder = [](ChreFlatBufferBuilder &builder, void *cookie) {
    const auto *data = static_cast<const DebugDumpMessageData *>(cookie);
    HostProtocolChre::encodeDebugDumpData(builder, data->hostClientId,
                                          data->debugStr, data->debugStrSize);
  };

  constexpr size_t kFixedSizePortion = 52;
  DebugDumpMessageData data;
  data.hostClientId = hostClientId;
  data.debugStr = debugStr;
  data.debugStrSize = debugStrSize;
  buildAndEnqueueMessage(PendingMessageType::DebugDumpData,
                         kFixedSizePortion + debugStrSize, msgBuilder, &data);
}

DRAM_REGION_FUNCTION void sendDebugDumpResponse(uint16_t hostClientId,
                                                bool success,
                                                uint32_t dataCount) {
  struct DebugDumpResponseData {
    uint16_t hostClientId;
    bool success;
    uint32_t dataCount;
  };

  auto msgBuilder = [](ChreFlatBufferBuilder &builder, void *cookie) {
    const auto *data = static_cast<const DebugDumpResponseData *>(cookie);
    HostProtocolChre::encodeDebugDumpResponse(builder, data->hostClientId,
                                              data->success, data->dataCount);
  };

  constexpr size_t kInitialSize = 52;
  DebugDumpResponseData data;
  data.hostClientId = hostClientId;
  data.success = success;
  data.dataCount = dataCount;
  buildAndEnqueueMessage(PendingMessageType::DebugDumpResponse, kInitialSize,
                         msgBuilder, &data);
}
}  // anonymous namespace

DRAM_REGION_FUNCTION void sendDebugDumpResultToHost(uint16_t hostClientId,
                                                    const char *debugStr,
                                                    size_t debugStrSize,
                                                    bool complete,
                                                    uint32_t dataCount) {
  LOGV("%s: host client id %d", __func__, hostClientId);
  if (debugStrSize > 0) {
    sendDebugDumpData(hostClientId, debugStr, debugStrSize);
  }
  if (complete) {
    sendDebugDumpResponse(hostClientId, /* success= */ true, dataCount);
  }
}

DRAM_REGION_FUNCTION HostLinkBase::HostLinkBase() {
  LOGV("HostLinkBase::%s", __func__);
  initializeIpi();
}

DRAM_REGION_FUNCTION HostLinkBase::~HostLinkBase() {
  LOGV("HostLinkBase::%s", __func__);
}

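// FreeRTOS task that services the inbound IPI channel: it blocks in
// ipi_recv_reply() and replies to each AP-to-SCP message with the ack status
// that chreIpiHandler() wrote into gChreIpiAckToHost.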
DRAM_REGION_FUNCTION void HostLinkBase::vChreReceiveTask(void *pvParameters) {
  int i = 0;
  int ret = 0;

  LOGV("%s", __func__);
  while (true) {
    LOGV("%s calling ipi_recv_reply(), Cnt=%d", __func__, i++);
    ret = ipi_recv_reply(IPI_IN_C_HOST_SCP_CHRE, (void *)&gChreIpiAckToHost[0],
                         1);
    if (ret != IPI_ACTION_DONE)
      LOGE("%s ipi_recv_reply() ret = %d", __func__, ret);
    LOGV("%s reply_end", __func__);
  }
}

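// FreeRTOS task that drains gOutboundQueue: it blocks until a PendingMessage
// is available and then encodes/sends it via dequeueMessage().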
DRAM_REGION_FUNCTION void HostLinkBase::vChreSendTask(void *pvParameters) {
  while (true) {
    auto msg = gOutboundQueue.pop();
    dequeueMessage(msg);
  }
}

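// IPI callback invoked when the AP sends a CHRE message: validates the
// ScpChreIpiMsg header, copies the payload from the shared subregion into
// gChreRecvBuffer (by memcpy, or via SCP DMA for large messages when
// SCP_CHRE_USE_DMA is set), forwards it to receive(), and writes the ack status
// into gChreIpiAckToHost for the receive task's reply.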
DRAM_REGION_FUNCTION void HostLinkBase::chreIpiHandler(unsigned int id,
                                                       void *prdata, void *data,
                                                       unsigned int len) {
  /* receive magic and cmd */
  struct ScpChreIpiMsg msg = *(struct ScpChreIpiMsg *)data;

  // Check the magic number and the payload size that needs to be copied.
  LOGD("%s: Received a message from AP. Size=%u", __func__, msg.size);
  if (msg.magic != SCP_CHRE_MAGIC) {
    LOGE("Invalid magic number: 0x%x, skip message", msg.magic);
    gChreIpiAckToHost[0] = IPI_NO_MEMORY;
    gChreIpiAckToHost[1] = 0;
    return;
  }

  // Map the physical address of the shared memory for the SCP
  uint32_t srcAddr =
      ap_to_scp(reinterpret_cast<uint32_t>(gChreSubregionRecvAddr));

#ifdef SCP_CHRE_USE_DMA
  if (msg.size < kMinMessageSizeForDma) {
    dvfs_enable_DRAM_resource(CHRE_MEM_ID);
    memcpy(static_cast<void *>(gChreRecvBuffer),
           reinterpret_cast<void *>(srcAddr), msg.size);
    dvfs_disable_DRAM_resource(CHRE_MEM_ID);
  } else {
    auto dstAddr = reinterpret_cast<uint32_t>(gChreRecvBuffer);

    // The destination address for receiving data is in cacheable memory, so it
    // must be invalidated/flushed before transferring from the shared buffer
    // to the SCP.
    scp_dcache_flush(dstAddr, alignToCacheLine(msg.size));

    // Use the SCP DMA HW to copy the data from shared memory to the SCP side.
    // The dstAddr could be a global variable or SCP heap memory in SRAM/DRAM.
    DMA_RESULT result = scp_dma_transaction_dram(dstAddr, srcAddr, msg.size,
                                                 DMA_MEM_ID, NO_RESERVED);

    if (result != DMA_RESULT_DONE) {
      LOGE("Failed to receive a message from AP using DMA");
    }
  }
#else  // SCP_CHRE_USE_DMA

  dvfs_enable_DRAM_resource(CHRE_MEM_ID);
  memcpy(static_cast<void *>(gChreRecvBuffer),
         reinterpret_cast<void *>(srcAddr), msg.size);
  dvfs_disable_DRAM_resource(CHRE_MEM_ID);

#endif  // SCP_CHRE_USE_DMA

  // process the message
  receive(static_cast<HostLinkBase *>(prdata), gChreRecvBuffer, msg.size);

  // After finishing the job, ack the message to the host
  gChreIpiAckToHost[0] = IPI_ACTION_DONE;
  gChreIpiAckToHost[1] = msg.size;
}

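// Sets up the host link: resolves the AP<->SCP shared-memory subregions,
// spawns the receive and send tasks, and registers the inbound and outbound
// IPI channels. Any failure here is treated as fatal.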
DRAM_REGION_FUNCTION void HostLinkBase::initializeIpi(void) {
  bool success = false;
  int ret;
  constexpr size_t kBackgroundTaskStackSize = 1024;

#ifdef PRI_CHRE_BACKGROUND
  constexpr UBaseType_t kBackgroundTaskPriority = PRI_CHRE_BACKGROUND;
#else
  constexpr UBaseType_t kBackgroundTaskPriority = 2;
#endif

  // Prepare the shared memory information and register the callback functions
  if (!(ret = scp_get_reserve_mem_by_id(SCP_CHRE_FROM_MEM_ID,
                                        &gChreSubregionRecvAddr,
                                        &gChreSubregionRecvSize))) {
    LOGE("%s: get SCP_CHRE_FROM_MEM_ID memory fail", __func__);
  } else if (!(ret = scp_get_reserve_mem_by_id(SCP_CHRE_TO_MEM_ID,
                                               &gChreSubregionSendAddr,
                                               &gChreSubregionSendSize))) {
    LOGE("%s: get SCP_CHRE_TO_MEM_ID memory fail", __func__);
  } else if (pdPASS != xTaskCreate(vChreReceiveTask, "CHRE_RECEIVE",
                                   kBackgroundTaskStackSize, (void *)0,
                                   kBackgroundTaskPriority, NULL)) {
    LOGE("%s failed to create ipi receiver task", __func__);
  } else if (pdPASS != xTaskCreate(vChreSendTask, "CHRE_SEND",
                                   kBackgroundTaskStackSize, (void *)0,
                                   kBackgroundTaskPriority, NULL)) {
    LOGE("%s failed to create ipi outbound message queue task", __func__);
  } else if (IPI_ACTION_DONE !=
             (ret = ipi_register(IPI_IN_C_HOST_SCP_CHRE, (void *)chreIpiHandler,
                                 (void *)this, (void *)&gChreIpiRecvData[0]))) {
    LOGE("ipi_register IPI_IN_C_HOST_SCP_CHRE failed, %d", ret);
  } else if (IPI_ACTION_DONE !=
             (ret = ipi_register(IPI_OUT_C_SCP_HOST_CHRE, NULL, (void *)this,
                                 (void *)&gChreIpiAckFromHost[0]))) {
    LOGE("ipi_register IPI_OUT_C_SCP_HOST_CHRE failed, %d", ret);
  } else {
    success = true;
  }

  if (!success) {
    FATAL_ERROR("HostLinkBase::initializeIpi() failed");
  }
}

DRAM_REGION_FUNCTION void HostLinkBase::receive(HostLinkBase *instance,
                                                void *message, int messageLen) {
  LOGV("%s: message len %d", __func__, messageLen);

  // TODO(b/277128368): A crude way to initially determine daemon's up - set
  // a flag on the first message received. This is temporary until a better
  // way to do this is available.
  instance->setInitialized(true);

  if (!HostProtocolChre::decodeMessageFromHost(message, messageLen)) {
    LOGE("Failed to decode msg %p of len %d", message, messageLen);
  }
}

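// Sends an encoded FlatBuffer to the host: stages the payload in the SCP->AP
// shared subregion (by memcpy, or partially via SCP DMA for large messages when
// SCP_CHRE_USE_DMA is set), then issues ipi_send_compl() with a ScpChreIpiMsg
// header describing the payload and checks the ack written back by the host.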
DRAM_REGION_FUNCTION bool HostLinkBase::send(uint8_t *data, size_t dataLen) {
#ifndef HOST_LINK_IPI_SEND_TIMEOUT_MS
#define HOST_LINK_IPI_SEND_TIMEOUT_MS 100
#endif
#ifndef HOST_LINK_IPI_RESPONSE_TIMEOUT_MS
#define HOST_LINK_IPI_RESPONSE_TIMEOUT_MS 100
#endif
  LOGV("HostLinkBase::%s: %zu, %p", __func__, dataLen, data);
  struct ScpChreIpiMsg msg;
  msg.magic = SCP_CHRE_MAGIC;
  msg.size = dataLen;

  uint32_t dstAddr =
      ap_to_scp(reinterpret_cast<uint32_t>(gChreSubregionSendAddr));

#ifdef SCP_CHRE_USE_DMA
  if (dataLen < kMinMessageSizeForDma) {
    dvfs_enable_DRAM_resource(CHRE_MEM_ID);
    memcpy(reinterpret_cast<void *>(dstAddr), data, dataLen);
    dvfs_disable_DRAM_resource(CHRE_MEM_ID);
  } else {
    auto srcAddr = reinterpret_cast<uint32_t>(data);
    auto msgSize = reinterpret_cast<uint32_t>(msg.size);

    // Separate the message into 2 parts, copySize and dmaSize, and use memcpy
    // and DMA to transfer them respectively. This is needed due to the
    // alignment requirement of the DMA transfer.
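    // For example, assuming an illustrative CACHE_LINE_SIZE of 64 bytes and a
    // hypothetical srcAddr of 0x10000090, dmaStartSrcAddr rounds up to
    // 0x100000C0, so the first copySize = 0x30 bytes are sent with memcpy and
    // the remaining dmaSize = msgSize - 0x30 bytes are transferred from the
    // aligned address via DMA.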
    uint32_t dmaStartSrcAddr = alignToCacheLine(srcAddr);
    uint32_t copySize = dmaStartSrcAddr - srcAddr;
    uint32_t dmaSize = msgSize - copySize;

    if (copySize > 0) {
      dvfs_enable_DRAM_resource(CHRE_MEM_ID);
      memcpy(reinterpret_cast<void *>(dstAddr), data, copySize);
      dvfs_disable_DRAM_resource(CHRE_MEM_ID);
    }

    // The source address for sending data is in cacheable memory, so it must
    // be invalidated/flushed before transferring from the SCP to the shared
    // buffer.
    scp_dcache_flush(dmaStartSrcAddr, alignToCacheLine(dmaSize));

    // Use the SCP DMA HW to copy the data from the SCP to shared memory.
    // The source could be a global variable or SCP heap memory in SRAM/DRAM.
    DMA_RESULT result = scp_dma_transaction_dram(
        dstAddr + copySize, dmaStartSrcAddr, dmaSize, DMA_MEM_ID, NO_RESERVED);

    if (result != DMA_RESULT_DONE) {
      LOGE("Failed to send a message to AP using DMA");
    }
  }
#else
  dvfs_enable_DRAM_resource(CHRE_MEM_ID);
  memcpy(reinterpret_cast<void *>(dstAddr), data, dataLen);
  dvfs_disable_DRAM_resource(CHRE_MEM_ID);
#endif
  // NB: len param for ipi_send is in number of 32-bit words
  int ret = ipi_send_compl(
      IPI_OUT_C_SCP_HOST_CHRE, &msg, sizeof(msg) / sizeof(uint32_t),
      HOST_LINK_IPI_SEND_TIMEOUT_MS, HOST_LINK_IPI_RESPONSE_TIMEOUT_MS);
  if (ret) {
    LOGE("chre ipi send fail(%d)", ret);
  } else {
    /* check the ack data to make sure the IPI wasn't busy */
    if (gChreIpiAckFromHost[0] == IPI_ACTION_DONE) {
      LOGV("chre ipi send done, you can send another IPI");
    } else if (gChreIpiAckFromHost[0] == IPI_PIN_BUSY) {
      /* you may have to re-send the IPI, or drop this one */
      LOGW(
          "chre ipi send busy, user thread did not wait for the IPI until the "
          "job finished");
    } else if (gChreIpiAckFromHost[0] == IPI_NO_MEMORY) {
      LOGW("chre ipi send with wrong size(%zu)", dataLen);
    } else {
      LOGW("chre ipi send unknown case: 0x%x", gChreIpiAckFromHost[0]);
    }
  }

  return ret == IPI_ACTION_DONE;
}

DRAM_REGION_FUNCTION void HostLinkBase::sendTimeSyncRequest() {}

DRAM_REGION_FUNCTION void HostLinkBase::sendNanConfiguration(
    bool /* enabled */) {
  LOGE("%s is unsupported", __func__);
}

DRAM_REGION_FUNCTION void HostLinkBase::sendLogMessageV2(
    const uint8_t *logMessage, size_t logMessageSize, uint32_t numLogsDropped) {
  LOGV("%s: size %zu", __func__, logMessageSize);
  struct LogMessageData {
    const uint8_t *logMsg;
    size_t logMsgSize;
    uint32_t numLogsDropped;
  };

  LogMessageData logMessageData{logMessage, logMessageSize, numLogsDropped};

  auto msgBuilder = [](ChreFlatBufferBuilder &builder, void *cookie) {
    const auto *data = static_cast<const LogMessageData *>(cookie);
    HostProtocolChre::encodeLogMessagesV2(
        builder, data->logMsg, data->logMsgSize, data->numLogsDropped);
  };

  constexpr size_t kInitialSize = 128;
  bool result = false;
  if (isInitialized()) {
    result = buildAndEnqueueMessage(
        PendingMessageType::EncodedLogMessage,
        kInitialSize + logMessageSize + sizeof(numLogsDropped), msgBuilder,
        &logMessageData);
  }

#ifdef CHRE_USE_BUFFERED_LOGGING
  if (LogBufferManagerSingleton::isInitialized()) {
    LogBufferManagerSingleton::get()->onLogsSentToHost(result);
  }
#else
  UNUSED_VAR(result);
#endif
}

DRAM_REGION_FUNCTION bool HostLink::sendMessage(HostMessage const *message) {
  LOGV("HostLink::%s size(%zu)", __func__, message->message.size());
  bool success = false;

  if (isInitialized()) {
    success = enqueueMessage(
        PendingMessage(PendingMessageType::NanoappMessageToHost, message));
  } else {
    LOGW("Dropping outbound message: host link not initialized yet");
  }
  return success;
}

DRAM_REGION_FUNCTION bool HostLink::sendMessageDeliveryStatus(
    uint32_t messageSequenceNumber, uint8_t errorCode) {
  struct DeliveryStatusData {
    uint32_t messageSequenceNumber;
    uint8_t errorCode;
  } args{messageSequenceNumber, errorCode};

  auto msgBuilder = [](ChreFlatBufferBuilder &builder, void *cookie) {
    auto args = static_cast<const DeliveryStatusData *>(cookie);
    HostProtocolChre::encodeMessageDeliveryStatus(
        builder, args->messageSequenceNumber, args->errorCode);
  };

  return buildAndEnqueueMessage(PendingMessageType::MessageDeliveryStatus,
                                /* initialBufferSize= */ 64, msgBuilder, &args);
}

// TODO(b/285219398): HostMessageHandlers member function implementations are
// expected to be (mostly) identical for any platform that uses flatbuffers
// to encode messages - refactor the host link to merge the multiple copies
// we currently have.
DRAM_REGION_FUNCTION void HostMessageHandlers::handleNanoappMessage(
    uint64_t appId, uint32_t messageType, uint16_t hostEndpoint,
    const void *messageData, size_t messageDataLen, bool isReliable,
    uint32_t messageSequenceNumber) {
  LOGV("Parsed nanoapp message from host: app ID 0x%016" PRIx64
       ", endpoint "
       "0x%" PRIx16 ", msgType %" PRIu32 ", payload size %zu",
       appId, hostEndpoint, messageType, messageDataLen);

  getHostCommsManager().sendMessageToNanoappFromHost(
      appId, messageType, hostEndpoint, messageData, messageDataLen, isReliable,
      messageSequenceNumber);
}

DRAM_REGION_FUNCTION void HostMessageHandlers::handleMessageDeliveryStatus(
    uint32_t messageSequenceNumber, uint8_t errorCode) {
  getHostCommsManager().completeTransaction(messageSequenceNumber, errorCode);
}

DRAM_REGION_FUNCTION void HostMessageHandlers::handleHubInfoRequest(
    uint16_t hostClientId) {
  LOGV("%s: host client id %d", __func__, hostClientId);
  enqueueMessage(
      PendingMessage(PendingMessageType::HubInfoResponse, hostClientId));
}

DRAM_REGION_FUNCTION void HostMessageHandlers::handleNanoappListRequest(
    uint16_t hostClientId) {
  auto callback = [](uint16_t /*type*/, void *data, void * /*extraData*/) {
    uint16_t cbHostClientId = NestedDataPtr<uint16_t>(data);

    NanoappListData cbData = {};
    cbData.hostClientId = cbHostClientId;

    size_t expectedNanoappCount =
        EventLoopManagerSingleton::get()->getEventLoop().getNanoappCount();
    if (!cbData.nanoappEntries.reserve(expectedNanoappCount)) {
      LOG_OOM();
    } else {
      constexpr size_t kFixedOverhead = 48;
      constexpr size_t kPerNanoappSize = 32;
      size_t initialBufferSize =
          (kFixedOverhead + expectedNanoappCount * kPerNanoappSize);

      buildAndEnqueueMessage(PendingMessageType::NanoappListResponse,
                             initialBufferSize, buildNanoappListResponse,
                             &cbData);
    }
  };

  LOGD("Nanoapp list request from client ID %" PRIu16, hostClientId);
  EventLoopManagerSingleton::get()->deferCallback(
      SystemCallbackType::NanoappListResponse,
      NestedDataPtr<uint16_t>(hostClientId), callback);
}

DRAM_REGION_FUNCTION void HostMessageHandlers::sendFragmentResponse(
    uint16_t hostClientId, uint32_t transactionId, uint32_t fragmentId,
    bool success) {
  struct FragmentedLoadInfoResponse {
    uint16_t hostClientId;
    uint32_t transactionId;
    uint32_t fragmentId;
    bool success;
  };

  auto msgBuilder = [](ChreFlatBufferBuilder &builder, void *cookie) {
    auto *cbData = static_cast<FragmentedLoadInfoResponse *>(cookie);
    HostProtocolChre::encodeLoadNanoappResponse(
        builder, cbData->hostClientId, cbData->transactionId, cbData->success,
        cbData->fragmentId);
  };

  FragmentedLoadInfoResponse response = {
      .hostClientId = hostClientId,
      .transactionId = transactionId,
      .fragmentId = fragmentId,
      .success = success,
  };
  constexpr size_t kInitialBufferSize = 52;
  buildAndEnqueueMessage(PendingMessageType::LoadNanoappResponse,
                         kInitialBufferSize, msgBuilder, &response);
}

DRAM_REGION_FUNCTION void HostMessageHandlers::handlePulseRequest() {
  auto callback = [](uint16_t /*type*/, void * /*data*/, void * /*extraData*/) {
    buildAndEnqueueMessage(PendingMessageType::PulseResponse,
                           /* initialBufferSize= */ 48, buildPulseResponse,
                           /* cookie= */ nullptr);
  };
  EventLoopManagerSingleton::get()->deferCallback(
      SystemCallbackType::PulseResponse, /* data= */ nullptr, callback);
}

DRAM_REGION_FUNCTION void HostMessageHandlers::handleLoadNanoappRequest(
    uint16_t hostClientId, uint32_t transactionId, uint64_t appId,
    uint32_t appVersion, uint32_t appFlags, uint32_t targetApiVersion,
    const void *buffer, size_t bufferLen, const char *appFileName,
    uint32_t fragmentId, size_t appBinaryLen, bool respondBeforeStart) {
  UNUSED_VAR(appFileName);

  loadNanoappData(hostClientId, transactionId, appId, appVersion, appFlags,
                  targetApiVersion, buffer, bufferLen, fragmentId, appBinaryLen,
                  respondBeforeStart);
}

DRAM_REGION_FUNCTION void HostMessageHandlers::handleUnloadNanoappRequest(
    uint16_t hostClientId, uint32_t transactionId, uint64_t appId,
    bool allowSystemNanoappUnload) {
  LOGD("Unload nanoapp request from client %" PRIu16 " (txnID %" PRIu32
       ") for appId 0x%016" PRIx64 " system %d",
       hostClientId, transactionId, appId, allowSystemNanoappUnload);
  auto *cbData = memoryAlloc<UnloadNanoappCallbackData>();
  if (cbData == nullptr) {
    LOG_OOM();
  } else {
    cbData->appId = appId;
    cbData->transactionId = transactionId;
    cbData->hostClientId = hostClientId;
    cbData->allowSystemNanoappUnload = allowSystemNanoappUnload;

    EventLoopManagerSingleton::get()->deferCallback(
        SystemCallbackType::HandleUnloadNanoapp, cbData,
        handleUnloadNanoappCallback);
  }
}

DRAM_REGION_FUNCTION void HostLinkBase::sendNanoappTokenDatabaseInfo(
    uint64_t appId, uint32_t tokenDatabaseOffset, size_t tokenDatabaseSize) {
  constexpr size_t kInitialBufferSize = 56;
  struct DatabaseInfoArgs {
    uint64_t appId;
    uint32_t tokenDatabaseOffset;
    size_t tokenDatabaseSize;
  } args{appId, tokenDatabaseOffset, tokenDatabaseSize};

  auto msgBuilder = [](ChreFlatBufferBuilder &builder, void *cookie) {
    DatabaseInfoArgs *args = static_cast<DatabaseInfoArgs *>(cookie);
    uint16_t instanceId;
    EventLoopManagerSingleton::get()
        ->getEventLoop()
        .findNanoappInstanceIdByAppId(args->appId, &instanceId);
    HostProtocolChre::encodeNanoappTokenDatabaseInfo(
        builder, instanceId, args->appId, args->tokenDatabaseOffset,
        args->tokenDatabaseSize);
  };

  buildAndEnqueueMessage(PendingMessageType::NanoappTokenDatabaseInfo,
                         kInitialBufferSize, msgBuilder, &args);
}

DRAM_REGION_FUNCTION void HostLink::flushMessagesSentByNanoapp(
    uint64_t /* appId */) {
  // Not implemented
}

DRAM_REGION_FUNCTION void HostMessageHandlers::handleTimeSyncMessage(
    int64_t offset) {
  LOGE("%s is unsupported", __func__);
}

DRAM_REGION_FUNCTION void HostMessageHandlers::handleDebugDumpRequest(
    uint16_t hostClientId) {
  LOGV("%s: host client id %d", __func__, hostClientId);
  if (!EventLoopManagerSingleton::get()
           ->getDebugDumpManager()
           .onDebugDumpRequested(hostClientId)) {
    LOGE("Couldn't trigger debug dump process");
    sendDebugDumpResponse(hostClientId, /* success= */ false,
                          /* dataCount= */ 0);
  }
}

DRAM_REGION_FUNCTION void HostMessageHandlers::handleSettingChangeMessage(
    fbs::Setting setting, fbs::SettingState state) {
  // TODO(b/285219398): Refactor handleSettingChangeMessage to shared code
  Setting chreSetting;
  bool chreSettingEnabled;
  if (HostProtocolChre::getSettingFromFbs(setting, &chreSetting) &&
      HostProtocolChre::getSettingEnabledFromFbs(state, &chreSettingEnabled)) {
    EventLoopManagerSingleton::get()->getSettingManager().postSettingChange(
        chreSetting, chreSettingEnabled);
  }
}

DRAM_REGION_FUNCTION void HostMessageHandlers::handleSelfTestRequest(
    uint16_t hostClientId) {
  LOGV("%s: host client id %d", __func__, hostClientId);
}

DRAM_REGION_FUNCTION void HostMessageHandlers::handleNanConfigurationUpdate(
    bool /* enabled */) {
  LOGE("%s is unsupported", __func__);
}

DRAM_REGION_FUNCTION void HostMessageHandlers::handleBtSocketOpen(
    uint16_t /* hostClientId */, uint64_t /* socketId */,
    const char * /* name */, uint64_t /* endpointId */, uint64_t /* hubId */,
    uint32_t /* aclConnectionHandle */, uint32_t /* localCid */,
    uint32_t /* remoteCid */, uint32_t /* psm */, uint32_t /* localMtu */,
    uint32_t /* remoteMtu */, uint32_t /* localMps */, uint32_t /* remoteMps */,
    uint32_t /* initialRxCredits */, uint32_t /* initialTxCredits */) {
  LOGE("BT Socket offload not supported");
}

DRAM_REGION_FUNCTION void sendAudioRequest() {
  auto msgBuilder = [](ChreFlatBufferBuilder &builder, void * /*cookie*/) {
    HostProtocolChre::encodeLowPowerMicAccessRequest(builder);
  };
  constexpr size_t kInitialSize = 32;
  buildAndEnqueueMessage(PendingMessageType::LowPowerMicAccessRequest,
                         kInitialSize, msgBuilder, /* cookie= */ nullptr);
}

DRAM_REGION_FUNCTION void sendAudioRelease() {
  auto msgBuilder = [](ChreFlatBufferBuilder &builder, void * /*cookie*/) {
    HostProtocolChre::encodeLowPowerMicAccessRelease(builder);
  };
  constexpr size_t kInitialSize = 32;
  buildAndEnqueueMessage(PendingMessageType::LowPowerMicAccessRelease,
                         kInitialSize, msgBuilder, /* cookie= */ nullptr);
}

}  // namespace chre