1 /*
2 * Copyright 2012-2020, 2023 NXP
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16 #include <errno.h>
17 #include <pthread.h>
18 #include <log/log.h>
19
20 #include <phNxpLog.h>
21 #include <phNxpUciHal.h>
22 #include <phNxpUciHal_utils.h>
23
using namespace std;  // NOTE(review): file-scope using-namespace is discouraged; kept as-is for compatibility with the rest of this file.
// Tag-id -> value-list maps; not referenced anywhere in this file —
// presumably shared with other translation units. TODO confirm.
map<uint16_t, vector<uint16_t>> input_map;
map<uint16_t, vector<uint16_t>> conf_map;
27
28 /****************** Semaphore and mutex helper functions **********************/
29 /* Semaphore and mutex monitor */
30 struct phNxpUciHal_Monitor {
31 public:
CreatephNxpUciHal_Monitor32 static std::unique_ptr<phNxpUciHal_Monitor> Create() {
33 //auto monitor = std::unique_ptr<phNxpUciHal_Monitor>(new phNxpUciHal_Monitor());
34 auto monitor = std::make_unique<phNxpUciHal_Monitor>();
35 if (pthread_mutex_init(&monitor->reentrance_mutex_, NULL) == -1) {
36 return nullptr;
37 }
38 if (pthread_mutex_init(&monitor->concurrency_mutex_, NULL) == -1) {
39 pthread_mutex_destroy(&monitor->reentrance_mutex_);
40 return nullptr;
41 }
42 return monitor;
43 }
44
~phNxpUciHal_MonitorphNxpUciHal_Monitor45 virtual ~phNxpUciHal_Monitor() {
46 pthread_mutex_destroy(&concurrency_mutex_);
47 ReentranceUnlock();
48 pthread_mutex_destroy(&reentrance_mutex_);
49 for (auto p : sems_) {
50 NXPLOG_UCIHAL_E("Unreleased semaphore %p", p);
51 p->status = UWBSTATUS_FAILED;
52 sem_post(&p->sem);
53 }
54 sems_.clear();
55 }
56
AddSemphNxpUciHal_Monitor57 void AddSem(phNxpUciHal_Sem_t* pCallbackData) {
58 std::lock_guard<std::mutex> lock(lock_);
59 auto it = sems_.find(pCallbackData);
60 if (it == sems_.end()) {
61 sems_.insert(pCallbackData);
62 } else {
63 NXPLOG_UCIHAL_E("phNxpUciHal_init_cb_data: duplicated semaphore %p",
64 pCallbackData);
65 }
66 }
67
RemoveSemphNxpUciHal_Monitor68 void RemoveSem(phNxpUciHal_Sem_t* pCallbackData) {
69 std::lock_guard<std::mutex> lock(lock_);
70 auto it = sems_.find(pCallbackData);
71 if (it == sems_.end()) {
72 NXPLOG_UCIHAL_E("phNxpUciHal_cleanup_cb_data: orphan semaphore %p",
73 pCallbackData);
74 } else {
75 sems_.erase(it);
76 }
77 }
78
ReentrancelockphNxpUciHal_Monitor79 void Reentrancelock() {
80 pthread_mutex_lock(&reentrance_mutex_);
81 }
82
ReentranceUnlockphNxpUciHal_Monitor83 void ReentranceUnlock() {
84 pthread_mutex_unlock(&reentrance_mutex_);
85 }
86
ConcurrencylockphNxpUciHal_Monitor87 void Concurrencylock() {
88 pthread_mutex_lock(&concurrency_mutex_);
89 }
90
ConcurrencyUnlockphNxpUciHal_Monitor91 void ConcurrencyUnlock() {
92 pthread_mutex_unlock(&concurrency_mutex_);
93 }
94
95 private:
96 std::unordered_set<phNxpUciHal_Sem_t*> sems_;
97 std::mutex lock_;
98 // Mutex protecting native library against reentrance
99 pthread_mutex_t reentrance_mutex_;
100 // Mutex protecting native library against concurrency
101 pthread_mutex_t concurrency_mutex_;
102 };
103
// Process-wide monitor instance; created by phNxpUciHal_init_monitor() and
// released by phNxpUciHal_cleanup_monitor().
static std::unique_ptr<phNxpUciHal_Monitor> nxpucihal_monitor;
105
106 /*******************************************************************************
107 **
108 ** Function phNxpUciHal_init_monitor
109 **
110 ** Description Initialize the semaphore monitor
111 **
112 ** Returns Pointer to monitor, otherwise NULL if failed
113 **
114 *******************************************************************************/
phNxpUciHal_init_monitor(void)115 bool phNxpUciHal_init_monitor(void) {
116 NXPLOG_UCIHAL_D("Entering phNxpUciHal_init_monitor");
117
118 nxpucihal_monitor = phNxpUciHal_Monitor::Create();
119
120 if (nxpucihal_monitor == nullptr) {
121 NXPLOG_UCIHAL_E("nxphal_monitor creation failed");
122 return false;
123 }
124 return true;
125 }
126
127 /*******************************************************************************
128 **
129 ** Function phNxpUciHal_cleanup_monitor
130 **
131 ** Description Clean up semaphore monitor
132 **
133 ** Returns None
134 **
135 *******************************************************************************/
phNxpUciHal_cleanup_monitor(void)136 void phNxpUciHal_cleanup_monitor(void) {
137 nxpucihal_monitor = nullptr;
138 }
139
140 /* Initialize the callback data */
phNxpUciHal_init_cb_data(phNxpUciHal_Sem_t * pCallbackData,void * pContext)141 tHAL_UWB_STATUS phNxpUciHal_init_cb_data(phNxpUciHal_Sem_t* pCallbackData,
142 void* pContext) {
143 /* Create semaphore */
144 if (sem_init(&pCallbackData->sem, 0, 0) == -1) {
145 NXPLOG_UCIHAL_E("Semaphore creation failed");
146 return UWBSTATUS_FAILED;
147 }
148
149 /* Set default status value */
150 pCallbackData->status = UWBSTATUS_FAILED;
151
152 /* Copy the context */
153 pCallbackData->pContext = pContext;
154
155 /* Add to active semaphore list */
156 if (nxpucihal_monitor != nullptr) {
157 nxpucihal_monitor->AddSem(pCallbackData);
158 }
159
160 return UWBSTATUS_SUCCESS;
161 }
162
163 /*******************************************************************************
164 **
165 ** Function phNxpUciHal_cleanup_cb_data
166 **
167 ** Description Clean up callback data
168 **
169 ** Returns None
170 **
171 *******************************************************************************/
phNxpUciHal_cleanup_cb_data(phNxpUciHal_Sem_t * pCallbackData)172 void phNxpUciHal_cleanup_cb_data(phNxpUciHal_Sem_t* pCallbackData) {
173 /* Destroy semaphore */
174 if (sem_destroy(&pCallbackData->sem)) {
175 NXPLOG_UCIHAL_E(
176 "phNxpUciHal_cleanup_cb_data: Failed to destroy semaphore");
177 }
178 if (nxpucihal_monitor != nullptr) {
179 nxpucihal_monitor->RemoveSem(pCallbackData);
180 }
181 }
182
REENTRANCE_LOCK()183 void REENTRANCE_LOCK() {
184 if (nxpucihal_monitor != nullptr) {
185 nxpucihal_monitor->Reentrancelock();
186 }
187 }
REENTRANCE_UNLOCK()188 void REENTRANCE_UNLOCK() {
189 if (nxpucihal_monitor != nullptr) {
190 nxpucihal_monitor->ReentranceUnlock();
191 }
192 }
CONCURRENCY_LOCK()193 void CONCURRENCY_LOCK() {
194 if (nxpucihal_monitor != nullptr) {
195 nxpucihal_monitor->Concurrencylock();
196 }
197 }
CONCURRENCY_UNLOCK()198 void CONCURRENCY_UNLOCK() {
199 if (nxpucihal_monitor != nullptr) {
200 nxpucihal_monitor->ConcurrencyUnlock();
201 }
202 }
203
phNxpUciHal_sem_timed_wait_msec(phNxpUciHal_Sem_t * pCallbackData,long msec)204 int phNxpUciHal_sem_timed_wait_msec(phNxpUciHal_Sem_t* pCallbackData, long msec)
205 {
206 int ret;
207 struct timespec absTimeout;
208 if (clock_gettime(CLOCK_MONOTONIC, &absTimeout) == -1) {
209 NXPLOG_UCIHAL_E("clock_gettime failed");
210 return -1;
211 }
212
213 if (msec > 1000L) {
214 absTimeout.tv_sec += msec / 1000L;
215 msec = msec % 1000L;
216 }
217 absTimeout.tv_nsec += msec * 1000000L;
218 if (absTimeout.tv_nsec > 1000000000L) {
219 absTimeout.tv_nsec -= 1000000000L;
220 absTimeout.tv_sec += 1;
221 }
222
223 while ((ret = sem_timedwait_monotonic_np(&pCallbackData->sem, &absTimeout)) == -1 && errno == EINTR) {
224 continue;
225 }
226 if (ret == -1 && errno == ETIMEDOUT) {
227 pCallbackData->status = UWBSTATUS_RESPONSE_TIMEOUT;
228 NXPLOG_UCIHAL_E("wait semaphore timed out");
229 return -1;
230 }
231 return 0;
232 }
233
234 /* END Semaphore and mutex helper functions */
235
236 /**************************** Other functions *********************************/
237
238 /*******************************************************************************
239 **
240 ** Function phNxpUciHal_print_packet
241 **
242 ** Description Print packet
243 **
244 ** Returns None
245 **
246 *******************************************************************************/
phNxpUciHal_print_packet(enum phNxpUciHal_Pkt_Type what,const uint8_t * p_data,uint16_t len)247 void phNxpUciHal_print_packet(enum phNxpUciHal_Pkt_Type what, const uint8_t* p_data,
248 uint16_t len) {
249 uint32_t i;
250 char print_buffer[len * 3 + 1];
251
252 if ((gLog_level.ucix_log_level >= NXPLOG_LOG_DEBUG_LOGLEVEL)) {
253 /* OK to print */
254 }
255 else
256 {
257 /* Nothing to print...
258 * Why prepare buffer without printing?
259 */
260 return;
261 }
262
263 memset(print_buffer, 0, sizeof(print_buffer));
264 for (i = 0; i < len; i++) {
265 snprintf(&print_buffer[i * 2], 3, "%02X", p_data[i]);
266 }
267 switch(what) {
268 case NXP_TML_UCI_CMD_AP_2_UWBS:
269 {
270 NXPLOG_UCIX_D("len = %3d > %s", len, print_buffer);
271 }
272 break;
273 case NXP_TML_UCI_RSP_NTF_UWBS_2_AP:
274 {
275 NXPLOG_UCIR_D("len = %3d < %s", len, print_buffer);
276 }
277 break;
278 case NXP_TML_FW_DNLD_CMD_AP_2_UWBS:
279 {
280 // TODO: Should be NXPLOG_FWDNLD_D
281 NXPLOG_UCIX_D("len = %3d > (FW)%s", len, print_buffer);
282 }
283 break;
284 case NXP_TML_FW_DNLD_RSP_UWBS_2_AP:
285 {
286 // TODO: Should be NXPLOG_FWDNLD_D
287 NXPLOG_UCIR_D("len = %3d < (FW)%s", len, print_buffer);
288 }
289 break;
290 }
291
292 return;
293 }
294
295 /*******************************************************************************
296 **
297 ** Function phNxpUciHal_emergency_recovery
298 **
299 ** Description Emergency recovery in case of no other way out
300 **
301 ** Returns None
302 **
303 *******************************************************************************/
304
phNxpUciHal_emergency_recovery(void)305 void phNxpUciHal_emergency_recovery(void) {
306 NXPLOG_UCIHAL_E("%s: abort()", __func__);
307 abort();
308 }
309
310 /*******************************************************************************
311 **
312 ** Function phNxpUciHal_byteArrayToDouble
313 **
314 ** Description convert byte array to double
315 **
316 ** Returns double
317 **
318 *******************************************************************************/
phNxpUciHal_byteArrayToDouble(const uint8_t * p_data)319 double phNxpUciHal_byteArrayToDouble(const uint8_t* p_data) {
320 double d;
321 int size_d = sizeof(d);
322 uint8_t ptr[size_d],ptr_1[size_d];
323 memcpy(&ptr, p_data, size_d);
324 for(int i=0;i<size_d;i++) {
325 ptr_1[i] = ptr[size_d - 1 - i];
326 }
327 memcpy(&d, &ptr_1, sizeof(d));
328 return d; \
329 }
330
331 std::map<uint16_t, std::vector<uint8_t>>
decodeTlvBytes(const std::vector<uint8_t> & ext_ids,const uint8_t * tlv_bytes,size_t tlv_len)332 decodeTlvBytes(const std::vector<uint8_t> &ext_ids, const uint8_t *tlv_bytes, size_t tlv_len)
333 {
334 std::map<uint16_t, std::vector<uint8_t>> ret;
335
336 size_t i = 0;
337 while ((i + 1) < tlv_len) {
338 uint16_t tag;
339 uint8_t len;
340
341 uint8_t byte0 = tlv_bytes[i++];
342 uint8_t byte1 = tlv_bytes[i++];
343 if (std::find(ext_ids.begin(), ext_ids.end(), byte0) != ext_ids.end()) {
344 if (i >= tlv_len) {
345 NXPLOG_UCIHAL_E("Failed to decode TLV bytes (offset=%zu).", i);
346 break;
347 }
348 tag = (byte0 << 8) | byte1; // 2 bytes tag as big endiann
349 len = tlv_bytes[i++];
350 } else {
351 tag = byte0;
352 len = byte1;
353 }
354 if ((i + len) > tlv_len) {
355 NXPLOG_UCIHAL_E("Failed to decode TLV bytes (offset=%zu).", i);
356 break;
357 }
358 ret[tag] = std::vector(&tlv_bytes[i], &tlv_bytes[i + len]);
359 i += len;
360 }
361
362 return ret;
363 }
364
encodeTlvBytes(const std::map<uint16_t,std::vector<uint8_t>> & tlvs)365 std::vector<uint8_t> encodeTlvBytes(const std::map<uint16_t, std::vector<uint8_t>> &tlvs)
366 {
367 std::vector<uint8_t> bytes;
368
369 for (auto const & [tag, val] : tlvs) {
370 // Tag
371 if (tag > 0xff) {
372 bytes.push_back(tag >> 8);
373 }
374 bytes.push_back(tag & 0xff);
375
376 // Length
377 bytes.push_back(val.size());
378
379 // Value
380 bytes.insert(bytes.end(), val.begin(), val.end());
381 }
382
383 return bytes;
384 }
385