/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>  // PROT_READ / PROT_WRITE (may also come in transitively)
#include <unistd.h>    // getpagesize()

#include <atomic>
#include <iostream>
#include <limits>
#include <optional>
#include <thread>
#include <vector>

#include <android-base/logging.h>
#include <android-base/scopeguard.h>
#include <cutils/ashmem.h>         // ashmem_create_region(), ashmem_set_prot_region()
#include <cutils/native_handle.h>  // native_handle_create()
#include <fmq/AidlMessageQueue.h>
#include <fmq/ConvertMQDescriptors.h>
#include <fmq/EventFlag.h>
#include <fmq/MessageQueue.h>

#include "fuzzer/FuzzedDataProvider.h"

using aidl::android::hardware::common::fmq::SynchronizedReadWrite;
using aidl::android::hardware::common::fmq::UnsynchronizedWrite;
using android::hardware::kSynchronizedReadWrite;
using android::hardware::kUnsynchronizedWrite;

typedef int32_t payload_t;

// How long (in ns) readers/writers will wait during blocking calls: 100000 ns = 100 us.
static constexpr int kBlockingTimeoutNs = 100000;

/*
 * MessageQueueBase.h asserts when memory allocation fails, so we need to set
 * a reasonable size limit if we want to avoid tripping those asserts. This
 * caps the queue at roughly ten pages of payload.
 */
static constexpr size_t kAlignment = 8;
static const size_t kPageSize = getpagesize();
static const size_t kMaxNumElements = kPageSize * 10 / sizeof(payload_t) - kAlignment + 1;
/*
 * Limit the custom grantor case to one page of memory.
 * If we want to increase this, we need to make sure that every grantor's
 * offset plus extent is less than the size of the page-aligned ashmem
 * region that is created.
 */
static const size_t kMaxCustomGrantorMemoryBytes = kPageSize;

/*
 * The read counter can be found in the shared memory 16 bytes before the start
 * of the ring buffer.
 */
static constexpr int kReadCounterOffsetBytes = 16;
/*
 * The write counter can be found in the shared memory 8 bytes before the start
 * of the ring buffer.
 */
static constexpr int kWriteCounterOffsetBytes = 8;
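/*
 * Default layout of the FMQ shared-memory region (no user-supplied data fd),
 * as implied by the two offsets above:
 *
 *   [ read counter : 8 B ][ write counter : 8 B ][ ring buffer ... ]
 *     ring - 16             ring - 8               ring
 */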

static constexpr int kMaxNumSyncReaders = 1;
static constexpr int kMaxNumUnsyncReaders = 5;
static constexpr int kMaxDataPerReader = 1000;

typedef android::AidlMessageQueue<payload_t, SynchronizedReadWrite> AidlMessageQueueSync;
typedef android::AidlMessageQueue<payload_t, UnsynchronizedWrite> AidlMessageQueueUnsync;
typedef android::hardware::MessageQueue<payload_t, kSynchronizedReadWrite> MessageQueueSync;
typedef android::hardware::MessageQueue<payload_t, kUnsynchronizedWrite> MessageQueueUnsync;
typedef aidl::android::hardware::common::fmq::MQDescriptor<payload_t, SynchronizedReadWrite>
        AidlMQDescSync;
typedef aidl::android::hardware::common::fmq::MQDescriptor<payload_t, UnsynchronizedWrite>
        AidlMQDescUnsync;
typedef android::hardware::MQDescriptorSync<payload_t> MQDescSync;
typedef android::hardware::MQDescriptorUnsync<payload_t> MQDescUnsync;

// AIDL and HIDL have different ways of accessing the grantors.
template <typename Desc>
uint64_t* getCounterPtr(payload_t* start, const Desc& desc, int grantorIndx);

uint64_t* createCounterPtr(payload_t* start, uint32_t offset, uint32_t data_offset) {
    // start is the address of the beginning of the FMQ data section in memory
    // offset is the overall offset of the counter in the FMQ memory
    // data_offset is the overall offset of the data section in the FMQ memory
    // start - data_offset = beginning address of the FMQ memory
    return reinterpret_cast<uint64_t*>(reinterpret_cast<uint8_t*>(start) - data_offset + offset);
}

uint64_t* getCounterPtr(payload_t* start, const MQDescSync& desc, int grantorIndx) {
    uint32_t offset = desc.grantors()[grantorIndx].offset;
    uint32_t data_offset = desc.grantors()[android::hardware::details::DATAPTRPOS].offset;
    return createCounterPtr(start, offset, data_offset);
}

uint64_t* getCounterPtr(payload_t* start, const MQDescUnsync& desc, int grantorIndx) {
    uint32_t offset = desc.grantors()[grantorIndx].offset;
    uint32_t data_offset = desc.grantors()[android::hardware::details::DATAPTRPOS].offset;
    return createCounterPtr(start, offset, data_offset);
}

uint64_t* getCounterPtr(payload_t* start, const AidlMQDescSync& desc, int grantorIndx) {
    uint32_t offset = desc.grantors[grantorIndx].offset;
    uint32_t data_offset = desc.grantors[android::hardware::details::DATAPTRPOS].offset;
    return createCounterPtr(start, offset, data_offset);
}

uint64_t* getCounterPtr(payload_t* start, const AidlMQDescUnsync& desc, int grantorIndx) {
    uint32_t offset = desc.grantors[grantorIndx].offset;
    uint32_t data_offset = desc.grantors[android::hardware::details::DATAPTRPOS].offset;
    return createCounterPtr(start, offset, data_offset);
}
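
/*
 * The grantor positions used below (READPTRPOS, WRITEPTRPOS, DATAPTRPOS) are
 * the android::hardware::details indices of the read-counter, write-counter,
 * and data-section grantors. For example, the write counter of a queue whose
 * ring buffer starts at `ring` is reached as:
 *
 *   uint64_t* wc = getCounterPtr(ring, desc, android::hardware::details::WRITEPTRPOS);
 */

/*
 * Fuzzed reader: attaches a new queue to `desc` and performs reads of fuzzed
 * sizes. When the counters live in the default shared-memory region (no
 * user-supplied data fd), it also overwrites the write counter with a fuzzed
 * value to exercise the ring buffer's wrap-around/overflow handling.
 */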
template <typename Queue, typename Desc>
void reader(const Desc& desc, std::vector<uint8_t> readerData, bool userFd) {
    Queue readMq(desc);
    if (!readMq.isValid()) {
        LOG(ERROR) << "read mq invalid";
        return;
    }
    FuzzedDataProvider fdp(&readerData[0], readerData.size());
    payload_t* ring = reinterpret_cast<payload_t*>(readMq.getRingBufferPtr());
    while (fdp.remaining_bytes()) {
        typename Queue::MemTransaction tx;
        size_t numElements = fdp.ConsumeIntegralInRange<size_t>(0, kMaxNumElements);
        if (!readMq.beginRead(numElements, &tx)) {
            continue;
        }
        const auto& region = tx.getFirstRegion();
        payload_t* firstStart = region.getAddress();

        // The ring buffer is only next to the read/write counters when there is
        // no user-supplied fd.
        if (!userFd) {
            if (fdp.ConsumeIntegral<uint8_t>() == 1) {
                uint64_t* writeCounter =
                        getCounterPtr(ring, desc, android::hardware::details::WRITEPTRPOS);
                *writeCounter = fdp.ConsumeIntegral<uint64_t>();
            }
        }
        // Force an actual read of the (possibly corrupted) first element.
        (void)std::to_string(*firstStart);

        readMq.commitRead(numElements);
    }
}

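/*
 * Blocking reader: pulls batches of fuzzed size with readBlocking() until the
 * fuzz input runs low or all writers have finished. The ScopeGuard keeps
 * readersNotFinished accurate even on early return, so writers don't spin on
 * a reader that is already gone.
 */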
template <typename Queue, typename Desc>
void readerBlocking(const Desc& desc, std::vector<uint8_t>& readerData,
                    std::atomic<size_t>& readersNotFinished,
                    std::atomic<size_t>& writersNotFinished) {
    android::base::ScopeGuard guard([&readersNotFinished]() { readersNotFinished--; });
    Queue readMq(desc);
    if (!readMq.isValid()) {
        LOG(ERROR) << "read mq invalid";
        return;
    }
    FuzzedDataProvider fdp(&readerData[0], readerData.size());
    do {
        size_t count = fdp.remaining_bytes()
                               ? fdp.ConsumeIntegralInRange<size_t>(0, readMq.getQuantumCount() + 1)
                               : 1;
        std::vector<payload_t> data;
        data.resize(count);
        readMq.readBlocking(data.data(), count, kBlockingTimeoutNs);
    } while (fdp.remaining_bytes() > sizeof(size_t) && writersNotFinished > 0);
}

// Can't use blocking calls with Unsync queues (there is a static_assert).
template <>
void readerBlocking<AidlMessageQueueUnsync, AidlMQDescUnsync>(const AidlMQDescUnsync&,
                                                              std::vector<uint8_t>&,
                                                              std::atomic<size_t>&,
                                                              std::atomic<size_t>&) {}
template <>
void readerBlocking<MessageQueueUnsync, MQDescUnsync>(const MQDescUnsync&, std::vector<uint8_t>&,
                                                      std::atomic<size_t>&, std::atomic<size_t>&) {}

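/*
 * Fuzzed writer: writes one element at a time via beginWrite()/commitWrite()
 * and, when the counters are reachable (no user-supplied data fd),
 * occasionally overwrites the read counter with a fuzzed value.
 */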
template <typename Queue, typename Desc>
void writer(const Desc& desc, Queue& writeMq, FuzzedDataProvider& fdp, bool userFd) {
    payload_t* ring = reinterpret_cast<payload_t*>(writeMq.getRingBufferPtr());
    while (fdp.remaining_bytes()) {
        typename Queue::MemTransaction tx;
        size_t numElements = 1;
        if (!writeMq.beginWrite(numElements, &tx)) {
            // Need to consume something so we don't end up looping forever.
            fdp.ConsumeIntegral<uint8_t>();
            continue;
        }

        const auto& region = tx.getFirstRegion();
        payload_t* firstStart = region.getAddress();
        // The ring buffer is only next to the read/write counters when there is
        // no user-supplied fd.
        if (!userFd) {
            if (fdp.ConsumeIntegral<uint8_t>() == 1) {
                uint64_t* readCounter =
                        getCounterPtr(ring, desc, android::hardware::details::READPTRPOS);
                *readCounter = fdp.ConsumeIntegral<uint64_t>();
            }
        }
        *firstStart = fdp.ConsumeIntegral<uint8_t>();

        writeMq.commitWrite(numElements);
    }
}

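/*
 * Blocking writer: pushes batches of fuzzed size and content with
 * writeBlocking() while readers are still running and input remains.
 */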
template <typename Queue>
void writerBlocking(Queue& writeMq, FuzzedDataProvider& fdp,
                    std::atomic<size_t>& writersNotFinished,
                    std::atomic<size_t>& readersNotFinished) {
    android::base::ScopeGuard guard([&writersNotFinished]() { writersNotFinished--; });
    while (fdp.remaining_bytes() > sizeof(size_t) && readersNotFinished > 0) {
        size_t count = fdp.ConsumeIntegralInRange<size_t>(0, writeMq.getQuantumCount() + 1);
        std::vector<payload_t> data;
        for (size_t i = 0; i < count; i++) {
            data.push_back(fdp.ConsumeIntegral<uint8_t>());
        }
        writeMq.writeBlocking(data.data(), count, kBlockingTimeoutNs);
    }
}

// Can't use blocking calls with Unsync queues (there is a static_assert).
template <>
void writerBlocking<AidlMessageQueueUnsync>(AidlMessageQueueUnsync&, FuzzedDataProvider&,
                                            std::atomic<size_t>&, std::atomic<size_t>&) {}
template <>
void writerBlocking<MessageQueueUnsync>(MessageQueueUnsync&, FuzzedDataProvider&,
                                        std::atomic<size_t>&, std::atomic<size_t>&) {}

template <typename Queue, typename Desc>
inline std::optional<Desc> getDesc(std::unique_ptr<Queue>& queue, FuzzedDataProvider& fdp);

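/*
 * Descriptor factory used by the fuzzer: if `queue` already exists, dupe its
 * descriptor; otherwise build a custom descriptor out of up to four fuzzed
 * grantors backed by a one-page ashmem region. getAidlDesc()/getHidlDesc()
 * below implement the two descriptor flavors.
 */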
template <typename Queue, typename Desc>
inline std::optional<Desc> getAidlDesc(std::unique_ptr<Queue>& queue, FuzzedDataProvider& fdp) {
    if (queue) {
        // Get the existing descriptor from the queue.
        Desc desc = queue->dupeDesc();
        if (desc.handle.fds[0].get() == -1) {
            return std::nullopt;
        } else {
            return std::make_optional(std::move(desc));
        }
    } else {
        // Create a custom descriptor from fuzzed grantors.
        std::vector<aidl::android::hardware::common::fmq::GrantorDescriptor> grantors;
        size_t numGrantors = fdp.ConsumeIntegralInRange<size_t>(0, 4);
        for (size_t i = 0; i < numGrantors; i++) {
            grantors.push_back({fdp.ConsumeIntegralInRange<int32_t>(0, 2) /* fdIndex */,
                                fdp.ConsumeIntegralInRange<int32_t>(
                                        0, kMaxCustomGrantorMemoryBytes) /* offset */,
                                fdp.ConsumeIntegralInRange<int64_t>(
                                        0, kMaxCustomGrantorMemoryBytes) /* extent */});
            // The ashmem region is kPageSize, and we need to make sure all of the
            // pointers and the data region fit inside it.
            if (grantors.back().offset + grantors.back().extent > kPageSize) return std::nullopt;
        }

        android::base::unique_fd fd(
                ashmem_create_region("AidlCustomGrantors", kMaxCustomGrantorMemoryBytes));
        ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
        aidl::android::hardware::common::NativeHandle handle;
        handle.fds.emplace_back(fd.get());

        return std::make_optional<Desc>(
                {grantors, std::move(handle), sizeof(payload_t), fdp.ConsumeBool()});
    }
}

template <>
inline std::optional<AidlMQDescSync> getDesc(std::unique_ptr<AidlMessageQueueSync>& queue,
                                             FuzzedDataProvider& fdp) {
    return getAidlDesc<AidlMessageQueueSync, AidlMQDescSync>(queue, fdp);
}

template <>
inline std::optional<AidlMQDescUnsync> getDesc(std::unique_ptr<AidlMessageQueueUnsync>& queue,
                                               FuzzedDataProvider& fdp) {
    return getAidlDesc<AidlMessageQueueUnsync, AidlMQDescUnsync>(queue, fdp);
}

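/*
 * HIDL flavor of the custom-descriptor path. Unlike the AIDL grantors above,
 * HIDL GrantorDescriptors also carry a leading `flags` word, fuzzed here as
 * an arbitrary uint32_t.
 */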
template <typename Queue, typename Desc>
inline std::optional<Desc> getHidlDesc(std::unique_ptr<Queue>& queue, FuzzedDataProvider& fdp) {
    if (queue) {
        auto desc = queue->getDesc();
        if (!desc->isHandleValid()) {
            return std::nullopt;
        } else {
            return std::make_optional(std::move(*desc));
        }
    } else {
        // Create a custom descriptor from fuzzed grantors.
        std::vector<android::hardware::GrantorDescriptor> grantors;
        size_t numGrantors = fdp.ConsumeIntegralInRange<size_t>(0, 4);
        for (size_t i = 0; i < numGrantors; i++) {
            grantors.push_back({fdp.ConsumeIntegral<uint32_t>() /* flags */,
                                fdp.ConsumeIntegralInRange<uint32_t>(0, 2) /* fdIndex */,
                                fdp.ConsumeIntegralInRange<uint32_t>(
                                        0, kMaxCustomGrantorMemoryBytes) /* offset */,
                                fdp.ConsumeIntegralInRange<uint64_t>(
                                        0, kMaxCustomGrantorMemoryBytes) /* extent */});
            // The ashmem region is kPageSize, and we need to make sure all of the
            // pointers and the data region fit inside it.
            if (grantors.back().offset + grantors.back().extent > kPageSize) return std::nullopt;
        }

        native_handle_t* handle = native_handle_create(1, 0);
        int ashmemFd = ashmem_create_region("HidlCustomGrantors", kMaxCustomGrantorMemoryBytes);
        ashmem_set_prot_region(ashmemFd, PROT_READ | PROT_WRITE);
        handle->data[0] = ashmemFd;

        return std::make_optional<Desc>(grantors, handle, sizeof(payload_t));
    }
}

template <>
inline std::optional<MQDescSync> getDesc(std::unique_ptr<MessageQueueSync>& queue,
                                         FuzzedDataProvider& fdp) {
    return getHidlDesc<MessageQueueSync, MQDescSync>(queue, fdp);
}

template <>
inline std::optional<MQDescUnsync> getDesc(std::unique_ptr<MessageQueueUnsync>& queue,
                                           FuzzedDataProvider& fdp) {
    return getHidlDesc<MessageQueueUnsync, MQDescUnsync>(queue, fdp);
}

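/*
 * Core fuzz driver: builds the write queue (default allocation, user-supplied
 * data fd, or fuzzed custom grantors), spawns one thread per reader chunk
 * against a duped descriptor, then runs the writer on the remaining input and
 * joins the readers.
 */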
template <typename Queue, typename Desc>
void fuzzWithReaders(std::vector<uint8_t>& writerData,
                     std::vector<std::vector<uint8_t>>& readerData, bool blocking) {
    FuzzedDataProvider fdp(&writerData[0], writerData.size());
    bool evFlag = blocking || fdp.ConsumeBool();
    size_t numElements = fdp.ConsumeIntegralInRange<size_t>(1, kMaxNumElements);
    size_t bufferSize = numElements * sizeof(payload_t);
    bool userFd = fdp.ConsumeBool();
    bool manualGrantors = fdp.ConsumeBool();
    std::unique_ptr<Queue> writeMq = nullptr;
    if (manualGrantors) {
        std::optional<Desc> customDesc(getDesc<Queue, Desc>(writeMq, fdp));
        if (customDesc) {
            writeMq = std::make_unique<Queue>(*customDesc);
        }
    } else {
        android::base::unique_fd dataFd;
        if (userFd) {
            // Run the test with our own data region.
            dataFd.reset(::ashmem_create_region("CustomData", bufferSize));
        }
        writeMq = std::make_unique<Queue>(numElements, evFlag, std::move(dataFd), bufferSize);
    }

    if (writeMq == nullptr || !writeMq->isValid()) {
        return;
    }
    // Dupe the descriptor from the now-valid write queue for the readers.
    const std::optional<Desc> desc(std::move(getDesc<Queue, Desc>(writeMq, fdp)));
    CHECK(desc != std::nullopt);

    std::atomic<size_t> readersNotFinished = readerData.size();
    std::atomic<size_t> writersNotFinished = 1;
    std::vector<std::thread> readers;
    for (size_t i = 0; i < readerData.size(); i++) {
        if (blocking) {
            readers.emplace_back(readerBlocking<Queue, Desc>, std::ref(*desc),
                                 std::ref(readerData[i]), std::ref(readersNotFinished),
                                 std::ref(writersNotFinished));
        } else {
            readers.emplace_back(reader<Queue, Desc>, std::ref(*desc), std::ref(readerData[i]),
                                 userFd);
        }
    }

    if (blocking) {
        writerBlocking<Queue>(*writeMq, fdp, writersNotFinished, readersNotFinished);
    } else {
        writer<Queue>(*desc, *writeMq, fdp, userFd);
    }

    for (auto& reader : readers) {
        reader.join();
    }
}

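/*
 * Fuzzer entry point: splits the input into per-reader chunks plus writer
 * data, then exercises both the HIDL and AIDL queues in either synchronized
 * or unsynchronized mode (blocking calls are only valid for synchronized
 * queues).
 */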
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
    if (size < 1 || size > 50000) {
        return 0;
    }
    FuzzedDataProvider fdp(data, size);

    bool fuzzSync = fdp.ConsumeBool();
    std::vector<std::vector<uint8_t>> readerData;
    uint8_t numReaders = fuzzSync ? fdp.ConsumeIntegralInRange<uint8_t>(0, kMaxNumSyncReaders)
                                  : fdp.ConsumeIntegralInRange<uint8_t>(0, kMaxNumUnsyncReaders);
    for (int i = 0; i < numReaders; i++) {
        readerData.emplace_back(fdp.ConsumeBytes<uint8_t>(kMaxDataPerReader));
    }
    bool fuzzBlocking = fdp.ConsumeBool();
    std::vector<uint8_t> writerData = fdp.ConsumeRemainingBytes<uint8_t>();
    if (fuzzSync) {
        fuzzWithReaders<MessageQueueSync, MQDescSync>(writerData, readerData, fuzzBlocking);
        fuzzWithReaders<AidlMessageQueueSync, AidlMQDescSync>(writerData, readerData, fuzzBlocking);
    } else {
        fuzzWithReaders<MessageQueueUnsync, MQDescUnsync>(writerData, readerData, false);
        fuzzWithReaders<AidlMessageQueueUnsync, AidlMQDescUnsync>(writerData, readerData, false);
    }

    return 0;
}