diff options
author | Devin Moore <devinmoore@google.com> | 2022-09-29 19:25:06 +0000 |
---|---|---|
committer | Devin Moore <devinmoore@google.com> | 2022-10-03 23:31:16 +0000 |
commit | 03967efd6f96f4e19ab7fc6abef44d3076353a99 (patch) | |
tree | 5f9d295d6233199820fcb5696195f06a70cc0313 | |
parent | aa8cf9e852ed841868d61a4870c957c506df28d8 (diff) | |
download | libfmq-03967efd6f96f4e19ab7fc6abef44d3076353a99.tar.gz |
fmq_fuzzer: fuzz manually created grantors and descriptors
There is nothing preventing users from creating their own grantors and
descriptors. It's not recommended.
Test: fmq_fuzzer
Bug: 244713317
Change-Id: I9db8f13c895e78e4604499479537d0d888716953
-rw-r--r-- | fuzzer/fmq_fuzzer.cpp | 177 |
1 files changed, 141 insertions, 36 deletions
diff --git a/fuzzer/fmq_fuzzer.cpp b/fuzzer/fmq_fuzzer.cpp index f196848..12871d1 100644 --- a/fuzzer/fmq_fuzzer.cpp +++ b/fuzzer/fmq_fuzzer.cpp @@ -45,6 +45,13 @@ static constexpr int kBlockingTimeoutNs = 10000000; */ static constexpr size_t kAlignment = 8; static constexpr size_t kMaxNumElements = PAGE_SIZE * 10 / sizeof(payload_t) - kAlignment + 1; +/* + * limit the custom grantor case to one page of memory. + * If we want to increase this, we need to make sure that all of grantors offset + * plus extent are less than the size of the page aligned ashmem region that is + * created + */ +static constexpr size_t kMaxCustomGrantorMemoryBytes = PAGE_SIZE; /* * The read counter can be found in the shared memory 16 bytes before the start @@ -72,8 +79,40 @@ typedef aidl::android::hardware::common::fmq::MQDescriptor<payload_t, Unsynchron typedef android::hardware::MQDescriptorSync<payload_t> MQDescSync; typedef android::hardware::MQDescriptorUnsync<payload_t> MQDescUnsync; -static inline uint64_t* getCounterPtr(payload_t* start, int byteOffset) { - return reinterpret_cast<uint64_t*>(reinterpret_cast<uint8_t*>(start) - byteOffset); +// AIDL and HIDL have different ways of accessing the grantors +template <typename Desc> +uint64_t* getCounterPtr(payload_t* start, const Desc& desc, int grantorIndx); + +uint64_t* createCounterPtr(payload_t* start, uint32_t offset, uint32_t data_offset) { + // start is the address of the beginning of the FMQ data section in memory + // offset is overall offset of the counter in the FMQ memory + // data_offset is the overall offset of the data section in the FMQ memory + // start - (data_offset) = beginning address of the FMQ memory + return reinterpret_cast<uint64_t*>(reinterpret_cast<uint8_t*>(start) - data_offset + offset); +} + +uint64_t* getCounterPtr(payload_t* start, const MQDescSync& desc, int grantorIndx) { + uint32_t offset = desc.grantors()[grantorIndx].offset; + uint32_t data_offset = 
desc.grantors()[android::hardware::details::DATAPTRPOS].offset; + return createCounterPtr(start, offset, data_offset); +} + +uint64_t* getCounterPtr(payload_t* start, const MQDescUnsync& desc, int grantorIndx) { + uint32_t offset = desc.grantors()[grantorIndx].offset; + uint32_t data_offset = desc.grantors()[android::hardware::details::DATAPTRPOS].offset; + return createCounterPtr(start, offset, data_offset); +} + +uint64_t* getCounterPtr(payload_t* start, const AidlMQDescSync& desc, int grantorIndx) { + uint32_t offset = desc.grantors[grantorIndx].offset; + uint32_t data_offset = desc.grantors[android::hardware::details::DATAPTRPOS].offset; + return createCounterPtr(start, offset, data_offset); +} + +uint64_t* getCounterPtr(payload_t* start, const AidlMQDescUnsync& desc, int grantorIndx) { + uint32_t offset = desc.grantors[grantorIndx].offset; + uint32_t data_offset = desc.grantors[android::hardware::details::DATAPTRPOS].offset; + return createCounterPtr(start, offset, data_offset); } template <typename Queue, typename Desc> @@ -101,7 +140,8 @@ void reader(const Desc& desc, std::vector<uint8_t> readerData, bool userFd) { ring = firstStart; } if (fdp.ConsumeIntegral<uint8_t>() == 1) { - uint64_t* writeCounter = getCounterPtr(ring, kWriteCounterOffsetBytes); + uint64_t* writeCounter = + getCounterPtr(ring, desc, android::hardware::details::WRITEPTRPOS); *writeCounter = fdp.ConsumeIntegral<uint64_t>(); } } @@ -142,8 +182,8 @@ template <> void readerBlocking<MessageQueueUnsync, MQDescUnsync>(const MQDescUnsync&, std::vector<uint8_t>&, std::atomic<size_t>&, std::atomic<size_t>&) {} -template <typename Queue> -void writer(Queue& writeMq, FuzzedDataProvider& fdp, bool userFd) { +template <typename Queue, typename Desc> +void writer(const Desc& desc, Queue& writeMq, FuzzedDataProvider& fdp, bool userFd) { payload_t* ring = nullptr; while (fdp.remaining_bytes()) { typename Queue::MemTransaction tx; @@ -163,7 +203,8 @@ void writer(Queue& writeMq, FuzzedDataProvider& fdp, 
bool userFd) { ring = firstStart; } if (fdp.ConsumeIntegral<uint8_t>() == 1) { - uint64_t* readCounter = getCounterPtr(ring, kReadCounterOffsetBytes); + uint64_t* readCounter = + getCounterPtr(ring, desc, android::hardware::details::READPTRPOS); *readCounter = fdp.ConsumeIntegral<uint64_t>(); } } @@ -197,46 +238,100 @@ void writerBlocking<MessageQueueUnsync>(MessageQueueUnsync&, FuzzedDataProvider& std::atomic<size_t>&, std::atomic<size_t>&) {} template <typename Queue, typename Desc> -inline std::optional<Desc> getDesc(Queue&); +inline std::optional<Desc> getDesc(std::unique_ptr<Queue>& queue, FuzzedDataProvider& fdp); template <typename Queue, typename Desc> -inline std::optional<Desc> getAidlDesc(Queue& queue) { - Desc desc = queue.dupeDesc(); - if (desc.handle.fds[0].get() == -1) { - return std::nullopt; +inline std::optional<Desc> getAidlDesc(std::unique_ptr<Queue>& queue, FuzzedDataProvider& fdp) { + if (queue) { + // get the existing descriptor from the queue + Desc desc = queue->dupeDesc(); + if (desc.handle.fds[0].get() == -1) { + return std::nullopt; + } else { + return std::make_optional(std::move(desc)); + } } else { - return std::make_optional(std::move(desc)); + // create a custom descriptor + std::vector<aidl::android::hardware::common::fmq::GrantorDescriptor> grantors; + size_t numGrantors = fdp.ConsumeIntegralInRange<size_t>(0, 4); + for (int i = 0; i < numGrantors; i++) { + grantors.push_back({fdp.ConsumeIntegralInRange<int32_t>(-2, 2) /* fdIndex */, + fdp.ConsumeIntegralInRange<int32_t>( + 0, kMaxCustomGrantorMemoryBytes) /* offset */, + fdp.ConsumeIntegralInRange<int64_t>( + 0, kMaxCustomGrantorMemoryBytes) /* extent */}); + // ashmem region is PAGE_SIZE and we need to make sure all of the + // pointers and data region fit inside + if (grantors.back().offset + grantors.back().extent > PAGE_SIZE) return std::nullopt; + } + + android::base::unique_fd fd( + ashmem_create_region("AidlCustomGrantors", kMaxCustomGrantorMemoryBytes)); + 
ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE); + aidl::android::hardware::common::NativeHandle handle; + handle.fds.emplace_back(fd.get()); + + return std::make_optional<Desc>( + {grantors, std::move(handle), sizeof(payload_t), fdp.ConsumeBool()}); } } template <> -inline std::optional<AidlMQDescSync> getDesc(AidlMessageQueueSync& queue) { - return getAidlDesc<AidlMessageQueueSync, AidlMQDescSync>(queue); +inline std::optional<AidlMQDescSync> getDesc(std::unique_ptr<AidlMessageQueueSync>& queue, + FuzzedDataProvider& fdp) { + return getAidlDesc<AidlMessageQueueSync, AidlMQDescSync>(queue, fdp); } template <> -inline std::optional<AidlMQDescUnsync> getDesc(AidlMessageQueueUnsync& queue) { - return getAidlDesc<AidlMessageQueueUnsync, AidlMQDescUnsync>(queue); +inline std::optional<AidlMQDescUnsync> getDesc(std::unique_ptr<AidlMessageQueueUnsync>& queue, + FuzzedDataProvider& fdp) { + return getAidlDesc<AidlMessageQueueUnsync, AidlMQDescUnsync>(queue, fdp); } template <typename Queue, typename Desc> -inline std::optional<Desc> getHidlDesc(Queue& queue) { - auto desc = queue.getDesc(); - if (!desc->isHandleValid()) { - return std::nullopt; +inline std::optional<Desc> getHidlDesc(std::unique_ptr<Queue>& queue, FuzzedDataProvider& fdp) { + if (queue) { + auto desc = queue->getDesc(); + if (!desc->isHandleValid()) { + return std::nullopt; + } else { + return std::make_optional(std::move(*desc)); + } } else { - return std::make_optional(std::move(*desc)); + // create a custom descriptor + std::vector<android::hardware::GrantorDescriptor> grantors; + size_t numGrantors = fdp.ConsumeIntegralInRange<size_t>(0, 4); + for (int i = 0; i < numGrantors; i++) { + grantors.push_back({fdp.ConsumeIntegral<uint32_t>() /* flags */, + fdp.ConsumeIntegralInRange<uint32_t>(0, 2) /* fdIndex */, + fdp.ConsumeIntegralInRange<uint32_t>( + 0, kMaxCustomGrantorMemoryBytes) /* offset */, + fdp.ConsumeIntegralInRange<uint64_t>( + 0, kMaxCustomGrantorMemoryBytes) /* extent */}); + // ashmem 
region is PAGE_SIZE and we need to make sure all of the + // pointers and data region fit inside + if (grantors.back().offset + grantors.back().extent > PAGE_SIZE) return std::nullopt; + } + + native_handle_t* handle = native_handle_create(1, 0); + int ashmemFd = ashmem_create_region("HidlCustomGrantors", kMaxCustomGrantorMemoryBytes); + ashmem_set_prot_region(ashmemFd, PROT_READ | PROT_WRITE); + handle->data[0] = ashmemFd; + + return std::make_optional<Desc>(grantors, handle, sizeof(payload_t)); } } template <> -inline std::optional<MQDescSync> getDesc(MessageQueueSync& queue) { - return getHidlDesc<MessageQueueSync, MQDescSync>(queue); +inline std::optional<MQDescSync> getDesc(std::unique_ptr<MessageQueueSync>& queue, + FuzzedDataProvider& fdp) { + return getHidlDesc<MessageQueueSync, MQDescSync>(queue, fdp); } template <> -inline std::optional<MQDescUnsync> getDesc(MessageQueueUnsync& queue) { - return getHidlDesc<MessageQueueUnsync, MQDescUnsync>(queue); +inline std::optional<MQDescUnsync> getDesc(std::unique_ptr<MessageQueueUnsync>& queue, + FuzzedDataProvider& fdp) { + return getHidlDesc<MessageQueueUnsync, MQDescUnsync>(queue, fdp); } template <typename Queue, typename Desc> @@ -244,20 +339,30 @@ void fuzzWithReaders(std::vector<uint8_t>& writerData, std::vector<std::vector<uint8_t>>& readerData, bool blocking) { FuzzedDataProvider fdp(&writerData[0], writerData.size()); bool evFlag = blocking || fdp.ConsumeBool(); - android::base::unique_fd dataFd; - size_t bufferSize = 0; size_t numElements = fdp.ConsumeIntegralInRange<size_t>(1, kMaxNumElements); + size_t bufferSize = numElements * sizeof(payload_t); bool userFd = fdp.ConsumeBool(); - if (userFd) { - // run test with our own data region - bufferSize = numElements * sizeof(payload_t); - dataFd.reset(::ashmem_create_region("SyncReadWrite", bufferSize)); + bool manualGrantors = fdp.ConsumeBool(); + std::unique_ptr<Queue> writeMq = nullptr; + if (manualGrantors) { + std::optional<Desc> 
customDesc(getDesc<Queue, Desc>(writeMq, fdp)); + if (customDesc) { + writeMq = std::make_unique<Queue>(*customDesc); + } + } else { + android::base::unique_fd dataFd; + if (userFd) { + // run test with our own data region + dataFd.reset(::ashmem_create_region("CustomData", 4096)); + } + writeMq = std::make_unique<Queue>(numElements, evFlag, std::move(dataFd), bufferSize); } - Queue writeMq(numElements, evFlag, std::move(dataFd), bufferSize); - if (!writeMq.isValid()) { + + if (writeMq == nullptr || !writeMq->isValid()) { return; } - const std::optional<Desc> desc(std::move(getDesc<Queue, Desc>(writeMq))); + // get optional desc + const std::optional<Desc> desc(std::move(getDesc<Queue, Desc>(writeMq, fdp))); CHECK(desc != std::nullopt); std::atomic<size_t> readersNotFinished = readerData.size(); @@ -275,9 +380,9 @@ void fuzzWithReaders(std::vector<uint8_t>& writerData, } if (blocking) { - writerBlocking<Queue>(writeMq, fdp, writersNotFinished, readersNotFinished); + writerBlocking<Queue>(*writeMq, fdp, writersNotFinished, readersNotFinished); } else { - writer<Queue>(writeMq, fdp, userFd); + writer<Queue>(*desc, *writeMq, fdp, userFd); } for (auto& reader : readers) { |