aboutsummaryrefslogtreecommitdiff
path: root/components
diff options
context:
space:
mode:
Diffstat (limited to 'components')
-rw-r--r--components/Android.bp13
-rw-r--r--components/ComponentStore.cpp207
-rw-r--r--components/DecodeComponent.cpp (renamed from components/V4L2DecodeComponent.cpp)357
-rw-r--r--components/DecodeInterface.cpp (renamed from components/V4L2DecodeInterface.cpp)321
-rw-r--r--components/EncodeComponent.cpp (renamed from components/V4L2EncodeComponent.cpp)324
-rw-r--r--components/EncodeInterface.cpp (renamed from components/V4L2EncodeInterface.cpp)115
-rw-r--r--components/V4L2ComponentFactory.cpp106
-rw-r--r--components/V4L2ComponentStore.cpp208
-rw-r--r--components/V4L2Decoder.cpp795
-rw-r--r--components/V4L2Encoder.cpp1095
-rw-r--r--components/VideoEncoder.cpp5
-rw-r--r--components/VideoFramePool.cpp17
-rw-r--r--components/include/v4l2_codec2/components/ComponentStore.h (renamed from components/include/v4l2_codec2/components/V4L2ComponentStore.h)53
-rw-r--r--components/include/v4l2_codec2/components/DecodeComponent.h (renamed from components/include/v4l2_codec2/components/V4L2DecodeComponent.h)43
-rw-r--r--components/include/v4l2_codec2/components/DecodeInterface.h (renamed from components/include/v4l2_codec2/components/V4L2DecodeInterface.h)24
-rw-r--r--components/include/v4l2_codec2/components/EncodeComponent.h (renamed from components/include/v4l2_codec2/components/V4L2EncodeComponent.h)57
-rw-r--r--components/include/v4l2_codec2/components/EncodeInterface.h (renamed from components/include/v4l2_codec2/components/V4L2EncodeInterface.h)20
-rw-r--r--components/include/v4l2_codec2/components/V4L2ComponentFactory.h39
-rw-r--r--components/include/v4l2_codec2/components/V4L2Decoder.h115
-rw-r--r--components/include/v4l2_codec2/components/V4L2Encoder.h201
-rw-r--r--components/include/v4l2_codec2/components/VideoEncoder.h10
21 files changed, 875 insertions, 3250 deletions
diff --git a/components/Android.bp b/components/Android.bp
index 5bee73b..73efcf2 100644
--- a/components/Android.bp
+++ b/components/Android.bp
@@ -18,14 +18,11 @@ cc_library {
srcs: [
"VideoFrame.cpp",
"VideoFramePool.cpp",
- "V4L2ComponentFactory.cpp",
- "V4L2ComponentStore.cpp",
- "V4L2Decoder.cpp",
- "V4L2DecodeComponent.cpp",
- "V4L2DecodeInterface.cpp",
- "V4L2Encoder.cpp",
- "V4L2EncodeComponent.cpp",
- "V4L2EncodeInterface.cpp",
+ "ComponentStore.cpp",
+ "DecodeComponent.cpp",
+ "DecodeInterface.cpp",
+ "EncodeComponent.cpp",
+ "EncodeInterface.cpp",
"VideoDecoder.cpp",
"VideoEncoder.cpp",
],
diff --git a/components/ComponentStore.cpp b/components/ComponentStore.cpp
new file mode 100644
index 0000000..aa4eba8
--- /dev/null
+++ b/components/ComponentStore.cpp
@@ -0,0 +1,207 @@
+// Copyright 2023 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ComponentStore"
+
+#include <v4l2_codec2/components/ComponentStore.h>
+
+#include <stdint.h>
+
+#include <memory>
+#include <mutex>
+
+#include <C2.h>
+#include <C2Config.h>
+#include <log/log.h>
+#include <media/stagefright/foundation/MediaDefs.h>
+
+#include <v4l2_codec2/common/VideoTypes.h>
+
+namespace android {
+namespace {
+const uint32_t kComponentRank = 0x80;
+
+} // namespace
+
+ComponentStore::ComponentStore(C2String storeName)
+ : mStoreName(std::move(storeName)), mReflector(std::make_shared<C2ReflectorHelper>()) {
+ ALOGV("%s()", __func__);
+}
+
+ComponentStore::~ComponentStore() {
+ ALOGV("%s()", __func__);
+
+ std::lock_guard<std::mutex> lock(mCachedFactoriesLock);
+ mCachedFactories.clear();
+}
+
+C2String ComponentStore::getName() const {
+ return mStoreName;
+}
+
+c2_status_t ComponentStore::createComponent(C2String name,
+ std::shared_ptr<C2Component>* const component) {
+ ALOGV("%s(%s)", __func__, name.c_str());
+
+ const auto& decl = mDeclarations.find(name);
+ if (decl == mDeclarations.end()) {
+ ALOGI("%s(): Invalid component name: %s", __func__, name.c_str());
+ return C2_NOT_FOUND;
+ }
+
+ auto factory = getFactory(name);
+ if (factory == nullptr) return C2_CORRUPTED;
+
+ component->reset();
+ return factory->createComponent(0, component);
+}
+
+c2_status_t ComponentStore::createInterface(
+ C2String name, std::shared_ptr<C2ComponentInterface>* const interface) {
+ ALOGV("%s(%s)", __func__, name.c_str());
+
+ const auto& decl = mDeclarations.find(name);
+ if (decl == mDeclarations.end()) {
+ ALOGI("%s(): Invalid component name: %s", __func__, name.c_str());
+ return C2_NOT_FOUND;
+ }
+
+ auto factory = getFactory(name);
+ if (factory == nullptr) return C2_CORRUPTED;
+
+ interface->reset();
+ return factory->createInterface(0, interface);
+}
+
+std::vector<std::shared_ptr<const C2Component::Traits>> ComponentStore::listComponents() {
+ ALOGV("%s()", __func__);
+
+ std::vector<std::shared_ptr<const C2Component::Traits>> ret;
+ for (const auto& decl : mDeclarations) {
+ ret.push_back(getTraits(decl.first));
+ }
+
+ return ret;
+}
+
+std::shared_ptr<C2ParamReflector> ComponentStore::getParamReflector() const {
+ return mReflector;
+}
+
+c2_status_t ComponentStore::copyBuffer(std::shared_ptr<C2GraphicBuffer> /* src */,
+ std::shared_ptr<C2GraphicBuffer> /* dst */) {
+ return C2_OMITTED;
+}
+
+c2_status_t ComponentStore::querySupportedParams_nb(
+ std::vector<std::shared_ptr<C2ParamDescriptor>>* const /* params */) const {
+ return C2_OK;
+}
+
+c2_status_t ComponentStore::query_sm(
+ const std::vector<C2Param*>& stackParams,
+ const std::vector<C2Param::Index>& heapParamIndices,
+ std::vector<std::unique_ptr<C2Param>>* const /* heapParams */) const {
+ // There are no supported config params.
+ return stackParams.empty() && heapParamIndices.empty() ? C2_OK : C2_BAD_INDEX;
+}
+
+c2_status_t ComponentStore::config_sm(
+ const std::vector<C2Param*>& params,
+ std::vector<std::unique_ptr<C2SettingResult>>* const /* failures */) {
+ // There are no supported config params.
+ return params.empty() ? C2_OK : C2_BAD_INDEX;
+}
+
+c2_status_t ComponentStore::querySupportedValues_sm(
+ std::vector<C2FieldSupportedValuesQuery>& fields) const {
+ // There are no supported config params.
+ return fields.empty() ? C2_OK : C2_BAD_INDEX;
+}
+
+::C2ComponentFactory* ComponentStore::getFactory(const C2String& name) {
+ ALOGV("%s(%s)", __func__, name.c_str());
+ ALOG_ASSERT(V4L2ComponentName::isValid(name.c_str()));
+
+ std::lock_guard<std::mutex> lock(mCachedFactoriesLock);
+ const auto it = mCachedFactories.find(name);
+ if (it != mCachedFactories.end()) return it->second.get();
+
+ const auto& decl = mDeclarations.find(name);
+ if (decl == mDeclarations.end()) {
+ ALOGI("%s(): Invalid component name: %s", __func__, name.c_str());
+ return nullptr;
+ }
+
+ std::unique_ptr<::C2ComponentFactory> factory = decl->second.factory(name, mReflector);
+ if (factory == nullptr) {
+ ALOGE("Failed to create factory for %s", name.c_str());
+ return nullptr;
+ }
+
+ auto ret = factory.get();
+ mCachedFactories.emplace(name, std::move(factory));
+ return ret;
+}
+
+std::shared_ptr<const C2Component::Traits> ComponentStore::getTraits(const C2String& name) {
+ ALOGV("%s(%s)", __func__, name.c_str());
+
+ const auto& iter = mDeclarations.find(name);
+ if (iter == mDeclarations.end()) {
+ ALOGE("Invalid component name: %s", name.c_str());
+ return nullptr;
+ }
+
+ const Declaration& decl = iter->second;
+
+ std::lock_guard<std::mutex> lock(mCachedTraitsLock);
+ auto it = mCachedTraits.find(name);
+ if (it != mCachedTraits.end()) return it->second;
+
+ auto traits = std::make_shared<C2Component::Traits>();
+ traits->name = name;
+ traits->domain = C2Component::DOMAIN_VIDEO;
+ traits->rank = kComponentRank;
+ traits->kind = decl.kind;
+
+ switch (decl.codec) {
+ case VideoCodec::H264:
+ traits->mediaType = MEDIA_MIMETYPE_VIDEO_AVC;
+ break;
+ case VideoCodec::VP8:
+ traits->mediaType = MEDIA_MIMETYPE_VIDEO_VP8;
+ break;
+ case VideoCodec::VP9:
+ traits->mediaType = MEDIA_MIMETYPE_VIDEO_VP9;
+ break;
+ case VideoCodec::HEVC:
+ traits->mediaType = MEDIA_MIMETYPE_VIDEO_HEVC;
+ break;
+ }
+
+ mCachedTraits.emplace(name, traits);
+ return traits;
+}
+
+ComponentStore::Builder::Builder(C2String storeName)
+ : mStore(new ComponentStore(std::move(storeName))) {}
+
+ComponentStore::Builder& ComponentStore::Builder::decoder(std::string name, VideoCodec codec,
+ GetFactory factory) {
+ mStore->mDeclarations[name] = Declaration{codec, C2Component::KIND_DECODER, std::move(factory)};
+ return *this;
+}
+
+ComponentStore::Builder& ComponentStore::Builder::encoder(std::string name, VideoCodec codec,
+ GetFactory factory) {
+ mStore->mDeclarations[name] = Declaration{codec, C2Component::KIND_ENCODER, std::move(factory)};
+ return *this;
+}
+
+std::shared_ptr<ComponentStore> ComponentStore::Builder::build() && {
+ return std::shared_ptr<ComponentStore>(std::move(mStore));
+}
+} // namespace android
diff --git a/components/V4L2DecodeComponent.cpp b/components/DecodeComponent.cpp
index 2770b1e..c88fa03 100644
--- a/components/V4L2DecodeComponent.cpp
+++ b/components/DecodeComponent.cpp
@@ -1,11 +1,12 @@
-// Copyright 2020 The Chromium Authors. All rights reserved.
+// Copyright 2023 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//#define LOG_NDEBUG 0
-#define LOG_TAG "V4L2DecodeComponent"
+#define ATRACE_TAG ATRACE_TAG_VIDEO
+#define LOG_TAG "DecodeComponent"
-#include <v4l2_codec2/components/V4L2DecodeComponent.h>
+#include <v4l2_codec2/components/DecodeComponent.h>
#include <inttypes.h>
#include <linux/videodev2.h>
@@ -19,60 +20,48 @@
#include <SimpleC2Interface.h>
#include <base/bind.h>
#include <base/callback_helpers.h>
+#include <base/strings/stringprintf.h>
#include <base/time/time.h>
#include <cutils/properties.h>
#include <log/log.h>
#include <media/stagefright/foundation/ColorUtils.h>
+#include <utils/Trace.h>
#include <v4l2_codec2/common/Common.h>
-#include <v4l2_codec2/common/NalParser.h>
+#include <v4l2_codec2/common/H264NalParser.h>
+#include <v4l2_codec2/common/HEVCNalParser.h>
#include <v4l2_codec2/common/VideoTypes.h>
#include <v4l2_codec2/components/BitstreamBuffer.h>
-#include <v4l2_codec2/components/V4L2Decoder.h>
#include <v4l2_codec2/components/VideoFramePool.h>
namespace android {
namespace {
-// CCBC pauses sending input buffers to the component when all the output slots are filled by
-// pending decoded buffers. If the available output buffers are exhausted before CCBC pauses sending
-// input buffers, CCodec may timeout due to waiting for a available output buffer.
-// This function returns the minimum number of output buffers to prevent the buffers from being
-// exhausted before CCBC pauses sending input buffers.
-size_t getMinNumOutputBuffers(VideoCodec codec) {
- // The constant values copied from CCodecBufferChannel.cpp.
- // (b/184020290): Check the value still sync when seeing error message from CCodec:
- // "previous call to queue exceeded timeout".
- constexpr size_t kSmoothnessFactor = 4;
- constexpr size_t kRenderingDepth = 3;
- // Extra number of needed output buffers for V4L2Decoder.
- constexpr size_t kExtraNumOutputBuffersForDecoder = 2;
-
- // The total needed number of output buffers at pipeline are:
- // - MediaCodec output slots: output delay + kSmoothnessFactor
- // - Surface: kRenderingDepth
- // - Component: kExtraNumOutputBuffersForDecoder
- return V4L2DecodeInterface::getOutputDelay(codec) + kSmoothnessFactor + kRenderingDepth +
- kExtraNumOutputBuffersForDecoder;
-}
-
// Mask against 30 bits to avoid (undefined) wraparound on signed integer.
int32_t frameIndexToBitstreamId(c2_cntr64_t frameIndex) {
return static_cast<int32_t>(frameIndex.peeku() & 0x3FFFFFFF);
}
-bool parseCodedColorAspects(const C2ConstLinearBlock& input,
+bool parseCodedColorAspects(const C2ConstLinearBlock& input, std::optional<VideoCodec> codec,
C2StreamColorAspectsInfo::input* codedAspects) {
C2ReadView view = input.map().get();
- NalParser parser(view.data(), view.capacity());
+ NalParser::ColorAspects aspects;
+ std::unique_ptr<NalParser> parser;
+ if (codec == VideoCodec::H264) {
+ parser = std::make_unique<H264NalParser>(view.data(), view.capacity());
+ } else if (codec == VideoCodec::HEVC) {
+ parser = std::make_unique<HEVCNalParser>(view.data(), view.capacity());
+ } else {
+ ALOGV("Unsupported codec for finding color aspects");
+ return false;
+ }
- if (!parser.locateSPS()) {
+ if (!parser->locateSPS()) {
ALOGV("Couldn't find SPS");
return false;
}
- NalParser::ColorAspects aspects;
- if (!parser.findCodedColorAspects(&aspects)) {
+ if (!parser->findCodedColorAspects(&aspects)) {
ALOGV("Couldn't find color description in SPS");
return false;
}
@@ -137,55 +126,26 @@ bool isNoShowFrameWork(const C2Work& work, const C2WorkOrdinalStruct& currOrdina
} // namespace
-// static
-std::atomic<int32_t> V4L2DecodeComponent::sConcurrentInstances = 0;
-
-// static
-std::shared_ptr<C2Component> V4L2DecodeComponent::create(
- const std::string& name, c2_node_id_t id, const std::shared_ptr<C2ReflectorHelper>& helper,
- C2ComponentFactory::ComponentDeleter deleter) {
- static const int32_t kMaxConcurrentInstances =
- property_get_int32("ro.vendor.v4l2_codec2.decode_concurrent_instances", -1);
- static std::mutex mutex;
-
- std::lock_guard<std::mutex> lock(mutex);
-
- if (kMaxConcurrentInstances >= 0 && sConcurrentInstances.load() >= kMaxConcurrentInstances) {
- ALOGW("Reject to Initialize() due to too many instances: %d", sConcurrentInstances.load());
- return nullptr;
- }
-
- auto intfImpl = std::make_shared<V4L2DecodeInterface>(name, helper);
- if (intfImpl->status() != C2_OK) {
- ALOGE("Failed to initialize V4L2DecodeInterface.");
- return nullptr;
- }
-
- return std::shared_ptr<C2Component>(new V4L2DecodeComponent(name, id, helper, intfImpl),
- deleter);
-}
-
-V4L2DecodeComponent::V4L2DecodeComponent(const std::string& name, c2_node_id_t id,
- const std::shared_ptr<C2ReflectorHelper>& helper,
- const std::shared_ptr<V4L2DecodeInterface>& intfImpl)
- : mIntfImpl(intfImpl),
- mIntf(std::make_shared<SimpleInterface<V4L2DecodeInterface>>(name.c_str(), id, mIntfImpl)) {
+DecodeComponent::DecodeComponent(uint32_t debugStreamId, const std::string& name, c2_node_id_t id,
+ const std::shared_ptr<DecodeInterface>& intfImpl)
+ : mDebugStreamId(debugStreamId),
+ mIntfImpl(intfImpl),
+ mIntf(std::make_shared<SimpleInterface<DecodeInterface>>(name.c_str(), id, mIntfImpl)) {
ALOGV("%s(%s)", __func__, name.c_str());
-
- sConcurrentInstances.fetch_add(1, std::memory_order_relaxed);
mIsSecure = name.find(".secure") != std::string::npos;
}
-V4L2DecodeComponent::~V4L2DecodeComponent() {
+DecodeComponent::~DecodeComponent() {
ALOGV("%s()", __func__);
-
- release();
-
- sConcurrentInstances.fetch_sub(1, std::memory_order_relaxed);
+ if (mDecoderThread.IsRunning() && !mDecoderTaskRunner->RunsTasksInCurrentSequence()) {
+ mDecoderTaskRunner->PostTask(FROM_HERE,
+ ::base::BindOnce(&DecodeComponent::releaseTask, mWeakThis));
+ mDecoderThread.Stop();
+ }
ALOGV("%s() done", __func__);
}
-c2_status_t V4L2DecodeComponent::start() {
+c2_status_t DecodeComponent::start() {
ALOGV("%s()", __func__);
std::lock_guard<std::mutex> lock(mStartStopLock);
@@ -205,7 +165,7 @@ c2_status_t V4L2DecodeComponent::start() {
c2_status_t status = C2_CORRUPTED;
::base::WaitableEvent done;
mDecoderTaskRunner->PostTask(
- FROM_HERE, ::base::BindOnce(&V4L2DecodeComponent::startTask, mWeakThis,
+ FROM_HERE, ::base::BindOnce(&DecodeComponent::startTask, mWeakThis,
::base::Unretained(&status), ::base::Unretained(&done)));
done.Wait();
@@ -213,55 +173,15 @@ c2_status_t V4L2DecodeComponent::start() {
return status;
}
-void V4L2DecodeComponent::startTask(c2_status_t* status, ::base::WaitableEvent* done) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
-
- ::base::ScopedClosureRunner done_caller(
- ::base::BindOnce(&::base::WaitableEvent::Signal, ::base::Unretained(done)));
- *status = C2_CORRUPTED;
-
- const auto codec = mIntfImpl->getVideoCodec();
- if (!codec) {
- ALOGE("Failed to get video codec.");
- return;
- }
- const size_t inputBufferSize = mIntfImpl->getInputBufferSize();
- const size_t minNumOutputBuffers = getMinNumOutputBuffers(*codec);
-
- // ::base::Unretained(this) is safe here because |mDecoder| is always destroyed before
- // |mDecoderThread| is stopped, so |*this| is always valid during |mDecoder|'s lifetime.
- mDecoder = V4L2Decoder::Create(*codec, inputBufferSize, minNumOutputBuffers,
- ::base::BindRepeating(&V4L2DecodeComponent::getVideoFramePool,
- ::base::Unretained(this)),
- ::base::BindRepeating(&V4L2DecodeComponent::onOutputFrameReady,
- ::base::Unretained(this)),
- ::base::BindRepeating(&V4L2DecodeComponent::reportError,
- ::base::Unretained(this), C2_CORRUPTED),
- mDecoderTaskRunner);
- if (!mDecoder) {
- ALOGE("Failed to create V4L2Decoder for %s", VideoCodecToString(*codec));
- return;
- }
-
- // Get default color aspects on start.
- if (!mIsSecure && *codec == VideoCodec::H264) {
- if (mIntfImpl->queryColorAspects(&mCurrentColorAspects) != C2_OK) return;
- mPendingColorAspectsChange = false;
- }
-
- *status = C2_OK;
-}
-
-std::unique_ptr<VideoFramePool> V4L2DecodeComponent::getVideoFramePool(const ui::Size& size,
- HalPixelFormat pixelFormat,
- size_t numBuffers) {
+std::unique_ptr<VideoFramePool> DecodeComponent::getVideoFramePool(const ui::Size& size,
+ HalPixelFormat pixelFormat,
+ size_t numBuffers) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
auto sharedThis = weak_from_this().lock();
if (sharedThis == nullptr) {
- ALOGE("%s(): V4L2DecodeComponent instance is destroyed.", __func__);
+ ALOGE("%s(): DecodeComponent instance is destroyed.", __func__);
return nullptr;
}
@@ -289,7 +209,7 @@ std::unique_ptr<VideoFramePool> V4L2DecodeComponent::getVideoFramePool(const ui:
mDecoderTaskRunner);
}
-c2_status_t V4L2DecodeComponent::stop() {
+c2_status_t DecodeComponent::stop() {
ALOGV("%s()", __func__);
std::lock_guard<std::mutex> lock(mStartStopLock);
@@ -301,7 +221,7 @@ c2_status_t V4L2DecodeComponent::stop() {
if (mDecoderThread.IsRunning()) {
mDecoderTaskRunner->PostTask(FROM_HERE,
- ::base::BindOnce(&V4L2DecodeComponent::stopTask, mWeakThis));
+ ::base::BindOnce(&DecodeComponent::stopTask, mWeakThis));
mDecoderThread.Stop();
mDecoderTaskRunner = nullptr;
}
@@ -310,7 +230,8 @@ c2_status_t V4L2DecodeComponent::stop() {
return C2_OK;
}
-void V4L2DecodeComponent::stopTask() {
+void DecodeComponent::stopTask() {
+ ATRACE_CALL();
ALOGV("%s()", __func__);
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
@@ -320,19 +241,19 @@ void V4L2DecodeComponent::stopTask() {
releaseTask();
}
-c2_status_t V4L2DecodeComponent::reset() {
+c2_status_t DecodeComponent::reset() {
ALOGV("%s()", __func__);
return stop();
}
-c2_status_t V4L2DecodeComponent::release() {
+c2_status_t DecodeComponent::release() {
ALOGV("%s()", __func__);
std::lock_guard<std::mutex> lock(mStartStopLock);
if (mDecoderThread.IsRunning()) {
- mDecoderTaskRunner->PostTask(
- FROM_HERE, ::base::BindOnce(&V4L2DecodeComponent::releaseTask, mWeakThis));
+ mDecoderTaskRunner->PostTask(FROM_HERE,
+ ::base::BindOnce(&DecodeComponent::releaseTask, mWeakThis));
mDecoderThread.Stop();
mDecoderTaskRunner = nullptr;
}
@@ -341,7 +262,8 @@ c2_status_t V4L2DecodeComponent::release() {
return C2_OK;
}
-void V4L2DecodeComponent::releaseTask() {
+void DecodeComponent::releaseTask() {
+ ATRACE_CALL();
ALOGV("%s()", __func__);
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
@@ -349,8 +271,8 @@ void V4L2DecodeComponent::releaseTask() {
mDecoder = nullptr;
}
-c2_status_t V4L2DecodeComponent::setListener_vb(
- const std::shared_ptr<C2Component::Listener>& listener, c2_blocking_t mayBlock) {
+c2_status_t DecodeComponent::setListener_vb(const std::shared_ptr<C2Component::Listener>& listener,
+ c2_blocking_t mayBlock) {
ALOGV("%s()", __func__);
auto currentState = mComponentState.load();
@@ -372,14 +294,14 @@ c2_status_t V4L2DecodeComponent::setListener_vb(
}
::base::WaitableEvent done;
- mDecoderTaskRunner->PostTask(FROM_HERE, ::base::Bind(&V4L2DecodeComponent::setListenerTask,
- mWeakThis, listener, &done));
+ mDecoderTaskRunner->PostTask(
+ FROM_HERE, ::base::Bind(&DecodeComponent::setListenerTask, mWeakThis, listener, &done));
done.Wait();
return C2_OK;
}
-void V4L2DecodeComponent::setListenerTask(const std::shared_ptr<Listener>& listener,
- ::base::WaitableEvent* done) {
+void DecodeComponent::setListenerTask(const std::shared_ptr<Listener>& listener,
+ ::base::WaitableEvent* done) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
@@ -387,7 +309,7 @@ void V4L2DecodeComponent::setListenerTask(const std::shared_ptr<Listener>& liste
done->Signal();
}
-c2_status_t V4L2DecodeComponent::queue_nb(std::list<std::unique_ptr<C2Work>>* const items) {
+c2_status_t DecodeComponent::queue_nb(std::list<std::unique_ptr<C2Work>>* const items) {
ALOGV("%s()", __func__);
auto currentState = mComponentState.load();
@@ -397,15 +319,22 @@ c2_status_t V4L2DecodeComponent::queue_nb(std::list<std::unique_ptr<C2Work>>* co
}
while (!items->empty()) {
+ if (ATRACE_ENABLED()) {
+ const std::string atraceLabel = ::base::StringPrintf("#%u C2Work", mDebugStreamId);
+ ATRACE_ASYNC_BEGIN(atraceLabel.c_str(),
+ items->front()->input.ordinal.frameIndex.peekull());
+ }
+
mDecoderTaskRunner->PostTask(FROM_HERE,
- ::base::BindOnce(&V4L2DecodeComponent::queueTask, mWeakThis,
+ ::base::BindOnce(&DecodeComponent::queueTask, mWeakThis,
std::move(items->front())));
items->pop_front();
}
return C2_OK;
}
-void V4L2DecodeComponent::queueTask(std::unique_ptr<C2Work> work) {
+void DecodeComponent::queueTask(std::unique_ptr<C2Work> work) {
+ ATRACE_CALL();
ALOGV("%s(): flags=0x%x, index=%llu, timestamp=%llu", __func__, work->input.flags,
work->input.ordinal.frameIndex.peekull(), work->input.ordinal.timestamp.peekull());
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
@@ -440,7 +369,8 @@ void V4L2DecodeComponent::queueTask(std::unique_ptr<C2Work> work) {
pumpPendingWorks();
}
-void V4L2DecodeComponent::pumpPendingWorks() {
+void DecodeComponent::pumpPendingWorks() {
+ ATRACE_CALL();
ALOGV("%s()", __func__);
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
@@ -466,43 +396,15 @@ void V4L2DecodeComponent::pumpPendingWorks() {
ALOGW_IF(!res.second, "We already inserted bitstreamId %d to decoder?", bitstreamId);
if (!isEmptyWork) {
- // If input.buffers is not empty, the buffer should have meaningful content inside.
- C2ConstLinearBlock linearBlock =
- work->input.buffers.front()->data().linearBlocks().front();
- ALOG_ASSERT(linearBlock.size() > 0u, "Input buffer of work(%d) is empty.", bitstreamId);
-
- // Try to parse color aspects from bitstream for CSD work of non-secure H264 codec.
- if (isCSDWork && !mIsSecure && (mIntfImpl->getVideoCodec() == VideoCodec::H264)) {
- C2StreamColorAspectsInfo::input codedAspects = {0u};
- if (parseCodedColorAspects(linearBlock, &codedAspects)) {
- std::vector<std::unique_ptr<C2SettingResult>> failures;
- c2_status_t status =
- mIntfImpl->config({&codedAspects}, C2_MAY_BLOCK, &failures);
- if (status != C2_OK) {
- ALOGE("Failed to config color aspects to interface: %d", status);
- reportError(status);
- return;
- }
-
- // Record current frame index, color aspects should be updated only for output
- // buffers whose frame indices are not less than this one.
- mPendingColorAspectsChange = true;
- mPendingColorAspectsChangeFrameIndex = work->input.ordinal.frameIndex.peeku();
- }
- }
-
- std::unique_ptr<ConstBitstreamBuffer> buffer = std::make_unique<ConstBitstreamBuffer>(
- bitstreamId, linearBlock, linearBlock.offset(), linearBlock.size());
- if (!buffer) {
- reportError(C2_CORRUPTED);
- return;
+ if (isCSDWork) {
+ processCSDWork(bitstreamId, work);
+ } else {
+ processWork(bitstreamId, work);
}
- mDecoder->decode(std::move(buffer), ::base::BindOnce(&V4L2DecodeComponent::onDecodeDone,
- mWeakThis, bitstreamId));
}
if (isEOSWork) {
- mDecoder->drain(::base::BindOnce(&V4L2DecodeComponent::onDrainDone, mWeakThis));
+ mDecoder->drain(::base::BindOnce(&DecodeComponent::onDrainDone, mWeakThis));
mIsDraining = true;
}
@@ -511,7 +413,63 @@ void V4L2DecodeComponent::pumpPendingWorks() {
}
}
-void V4L2DecodeComponent::onDecodeDone(int32_t bitstreamId, VideoDecoder::DecodeStatus status) {
+void DecodeComponent::processCSDWork(const int32_t bitstreamId, const C2Work* work) {
+ // If input.buffers is not empty, the buffer should have meaningful content inside.
+ C2ConstLinearBlock linearBlock = work->input.buffers.front()->data().linearBlocks().front();
+ ALOG_ASSERT(linearBlock.size() > 0u, "Input buffer of work(%d) is empty.", bitstreamId);
+
+ if (mIntfImpl->getVideoCodec() == VideoCodec::VP9) {
+ // The VP9 decoder does not support and does not need the Codec Specific Data (CSD):
+ // https://www.webmproject.org/docs/container/#vp9-codec-feature-metadata-codecprivate.
+ // The most of its content (profile, level, bit depth and chroma subsampling)
+ // can be extracted directly from VP9 bitstream. Ignore CSD if it was passed.
+ reportWorkIfFinished(bitstreamId);
+ return;
+ } else if ((!mIsSecure && mIntfImpl->getVideoCodec() == VideoCodec::H264) ||
+ mIntfImpl->getVideoCodec() == VideoCodec::HEVC) {
+ // Try to parse color aspects from bitstream for CSD work of non-secure H264 codec or HEVC
+ // codec (HEVC will only be CENCv3 which is parseable for secure).
+ C2StreamColorAspectsInfo::input codedAspects = {0u};
+ if (parseCodedColorAspects(linearBlock, mIntfImpl->getVideoCodec(), &codedAspects)) {
+ std::vector<std::unique_ptr<C2SettingResult>> failures;
+ c2_status_t status = mIntfImpl->config({&codedAspects}, C2_MAY_BLOCK, &failures);
+ if (status != C2_OK) {
+ ALOGE("Failed to config color aspects to interface: %d", status);
+ reportError(status);
+ return;
+ }
+ // Record current frame index, color aspects should be updated only for output
+ // buffers whose frame indices are not less than this one.
+ mPendingColorAspectsChange = true;
+ mPendingColorAspectsChangeFrameIndex = work->input.ordinal.frameIndex.peeku();
+ }
+ }
+
+ processWorkBuffer(bitstreamId, linearBlock);
+}
+
+void DecodeComponent::processWork(const int32_t bitstreamId, const C2Work* work) {
+ // If input.buffers is not empty, the buffer should have meaningful content inside.
+ C2ConstLinearBlock linearBlock = work->input.buffers.front()->data().linearBlocks().front();
+ ALOG_ASSERT(linearBlock.size() > 0u, "Input buffer of work(%d) is empty.", bitstreamId);
+
+ processWorkBuffer(bitstreamId, linearBlock);
+}
+
+void DecodeComponent::processWorkBuffer(const int32_t bitstreamId,
+ const C2ConstLinearBlock& linearBlock) {
+ std::unique_ptr<ConstBitstreamBuffer> buffer = std::make_unique<ConstBitstreamBuffer>(
+ bitstreamId, linearBlock, linearBlock.offset(), linearBlock.size());
+ if (!buffer) {
+ reportError(C2_CORRUPTED);
+ return;
+ }
+ mDecoder->decode(std::move(buffer),
+ ::base::BindOnce(&DecodeComponent::onDecodeDone, mWeakThis, bitstreamId));
+}
+
+void DecodeComponent::onDecodeDone(int32_t bitstreamId, VideoDecoder::DecodeStatus status) {
+ ATRACE_CALL();
ALOGV("%s(bitstreamId=%d, status=%s)", __func__, bitstreamId,
VideoDecoder::DecodeStatusToString(status));
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
@@ -548,7 +506,7 @@ void V4L2DecodeComponent::onDecodeDone(int32_t bitstreamId, VideoDecoder::Decode
}
}
-void V4L2DecodeComponent::onOutputFrameReady(std::unique_ptr<VideoFrame> frame) {
+void DecodeComponent::onOutputFrameReady(std::unique_ptr<VideoFrame> frame) {
ALOGV("%s(bitstreamId=%d)", __func__, frame->getBitstreamId());
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
@@ -583,8 +541,9 @@ void V4L2DecodeComponent::onOutputFrameReady(std::unique_ptr<VideoFrame> frame)
pumpReportWork();
}
-void V4L2DecodeComponent::detectNoShowFrameWorksAndReportIfFinished(
+void DecodeComponent::detectNoShowFrameWorksAndReportIfFinished(
const C2WorkOrdinalStruct& currOrdinal) {
+ ATRACE_CALL();
ALOGV("%s()", __func__);
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
@@ -613,7 +572,8 @@ void V4L2DecodeComponent::detectNoShowFrameWorksAndReportIfFinished(
for (const int32_t bitstreamId : noShowFrameBitstreamIds) reportWorkIfFinished(bitstreamId);
}
-void V4L2DecodeComponent::pumpReportWork() {
+void DecodeComponent::pumpReportWork() {
+ ATRACE_CALL();
ALOGV("%s()", __func__);
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
@@ -623,7 +583,8 @@ void V4L2DecodeComponent::pumpReportWork() {
}
}
-bool V4L2DecodeComponent::reportWorkIfFinished(int32_t bitstreamId) {
+bool DecodeComponent::reportWorkIfFinished(int32_t bitstreamId) {
+ ATRACE_CALL();
ALOGV("%s(bitstreamId = %d)", __func__, bitstreamId);
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
@@ -657,7 +618,8 @@ bool V4L2DecodeComponent::reportWorkIfFinished(int32_t bitstreamId) {
return reportWork(std::move(work));
}
-bool V4L2DecodeComponent::reportEOSWork() {
+bool DecodeComponent::reportEOSWork() {
+ ATRACE_CALL();
ALOGV("%s()", __func__);
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
@@ -691,7 +653,8 @@ bool V4L2DecodeComponent::reportEOSWork() {
return reportWork(std::move(eosWork));
}
-bool V4L2DecodeComponent::reportWork(std::unique_ptr<C2Work> work) {
+bool DecodeComponent::reportWork(std::unique_ptr<C2Work> work) {
+ ATRACE_CALL();
ALOGV("%s(work=%llu)", __func__, work->input.ordinal.frameIndex.peekull());
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
@@ -700,14 +663,19 @@ bool V4L2DecodeComponent::reportWork(std::unique_ptr<C2Work> work) {
return false;
}
+ if (ATRACE_ENABLED()) {
+ const std::string atraceLabel = ::base::StringPrintf("#%u C2Work", mDebugStreamId);
+ ATRACE_ASYNC_END(atraceLabel.c_str(), work->input.ordinal.frameIndex.peekull());
+ }
std::list<std::unique_ptr<C2Work>> finishedWorks;
finishedWorks.emplace_back(std::move(work));
mListener->onWorkDone_nb(weak_from_this(), std::move(finishedWorks));
return true;
}
-c2_status_t V4L2DecodeComponent::flush_sm(
- flush_mode_t mode, std::list<std::unique_ptr<C2Work>>* const /* flushedWork */) {
+c2_status_t DecodeComponent::flush_sm(flush_mode_t mode,
+ std::list<std::unique_ptr<C2Work>>* const /* flushedWork */) {
+ ATRACE_CALL();
ALOGV("%s()", __func__);
auto currentState = mComponentState.load();
@@ -720,11 +688,12 @@ c2_status_t V4L2DecodeComponent::flush_sm(
}
mDecoderTaskRunner->PostTask(FROM_HERE,
- ::base::BindOnce(&V4L2DecodeComponent::flushTask, mWeakThis));
+ ::base::BindOnce(&DecodeComponent::flushTask, mWeakThis));
return C2_OK;
}
-void V4L2DecodeComponent::flushTask() {
+void DecodeComponent::flushTask() {
+ ATRACE_CALL();
ALOGV("%s()", __func__);
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
@@ -735,7 +704,7 @@ void V4L2DecodeComponent::flushTask() {
mIsDraining = false;
}
-void V4L2DecodeComponent::reportAbandonedWorks() {
+void DecodeComponent::reportAbandonedWorks() {
ALOGV("%s()", __func__);
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
@@ -756,6 +725,11 @@ void V4L2DecodeComponent::reportAbandonedWorks() {
if (!work->input.buffers.empty()) {
work->input.buffers.front().reset();
}
+
+ if (ATRACE_ENABLED()) {
+ const std::string atraceLabel = ::base::StringPrintf("#%u C2Work", mDebugStreamId);
+ ATRACE_ASYNC_END(atraceLabel.c_str(), work->input.ordinal.frameIndex.peekull());
+ }
}
if (!abandonedWorks.empty()) {
if (!mListener) {
@@ -766,7 +740,7 @@ void V4L2DecodeComponent::reportAbandonedWorks() {
}
}
-c2_status_t V4L2DecodeComponent::drain_nb(drain_mode_t mode) {
+c2_status_t DecodeComponent::drain_nb(drain_mode_t mode) {
ALOGV("%s(mode=%u)", __func__, mode);
auto currentState = mComponentState.load();
@@ -784,12 +758,13 @@ c2_status_t V4L2DecodeComponent::drain_nb(drain_mode_t mode) {
case DRAIN_COMPONENT_WITH_EOS:
mDecoderTaskRunner->PostTask(FROM_HERE,
- ::base::BindOnce(&V4L2DecodeComponent::drainTask, mWeakThis));
+ ::base::BindOnce(&DecodeComponent::drainTask, mWeakThis));
return C2_OK;
}
}
-void V4L2DecodeComponent::drainTask() {
+void DecodeComponent::drainTask() {
+ ATRACE_CALL();
ALOGV("%s()", __func__);
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
@@ -802,12 +777,12 @@ void V4L2DecodeComponent::drainTask() {
if (!mWorksAtDecoder.empty()) {
ALOGV("Drain the pending works at the decoder.");
- mDecoder->drain(::base::BindOnce(&V4L2DecodeComponent::onDrainDone, mWeakThis));
+ mDecoder->drain(::base::BindOnce(&DecodeComponent::onDrainDone, mWeakThis));
mIsDraining = true;
}
}
-void V4L2DecodeComponent::onDrainDone(VideoDecoder::DecodeStatus status) {
+void DecodeComponent::onDrainDone(VideoDecoder::DecodeStatus status) {
ALOGV("%s(status=%s)", __func__, VideoDecoder::DecodeStatusToString(status));
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
@@ -827,12 +802,12 @@ void V4L2DecodeComponent::onDrainDone(VideoDecoder::DecodeStatus status) {
}
mDecoderTaskRunner->PostTask(
- FROM_HERE, ::base::BindOnce(&V4L2DecodeComponent::pumpPendingWorks, mWeakThis));
+ FROM_HERE, ::base::BindOnce(&DecodeComponent::pumpPendingWorks, mWeakThis));
return;
}
}
-void V4L2DecodeComponent::reportError(c2_status_t error) {
+void DecodeComponent::reportError(c2_status_t error) {
ALOGE("%s(error=%u)", __func__, static_cast<uint32_t>(error));
ALOG_ASSERT(mDecoderTaskRunner->RunsTasksInCurrentSequence());
@@ -846,16 +821,16 @@ void V4L2DecodeComponent::reportError(c2_status_t error) {
mListener->onError_nb(weak_from_this(), static_cast<uint32_t>(error));
}
-c2_status_t V4L2DecodeComponent::announce_nb(const std::vector<C2WorkOutline>& /* items */) {
+c2_status_t DecodeComponent::announce_nb(const std::vector<C2WorkOutline>& /* items */) {
return C2_OMITTED; // Tunneling is not supported by now
}
-std::shared_ptr<C2ComponentInterface> V4L2DecodeComponent::intf() {
+std::shared_ptr<C2ComponentInterface> DecodeComponent::intf() {
return mIntf;
}
// static
-const char* V4L2DecodeComponent::ComponentStateToString(ComponentState state) {
+const char* DecodeComponent::ComponentStateToString(ComponentState state) {
switch (state) {
case ComponentState::STOPPED:
return "STOPPED";
diff --git a/components/V4L2DecodeInterface.cpp b/components/DecodeInterface.cpp
index 32483be..53689bd 100644
--- a/components/V4L2DecodeInterface.cpp
+++ b/components/DecodeInterface.cpp
@@ -1,11 +1,11 @@
-// Copyright 2020 The Chromium Authors. All rights reserved.
+// Copyright 2023 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//#define LOG_NDEBUG 0
-#define LOG_TAG "V4L2DecodeInterface"
+#define LOG_TAG "DecodeInterface"
-#include <v4l2_codec2/components/V4L2DecodeInterface.h>
+#include <v4l2_codec2/components/DecodeInterface.h>
#include <C2PlatformSupport.h>
#include <SimpleC2Interface.h>
@@ -13,8 +13,7 @@
#include <log/log.h>
#include <media/stagefright/foundation/MediaDefs.h>
-#include <v4l2_codec2/common/V4L2ComponentCommon.h>
-#include <v4l2_codec2/common/V4L2Device.h>
+#include <v4l2_codec2/common/Common.h>
#include <v4l2_codec2/plugin_store/V4L2AllocatorId.h>
namespace android {
@@ -23,24 +22,14 @@ namespace {
constexpr size_t k1080pArea = 1920 * 1088;
constexpr size_t k4KArea = 3840 * 2160;
// Input bitstream buffer size for up to 1080p streams.
-constexpr size_t kInputBufferSizeFor1080p = 1024 * 1024; // 1MB
+// Set it to 2MB since it is possible for the encoded bitstream to exceed the size of 1MB
+// when using higher bitrates, like 1Mb/s on slower devices. Also, this brings up compatibility
+// with the Chrome browser as it is using 2MB buffer size for a 1080p stream, ref:
+// https://source.chromium.org/chromium/chromium/src/+/main:media/gpu/gpu_video_encode_accelerator_helpers.cc;l=25
+constexpr size_t kInputBufferSizeFor1080p = 2 * 1024 * 1024; // 2MB
// Input bitstream buffer size for up to 4k streams.
constexpr size_t kInputBufferSizeFor4K = 4 * kInputBufferSizeFor1080p;
-std::optional<VideoCodec> getCodecFromComponentName(const std::string& name) {
- if (name == V4L2ComponentName::kH264Decoder || name == V4L2ComponentName::kH264SecureDecoder)
- return VideoCodec::H264;
- if (name == V4L2ComponentName::kVP8Decoder || name == V4L2ComponentName::kVP8SecureDecoder)
- return VideoCodec::VP8;
- if (name == V4L2ComponentName::kVP9Decoder || name == V4L2ComponentName::kVP9SecureDecoder)
- return VideoCodec::VP9;
- if (name == V4L2ComponentName::kHEVCDecoder || name == V4L2ComponentName::kHEVCSecureDecoder)
- return VideoCodec::HEVC;
-
- ALOGE("Unknown name: %s", name.c_str());
- return std::nullopt;
-}
-
size_t calculateInputBufferSize(size_t area) {
if (area > k4KArea) {
ALOGW("Input buffer size for video size (%zu) larger than 4K (%zu) might be too small.",
@@ -54,24 +43,29 @@ size_t calculateInputBufferSize(size_t area) {
} // namespace
// static
-C2R V4L2DecodeInterface::ProfileLevelSetter(bool /* mayBlock */,
- C2P<C2StreamProfileLevelInfo::input>& info) {
+C2R DecodeInterface::ProfileLevelSetter(bool /* mayBlock */,
+ C2P<C2StreamProfileLevelInfo::input>& info) {
return info.F(info.v.profile)
.validatePossible(info.v.profile)
.plus(info.F(info.v.level).validatePossible(info.v.level));
}
// static
-C2R V4L2DecodeInterface::SizeSetter(bool /* mayBlock */,
- C2P<C2StreamPictureSizeInfo::output>& videoSize) {
+C2R DecodeInterface::SizeSetter(bool /* mayBlock */,
+ C2P<C2StreamPictureSizeInfo::output>& videoSize) {
return videoSize.F(videoSize.v.width)
.validatePossible(videoSize.v.width)
.plus(videoSize.F(videoSize.v.height).validatePossible(videoSize.v.height));
}
+C2R DecodeInterface::InputSizeSetter(bool /* mayBlock */,
+ C2P<C2StreamMaxBufferSizeInfo::input>& inputSize) {
+ return inputSize.F(inputSize.v.value).validatePossible(inputSize.v.value);
+}
+
// static
template <typename T>
-C2R V4L2DecodeInterface::DefaultColorAspectsSetter(bool /* mayBlock */, C2P<T>& def) {
+C2R DecodeInterface::DefaultColorAspectsSetter(bool /* mayBlock */, C2P<T>& def) {
if (def.v.range > C2Color::RANGE_OTHER) {
def.set().range = C2Color::RANGE_OTHER;
}
@@ -88,10 +82,10 @@ C2R V4L2DecodeInterface::DefaultColorAspectsSetter(bool /* mayBlock */, C2P<T>&
}
// static
-C2R V4L2DecodeInterface::MergedColorAspectsSetter(
- bool /* mayBlock */, C2P<C2StreamColorAspectsInfo::output>& merged,
- const C2P<C2StreamColorAspectsTuning::output>& def,
- const C2P<C2StreamColorAspectsInfo::input>& coded) {
+C2R DecodeInterface::MergedColorAspectsSetter(bool /* mayBlock */,
+ C2P<C2StreamColorAspectsInfo::output>& merged,
+ const C2P<C2StreamColorAspectsTuning::output>& def,
+ const C2P<C2StreamColorAspectsInfo::input>& coded) {
// Take coded values for all specified fields, and default values for unspecified ones.
merged.set().range = coded.v.range == RANGE_UNSPECIFIED ? def.v.range : coded.v.range;
merged.set().primaries =
@@ -103,58 +97,123 @@ C2R V4L2DecodeInterface::MergedColorAspectsSetter(
}
// static
-C2R V4L2DecodeInterface::MaxInputBufferSizeCalculator(
+C2R DecodeInterface::MaxInputBufferSizeCalculator(
bool /* mayBlock */, C2P<C2StreamMaxBufferSizeInfo::input>& me,
const C2P<C2StreamPictureSizeInfo::output>& size) {
- me.set().value = calculateInputBufferSize(size.v.width * size.v.height);
+ size_t calculatedSize = calculateInputBufferSize(size.v.width * size.v.height);
+
+ if (me.v.value < calculatedSize) me.set().value = calculatedSize;
+
return C2R::Ok();
}
-V4L2DecodeInterface::V4L2DecodeInterface(const std::string& name,
- const std::shared_ptr<C2ReflectorHelper>& helper)
- : C2InterfaceHelper(helper), mInitStatus(C2_OK) {
+DecodeInterface::DecodeInterface(const std::string& name,
+ const std::shared_ptr<C2ReflectorHelper>& helper,
+ const SupportedCapabilities& caps)
+ : C2InterfaceHelper(helper), mInitStatus(C2_OK), mVideoCodec(caps.codec) {
ALOGV("%s(%s)", __func__, name.c_str());
setDerivedInstance(this);
- mVideoCodec = getCodecFromComponentName(name);
- if (!mVideoCodec) {
- ALOGE("Invalid component name: %s", name.c_str());
- mInitStatus = C2_BAD_VALUE;
- return;
- }
-
addParameter(DefineParam(mKind, C2_PARAMKEY_COMPONENT_KIND)
.withConstValue(new C2ComponentKindSetting(C2Component::KIND_DECODER))
.build());
std::string inputMime;
+
+ ui::Size maxSize(1, 1);
+
+ std::vector<uint32_t> profiles;
+ for (const auto& supportedProfile : caps.supportedProfiles) {
+ if (isValidProfileForCodec(mVideoCodec.value(), supportedProfile.profile)) {
+ profiles.push_back(static_cast<uint32_t>(supportedProfile.profile));
+ maxSize.setWidth(std::max(maxSize.width, supportedProfile.max_resolution.width));
+ maxSize.setHeight(std::max(maxSize.height, supportedProfile.max_resolution.height));
+ }
+ }
+
+ // In case of no supported profile or uninitialized device maxSize is set to default
+ if (maxSize == ui::Size(1, 1)) maxSize = ui::Size(4096, 4096);
+
+ if (profiles.empty()) {
+ ALOGW("No supported profiles for H264 codec");
+        switch (*mVideoCodec) {  // default values used when the query is not supported
+ case VideoCodec::H264:
+ profiles = {
+ C2Config::PROFILE_AVC_BASELINE,
+ C2Config::PROFILE_AVC_CONSTRAINED_BASELINE,
+ C2Config::PROFILE_AVC_MAIN,
+ C2Config::PROFILE_AVC_HIGH,
+ };
+ break;
+ case VideoCodec::VP8:
+ profiles = {C2Config::PROFILE_VP8_0};
+ break;
+ case VideoCodec::VP9:
+ profiles = {C2Config::PROFILE_VP9_0};
+ break;
+ case VideoCodec::HEVC:
+ profiles = {C2Config::PROFILE_HEVC_MAIN};
+ break;
+ }
+ }
+
+ uint32_t defaultProfile = caps.defaultProfile;
+ if (defaultProfile == C2Config::PROFILE_UNUSED)
+ defaultProfile = *std::min_element(profiles.begin(), profiles.end());
+
+ std::vector<unsigned int> levels;
+ std::vector<C2Config::level_t> supportedLevels = caps.supportedLevels;
+ for (const auto& supportedLevel : supportedLevels) {
+ levels.push_back(static_cast<unsigned int>(supportedLevel));
+ }
+
+ if (levels.empty()) {
+ ALOGE("No supported levels for H264 codec");
+        switch (*mVideoCodec) {  // default values used when the query is not supported
+ case VideoCodec::H264:
+ levels = {C2Config::LEVEL_AVC_1, C2Config::LEVEL_AVC_1B, C2Config::LEVEL_AVC_1_1,
+ C2Config::LEVEL_AVC_1_2, C2Config::LEVEL_AVC_1_3, C2Config::LEVEL_AVC_2,
+ C2Config::LEVEL_AVC_2_1, C2Config::LEVEL_AVC_2_2, C2Config::LEVEL_AVC_3,
+ C2Config::LEVEL_AVC_3_1, C2Config::LEVEL_AVC_3_2, C2Config::LEVEL_AVC_4,
+ C2Config::LEVEL_AVC_4_1, C2Config::LEVEL_AVC_4_2, C2Config::LEVEL_AVC_5,
+ C2Config::LEVEL_AVC_5_1, C2Config::LEVEL_AVC_5_2};
+ break;
+ case VideoCodec::VP8:
+ levels = {C2Config::LEVEL_UNUSED};
+ break;
+ case VideoCodec::VP9:
+ levels = {C2Config::LEVEL_VP9_1, C2Config::LEVEL_VP9_1_1, C2Config::LEVEL_VP9_2,
+ C2Config::LEVEL_VP9_2_1, C2Config::LEVEL_VP9_3, C2Config::LEVEL_VP9_3_1,
+ C2Config::LEVEL_VP9_4, C2Config::LEVEL_VP9_4_1, C2Config::LEVEL_VP9_5};
+ break;
+ case VideoCodec::HEVC:
+ levels = {C2Config::LEVEL_HEVC_MAIN_1, C2Config::LEVEL_HEVC_MAIN_2,
+ C2Config::LEVEL_HEVC_MAIN_2_1, C2Config::LEVEL_HEVC_MAIN_3,
+ C2Config::LEVEL_HEVC_MAIN_3_1, C2Config::LEVEL_HEVC_MAIN_4,
+ C2Config::LEVEL_HEVC_MAIN_4_1, C2Config::LEVEL_HEVC_MAIN_5,
+ C2Config::LEVEL_HEVC_MAIN_5_1, C2Config::LEVEL_HEVC_MAIN_5_2,
+ C2Config::LEVEL_HEVC_MAIN_6, C2Config::LEVEL_HEVC_MAIN_6_1,
+ C2Config::LEVEL_HEVC_MAIN_6_2};
+ break;
+ }
+ }
+
+ uint32_t defaultLevel = caps.defaultLevel;
+ if (defaultLevel == C2Config::LEVEL_UNUSED)
+ defaultLevel = *std::min_element(levels.begin(), levels.end());
+
switch (*mVideoCodec) {
case VideoCodec::H264:
inputMime = MEDIA_MIMETYPE_VIDEO_AVC;
- addParameter(
- DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
- .withDefault(new C2StreamProfileLevelInfo::input(
- 0u, C2Config::PROFILE_AVC_MAIN, C2Config::LEVEL_AVC_4))
- .withFields(
- {C2F(mProfileLevel, profile)
- .oneOf({C2Config::PROFILE_AVC_BASELINE,
- C2Config::PROFILE_AVC_CONSTRAINED_BASELINE,
- C2Config::PROFILE_AVC_MAIN,
- C2Config::PROFILE_AVC_HIGH,
- C2Config::PROFILE_AVC_CONSTRAINED_HIGH}),
- C2F(mProfileLevel, level)
- .oneOf({C2Config::LEVEL_AVC_1, C2Config::LEVEL_AVC_1B,
- C2Config::LEVEL_AVC_1_1, C2Config::LEVEL_AVC_1_2,
- C2Config::LEVEL_AVC_1_3, C2Config::LEVEL_AVC_2,
- C2Config::LEVEL_AVC_2_1, C2Config::LEVEL_AVC_2_2,
- C2Config::LEVEL_AVC_3, C2Config::LEVEL_AVC_3_1,
- C2Config::LEVEL_AVC_3_2, C2Config::LEVEL_AVC_4,
- C2Config::LEVEL_AVC_4_1, C2Config::LEVEL_AVC_4_2,
- C2Config::LEVEL_AVC_5, C2Config::LEVEL_AVC_5_1,
- C2Config::LEVEL_AVC_5_2})})
- .withSetter(ProfileLevelSetter)
- .build());
+ addParameter(DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+ .withDefault(new C2StreamProfileLevelInfo::input(
+ 0u, static_cast<C2Config::profile_t>(defaultProfile),
+ static_cast<C2Config::level_t>(defaultLevel)))
+ .withFields({C2F(mProfileLevel, profile).oneOf(profiles),
+ C2F(mProfileLevel, level).oneOf(levels)})
+ .withSetter(ProfileLevelSetter)
+ .build());
break;
case VideoCodec::VP8:
@@ -167,48 +226,26 @@ V4L2DecodeInterface::V4L2DecodeInterface(const std::string& name,
case VideoCodec::VP9:
inputMime = MEDIA_MIMETYPE_VIDEO_VP9;
- addParameter(
- DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
- .withDefault(new C2StreamProfileLevelInfo::input(
- 0u, C2Config::PROFILE_VP9_0, C2Config::LEVEL_VP9_5))
- .withFields({C2F(mProfileLevel, profile).oneOf({C2Config::PROFILE_VP9_0}),
- C2F(mProfileLevel, level)
- .oneOf({C2Config::LEVEL_VP9_1, C2Config::LEVEL_VP9_1_1,
- C2Config::LEVEL_VP9_2, C2Config::LEVEL_VP9_2_1,
- C2Config::LEVEL_VP9_3, C2Config::LEVEL_VP9_3_1,
- C2Config::LEVEL_VP9_4, C2Config::LEVEL_VP9_4_1,
- C2Config::LEVEL_VP9_5})})
- .withSetter(ProfileLevelSetter)
- .build());
+ addParameter(DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+ .withDefault(new C2StreamProfileLevelInfo::input(
+ 0u, static_cast<C2Config::profile_t>(defaultProfile),
+ static_cast<C2Config::level_t>(defaultLevel)))
+ .withFields({C2F(mProfileLevel, profile).oneOf(profiles),
+ C2F(mProfileLevel, level).oneOf(levels)})
+ .withSetter(ProfileLevelSetter)
+ .build());
break;
case VideoCodec::HEVC:
inputMime = MEDIA_MIMETYPE_VIDEO_HEVC;
- addParameter(
- DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
- .withDefault(new C2StreamProfileLevelInfo::input(
- 0u, C2Config::PROFILE_HEVC_MAIN, C2Config::LEVEL_HEVC_MAIN_5_1))
- .withFields({C2F(mProfileLevel, profile)
- .oneOf({C2Config::PROFILE_HEVC_MAIN,
- C2Config::PROFILE_HEVC_MAIN_STILL}),
- C2F(mProfileLevel, level)
- .oneOf({C2Config::LEVEL_HEVC_MAIN_1,
- C2Config::LEVEL_HEVC_MAIN_2,
- C2Config::LEVEL_HEVC_MAIN_2_1,
- C2Config::LEVEL_HEVC_MAIN_3,
- C2Config::LEVEL_HEVC_MAIN_3_1,
- C2Config::LEVEL_HEVC_MAIN_4,
- C2Config::LEVEL_HEVC_MAIN_4_1,
- C2Config::LEVEL_HEVC_MAIN_5,
- C2Config::LEVEL_HEVC_MAIN_5_1,
- C2Config::LEVEL_HEVC_MAIN_5_2,
- C2Config::LEVEL_HEVC_HIGH_4,
- C2Config::LEVEL_HEVC_HIGH_4_1,
- C2Config::LEVEL_HEVC_HIGH_5,
- C2Config::LEVEL_HEVC_HIGH_5_1,
- C2Config::LEVEL_HEVC_HIGH_5_2})})
- .withSetter(ProfileLevelSetter)
- .build());
+ addParameter(DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+ .withDefault(new C2StreamProfileLevelInfo::input(
+ 0u, static_cast<C2Config::profile_t>(defaultProfile),
+ static_cast<C2Config::level_t>(defaultLevel)))
+ .withFields({C2F(mProfileLevel, profile).oneOf(profiles),
+ C2F(mProfileLevel, level).oneOf(levels)})
+ .withSetter(ProfileLevelSetter)
+ .build());
break;
}
@@ -232,6 +269,17 @@ V4L2DecodeInterface::V4L2DecodeInterface(const std::string& name,
.withConstValue(new C2PortDelayTuning::output(getOutputDelay(*mVideoCodec)))
.build());
+ // This value is set according to the relation between kNumInputBuffers = 16 and the current
+ // codec2 framework implementation. Specifically, this generally limits the framework to using
+ // <= 16 input buffers, although certain timing of events can result in a few more input buffers
+ // being allocated but rarely used. This lets us avoid remapping v4l2 input buffers and DMA
+ // buffers in the common case. We could go up to 4 here, to limit the framework to
+ // simultaneously enqueuing 16 input buffers, but there doesn't seem to be much of an a
+ // performance improvement from that.
+ addParameter(DefineParam(mPipelineDelay, C2_PARAMKEY_PIPELINE_DELAY)
+ .withConstValue(new C2PipelineDelayTuning(3))
+ .build());
+
addParameter(DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
.withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
inputMime.c_str()))
@@ -246,10 +294,11 @@ V4L2DecodeInterface::V4L2DecodeInterface(const std::string& name,
// In order to fasten the bootup time, we use the maximum supported size instead of querying the
// capability from the V4L2 device.
addParameter(DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
- .withDefault(new C2StreamPictureSizeInfo::output(0u, 320, 240))
+ .withDefault(new C2StreamPictureSizeInfo::output(
+ 0u, std::min(320, maxSize.width), std::min(240, maxSize.height)))
.withFields({
- C2F(mSize, width).inRange(16, 4096, 16),
- C2F(mSize, height).inRange(16, 4096, 16),
+ C2F(mSize, width).inRange(16, maxSize.width, 16),
+ C2F(mSize, height).inRange(16, maxSize.height, 16),
})
.withSetter(SizeSetter)
.build());
@@ -260,6 +309,7 @@ V4L2DecodeInterface::V4L2DecodeInterface(const std::string& name,
.withFields({
C2F(mMaxInputSize, value).any(),
})
+ .withSetter(InputSizeSetter)
.calculatedAs(MaxInputBufferSizeCalculator, mSize)
.build());
@@ -332,31 +382,36 @@ V4L2DecodeInterface::V4L2DecodeInterface(const std::string& name,
.withSetter(DefaultColorAspectsSetter)
.build());
- addParameter(
- DefineParam(mColorAspects, C2_PARAMKEY_COLOR_ASPECTS)
- .withDefault(new C2StreamColorAspectsInfo::output(
- 0u, C2Color::RANGE_UNSPECIFIED, C2Color::PRIMARIES_UNSPECIFIED,
- C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
- .withFields(
- {C2F(mColorAspects, range)
- .inRange(C2Color::RANGE_UNSPECIFIED, C2Color::RANGE_OTHER),
- C2F(mColorAspects, primaries)
- .inRange(C2Color::PRIMARIES_UNSPECIFIED,
- C2Color::PRIMARIES_OTHER),
- C2F(mColorAspects, transfer)
- .inRange(C2Color::TRANSFER_UNSPECIFIED,
- C2Color::TRANSFER_OTHER),
- C2F(mColorAspects, matrix)
- .inRange(C2Color::MATRIX_UNSPECIFIED, C2Color::MATRIX_OTHER)})
- .withSetter(MergedColorAspectsSetter, mDefaultColorAspects, mCodedColorAspects)
- .build());
+ // At this moment v4l2_codec2 support decoding this information only for
+ // unprotected H264 and both protected and unprotected HEVC.
+ if ((mVideoCodec == VideoCodec::H264 && !secureMode) || mVideoCodec == VideoCodec::HEVC) {
+ addParameter(DefineParam(mColorAspects, C2_PARAMKEY_COLOR_ASPECTS)
+ .withDefault(new C2StreamColorAspectsInfo::output(
+ 0u, C2Color::RANGE_UNSPECIFIED, C2Color::PRIMARIES_UNSPECIFIED,
+ C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
+ .withFields({C2F(mColorAspects, range)
+ .inRange(C2Color::RANGE_UNSPECIFIED,
+ C2Color::RANGE_OTHER),
+ C2F(mColorAspects, primaries)
+ .inRange(C2Color::PRIMARIES_UNSPECIFIED,
+ C2Color::PRIMARIES_OTHER),
+ C2F(mColorAspects, transfer)
+ .inRange(C2Color::TRANSFER_UNSPECIFIED,
+ C2Color::TRANSFER_OTHER),
+ C2F(mColorAspects, matrix)
+ .inRange(C2Color::MATRIX_UNSPECIFIED,
+ C2Color::MATRIX_OTHER)})
+ .withSetter(MergedColorAspectsSetter, mDefaultColorAspects,
+ mCodedColorAspects)
+ .build());
+ }
}
-size_t V4L2DecodeInterface::getInputBufferSize() const {
- return calculateInputBufferSize(mSize->width * mSize->height);
+size_t DecodeInterface::getInputBufferSize() const {
+ return mMaxInputSize->value;
}
-c2_status_t V4L2DecodeInterface::queryColorAspects(
+c2_status_t DecodeInterface::queryColorAspects(
std::shared_ptr<C2StreamColorAspectsInfo::output>* targetColorAspects) {
std::unique_ptr<C2StreamColorAspectsInfo::output> colorAspects =
std::make_unique<C2StreamColorAspectsInfo::output>(
@@ -369,7 +424,7 @@ c2_status_t V4L2DecodeInterface::queryColorAspects(
return status;
}
-uint32_t V4L2DecodeInterface::getOutputDelay(VideoCodec codec) {
+uint32_t DecodeInterface::getOutputDelay(VideoCodec codec) {
switch (codec) {
case VideoCodec::H264:
// Due to frame reordering an H264 decoder might need multiple additional input frames to be
@@ -380,9 +435,17 @@ uint32_t V4L2DecodeInterface::getOutputDelay(VideoCodec codec) {
case VideoCodec::HEVC:
return 16;
case VideoCodec::VP8:
- return 0;
+        // The decoder might hold a few frames as a reference for decoding. Since Android T
+ // the Codec2 is more prone to timeout the component if one is not producing frames. This
+ // might especially occur when those frames are held for reference and playback/decoding
+ // is paused. With increased output delay we inform Codec2 not to timeout the component,
+        // if the number of frames in components is less than the number of maximum reference frames
+ // that could be held by decoder.
+ // Reference: RFC 6386 Section 3. Compressed Frame Types
+ return 3;
case VideoCodec::VP9:
- return 0;
+ // Reference: https://www.webmproject.org/vp9/levels/
+ return 8;
}
}
diff --git a/components/V4L2EncodeComponent.cpp b/components/EncodeComponent.cpp
index b266a6e..0c7d044 100644
--- a/components/V4L2EncodeComponent.cpp
+++ b/components/EncodeComponent.cpp
@@ -1,11 +1,11 @@
-// Copyright 2020 The Chromium Authors. All rights reserved.
+// Copyright 2023 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
+// found in the LICENSE file.
//#define LOG_NDEBUG 0
-#define LOG_TAG "V4L2EncodeComponent"
+#define LOG_TAG "EncodeComponent"
-#include <v4l2_codec2/components/V4L2EncodeComponent.h>
+#include <v4l2_codec2/components/EncodeComponent.h>
#include <inttypes.h>
@@ -18,30 +18,36 @@
#include <android/hardware/graphics/common/1.0/types.h>
#include <base/bind.h>
#include <base/bind_helpers.h>
-#include <cutils/properties.h>
#include <log/log.h>
#include <media/stagefright/MediaDefs.h>
#include <ui/GraphicBuffer.h>
#include <ui/Size.h>
-#include <v4l2_codec2/common/Common.h>
#include <v4l2_codec2/common/EncodeHelpers.h>
#include <v4l2_codec2/common/FormatConverter.h>
-#include <v4l2_codec2/common/VideoPixelFormat.h>
#include <v4l2_codec2/components/BitstreamBuffer.h>
-#include <v4l2_codec2/components/V4L2EncodeInterface.h>
-#include <v4l2_codec2/components/V4L2Encoder.h>
+#include <v4l2_codec2/components/EncodeInterface.h>
+#include <v4l2_codec2/components/VideoEncoder.h>
using android::hardware::graphics::common::V1_0::BufferUsage;
namespace android {
namespace {
+// Create an input frame from the specified graphic block.
+std::unique_ptr<VideoEncoder::InputFrame> createInputFrame(
+ const C2ConstGraphicBlock& block, VideoPixelFormat format,
+ const std::vector<VideoFramePlane>& planes, uint64_t index, int64_t timestamp) {
+ std::vector<int> fds;
+ const C2Handle* const handle = block.handle();
+ for (int i = 0; i < handle->numFds; i++) {
+ fds.emplace_back(handle->data[i]);
+ }
-const VideoPixelFormat kInputPixelFormat = VideoPixelFormat::NV12;
-
-// The peak bitrate in function of the target bitrate, used when the bitrate mode is VBR.
-constexpr uint32_t kPeakBitrateMultiplier = 2u;
+ return std::make_unique<VideoEncoder::InputFrame>(std::move(fds), planes, format, index,
+ timestamp);
+}
+} // namespace
// Get the video frame layout from the specified |inputBlock|.
// TODO(dstaessens): Clean up code extracting layout from a C2GraphicBlock.
@@ -58,7 +64,7 @@ std::optional<std::vector<VideoFramePlane>> getVideoFrameLayout(const C2ConstGra
// IMPLEMENTATION_DEFINED and its backed format is RGB. We fill the layout by using
// ImplDefinedToRGBXMap in the case.
if (layout.type == C2PlanarLayout::TYPE_UNKNOWN) {
- std::unique_ptr<ImplDefinedToRGBXMap> idMap = ImplDefinedToRGBXMap::Create(block);
+ std::unique_ptr<ImplDefinedToRGBXMap> idMap = ImplDefinedToRGBXMap::create(block);
if (idMap == nullptr) {
ALOGE("Unable to parse RGBX_8888 from IMPLEMENTATION_DEFINED");
return std::nullopt;
@@ -169,94 +175,35 @@ std::optional<uint32_t> getVideoFrameStride(VideoPixelFormat format, ui::Size si
return planes.value()[0].mStride;
}
-// Create an input frame from the specified graphic block.
-std::unique_ptr<V4L2Encoder::InputFrame> CreateInputFrame(const C2ConstGraphicBlock& block,
- uint64_t index, int64_t timestamp) {
- VideoPixelFormat format;
- std::optional<std::vector<VideoFramePlane>> planes = getVideoFrameLayout(block, &format);
- if (!planes) {
- ALOGE("Failed to get input block's layout");
- return nullptr;
- }
-
- std::vector<int> fds;
- const C2Handle* const handle = block.handle();
- for (int i = 0; i < handle->numFds; i++) {
- fds.emplace_back(handle->data[i]);
- }
-
- return std::make_unique<V4L2Encoder::InputFrame>(std::move(fds), std::move(planes.value()),
- format, index, timestamp);
-}
-
-// Check whether the specified |profile| is an H.264 profile.
-bool IsH264Profile(C2Config::profile_t profile) {
- return (profile >= C2Config::PROFILE_AVC_BASELINE &&
- profile <= C2Config::PROFILE_AVC_ENHANCED_MULTIVIEW_DEPTH_HIGH);
-}
-
-} // namespace
-
-// static
-std::atomic<int32_t> V4L2EncodeComponent::sConcurrentInstances = 0;
-
-// static
-std::shared_ptr<C2Component> V4L2EncodeComponent::create(
- C2String name, c2_node_id_t id, std::shared_ptr<C2ReflectorHelper> helper,
- C2ComponentFactory::ComponentDeleter deleter) {
- ALOGV("%s(%s)", __func__, name.c_str());
-
- static const int32_t kMaxConcurrentInstances =
- property_get_int32("ro.vendor.v4l2_codec2.encode_concurrent_instances", -1);
-
- static std::mutex mutex;
- std::lock_guard<std::mutex> lock(mutex);
- if (kMaxConcurrentInstances >= 0 && sConcurrentInstances.load() >= kMaxConcurrentInstances) {
- ALOGW("Cannot create additional encoder, maximum number of instances reached: %d",
- kMaxConcurrentInstances);
- return nullptr;
- }
-
- auto interface = std::make_shared<V4L2EncodeInterface>(name, std::move(helper));
- if (interface->status() != C2_OK) {
- ALOGE("Component interface initialization failed (error code %d)", interface->status());
- return nullptr;
- }
-
- return std::shared_ptr<C2Component>(new V4L2EncodeComponent(name, id, std::move(interface)),
- deleter);
-}
-
-V4L2EncodeComponent::V4L2EncodeComponent(C2String name, c2_node_id_t id,
- std::shared_ptr<V4L2EncodeInterface> interface)
+EncodeComponent::EncodeComponent(C2String name, c2_node_id_t id,
+ std::shared_ptr<EncodeInterface> interface)
: mName(name),
mId(id),
mInterface(std::move(interface)),
mComponentState(ComponentState::LOADED) {
ALOGV("%s(%s)", __func__, name.c_str());
-
- sConcurrentInstances.fetch_add(1, std::memory_order_relaxed);
}
-V4L2EncodeComponent::~V4L2EncodeComponent() {
+EncodeComponent::~EncodeComponent() {
ALOGV("%s()", __func__);
// Stop encoder thread and invalidate pointers if component wasn't stopped before destroying.
- if (mEncoderThread.IsRunning()) {
+ if (mEncoderThread.IsRunning() && !mEncoderTaskRunner->RunsTasksInCurrentSequence()) {
mEncoderTaskRunner->PostTask(
FROM_HERE, ::base::BindOnce(
- [](::base::WeakPtrFactory<V4L2EncodeComponent>* weakPtrFactory) {
+ [](::base::WeakPtrFactory<EncodeComponent>* weakPtrFactory,
+ std::unique_ptr<VideoEncoder>* encoder) {
weakPtrFactory->InvalidateWeakPtrs();
+ encoder->reset();
},
- &mWeakThisFactory));
+ &mWeakThisFactory, &mEncoder));
mEncoderThread.Stop();
}
- sConcurrentInstances.fetch_sub(1, std::memory_order_relaxed);
ALOGV("%s(): done", __func__);
}
-c2_status_t V4L2EncodeComponent::start() {
+c2_status_t EncodeComponent::start() {
ALOGV("%s()", __func__);
// Lock while starting, to synchronize start/stop/reset/release calls.
@@ -278,7 +225,7 @@ c2_status_t V4L2EncodeComponent::start() {
::base::WaitableEvent done;
bool success = false;
mEncoderTaskRunner->PostTask(
- FROM_HERE, ::base::Bind(&V4L2EncodeComponent::startTask, mWeakThis, &success, &done));
+ FROM_HERE, ::base::Bind(&EncodeComponent::startTask, mWeakThis, &success, &done));
done.Wait();
if (!success) {
@@ -290,7 +237,7 @@ c2_status_t V4L2EncodeComponent::start() {
return C2_OK;
}
-c2_status_t V4L2EncodeComponent::stop() {
+c2_status_t EncodeComponent::stop() {
ALOGV("%s()", __func__);
// Lock while stopping, to synchronize start/stop/reset/release calls.
@@ -307,8 +254,8 @@ c2_status_t V4L2EncodeComponent::stop() {
// Wait for the component to stop.
::base::WaitableEvent done;
- mEncoderTaskRunner->PostTask(
- FROM_HERE, ::base::BindOnce(&V4L2EncodeComponent::stopTask, mWeakThis, &done));
+ mEncoderTaskRunner->PostTask(FROM_HERE,
+ ::base::BindOnce(&EncodeComponent::stopTask, mWeakThis, &done));
done.Wait();
mEncoderThread.Stop();
@@ -318,7 +265,7 @@ c2_status_t V4L2EncodeComponent::stop() {
return C2_OK;
}
-c2_status_t V4L2EncodeComponent::reset() {
+c2_status_t EncodeComponent::reset() {
ALOGV("%s()", __func__);
// The interface specification says: "This method MUST be supported in all (including tripped)
@@ -333,7 +280,7 @@ c2_status_t V4L2EncodeComponent::reset() {
return C2_OK;
}
-c2_status_t V4L2EncodeComponent::release() {
+c2_status_t EncodeComponent::release() {
ALOGV("%s()", __func__);
// The interface specification says: "This method MUST be supported in stopped state.", but the
@@ -344,7 +291,7 @@ c2_status_t V4L2EncodeComponent::release() {
return C2_OK;
}
-c2_status_t V4L2EncodeComponent::queue_nb(std::list<std::unique_ptr<C2Work>>* const items) {
+c2_status_t EncodeComponent::queue_nb(std::list<std::unique_ptr<C2Work>>* const items) {
ALOGV("%s()", __func__);
if (mComponentState != ComponentState::RUNNING) {
@@ -354,7 +301,7 @@ c2_status_t V4L2EncodeComponent::queue_nb(std::list<std::unique_ptr<C2Work>>* co
while (!items->empty()) {
mEncoderTaskRunner->PostTask(FROM_HERE,
- ::base::BindOnce(&V4L2EncodeComponent::queueTask, mWeakThis,
+ ::base::BindOnce(&EncodeComponent::queueTask, mWeakThis,
std::move(items->front())));
items->pop_front();
}
@@ -362,7 +309,7 @@ c2_status_t V4L2EncodeComponent::queue_nb(std::list<std::unique_ptr<C2Work>>* co
return C2_OK;
}
-c2_status_t V4L2EncodeComponent::drain_nb(drain_mode_t mode) {
+c2_status_t EncodeComponent::drain_nb(drain_mode_t mode) {
ALOGV("%s()", __func__);
if (mode == DRAIN_CHAIN) {
@@ -373,13 +320,13 @@ c2_status_t V4L2EncodeComponent::drain_nb(drain_mode_t mode) {
return C2_BAD_STATE;
}
- mEncoderTaskRunner->PostTask(
- FROM_HERE, ::base::BindOnce(&V4L2EncodeComponent::drainTask, mWeakThis, mode));
+ mEncoderTaskRunner->PostTask(FROM_HERE,
+ ::base::BindOnce(&EncodeComponent::drainTask, mWeakThis, mode));
return C2_OK;
}
-c2_status_t V4L2EncodeComponent::flush_sm(flush_mode_t mode,
- std::list<std::unique_ptr<C2Work>>* const flushedWork) {
+c2_status_t EncodeComponent::flush_sm(flush_mode_t mode,
+ std::list<std::unique_ptr<C2Work>>* const flushedWork) {
ALOGV("%s()", __func__);
if (mode != FLUSH_COMPONENT) {
@@ -395,19 +342,19 @@ c2_status_t V4L2EncodeComponent::flush_sm(flush_mode_t mode,
// immediately abandon all non-started work on the encoder thread. We can return all work that
// can't be immediately discarded using onWorkDone() later.
::base::WaitableEvent done;
- mEncoderTaskRunner->PostTask(FROM_HERE, ::base::BindOnce(&V4L2EncodeComponent::flushTask,
- mWeakThis, &done, flushedWork));
+ mEncoderTaskRunner->PostTask(FROM_HERE, ::base::BindOnce(&EncodeComponent::flushTask, mWeakThis,
+ &done, flushedWork));
done.Wait();
return C2_OK;
}
-c2_status_t V4L2EncodeComponent::announce_nb(const std::vector<C2WorkOutline>& items) {
+c2_status_t EncodeComponent::announce_nb(const std::vector<C2WorkOutline>& items) {
return C2_OMITTED; // Tunneling is not supported by now
}
-c2_status_t V4L2EncodeComponent::setListener_vb(const std::shared_ptr<Listener>& listener,
- c2_blocking_t mayBlock) {
+c2_status_t EncodeComponent::setListener_vb(const std::shared_ptr<Listener>& listener,
+ c2_blocking_t mayBlock) {
ALOG_ASSERT(mComponentState != ComponentState::UNLOADED);
// Lock so we're sure the component isn't currently starting or stopping.
@@ -425,18 +372,18 @@ c2_status_t V4L2EncodeComponent::setListener_vb(const std::shared_ptr<Listener>&
ALOG_ASSERT(mayBlock == c2_blocking_t::C2_MAY_BLOCK);
::base::WaitableEvent done;
- mEncoderTaskRunner->PostTask(FROM_HERE, ::base::BindOnce(&V4L2EncodeComponent::setListenerTask,
+ mEncoderTaskRunner->PostTask(FROM_HERE, ::base::BindOnce(&EncodeComponent::setListenerTask,
mWeakThis, listener, &done));
done.Wait();
return C2_OK;
}
-std::shared_ptr<C2ComponentInterface> V4L2EncodeComponent::intf() {
- return std::make_shared<SimpleInterface<V4L2EncodeInterface>>(mName.c_str(), mId, mInterface);
+std::shared_ptr<C2ComponentInterface> EncodeComponent::intf() {
+ return std::make_shared<SimpleInterface<EncodeInterface>>(mName.c_str(), mId, mInterface);
}
-void V4L2EncodeComponent::startTask(bool* success, ::base::WaitableEvent* done) {
+void EncodeComponent::startTask(bool* success, ::base::WaitableEvent* done) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
@@ -444,7 +391,7 @@ void V4L2EncodeComponent::startTask(bool* success, ::base::WaitableEvent* done)
done->Signal();
}
-void V4L2EncodeComponent::stopTask(::base::WaitableEvent* done) {
+void EncodeComponent::stopTask(::base::WaitableEvent* done) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
@@ -452,6 +399,8 @@ void V4L2EncodeComponent::stopTask(::base::WaitableEvent* done) {
flush();
mInputFormatConverter.reset();
+ mInputPixelFormat = VideoPixelFormat::UNKNOWN;
+ mInputLayout.clear();
mEncoder.reset();
mOutputBlockPool.reset();
@@ -462,7 +411,7 @@ void V4L2EncodeComponent::stopTask(::base::WaitableEvent* done) {
done->Signal();
}
-void V4L2EncodeComponent::queueTask(std::unique_ptr<C2Work> work) {
+void EncodeComponent::queueTask(std::unique_ptr<C2Work> work) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
ALOG_ASSERT(mEncoder);
@@ -496,6 +445,31 @@ void V4L2EncodeComponent::queueTask(std::unique_ptr<C2Work> work) {
return;
}
+ // If this is the first input frame, create an input format converter if the V4L2 device doesn't
+ // support the requested input format.
+ if ((mInputPixelFormat == VideoPixelFormat::UNKNOWN) && !work->input.buffers.empty()) {
+ VideoPixelFormat format = VideoPixelFormat::UNKNOWN;
+ if (!getVideoFrameLayout(work->input.buffers.front()->data().graphicBlocks().front(),
+ &format)) {
+ ALOGE("Failed to get input block's layout");
+ reportError(C2_CORRUPTED);
+ return;
+ }
+ if (mEncoder->inputFormat() != format) {
+ ALOG_ASSERT(!mInputFormatConverter);
+ ALOGV("Creating input format convertor (%s)",
+ videoPixelFormatToString(mEncoder->inputFormat()).c_str());
+ mInputFormatConverter =
+ FormatConverter::create(mEncoder->inputFormat(), mEncoder->visibleSize(),
+ VideoEncoder::kInputBufferCount, mEncoder->codedSize());
+ if (!mInputFormatConverter) {
+ ALOGE("Failed to created input format convertor");
+ reportError(C2_CORRUPTED);
+ return;
+ }
+ }
+ }
+
// If conversion is required but no free buffers are available we queue the work item.
if (mInputFormatConverter && !mInputFormatConverter->isReady()) {
ALOGV("Input format convertor ran out of buffers");
@@ -510,8 +484,8 @@ void V4L2EncodeComponent::queueTask(std::unique_ptr<C2Work> work) {
work->input.buffers.front()->data().graphicBlocks().front();
if (mInputFormatConverter) {
ALOGV("Converting input block (index: %" PRIu64 ")", index);
- c2_status_t status = C2_CORRUPTED;
- inputBlock = mInputFormatConverter->convertBlock(index, inputBlock, &status);
+ c2_status_t status =
+ mInputFormatConverter->convertBlock(index, inputBlock, &inputBlock);
if (status != C2_OK) {
ALOGE("Failed to convert input block (index: %" PRIu64 ")", index);
reportError(status);
@@ -555,7 +529,7 @@ void V4L2EncodeComponent::queueTask(std::unique_ptr<C2Work> work) {
}
}
-void V4L2EncodeComponent::drainTask(drain_mode_t /*drainMode*/) {
+void EncodeComponent::drainTask(drain_mode_t /*drainMode*/) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
@@ -579,7 +553,7 @@ void V4L2EncodeComponent::drainTask(drain_mode_t /*drainMode*/) {
}
}
-void V4L2EncodeComponent::onDrainDone(bool success) {
+void EncodeComponent::onDrainDone(bool success) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
ALOG_ASSERT(!mWorkQueue.empty());
@@ -618,8 +592,8 @@ void V4L2EncodeComponent::onDrainDone(bool success) {
mWorkQueue.pop_front();
}
-void V4L2EncodeComponent::flushTask(::base::WaitableEvent* done,
- std::list<std::unique_ptr<C2Work>>* const flushedWork) {
+void EncodeComponent::flushTask(::base::WaitableEvent* done,
+ std::list<std::unique_ptr<C2Work>>* const flushedWork) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
@@ -637,8 +611,8 @@ void V4L2EncodeComponent::flushTask(::base::WaitableEvent* done,
flush();
}
-void V4L2EncodeComponent::setListenerTask(const std::shared_ptr<Listener>& listener,
- ::base::WaitableEvent* done) {
+void EncodeComponent::setListenerTask(const std::shared_ptr<Listener>& listener,
+ ::base::WaitableEvent* done) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
@@ -646,76 +620,7 @@ void V4L2EncodeComponent::setListenerTask(const std::shared_ptr<Listener>& liste
done->Signal();
}
-bool V4L2EncodeComponent::initializeEncoder() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(!mInputFormatConverter);
- ALOG_ASSERT(!mEncoder);
-
- mLastFrameTime = std::nullopt;
-
- // Get the requested profile and level.
- C2Config::profile_t outputProfile = mInterface->getOutputProfile();
-
- // CSD only needs to be extracted when using an H.264 profile.
- mExtractCSD = IsH264Profile(outputProfile);
-
- std::optional<uint8_t> h264Level;
- if (IsH264Profile(outputProfile)) {
- h264Level = c2LevelToV4L2Level(mInterface->getOutputLevel());
- }
-
- // Get the stride used by the C2 framework, as this might be different from the stride used by
- // the V4L2 encoder.
- std::optional<uint32_t> stride =
- getVideoFrameStride(kInputPixelFormat, mInterface->getInputVisibleSize());
- if (!stride) {
- ALOGE("Failed to get video frame stride");
- reportError(C2_CORRUPTED);
- return false;
- }
-
- // Get the requested bitrate mode and bitrate. The C2 framework doesn't offer a parameter to
- // configure the peak bitrate, so we use a multiple of the target bitrate.
- mBitrateMode = mInterface->getBitrateMode();
- if (property_get_bool("persist.vendor.v4l2_codec2.disable_vbr", false)) {
- // NOTE: This is a workaround for b/235771157.
- ALOGW("VBR is disabled on this device");
- mBitrateMode = C2Config::BITRATE_CONST;
- }
-
- mBitrate = mInterface->getBitrate();
-
- mEncoder = V4L2Encoder::create(
- outputProfile, h264Level, mInterface->getInputVisibleSize(), *stride,
- mInterface->getKeyFramePeriod(), mBitrateMode, mBitrate,
- mBitrate * kPeakBitrateMultiplier,
- ::base::BindRepeating(&V4L2EncodeComponent::fetchOutputBlock, mWeakThis),
- ::base::BindRepeating(&V4L2EncodeComponent::onInputBufferDone, mWeakThis),
- ::base::BindRepeating(&V4L2EncodeComponent::onOutputBufferDone, mWeakThis),
- ::base::BindRepeating(&V4L2EncodeComponent::onDrainDone, mWeakThis),
- ::base::BindRepeating(&V4L2EncodeComponent::reportError, mWeakThis, C2_CORRUPTED),
- mEncoderTaskRunner);
- if (!mEncoder) {
- ALOGE("Failed to create V4L2Encoder (profile: %s)", profileToString(outputProfile));
- return false;
- }
-
- // Add an input format convertor if the device doesn't support the requested input format.
- ALOGV("Creating input format convertor (%s)",
- videoPixelFormatToString(mEncoder->inputFormat()).c_str());
- mInputFormatConverter =
- FormatConverter::Create(mEncoder->inputFormat(), mEncoder->visibleSize(),
- V4L2Encoder::kInputBufferCount, mEncoder->codedSize());
- if (!mInputFormatConverter) {
- ALOGE("Failed to created input format convertor");
- return false;
- }
-
- return true;
-}
-
-bool V4L2EncodeComponent::updateEncodingParameters() {
+bool EncodeComponent::updateEncodingParameters() {
ALOGV("%s()", __func__);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
@@ -733,10 +638,10 @@ bool V4L2EncodeComponent::updateEncodingParameters() {
mBitrate = bitrate;
if (mBitrateMode == C2Config::BITRATE_VARIABLE) {
- ALOGV("Setting peak bitrate to %u", bitrate * kPeakBitrateMultiplier);
+ ALOGV("Setting peak bitrate to %u", bitrate * VideoEncoder::kPeakBitrateMultiplier);
// TODO(b/190336806): Our stack doesn't support dynamic peak bitrate changes yet, ignore
// errors for now.
- mEncoder->setPeakBitrate(bitrate * kPeakBitrateMultiplier);
+ mEncoder->setPeakBitrate(bitrate * VideoEncoder::kPeakBitrateMultiplier);
}
}
@@ -777,7 +682,7 @@ bool V4L2EncodeComponent::updateEncodingParameters() {
return true;
}
-bool V4L2EncodeComponent::encode(C2ConstGraphicBlock block, uint64_t index, int64_t timestamp) {
+bool EncodeComponent::encode(C2ConstGraphicBlock block, uint64_t index, int64_t timestamp) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
ALOG_ASSERT(mEncoder);
@@ -785,6 +690,21 @@ bool V4L2EncodeComponent::encode(C2ConstGraphicBlock block, uint64_t index, int6
ALOGV("Encoding input block (index: %" PRIu64 ", timestamp: %" PRId64 ", size: %dx%d)", index,
timestamp, block.width(), block.height());
+ // If this is the first input frame, determine the pixel format and layout.
+ if (mInputPixelFormat == VideoPixelFormat::UNKNOWN) {
+ ALOG_ASSERT(mInputLayout.empty());
+ VideoPixelFormat format = VideoPixelFormat::UNKNOWN;
+ std::optional<std::vector<VideoFramePlane>> inputLayout =
+ getVideoFrameLayout(block, &format);
+ if (!inputLayout) {
+ ALOGE("Failed to get input block's layout");
+ reportError(C2_CORRUPTED);
+ return false;
+ }
+ mInputPixelFormat = format;
+ mInputLayout = std::move(*inputLayout);
+ }
+
// Dynamically adjust framerate based on the frame's timestamp if required.
constexpr int64_t kMaxFramerateDiff = 5;
if (mLastFrameTime && (timestamp > *mLastFrameTime)) {
@@ -802,7 +722,8 @@ bool V4L2EncodeComponent::encode(C2ConstGraphicBlock block, uint64_t index, int6
if (!updateEncodingParameters()) return false;
// Create an input frame from the graphic block.
- std::unique_ptr<V4L2Encoder::InputFrame> frame = CreateInputFrame(block, index, timestamp);
+ std::unique_ptr<VideoEncoder::InputFrame> frame =
+ createInputFrame(block, mInputPixelFormat, mInputLayout, index, timestamp);
if (!frame) {
ALOGE("Failed to create video frame from input block (index: %" PRIu64
", timestamp: %" PRId64 ")",
@@ -818,7 +739,7 @@ bool V4L2EncodeComponent::encode(C2ConstGraphicBlock block, uint64_t index, int6
return true;
}
-void V4L2EncodeComponent::flush() {
+void EncodeComponent::flush() {
ALOGV("%s()", __func__);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
@@ -849,8 +770,7 @@ void V4L2EncodeComponent::flush() {
}
}
-void V4L2EncodeComponent::fetchOutputBlock(uint32_t size,
- std::unique_ptr<BitstreamBuffer>* buffer) {
+void EncodeComponent::fetchOutputBlock(uint32_t size, std::unique_ptr<BitstreamBuffer>* buffer) {
ALOGV("Fetching linear block (size: %u)", size);
std::shared_ptr<C2LinearBlock> block;
c2_status_t status = mOutputBlockPool->fetchLinearBlock(
@@ -866,7 +786,7 @@ void V4L2EncodeComponent::fetchOutputBlock(uint32_t size,
*buffer = std::make_unique<BitstreamBuffer>(std::move(block), 0, size);
}
-void V4L2EncodeComponent::onInputBufferDone(uint64_t index) {
+void EncodeComponent::onInputBufferDone(uint64_t index) {
ALOGV("%s(): Input buffer done (index: %" PRIu64 ")", __func__, index);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
ALOG_ASSERT(mEncoder);
@@ -908,8 +828,8 @@ void V4L2EncodeComponent::onInputBufferDone(uint64_t index) {
}
}
-void V4L2EncodeComponent::onOutputBufferDone(size_t dataSize, int64_t timestamp, bool keyFrame,
- std::unique_ptr<BitstreamBuffer> buffer) {
+void EncodeComponent::onOutputBufferDone(size_t dataSize, int64_t timestamp, bool keyFrame,
+ std::unique_ptr<BitstreamBuffer> buffer) {
ALOGV("%s(): output buffer done (timestamp: %" PRId64 ", size: %zu, keyframe: %d)", __func__,
timestamp, dataSize, keyFrame);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
@@ -971,7 +891,7 @@ void V4L2EncodeComponent::onOutputBufferDone(size_t dataSize, int64_t timestamp,
}
}
-C2Work* V4L2EncodeComponent::getWorkByIndex(uint64_t index) {
+C2Work* EncodeComponent::getWorkByIndex(uint64_t index) {
ALOGV("%s(): getting work item (index: %" PRIu64 ")", __func__, index);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
@@ -986,7 +906,7 @@ C2Work* V4L2EncodeComponent::getWorkByIndex(uint64_t index) {
return it->get();
}
-C2Work* V4L2EncodeComponent::getWorkByTimestamp(int64_t timestamp) {
+C2Work* EncodeComponent::getWorkByTimestamp(int64_t timestamp) {
ALOGV("%s(): getting work item (timestamp: %" PRId64 ")", __func__, timestamp);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
ALOG_ASSERT(timestamp >= 0);
@@ -1006,7 +926,7 @@ C2Work* V4L2EncodeComponent::getWorkByTimestamp(int64_t timestamp) {
return it->get();
}
-bool V4L2EncodeComponent::isWorkDone(const C2Work& work) const {
+bool EncodeComponent::isWorkDone(const C2Work& work) const {
ALOGV("%s()", __func__);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
@@ -1033,7 +953,7 @@ bool V4L2EncodeComponent::isWorkDone(const C2Work& work) const {
return true;
}
-void V4L2EncodeComponent::reportWork(std::unique_ptr<C2Work> work) {
+void EncodeComponent::reportWork(std::unique_ptr<C2Work> work) {
ALOG_ASSERT(work);
ALOGV("%s(): Reporting work item as finished (index: %llu, timestamp: %llu)", __func__,
work->input.ordinal.frameIndex.peekull(), work->input.ordinal.timestamp.peekull());
@@ -1047,7 +967,7 @@ void V4L2EncodeComponent::reportWork(std::unique_ptr<C2Work> work) {
mListener->onWorkDone_nb(weak_from_this(), std::move(finishedWorkList));
}
-bool V4L2EncodeComponent::getBlockPool() {
+bool EncodeComponent::getBlockPool() {
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
auto sharedThis = weak_from_this().lock();
@@ -1068,7 +988,7 @@ bool V4L2EncodeComponent::getBlockPool() {
return true;
}
-void V4L2EncodeComponent::reportError(c2_status_t error) {
+void EncodeComponent::reportError(c2_status_t error) {
ALOGV("%s()", __func__);
ALOG_ASSERT(mEncoderTaskRunner->RunsTasksInCurrentSequence());
@@ -1080,7 +1000,7 @@ void V4L2EncodeComponent::reportError(c2_status_t error) {
}
}
-void V4L2EncodeComponent::setComponentState(ComponentState state) {
+void EncodeComponent::setComponentState(ComponentState state) {
// Check whether the state change is valid.
switch (state) {
case ComponentState::UNLOADED:
@@ -1103,7 +1023,7 @@ void V4L2EncodeComponent::setComponentState(ComponentState state) {
mComponentState = state;
}
-const char* V4L2EncodeComponent::componentStateToString(V4L2EncodeComponent::ComponentState state) {
+const char* EncodeComponent::componentStateToString(EncodeComponent::ComponentState state) {
switch (state) {
case ComponentState::UNLOADED:
return "UNLOADED";
diff --git a/components/V4L2EncodeInterface.cpp b/components/EncodeInterface.cpp
index 03d8c37..9d7d81f 100644
--- a/components/V4L2EncodeInterface.cpp
+++ b/components/EncodeInterface.cpp
@@ -1,11 +1,11 @@
-// Copyright 2020 The Chromium Authors. All rights reserved.
+// Copyright 2023 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//#define LOG_NDEBUG 0
#define LOG_TAG "V4L2EncodeInterface"
-#include <v4l2_codec2/components/V4L2EncodeInterface.h>
+#include <v4l2_codec2/components/EncodeInterface.h>
#include <inttypes.h>
#include <algorithm>
@@ -16,8 +16,7 @@
#include <media/stagefright/MediaDefs.h>
#include <utils/Log.h>
-#include <v4l2_codec2/common/V4L2ComponentCommon.h>
-#include <v4l2_codec2/common/V4L2Device.h>
+#include <v4l2_codec2/common/Common.h>
#include <v4l2_codec2/common/VideoTypes.h>
using android::hardware::graphics::common::V1_0::BufferUsage;
@@ -41,41 +40,17 @@ constexpr uint32_t kDefaultBitrate = 64000;
// The maximal output bitrate in bits per second. It's the max bitrate of AVC Level4.1.
// TODO: increase this in the future for supporting higher level/resolution encoding.
constexpr uint32_t kMaxBitrate = 50000000;
-
-std::optional<VideoCodec> getCodecFromComponentName(const std::string& name) {
- if (name == V4L2ComponentName::kH264Encoder) return VideoCodec::H264;
- if (name == V4L2ComponentName::kVP8Encoder) return VideoCodec::VP8;
- if (name == V4L2ComponentName::kVP9Encoder) return VideoCodec::VP9;
-
- ALOGE("Unknown name: %s", name.c_str());
- return std::nullopt;
-}
-
-// Check whether the specified profile is a valid profile for the specified codec.
-bool IsValidProfileForCodec(VideoCodec codec, C2Config::profile_t profile) {
- switch (codec) {
- case VideoCodec::H264:
- return ((profile >= C2Config::PROFILE_AVC_BASELINE) &&
- (profile <= C2Config::PROFILE_AVC_ENHANCED_MULTIVIEW_DEPTH_HIGH));
- case VideoCodec::VP8:
- return ((profile >= C2Config::PROFILE_VP8_0) && (profile <= C2Config::PROFILE_VP8_3));
- case VideoCodec::VP9:
- return ((profile >= C2Config::PROFILE_VP9_0) && (profile <= C2Config::PROFILE_VP9_3));
- default:
- return false;
- }
-}
-
} // namespace
-// static
-C2R V4L2EncodeInterface::H264ProfileLevelSetter(
- bool /*mayBlock*/, C2P<C2StreamProfileLevelInfo::output>& info,
- const C2P<C2StreamPictureSizeInfo::input>& videoSize,
- const C2P<C2StreamFrameRateInfo::output>& frameRate,
- const C2P<C2StreamBitrateInfo::output>& bitrate) {
- static C2Config::level_t lowestConfigLevel = C2Config::LEVEL_UNUSED;
+//static
+C2Config::level_t EncodeInterface::lowestConfigLevel = C2Config::LEVEL_UNUSED;
+// static
+C2R EncodeInterface::H264ProfileLevelSetter(bool /*mayBlock*/,
+ C2P<C2StreamProfileLevelInfo::output>& info,
+ const C2P<C2StreamPictureSizeInfo::input>& videoSize,
+ const C2P<C2StreamFrameRateInfo::output>& frameRate,
+ const C2P<C2StreamBitrateInfo::output>& bitrate) {
// Adopt default minimal profile instead if the requested profile is not supported, or lower
// than the default minimal one.
constexpr C2Config::profile_t minProfile = C2Config::PROFILE_AVC_BASELINE;
@@ -185,11 +160,11 @@ C2R V4L2EncodeInterface::H264ProfileLevelSetter(
return C2R::Ok();
}
-C2R V4L2EncodeInterface::VP9ProfileLevelSetter(
- bool /*mayBlock*/, C2P<C2StreamProfileLevelInfo::output>& info,
- const C2P<C2StreamPictureSizeInfo::input>& /*videoSize*/,
- const C2P<C2StreamFrameRateInfo::output>& /*frameRate*/,
- const C2P<C2StreamBitrateInfo::output>& /*bitrate*/) {
+C2R EncodeInterface::VP9ProfileLevelSetter(bool /*mayBlock*/,
+ C2P<C2StreamProfileLevelInfo::output>& info,
+ const C2P<C2StreamPictureSizeInfo::input>& /*videoSize*/,
+ const C2P<C2StreamFrameRateInfo::output>& /*frameRate*/,
+ const C2P<C2StreamBitrateInfo::output>& /*bitrate*/) {
// Adopt default minimal profile instead if the requested profile is not supported, or lower
// than the default minimal one.
constexpr C2Config::profile_t defaultMinProfile = C2Config::PROFILE_VP9_0;
@@ -209,7 +184,7 @@ C2R V4L2EncodeInterface::VP9ProfileLevelSetter(
}
// static
-C2R V4L2EncodeInterface::SizeSetter(bool mayBlock, C2P<C2StreamPictureSizeInfo::input>& videoSize) {
+C2R EncodeInterface::SizeSetter(bool mayBlock, C2P<C2StreamPictureSizeInfo::input>& videoSize) {
(void)mayBlock;
// TODO: maybe apply block limit?
return videoSize.F(videoSize.v.width)
@@ -218,8 +193,8 @@ C2R V4L2EncodeInterface::SizeSetter(bool mayBlock, C2P<C2StreamPictureSizeInfo::
}
// static
-C2R V4L2EncodeInterface::IntraRefreshPeriodSetter(bool mayBlock,
- C2P<C2StreamIntraRefreshTuning::output>& period) {
+C2R EncodeInterface::IntraRefreshPeriodSetter(bool mayBlock,
+ C2P<C2StreamIntraRefreshTuning::output>& period) {
(void)mayBlock;
if (period.v.period < 1) {
period.set().mode = C2Config::INTRA_REFRESH_DISABLED;
@@ -231,40 +206,23 @@ C2R V4L2EncodeInterface::IntraRefreshPeriodSetter(bool mayBlock,
return C2R::Ok();
}
-V4L2EncodeInterface::V4L2EncodeInterface(const C2String& name,
- std::shared_ptr<C2ReflectorHelper> helper)
+EncodeInterface::EncodeInterface(const C2String& name, std::shared_ptr<C2ReflectorHelper> helper,
+ const SupportedCapabilities& caps)
: C2InterfaceHelper(std::move(helper)) {
ALOGV("%s(%s)", __func__, name.c_str());
setDerivedInstance(this);
- Initialize(name);
+ Initialize(name, caps);
}
-void V4L2EncodeInterface::Initialize(const C2String& name) {
- scoped_refptr<V4L2Device> device = V4L2Device::create();
- if (!device) {
- ALOGE("Failed to create V4L2 device");
- mInitStatus = C2_CORRUPTED;
- return;
- }
-
- auto codec = getCodecFromComponentName(name);
- if (!codec) {
- ALOGE("Invalid component name");
- mInitStatus = C2_BAD_VALUE;
- return;
- }
-
- V4L2Device::SupportedEncodeProfiles supported_profiles = device->getSupportedEncodeProfiles();
-
- // Compile the list of supported profiles.
+void EncodeInterface::Initialize(const C2String& name, const SupportedCapabilities& caps) {
// Note: unsigned int is used here, since std::vector<C2Config::profile_t> cannot convert to
// std::vector<unsigned int> required by the c2 framework below.
std::vector<unsigned int> profiles;
ui::Size maxSize;
- for (const auto& supportedProfile : supported_profiles) {
- if (!IsValidProfileForCodec(codec.value(), supportedProfile.profile)) {
+ for (const auto& supportedProfile : caps.supportedProfiles) {
+ if (!isValidProfileForCodec(caps.codec, supportedProfile.profile)) {
continue; // Ignore unrecognizable or unsupported profiles.
}
ALOGV("Queried c2_profile = 0x%x : max_size = %d x %d", supportedProfile.profile,
@@ -320,7 +278,7 @@ void V4L2EncodeInterface::Initialize(const C2String& name) {
.build());
std::string outputMime;
- if (getCodecFromComponentName(name) == VideoCodec::H264) {
+ if (caps.codec == VideoCodec::H264) {
outputMime = MEDIA_MIMETYPE_VIDEO_AVC;
C2Config::profile_t minProfile = static_cast<C2Config::profile_t>(
*std::min_element(profiles.begin(), profiles.end()));
@@ -342,14 +300,14 @@ void V4L2EncodeInterface::Initialize(const C2String& name) {
C2Config::LEVEL_AVC_5, C2Config::LEVEL_AVC_5_1})})
.withSetter(H264ProfileLevelSetter, mInputVisibleSize, mFrameRate, mBitrate)
.build());
- } else if (getCodecFromComponentName(name) == VideoCodec::VP8) {
+ } else if (caps.codec == VideoCodec::VP8) {
outputMime = MEDIA_MIMETYPE_VIDEO_VP8;
// VP8 doesn't have conventional profiles, we'll use profile0 if the VP8 codec is requested.
addParameter(DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
.withConstValue(new C2StreamProfileLevelInfo::output(
0u, C2Config::PROFILE_VP8_0, C2Config::LEVEL_UNUSED))
.build());
- } else if (getCodecFromComponentName(name) == VideoCodec::VP9) {
+ } else if (caps.codec == VideoCodec::VP9) {
outputMime = MEDIA_MIMETYPE_VIDEO_VP9;
C2Config::profile_t minProfile = static_cast<C2Config::profile_t>(
*std::min_element(profiles.begin(), profiles.end()));
@@ -382,17 +340,10 @@ void V4L2EncodeInterface::Initialize(const C2String& name) {
.withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::GRAPHIC))
.build());
- // TODO(b/167640667) Add VIDEO_ENCODER flag once input convertor is not enabled by default.
- // When using the format convertor (which is currently always enabled) it's not useful to add
- // the VIDEO_ENCODER buffer flag for input buffers here. Currently zero-copy is not supported
- // yet, so when using this flag an additional buffer will be allocated on host side and a copy
- // will be performed between the guest and host buffer to keep them in sync. This is wasteful as
- // the buffer is only used on guest side by the format convertor which converts and copies the
- // buffer into another buffer.
- //addParameter(DefineParam(mInputMemoryUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
- // .withConstValue(new C2StreamUsageTuning::input(
- // 0u, static_cast<uint64_t>(BufferUsage::VIDEO_ENCODER)))
- // .build());
+ addParameter(DefineParam(mInputMemoryUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
+ .withConstValue(new C2StreamUsageTuning::input(
+ 0u, static_cast<uint64_t>(BufferUsage::VIDEO_ENCODER)))
+ .build());
addParameter(
DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
@@ -458,7 +409,7 @@ void V4L2EncodeInterface::Initialize(const C2String& name) {
mInitStatus = C2_OK;
}
-uint32_t V4L2EncodeInterface::getKeyFramePeriod() const {
+uint32_t EncodeInterface::getKeyFramePeriod() const {
if (mKeyFramePeriodUs->value < 0 || mKeyFramePeriodUs->value == INT64_MAX) {
return 0;
}
diff --git a/components/V4L2ComponentFactory.cpp b/components/V4L2ComponentFactory.cpp
deleted file mode 100644
index a3f8837..0000000
--- a/components/V4L2ComponentFactory.cpp
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2020 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "V4L2ComponentFactory"
-
-#include <v4l2_codec2/components/V4L2ComponentFactory.h>
-
-#include <codec2/hidl/1.0/InputBufferManager.h>
-#include <log/log.h>
-
-#include <v4l2_codec2/common/V4L2ComponentCommon.h>
-#include <v4l2_codec2/components/V4L2DecodeComponent.h>
-#include <v4l2_codec2/components/V4L2DecodeInterface.h>
-#include <v4l2_codec2/components/V4L2EncodeComponent.h>
-#include <v4l2_codec2/components/V4L2EncodeInterface.h>
-
-namespace android {
-
-// static
-std::unique_ptr<V4L2ComponentFactory> V4L2ComponentFactory::create(
- const std::string& componentName, std::shared_ptr<C2ReflectorHelper> reflector) {
- ALOGV("%s(%s)", __func__, componentName.c_str());
-
- if (!android::V4L2ComponentName::isValid(componentName.c_str())) {
- ALOGE("Invalid component name: %s", componentName.c_str());
- return nullptr;
- }
- if (reflector == nullptr) {
- ALOGE("reflector is null");
- return nullptr;
- }
-
- bool isEncoder = android::V4L2ComponentName::isEncoder(componentName.c_str());
- return std::make_unique<V4L2ComponentFactory>(componentName, isEncoder, std::move(reflector));
-}
-
-V4L2ComponentFactory::V4L2ComponentFactory(const std::string& componentName, bool isEncoder,
- std::shared_ptr<C2ReflectorHelper> reflector)
- : mComponentName(componentName), mIsEncoder(isEncoder), mReflector(std::move(reflector)) {
- using namespace ::android::hardware::media::c2::V1_0;
- // To minimize IPC, we generally want the codec2 framework to release and
- // recycle input buffers when the corresponding work item is done. However,
- // sometimes it is necessary to provide more input to unblock a decoder.
- //
- // Optimally we would configure this on a per-context basis. However, the
- // InputBufferManager is a process-wide singleton, so we need to configure it
- // pessimistically. Basing the interval on frame timing can be suboptimal if
- // the decoded output isn't being displayed, but that's not a primary use case
- // and few videos will actually rely on this behavior.
- constexpr nsecs_t kMinFrameIntervalNs = 1000000000ull / 60;
- uint32_t delayCount = 0;
- for (auto c : kAllCodecs) {
- delayCount = std::max(delayCount, V4L2DecodeInterface::getOutputDelay(c));
- }
- utils::InputBufferManager::setNotificationInterval(delayCount * kMinFrameIntervalNs / 2);
-}
-
-c2_status_t V4L2ComponentFactory::createComponent(c2_node_id_t id,
- std::shared_ptr<C2Component>* const component,
- ComponentDeleter deleter) {
- ALOGV("%s(%d), componentName: %s, isEncoder: %d", __func__, id, mComponentName.c_str(),
- mIsEncoder);
-
- if (mReflector == nullptr) {
- ALOGE("mReflector doesn't exist.");
- return C2_CORRUPTED;
- }
-
- if (mIsEncoder) {
- *component = V4L2EncodeComponent::create(mComponentName, id, mReflector, deleter);
- } else {
- *component = V4L2DecodeComponent::create(mComponentName, id, mReflector, deleter);
- }
- return *component ? C2_OK : C2_NO_MEMORY;
-}
-
-c2_status_t V4L2ComponentFactory::createInterface(
- c2_node_id_t id, std::shared_ptr<C2ComponentInterface>* const interface,
- InterfaceDeleter deleter) {
- ALOGV("%s(), componentName: %s", __func__, mComponentName.c_str());
-
- if (mReflector == nullptr) {
- ALOGE("mReflector doesn't exist.");
- return C2_CORRUPTED;
- }
-
- if (mIsEncoder) {
- *interface = std::shared_ptr<C2ComponentInterface>(
- new SimpleInterface<V4L2EncodeInterface>(
- mComponentName.c_str(), id,
- std::make_shared<V4L2EncodeInterface>(mComponentName, mReflector)),
- deleter);
- return C2_OK;
- } else {
- *interface = std::shared_ptr<C2ComponentInterface>(
- new SimpleInterface<V4L2DecodeInterface>(
- mComponentName.c_str(), id,
- std::make_shared<V4L2DecodeInterface>(mComponentName, mReflector)),
- deleter);
- return C2_OK;
- }
-}
-
-} // namespace android
diff --git a/components/V4L2ComponentStore.cpp b/components/V4L2ComponentStore.cpp
deleted file mode 100644
index feb5799..0000000
--- a/components/V4L2ComponentStore.cpp
+++ /dev/null
@@ -1,208 +0,0 @@
-// Copyright 2020 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "V4L2ComponentStore"
-
-#include <v4l2_codec2/components/V4L2ComponentStore.h>
-
-#include <stdint.h>
-
-#include <memory>
-#include <mutex>
-
-#include <C2.h>
-#include <C2Config.h>
-#include <log/log.h>
-#include <media/stagefright/foundation/MediaDefs.h>
-
-#include <v4l2_codec2/common/V4L2ComponentCommon.h>
-#include <v4l2_codec2/components/V4L2ComponentFactory.h>
-
-namespace android {
-namespace {
-const uint32_t kComponentRank = 0x80;
-
-std::string getMediaTypeFromComponentName(const std::string& name) {
- if (name == V4L2ComponentName::kH264Decoder || name == V4L2ComponentName::kH264SecureDecoder ||
- name == V4L2ComponentName::kH264Encoder) {
- return MEDIA_MIMETYPE_VIDEO_AVC;
- }
- if (name == V4L2ComponentName::kVP8Decoder || name == V4L2ComponentName::kVP8SecureDecoder ||
- name == V4L2ComponentName::kVP8Encoder) {
- return MEDIA_MIMETYPE_VIDEO_VP8;
- }
- if (name == V4L2ComponentName::kVP9Decoder || name == V4L2ComponentName::kVP9SecureDecoder ||
- name == V4L2ComponentName::kVP9Encoder) {
- return MEDIA_MIMETYPE_VIDEO_VP9;
- }
- if (name == V4L2ComponentName::kHEVCDecoder || name == V4L2ComponentName::kHEVCSecureDecoder) {
- return MEDIA_MIMETYPE_VIDEO_HEVC;
- }
- return "";
-}
-
-} // namespace
-
-// static
-std::shared_ptr<C2ComponentStore> V4L2ComponentStore::Create() {
- ALOGV("%s()", __func__);
-
- static std::mutex mutex;
- static std::weak_ptr<C2ComponentStore> platformStore;
-
- std::lock_guard<std::mutex> lock(mutex);
- std::shared_ptr<C2ComponentStore> store = platformStore.lock();
- if (store != nullptr) return store;
-
- store = std::shared_ptr<C2ComponentStore>(new V4L2ComponentStore());
- platformStore = store;
- return store;
-}
-
-V4L2ComponentStore::V4L2ComponentStore() : mReflector(std::make_shared<C2ReflectorHelper>()) {
- ALOGV("%s()", __func__);
-}
-
-V4L2ComponentStore::~V4L2ComponentStore() {
- ALOGV("%s()", __func__);
-
- std::lock_guard<std::mutex> lock(mCachedFactoriesLock);
- mCachedFactories.clear();
-}
-
-C2String V4L2ComponentStore::getName() const {
- return "android.componentStore.v4l2";
-}
-
-c2_status_t V4L2ComponentStore::createComponent(C2String name,
- std::shared_ptr<C2Component>* const component) {
- ALOGV("%s(%s)", __func__, name.c_str());
-
- if (!V4L2ComponentName::isValid(name.c_str())) {
- ALOGI("%s(): Invalid component name: %s", __func__, name.c_str());
- return C2_NOT_FOUND;
- }
-
- auto factory = GetFactory(name);
- if (factory == nullptr) return C2_CORRUPTED;
-
- component->reset();
- return factory->createComponent(0, component);
-}
-
-c2_status_t V4L2ComponentStore::createInterface(
- C2String name, std::shared_ptr<C2ComponentInterface>* const interface) {
- ALOGV("%s(%s)", __func__, name.c_str());
-
- if (!V4L2ComponentName::isValid(name.c_str())) {
- ALOGI("%s(): Invalid component name: %s", __func__, name.c_str());
- return C2_NOT_FOUND;
- }
-
- auto factory = GetFactory(name);
- if (factory == nullptr) return C2_CORRUPTED;
-
- interface->reset();
- return factory->createInterface(0, interface);
-}
-
-std::vector<std::shared_ptr<const C2Component::Traits>> V4L2ComponentStore::listComponents() {
- ALOGV("%s()", __func__);
-
- std::vector<std::shared_ptr<const C2Component::Traits>> ret;
- ret.push_back(GetTraits(V4L2ComponentName::kH264Encoder));
- ret.push_back(GetTraits(V4L2ComponentName::kH264Decoder));
- ret.push_back(GetTraits(V4L2ComponentName::kH264SecureDecoder));
- ret.push_back(GetTraits(V4L2ComponentName::kVP8Encoder));
- ret.push_back(GetTraits(V4L2ComponentName::kVP8Decoder));
- ret.push_back(GetTraits(V4L2ComponentName::kVP8SecureDecoder));
- ret.push_back(GetTraits(V4L2ComponentName::kVP9Encoder));
- ret.push_back(GetTraits(V4L2ComponentName::kVP9Decoder));
- ret.push_back(GetTraits(V4L2ComponentName::kVP9SecureDecoder));
- ret.push_back(GetTraits(V4L2ComponentName::kHEVCDecoder));
- ret.push_back(GetTraits(V4L2ComponentName::kHEVCSecureDecoder));
- return ret;
-}
-
-std::shared_ptr<C2ParamReflector> V4L2ComponentStore::getParamReflector() const {
- return mReflector;
-}
-
-c2_status_t V4L2ComponentStore::copyBuffer(std::shared_ptr<C2GraphicBuffer> /* src */,
- std::shared_ptr<C2GraphicBuffer> /* dst */) {
- return C2_OMITTED;
-}
-
-c2_status_t V4L2ComponentStore::querySupportedParams_nb(
- std::vector<std::shared_ptr<C2ParamDescriptor>>* const /* params */) const {
- return C2_OK;
-}
-
-c2_status_t V4L2ComponentStore::query_sm(
- const std::vector<C2Param*>& stackParams,
- const std::vector<C2Param::Index>& heapParamIndices,
- std::vector<std::unique_ptr<C2Param>>* const /* heapParams */) const {
- // There are no supported config params.
- return stackParams.empty() && heapParamIndices.empty() ? C2_OK : C2_BAD_INDEX;
-}
-
-c2_status_t V4L2ComponentStore::config_sm(
- const std::vector<C2Param*>& params,
- std::vector<std::unique_ptr<C2SettingResult>>* const /* failures */) {
- // There are no supported config params.
- return params.empty() ? C2_OK : C2_BAD_INDEX;
-}
-
-c2_status_t V4L2ComponentStore::querySupportedValues_sm(
- std::vector<C2FieldSupportedValuesQuery>& fields) const {
- // There are no supported config params.
- return fields.empty() ? C2_OK : C2_BAD_INDEX;
-}
-
-::C2ComponentFactory* V4L2ComponentStore::GetFactory(const C2String& name) {
- ALOGV("%s(%s)", __func__, name.c_str());
- ALOG_ASSERT(V4L2ComponentName::isValid(name.c_str()));
-
- std::lock_guard<std::mutex> lock(mCachedFactoriesLock);
- const auto it = mCachedFactories.find(name);
- if (it != mCachedFactories.end()) return it->second.get();
-
- std::unique_ptr<::C2ComponentFactory> factory = V4L2ComponentFactory::create(
- name, std::static_pointer_cast<C2ReflectorHelper>(getParamReflector()));
- if (factory == nullptr) {
- ALOGE("Failed to create factory for %s", name.c_str());
- return nullptr;
- }
-
- auto ret = factory.get();
- mCachedFactories.emplace(name, std::move(factory));
- return ret;
-}
-
-std::shared_ptr<const C2Component::Traits> V4L2ComponentStore::GetTraits(const C2String& name) {
- ALOGV("%s(%s)", __func__, name.c_str());
-
- if (!V4L2ComponentName::isValid(name.c_str())) {
- ALOGE("Invalid component name: %s", name.c_str());
- return nullptr;
- }
-
- std::lock_guard<std::mutex> lock(mCachedTraitsLock);
- auto it = mCachedTraits.find(name);
- if (it != mCachedTraits.end()) return it->second;
-
- auto traits = std::make_shared<C2Component::Traits>();
- traits->name = name;
- traits->domain = C2Component::DOMAIN_VIDEO;
- traits->rank = kComponentRank;
- traits->mediaType = getMediaTypeFromComponentName(name);
- traits->kind = V4L2ComponentName::isEncoder(name.c_str()) ? C2Component::KIND_ENCODER
- : C2Component::KIND_DECODER;
-
- mCachedTraits.emplace(name, traits);
- return traits;
-}
-
-} // namespace android
diff --git a/components/V4L2Decoder.cpp b/components/V4L2Decoder.cpp
deleted file mode 100644
index cc2c1d1..0000000
--- a/components/V4L2Decoder.cpp
+++ /dev/null
@@ -1,795 +0,0 @@
-// Copyright 2020 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "V4L2Decoder"
-
-#include <v4l2_codec2/components/V4L2Decoder.h>
-
-#include <stdint.h>
-
-#include <algorithm>
-#include <vector>
-
-#include <base/bind.h>
-#include <base/files/scoped_file.h>
-#include <base/memory/ptr_util.h>
-#include <log/log.h>
-
-#include <v4l2_codec2/common/Common.h>
-#include <v4l2_codec2/common/Fourcc.h>
-
-namespace android {
-namespace {
-
-constexpr size_t kNumInputBuffers = 16;
-// Extra buffers for transmitting in the whole video pipeline.
-constexpr size_t kNumExtraOutputBuffers = 4;
-
-// Currently we only support flexible pixel 420 format YCBCR_420_888 in Android.
-// Here is the list of flexible 420 format.
-constexpr std::initializer_list<uint32_t> kSupportedOutputFourccs = {
- Fourcc::YU12, Fourcc::YV12, Fourcc::YM12, Fourcc::YM21,
- Fourcc::NV12, Fourcc::NV21, Fourcc::NM12, Fourcc::NM21,
-};
-
-uint32_t VideoCodecToV4L2PixFmt(VideoCodec codec) {
- switch (codec) {
- case VideoCodec::H264:
- return V4L2_PIX_FMT_H264;
- case VideoCodec::VP8:
- return V4L2_PIX_FMT_VP8;
- case VideoCodec::VP9:
- return V4L2_PIX_FMT_VP9;
- case VideoCodec::HEVC:
- return V4L2_PIX_FMT_HEVC;
- }
-}
-
-} // namespace
-
-// static
-std::unique_ptr<VideoDecoder> V4L2Decoder::Create(
- const VideoCodec& codec, const size_t inputBufferSize, const size_t minNumOutputBuffers,
- GetPoolCB getPoolCb, OutputCB outputCb, ErrorCB errorCb,
- scoped_refptr<::base::SequencedTaskRunner> taskRunner) {
- std::unique_ptr<V4L2Decoder> decoder =
- ::base::WrapUnique<V4L2Decoder>(new V4L2Decoder(taskRunner));
- if (!decoder->start(codec, inputBufferSize, minNumOutputBuffers, std::move(getPoolCb),
- std::move(outputCb), std::move(errorCb))) {
- return nullptr;
- }
- return decoder;
-}
-
-V4L2Decoder::V4L2Decoder(scoped_refptr<::base::SequencedTaskRunner> taskRunner)
- : mTaskRunner(std::move(taskRunner)) {
- ALOGV("%s()", __func__);
-
- mWeakThis = mWeakThisFactory.GetWeakPtr();
-}
-
-V4L2Decoder::~V4L2Decoder() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- mWeakThisFactory.InvalidateWeakPtrs();
-
- // Streamoff input and output queue.
- if (mOutputQueue) {
- mOutputQueue->streamoff();
- mOutputQueue->deallocateBuffers();
- mOutputQueue = nullptr;
- }
- if (mInputQueue) {
- mInputQueue->streamoff();
- mInputQueue->deallocateBuffers();
- mInputQueue = nullptr;
- }
- if (mDevice) {
- mDevice->stopPolling();
- mDevice = nullptr;
- }
-}
-
-bool V4L2Decoder::start(const VideoCodec& codec, const size_t inputBufferSize,
- const size_t minNumOutputBuffers, GetPoolCB getPoolCb, OutputCB outputCb,
- ErrorCB errorCb) {
- ALOGV("%s(codec=%s, inputBufferSize=%zu, minNumOutputBuffers=%zu)", __func__,
- VideoCodecToString(codec), inputBufferSize, minNumOutputBuffers);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- mMinNumOutputBuffers = minNumOutputBuffers;
- mGetPoolCb = std::move(getPoolCb);
- mOutputCb = std::move(outputCb);
- mErrorCb = std::move(errorCb);
-
- if (mState == State::Error) {
- ALOGE("Ignore due to error state.");
- return false;
- }
-
- mDevice = V4L2Device::create();
-
- const uint32_t inputPixelFormat = VideoCodecToV4L2PixFmt(codec);
- if (!mDevice->open(V4L2Device::Type::kDecoder, inputPixelFormat)) {
- ALOGE("Failed to open device for %s", VideoCodecToString(codec));
- return false;
- }
-
- if (!mDevice->hasCapabilities(V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING)) {
- ALOGE("Device does not have VIDEO_M2M_MPLANE and STREAMING capabilities.");
- return false;
- }
-
- struct v4l2_decoder_cmd cmd;
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd = V4L2_DEC_CMD_STOP;
- if (mDevice->ioctl(VIDIOC_TRY_DECODER_CMD, &cmd) != 0) {
- ALOGE("Device does not support flushing (V4L2_DEC_CMD_STOP)");
- return false;
- }
-
- // Subscribe to the resolution change event.
- struct v4l2_event_subscription sub;
- memset(&sub, 0, sizeof(sub));
- sub.type = V4L2_EVENT_SOURCE_CHANGE;
- if (mDevice->ioctl(VIDIOC_SUBSCRIBE_EVENT, &sub) != 0) {
- ALOGE("ioctl() failed: VIDIOC_SUBSCRIBE_EVENT: V4L2_EVENT_SOURCE_CHANGE");
- return false;
- }
-
- // Create Input/Output V4L2Queue, and setup input queue.
- mInputQueue = mDevice->getQueue(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- mOutputQueue = mDevice->getQueue(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
- if (!mInputQueue || !mOutputQueue) {
- ALOGE("Failed to create V4L2 queue.");
- return false;
- }
- if (!setupInputFormat(inputPixelFormat, inputBufferSize)) {
- ALOGE("Failed to setup input format.");
- return false;
- }
-
- if (!mDevice->startPolling(::base::BindRepeating(&V4L2Decoder::serviceDeviceTask, mWeakThis),
- ::base::BindRepeating(&V4L2Decoder::onError, mWeakThis))) {
- ALOGE("Failed to start polling V4L2 device.");
- return false;
- }
-
- setState(State::Idle);
- return true;
-}
-
-bool V4L2Decoder::setupInputFormat(const uint32_t inputPixelFormat, const size_t inputBufferSize) {
- ALOGV("%s(inputPixelFormat=%u, inputBufferSize=%zu)", __func__, inputPixelFormat,
- inputBufferSize);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- // Check if the format is supported.
- std::vector<uint32_t> formats =
- mDevice->enumerateSupportedPixelformats(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- if (std::find(formats.begin(), formats.end(), inputPixelFormat) == formats.end()) {
- ALOGE("Input codec s not supported by device.");
- return false;
- }
-
- // Setup the input format.
- auto format = mInputQueue->setFormat(inputPixelFormat, ui::Size(), inputBufferSize, 0);
- if (!format) {
- ALOGE("Failed to call IOCTL to set input format.");
- return false;
- }
- ALOG_ASSERT(format->fmt.pix_mp.pixelformat == inputPixelFormat);
-
- if (mInputQueue->allocateBuffers(kNumInputBuffers, V4L2_MEMORY_DMABUF) == 0) {
- ALOGE("Failed to allocate input buffer.");
- return false;
- }
- if (!mInputQueue->streamon()) {
- ALOGE("Failed to streamon input queue.");
- return false;
- }
- return true;
-}
-
-void V4L2Decoder::decode(std::unique_ptr<ConstBitstreamBuffer> buffer, DecodeCB decodeCb) {
- ALOGV("%s(id=%d)", __func__, buffer->id);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- if (mState == State::Error) {
- ALOGE("Ignore due to error state.");
- mTaskRunner->PostTask(FROM_HERE, ::base::BindOnce(std::move(decodeCb),
- VideoDecoder::DecodeStatus::kError));
- return;
- }
-
- if (mState == State::Idle) {
- setState(State::Decoding);
- }
-
- mDecodeRequests.push(DecodeRequest(std::move(buffer), std::move(decodeCb)));
- pumpDecodeRequest();
-}
-
-void V4L2Decoder::drain(DecodeCB drainCb) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- switch (mState) {
- case State::Idle:
- ALOGV("Nothing need to drain, ignore.");
- mTaskRunner->PostTask(
- FROM_HERE, ::base::BindOnce(std::move(drainCb), VideoDecoder::DecodeStatus::kOk));
- return;
-
- case State::Decoding:
- mDecodeRequests.push(DecodeRequest(nullptr, std::move(drainCb)));
- pumpDecodeRequest();
- return;
-
- case State::Draining:
- case State::Error:
- ALOGE("Ignore due to wrong state: %s", StateToString(mState));
- mTaskRunner->PostTask(FROM_HERE, ::base::BindOnce(std::move(drainCb),
- VideoDecoder::DecodeStatus::kError));
- return;
- }
-}
-
-void V4L2Decoder::pumpDecodeRequest() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- if (mState != State::Decoding) return;
-
- while (!mDecodeRequests.empty()) {
- // Drain the decoder.
- if (mDecodeRequests.front().buffer == nullptr) {
- ALOGV("Get drain request.");
- // Send the flush command after all input buffers are dequeued. This makes
- // sure all previous resolution changes have been handled because the
- // driver must hold the input buffer that triggers resolution change. The
- // driver cannot decode data in it without new output buffers. If we send
- // the flush now and a queued input buffer triggers resolution change
- // later, the driver will send an output buffer that has
- // V4L2_BUF_FLAG_LAST. But some queued input buffer have not been decoded
- // yet. Also, V4L2VDA calls STREAMOFF and STREAMON after resolution
- // change. They implicitly send a V4L2_DEC_CMD_STOP and V4L2_DEC_CMD_START
- // to the decoder.
- if (mInputQueue->queuedBuffersCount() > 0) {
- ALOGV("Wait for all input buffers dequeued.");
- return;
- }
-
- auto request = std::move(mDecodeRequests.front());
- mDecodeRequests.pop();
-
- if (!sendV4L2DecoderCmd(false)) {
- std::move(request.decodeCb).Run(VideoDecoder::DecodeStatus::kError);
- onError();
- return;
- }
- mDrainCb = std::move(request.decodeCb);
- setState(State::Draining);
- return;
- }
-
- // Pause if no free input buffer. We resume decoding after dequeueing input buffers.
- auto inputBuffer = mInputQueue->getFreeBuffer();
- if (!inputBuffer) {
- ALOGV("There is no free input buffer.");
- return;
- }
-
- auto request = std::move(mDecodeRequests.front());
- mDecodeRequests.pop();
-
- const int32_t bitstreamId = request.buffer->id;
- ALOGV("QBUF to input queue, bitstreadId=%d", bitstreamId);
- inputBuffer->setTimeStamp({.tv_sec = bitstreamId});
- size_t planeSize = inputBuffer->getPlaneSize(0);
- if (request.buffer->size > planeSize) {
- ALOGE("The input size (%zu) is not enough, we need %zu", planeSize,
- request.buffer->size);
- onError();
- return;
- }
-
- ALOGV("Set bytes_used=%zu, offset=%zu", request.buffer->offset + request.buffer->size,
- request.buffer->offset);
- inputBuffer->setPlaneDataOffset(0, request.buffer->offset);
- inputBuffer->setPlaneBytesUsed(0, request.buffer->offset + request.buffer->size);
- std::vector<int> fds;
- fds.push_back(std::move(request.buffer->dmabuf.handle()->data[0]));
- if (!std::move(*inputBuffer).queueDMABuf(fds)) {
- ALOGE("%s(): Failed to QBUF to input queue, bitstreamId=%d", __func__, bitstreamId);
- onError();
- return;
- }
-
- mPendingDecodeCbs.insert(std::make_pair(bitstreamId, std::move(request.decodeCb)));
- }
-}
-
-void V4L2Decoder::flush() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- if (mState == State::Idle) {
- ALOGV("Nothing need to flush, ignore.");
- return;
- }
- if (mState == State::Error) {
- ALOGE("Ignore due to error state.");
- return;
- }
-
- // Call all pending callbacks.
- for (auto& item : mPendingDecodeCbs) {
- std::move(item.second).Run(VideoDecoder::DecodeStatus::kAborted);
- }
- mPendingDecodeCbs.clear();
- if (mDrainCb) {
- std::move(mDrainCb).Run(VideoDecoder::DecodeStatus::kAborted);
- }
-
- // Streamoff both V4L2 queues to drop input and output buffers.
- const bool isOutputStreaming = mOutputQueue->isStreaming();
- mDevice->stopPolling();
- mOutputQueue->streamoff();
- mFrameAtDevice.clear();
- mInputQueue->streamoff();
-
- // Streamon both V4L2 queues.
- mInputQueue->streamon();
- if (isOutputStreaming) {
- mOutputQueue->streamon();
- }
-
- // If there is no free buffer at mOutputQueue, tryFetchVideoFrame() should be triggerred after
- // a buffer is DQBUF from output queue. Now all the buffers are dropped at mOutputQueue, we
- // have to trigger tryFetchVideoFrame() here.
- if (mVideoFramePool) {
- tryFetchVideoFrame();
- }
-
- if (!mDevice->startPolling(::base::BindRepeating(&V4L2Decoder::serviceDeviceTask, mWeakThis),
- ::base::BindRepeating(&V4L2Decoder::onError, mWeakThis))) {
- ALOGE("Failed to start polling V4L2 device.");
- onError();
- return;
- }
-
- setState(State::Idle);
-}
-
-void V4L2Decoder::serviceDeviceTask(bool event) {
- ALOGV("%s(event=%d) state=%s InputQueue(%s):%zu+%zu/%zu, OutputQueue(%s):%zu+%zu/%zu", __func__,
- event, StateToString(mState), (mInputQueue->isStreaming() ? "streamon" : "streamoff"),
- mInputQueue->freeBuffersCount(), mInputQueue->queuedBuffersCount(),
- mInputQueue->allocatedBuffersCount(),
- (mOutputQueue->isStreaming() ? "streamon" : "streamoff"),
- mOutputQueue->freeBuffersCount(), mOutputQueue->queuedBuffersCount(),
- mOutputQueue->allocatedBuffersCount());
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- if (mState == State::Error) return;
-
- // Dequeue output and input queue.
- bool inputDequeued = false;
- while (mInputQueue->queuedBuffersCount() > 0) {
- bool success;
- V4L2ReadableBufferRef dequeuedBuffer;
- std::tie(success, dequeuedBuffer) = mInputQueue->dequeueBuffer();
- if (!success) {
- ALOGE("Failed to dequeue buffer from input queue.");
- onError();
- return;
- }
- if (!dequeuedBuffer) break;
-
- inputDequeued = true;
-
- // Run the corresponding decode callback.
- int32_t id = dequeuedBuffer->getTimeStamp().tv_sec;
- ALOGV("DQBUF from input queue, bitstreamId=%d", id);
- auto it = mPendingDecodeCbs.find(id);
- if (it == mPendingDecodeCbs.end()) {
- ALOGW("Callback is already abandoned.");
- continue;
- }
- std::move(it->second).Run(VideoDecoder::DecodeStatus::kOk);
- mPendingDecodeCbs.erase(it);
- }
-
- bool outputDequeued = false;
- while (mOutputQueue->queuedBuffersCount() > 0) {
- bool success;
- V4L2ReadableBufferRef dequeuedBuffer;
- std::tie(success, dequeuedBuffer) = mOutputQueue->dequeueBuffer();
- if (!success) {
- ALOGE("Failed to dequeue buffer from output queue.");
- onError();
- return;
- }
- if (!dequeuedBuffer) break;
-
- outputDequeued = true;
-
- const size_t bufferId = dequeuedBuffer->bufferId();
- const int32_t bitstreamId = static_cast<int32_t>(dequeuedBuffer->getTimeStamp().tv_sec);
- const size_t bytesUsed = dequeuedBuffer->getPlaneBytesUsed(0);
- const bool isLast = dequeuedBuffer->isLast();
- ALOGV("DQBUF from output queue, bufferId=%zu, bitstreamId=%d, bytesused=%zu, isLast=%d",
- bufferId, bitstreamId, bytesUsed, isLast);
-
- // Get the corresponding VideoFrame of the dequeued buffer.
- auto it = mFrameAtDevice.find(bufferId);
- ALOG_ASSERT(it != mFrameAtDevice.end(), "buffer %zu is not found at mFrameAtDevice",
- bufferId);
- auto frame = std::move(it->second);
- mFrameAtDevice.erase(it);
-
- if (bytesUsed > 0) {
- ALOGV("Send output frame(bitstreamId=%d) to client", bitstreamId);
- frame->setBitstreamId(bitstreamId);
- frame->setVisibleRect(mVisibleRect);
- mOutputCb.Run(std::move(frame));
- } else {
- // Workaround(b/168750131): If the buffer is not enqueued before the next drain is done,
- // then the driver will fail to notify EOS. So we recycle the buffer immediately.
- ALOGV("Recycle empty buffer %zu back to V4L2 output queue.", bufferId);
- dequeuedBuffer.reset();
- auto outputBuffer = mOutputQueue->getFreeBuffer(bufferId);
- ALOG_ASSERT(outputBuffer, "V4L2 output queue slot %zu is not freed.", bufferId);
-
- if (!std::move(*outputBuffer).queueDMABuf(frame->getFDs())) {
- ALOGE("%s(): Failed to recycle empty buffer to output queue.", __func__);
- onError();
- return;
- }
- mFrameAtDevice.insert(std::make_pair(bufferId, std::move(frame)));
- }
-
- if (mDrainCb && isLast) {
- ALOGV("All buffers are drained.");
- sendV4L2DecoderCmd(true);
- std::move(mDrainCb).Run(VideoDecoder::DecodeStatus::kOk);
- setState(State::Idle);
- }
- }
-
- // Handle resolution change event.
- if (event && dequeueResolutionChangeEvent()) {
- if (!changeResolution()) {
- onError();
- return;
- }
- }
-
- // We freed some input buffers, continue handling decode requests.
- if (inputDequeued) {
- mTaskRunner->PostTask(FROM_HERE,
- ::base::BindOnce(&V4L2Decoder::pumpDecodeRequest, mWeakThis));
- }
- // We free some output buffers, try to get VideoFrame.
- if (outputDequeued) {
- mTaskRunner->PostTask(FROM_HERE,
- ::base::BindOnce(&V4L2Decoder::tryFetchVideoFrame, mWeakThis));
- }
-}
-
-bool V4L2Decoder::dequeueResolutionChangeEvent() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- struct v4l2_event ev;
- memset(&ev, 0, sizeof(ev));
- while (mDevice->ioctl(VIDIOC_DQEVENT, &ev) == 0) {
- if (ev.type == V4L2_EVENT_SOURCE_CHANGE &&
- ev.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION) {
- return true;
- }
- }
- return false;
-}
-
-bool V4L2Decoder::changeResolution() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- const std::optional<struct v4l2_format> format = getFormatInfo();
- std::optional<size_t> numOutputBuffers = getNumOutputBuffers();
- if (!format || !numOutputBuffers) {
- return false;
- }
- *numOutputBuffers = std::max(*numOutputBuffers, mMinNumOutputBuffers);
-
- const ui::Size codedSize(format->fmt.pix_mp.width, format->fmt.pix_mp.height);
- if (!setupOutputFormat(codedSize)) {
- return false;
- }
-
- const std::optional<struct v4l2_format> adjustedFormat = getFormatInfo();
- if (!adjustedFormat) {
- return false;
- }
- mCodedSize.set(adjustedFormat->fmt.pix_mp.width, adjustedFormat->fmt.pix_mp.height);
- mVisibleRect = getVisibleRect(mCodedSize);
-
- ALOGI("Need %zu output buffers. coded size: %s, visible rect: %s", *numOutputBuffers,
- toString(mCodedSize).c_str(), toString(mVisibleRect).c_str());
- if (isEmpty(mCodedSize)) {
- ALOGE("Failed to get resolution from V4L2 driver.");
- return false;
- }
-
- mOutputQueue->streamoff();
- mOutputQueue->deallocateBuffers();
- mFrameAtDevice.clear();
- mBlockIdToV4L2Id.clear();
-
- const size_t adjustedNumOutputBuffers =
- mOutputQueue->allocateBuffers(*numOutputBuffers, V4L2_MEMORY_DMABUF);
- if (adjustedNumOutputBuffers == 0) {
- ALOGE("Failed to allocate output buffer.");
- return false;
- }
- ALOGV("Allocated %zu output buffers.", adjustedNumOutputBuffers);
- if (!mOutputQueue->streamon()) {
- ALOGE("Failed to streamon output queue.");
- return false;
- }
-
- // Release the previous VideoFramePool before getting a new one to guarantee only one pool
- // exists at the same time.
- mVideoFramePool.reset();
- // Always use flexible pixel 420 format YCBCR_420_888 in Android.
- mVideoFramePool =
- mGetPoolCb.Run(mCodedSize, HalPixelFormat::YCBCR_420_888, adjustedNumOutputBuffers);
- if (!mVideoFramePool) {
- ALOGE("Failed to get block pool with size: %s", toString(mCodedSize).c_str());
- return false;
- }
-
- tryFetchVideoFrame();
- return true;
-}
-
-bool V4L2Decoder::setupOutputFormat(const ui::Size& size) {
- for (const uint32_t& pixfmt :
- mDevice->enumerateSupportedPixelformats(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) {
- if (std::find(kSupportedOutputFourccs.begin(), kSupportedOutputFourccs.end(), pixfmt) ==
- kSupportedOutputFourccs.end()) {
- ALOGD("Pixel format %s is not supported, skipping...", fourccToString(pixfmt).c_str());
- continue;
- }
-
- if (mOutputQueue->setFormat(pixfmt, size, 0) != std::nullopt) {
- return true;
- }
- }
-
- ALOGE("Failed to find supported pixel format");
- return false;
-}
-
-void V4L2Decoder::tryFetchVideoFrame() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- if (!mVideoFramePool) {
- ALOGE("mVideoFramePool is null, failed to get the instance after resolution change?");
- onError();
- return;
- }
-
- if (mOutputQueue->freeBuffersCount() == 0) {
- ALOGV("No free V4L2 output buffers, ignore.");
- return;
- }
-
- if (!mVideoFramePool->getVideoFrame(
- ::base::BindOnce(&V4L2Decoder::onVideoFrameReady, mWeakThis))) {
- ALOGV("%s(): Previous callback is running, ignore.", __func__);
- }
-}
-
-void V4L2Decoder::onVideoFrameReady(
- std::optional<VideoFramePool::FrameWithBlockId> frameWithBlockId) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- if (!frameWithBlockId) {
- ALOGE("Got nullptr VideoFrame.");
- onError();
- return;
- }
-
- // Unwrap our arguments.
- std::unique_ptr<VideoFrame> frame;
- uint32_t blockId;
- std::tie(frame, blockId) = std::move(*frameWithBlockId);
-
- std::optional<V4L2WritableBufferRef> outputBuffer;
- // Find the V4L2 buffer that is associated with this block.
- auto iter = mBlockIdToV4L2Id.find(blockId);
- if (iter != mBlockIdToV4L2Id.end()) {
- // If we have met this block in the past, reuse the same V4L2 buffer.
- outputBuffer = mOutputQueue->getFreeBuffer(iter->second);
- } else if (mBlockIdToV4L2Id.size() < mOutputQueue->allocatedBuffersCount()) {
- // If this is the first time we see this block, give it the next
- // available V4L2 buffer.
- const size_t v4l2BufferId = mBlockIdToV4L2Id.size();
- mBlockIdToV4L2Id.emplace(blockId, v4l2BufferId);
- outputBuffer = mOutputQueue->getFreeBuffer(v4l2BufferId);
- } else {
- // If this happens, this is a bug in VideoFramePool. It should never
- // provide more blocks than we have V4L2 buffers.
- ALOGE("Got more different blocks than we have V4L2 buffers for.");
- }
-
- if (!outputBuffer) {
- ALOGE("V4L2 buffer not available. blockId=%u", blockId);
- onError();
- return;
- }
-
- uint32_t v4l2Id = outputBuffer->bufferId();
- ALOGV("QBUF to output queue, blockId=%u, V4L2Id=%u", blockId, v4l2Id);
-
- if (!std::move(*outputBuffer).queueDMABuf(frame->getFDs())) {
- ALOGE("%s(): Failed to QBUF to output queue, blockId=%u, V4L2Id=%u", __func__, blockId,
- v4l2Id);
- onError();
- return;
- }
- if (mFrameAtDevice.find(v4l2Id) != mFrameAtDevice.end()) {
- ALOGE("%s(): V4L2 buffer %d already enqueued.", __func__, v4l2Id);
- onError();
- return;
- }
- mFrameAtDevice.insert(std::make_pair(v4l2Id, std::move(frame)));
-
- tryFetchVideoFrame();
-}
-
-std::optional<size_t> V4L2Decoder::getNumOutputBuffers() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- struct v4l2_control ctrl;
- memset(&ctrl, 0, sizeof(ctrl));
- ctrl.id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE;
- if (mDevice->ioctl(VIDIOC_G_CTRL, &ctrl) != 0) {
- ALOGE("ioctl() failed: VIDIOC_G_CTRL");
- return std::nullopt;
- }
- ALOGV("%s() V4L2_CID_MIN_BUFFERS_FOR_CAPTURE returns %u", __func__, ctrl.value);
-
- return ctrl.value + kNumExtraOutputBuffers;
-}
-
-std::optional<struct v4l2_format> V4L2Decoder::getFormatInfo() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- struct v4l2_format format;
- memset(&format, 0, sizeof(format));
- format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- if (mDevice->ioctl(VIDIOC_G_FMT, &format) != 0) {
- ALOGE("ioctl() failed: VIDIOC_G_FMT");
- return std::nullopt;
- }
-
- return format;
-}
-
-Rect V4L2Decoder::getVisibleRect(const ui::Size& codedSize) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- struct v4l2_rect* visible_rect = nullptr;
- struct v4l2_selection selection_arg;
- memset(&selection_arg, 0, sizeof(selection_arg));
- selection_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- selection_arg.target = V4L2_SEL_TGT_COMPOSE;
-
- if (mDevice->ioctl(VIDIOC_G_SELECTION, &selection_arg) == 0) {
- ALOGV("VIDIOC_G_SELECTION is supported");
- visible_rect = &selection_arg.r;
- } else {
- ALOGV("Fallback to VIDIOC_G_CROP");
- struct v4l2_crop crop_arg;
- memset(&crop_arg, 0, sizeof(crop_arg));
- crop_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
-
- if (mDevice->ioctl(VIDIOC_G_CROP, &crop_arg) != 0) {
- ALOGW("ioctl() VIDIOC_G_CROP failed");
- return Rect(codedSize.width, codedSize.height);
- }
- visible_rect = &crop_arg.c;
- }
-
- Rect rect(visible_rect->left, visible_rect->top, visible_rect->left + visible_rect->width,
- visible_rect->top + visible_rect->height);
- ALOGV("visible rectangle is %s", toString(rect).c_str());
- if (!contains(Rect(codedSize.width, codedSize.height), rect)) {
- ALOGW("visible rectangle %s is not inside coded size %s", toString(rect).c_str(),
- toString(codedSize).c_str());
- return Rect(codedSize.width, codedSize.height);
- }
- if (rect.isEmpty()) {
- ALOGW("visible size is empty");
- return Rect(codedSize.width, codedSize.height);
- }
-
- return rect;
-}
-
-bool V4L2Decoder::sendV4L2DecoderCmd(bool start) {
- ALOGV("%s(start=%d)", __func__, start);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- struct v4l2_decoder_cmd cmd;
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd = start ? V4L2_DEC_CMD_START : V4L2_DEC_CMD_STOP;
- if (mDevice->ioctl(VIDIOC_DECODER_CMD, &cmd) != 0) {
- ALOGE("ioctl() VIDIOC_DECODER_CMD failed: start=%d", start);
- return false;
- }
-
- return true;
-}
-
-void V4L2Decoder::onError() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- setState(State::Error);
- mErrorCb.Run();
-}
-
-void V4L2Decoder::setState(State newState) {
- ALOGV("%s(%s)", __func__, StateToString(newState));
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- if (mState == newState) return;
- if (mState == State::Error) {
- ALOGV("Already in Error state.");
- return;
- }
-
- switch (newState) {
- case State::Idle:
- break;
- case State::Decoding:
- break;
- case State::Draining:
- if (mState != State::Decoding) newState = State::Error;
- break;
- case State::Error:
- break;
- }
-
- ALOGI("Set state %s => %s", StateToString(mState), StateToString(newState));
- mState = newState;
-}
-
-// static
-const char* V4L2Decoder::StateToString(State state) {
- switch (state) {
- case State::Idle:
- return "Idle";
- case State::Decoding:
- return "Decoding";
- case State::Draining:
- return "Draining";
- case State::Error:
- return "Error";
- }
-}
-
-} // namespace android
diff --git a/components/V4L2Encoder.cpp b/components/V4L2Encoder.cpp
deleted file mode 100644
index cd20cb5..0000000
--- a/components/V4L2Encoder.cpp
+++ /dev/null
@@ -1,1095 +0,0 @@
-// Copyright 2021 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "V4L2Encoder"
-
-#include <v4l2_codec2/components/V4L2Encoder.h>
-
-#include <stdint.h>
-#include <optional>
-#include <vector>
-
-#include <base/bind.h>
-#include <base/files/scoped_file.h>
-#include <base/memory/ptr_util.h>
-#include <log/log.h>
-#include <ui/Rect.h>
-
-#include <v4l2_codec2/common/EncodeHelpers.h>
-#include <v4l2_codec2/common/Fourcc.h>
-#include <v4l2_codec2/common/V4L2Device.h>
-#include <v4l2_codec2/components/BitstreamBuffer.h>
-
-namespace android {
-
-namespace {
-
-const VideoPixelFormat kInputPixelFormat = VideoPixelFormat::NV12;
-
-// The maximum size for output buffer, which is chosen empirically for a 1080p video.
-constexpr size_t kMaxBitstreamBufferSizeInBytes = 2 * 1024 * 1024; // 2MB
-// The frame size for 1080p (FHD) video in pixels.
-constexpr int k1080PSizeInPixels = 1920 * 1080;
-// The frame size for 1440p (QHD) video in pixels.
-constexpr int k1440PSizeInPixels = 2560 * 1440;
-
-// Use quadruple size of kMaxBitstreamBufferSizeInBytes when the input frame size is larger than
-// 1440p, double if larger than 1080p. This is chosen empirically for some 4k encoding use cases and
-// the Android CTS VideoEncoderTest (crbug.com/927284).
-size_t GetMaxOutputBufferSize(const ui::Size& size) {
- if (getArea(size) > k1440PSizeInPixels) return kMaxBitstreamBufferSizeInBytes * 4;
- if (getArea(size) > k1080PSizeInPixels) return kMaxBitstreamBufferSizeInBytes * 2;
- return kMaxBitstreamBufferSizeInBytes;
-}
-
-// Define V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR control code if not present in header files.
-#ifndef V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR
-#define V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR (V4L2_CID_MPEG_BASE + 644)
-#endif
-
-} // namespace
-
-// static
-std::unique_ptr<VideoEncoder> V4L2Encoder::create(
- C2Config::profile_t outputProfile, std::optional<uint8_t> level,
- const ui::Size& visibleSize, uint32_t stride, uint32_t keyFramePeriod,
- C2Config::bitrate_mode_t bitrateMode, uint32_t bitrate, std::optional<uint32_t> peakBitrate,
- FetchOutputBufferCB fetchOutputBufferCb, InputBufferDoneCB inputBufferDoneCb,
- OutputBufferDoneCB outputBufferDoneCb, DrainDoneCB drainDoneCb, ErrorCB errorCb,
- scoped_refptr<::base::SequencedTaskRunner> taskRunner) {
- ALOGV("%s()", __func__);
-
- std::unique_ptr<V4L2Encoder> encoder = ::base::WrapUnique<V4L2Encoder>(new V4L2Encoder(
- std::move(taskRunner), std::move(fetchOutputBufferCb), std::move(inputBufferDoneCb),
- std::move(outputBufferDoneCb), std::move(drainDoneCb), std::move(errorCb)));
- if (!encoder->initialize(outputProfile, level, visibleSize, stride, keyFramePeriod, bitrateMode,
- bitrate, peakBitrate)) {
- return nullptr;
- }
- return encoder;
-}
-
-V4L2Encoder::V4L2Encoder(scoped_refptr<::base::SequencedTaskRunner> taskRunner,
- FetchOutputBufferCB fetchOutputBufferCb,
- InputBufferDoneCB inputBufferDoneCb, OutputBufferDoneCB outputBufferDoneCb,
- DrainDoneCB drainDoneCb, ErrorCB errorCb)
- : mFetchOutputBufferCb(fetchOutputBufferCb),
- mInputBufferDoneCb(inputBufferDoneCb),
- mOutputBufferDoneCb(outputBufferDoneCb),
- mDrainDoneCb(std::move(drainDoneCb)),
- mErrorCb(std::move(errorCb)),
- mTaskRunner(std::move(taskRunner)) {
- ALOGV("%s()", __func__);
-
- mWeakThis = mWeakThisFactory.GetWeakPtr();
-}
-
-V4L2Encoder::~V4L2Encoder() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- mWeakThisFactory.InvalidateWeakPtrs();
-
- // Flushing the encoder will stop polling and streaming on the V4L2 device queues.
- flush();
-
- // Deallocate all V4L2 device input and output buffers.
- destroyInputBuffers();
- destroyOutputBuffers();
-}
-
-bool V4L2Encoder::encode(std::unique_ptr<InputFrame> frame) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mState != State::UNINITIALIZED);
-
- // If we're in the error state we can immediately return, freeing the input buffer.
- if (mState == State::ERROR) {
- return false;
- }
-
- if (!frame) {
- ALOGW("Empty encode request scheduled");
- return false;
- }
-
- mEncodeRequests.push(EncodeRequest(std::move(frame)));
-
- // If we were waiting for encode requests, start encoding again.
- if (mState == State::WAITING_FOR_INPUT_FRAME) {
- setState(State::ENCODING);
- mTaskRunner->PostTask(FROM_HERE,
- ::base::BindOnce(&V4L2Encoder::handleEncodeRequest, mWeakThis));
- }
-
- return true;
-}
-
-void V4L2Encoder::drain() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- // We can only start draining if all the requests in our input queue has been queued on the V4L2
- // device input queue, so we mark the last item in the input queue as EOS.
- if (!mEncodeRequests.empty()) {
- ALOGV("Marking last item (index: %" PRIu64 ") in encode request queue as EOS",
- mEncodeRequests.back().video_frame->index());
- mEncodeRequests.back().end_of_stream = true;
- return;
- }
-
- // Start a drain operation on the device. If no buffers are currently queued the device will
- // return an empty buffer with the V4L2_BUF_FLAG_LAST flag set.
- handleDrainRequest();
-}
-
-void V4L2Encoder::flush() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- handleFlushRequest();
-}
-
-bool V4L2Encoder::setBitrate(uint32_t bitrate) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
- {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_BITRATE, bitrate)})) {
- ALOGE("Setting bitrate to %u failed", bitrate);
- return false;
- }
- return true;
-}
-
-bool V4L2Encoder::setPeakBitrate(uint32_t peakBitrate) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
- {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_BITRATE_PEAK, peakBitrate)})) {
- // TODO(b/190336806): Our stack doesn't support dynamic peak bitrate changes yet, ignore
- // errors for now.
- ALOGW("Setting peak bitrate to %u failed", peakBitrate);
- }
- return true;
-}
-
-bool V4L2Encoder::setFramerate(uint32_t framerate) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- struct v4l2_streamparm parms;
- memset(&parms, 0, sizeof(v4l2_streamparm));
- parms.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- parms.parm.output.timeperframe.numerator = 1;
- parms.parm.output.timeperframe.denominator = framerate;
- if (mDevice->ioctl(VIDIOC_S_PARM, &parms) != 0) {
- ALOGE("Setting framerate to %u failed", framerate);
- return false;
- }
- return true;
-}
-
-void V4L2Encoder::requestKeyframe() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- mKeyFrameCounter = 0;
-}
-
-VideoPixelFormat V4L2Encoder::inputFormat() const {
- return mInputLayout ? mInputLayout.value().mFormat : VideoPixelFormat::UNKNOWN;
-}
-
-bool V4L2Encoder::initialize(C2Config::profile_t outputProfile, std::optional<uint8_t> level,
- const ui::Size& visibleSize, uint32_t stride, uint32_t keyFramePeriod,
- C2Config::bitrate_mode_t bitrateMode, uint32_t bitrate,
- std::optional<uint32_t> peakBitrate) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(keyFramePeriod > 0);
-
- mVisibleSize = visibleSize;
- mKeyFramePeriod = keyFramePeriod;
- mKeyFrameCounter = 0;
-
- // Open the V4L2 device for encoding to the requested output format.
- // TODO(dstaessens): Avoid conversion to VideoCodecProfile and use C2Config::profile_t directly.
- uint32_t outputPixelFormat = V4L2Device::C2ProfileToV4L2PixFmt(outputProfile, false);
- if (!outputPixelFormat) {
- ALOGE("Invalid output profile %s", profileToString(outputProfile));
- return false;
- }
-
- mDevice = V4L2Device::create();
- if (!mDevice) {
- ALOGE("Failed to create V4L2 device");
- return false;
- }
-
- if (!mDevice->open(V4L2Device::Type::kEncoder, outputPixelFormat)) {
- ALOGE("Failed to open device for profile %s (%s)", profileToString(outputProfile),
- fourccToString(outputPixelFormat).c_str());
- return false;
- }
-
- // Make sure the device has all required capabilities (multi-planar Memory-To-Memory and
- // streaming I/O), and whether flushing is supported.
- if (!mDevice->hasCapabilities(V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING)) {
- ALOGE("Device doesn't have the required capabilities");
- return false;
- }
- if (!mDevice->isCommandSupported(V4L2_ENC_CMD_STOP)) {
- ALOGE("Device does not support flushing (V4L2_ENC_CMD_STOP)");
- return false;
- }
-
- // Get input/output queues so we can send encode request to the device and get back the results.
- mInputQueue = mDevice->getQueue(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- mOutputQueue = mDevice->getQueue(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
- if (!mInputQueue || !mOutputQueue) {
- ALOGE("Failed to get V4L2 device queues");
- return false;
- }
-
- // Configure the requested bitrate mode and bitrate on the device.
- if (!configureBitrateMode(bitrateMode) || !setBitrate(bitrate)) return false;
-
- // If the bitrate mode is VBR we also need to configure the peak bitrate on the device.
- if ((bitrateMode == C2Config::BITRATE_VARIABLE) && !setPeakBitrate(*peakBitrate)) return false;
-
- // First try to configure the specified output format, as changing the output format can affect
- // the configured input format.
- if (!configureOutputFormat(outputProfile)) return false;
-
- // Configure the input format. If the device doesn't support the specified format we'll use one
- // of the device's preferred formats in combination with an input format convertor.
- if (!configureInputFormat(kInputPixelFormat, stride)) return false;
-
- // Create input and output buffers.
- if (!createInputBuffers() || !createOutputBuffers()) return false;
-
- // Configure the device, setting all required controls.
- if (!configureDevice(outputProfile, level)) return false;
-
- // We're ready to start encoding now.
- setState(State::WAITING_FOR_INPUT_FRAME);
- return true;
-}
-
-void V4L2Encoder::handleEncodeRequest() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mState == State::ENCODING || mState == State::ERROR);
-
- // If we're in the error state we can immediately return.
- if (mState == State::ERROR) {
- return;
- }
-
- // It's possible we flushed the encoder since this function was scheduled.
- if (mEncodeRequests.empty()) {
- return;
- }
-
- // Get the next encode request from the queue.
- EncodeRequest& encodeRequest = mEncodeRequests.front();
-
- // Check if the device has free input buffers available. If not we'll switch to the
- // WAITING_FOR_INPUT_BUFFERS state, and resume encoding once we've dequeued an input buffer.
- // Note: The input buffers are not copied into the device's input buffers, but rather a memory
- // pointer is imported. We still have to throttle the number of enqueues queued simultaneously
- // on the device however.
- if (mInputQueue->freeBuffersCount() == 0) {
- ALOGV("Waiting for device to return input buffers");
- setState(State::WAITING_FOR_V4L2_BUFFER);
- return;
- }
-
- // Request the next frame to be a key frame each time the counter reaches 0.
- if (mKeyFrameCounter == 0) {
- if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
- {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME)})) {
- ALOGE("Failed requesting key frame");
- onError();
- return;
- }
- }
- mKeyFrameCounter = (mKeyFrameCounter + 1) % mKeyFramePeriod;
-
- // Enqueue the input frame in the V4L2 device.
- uint64_t index = encodeRequest.video_frame->index();
- uint64_t timestamp = encodeRequest.video_frame->timestamp();
- bool end_of_stream = encodeRequest.end_of_stream;
- if (!enqueueInputBuffer(std::move(encodeRequest.video_frame))) {
- ALOGE("Failed to enqueue input frame (index: %" PRIu64 ", timestamp: %" PRId64 ")", index,
- timestamp);
- onError();
- return;
- }
- mEncodeRequests.pop();
-
- // Start streaming and polling on the input and output queue if required.
- if (!mInputQueue->isStreaming()) {
- ALOG_ASSERT(!mOutputQueue->isStreaming());
- if (!mOutputQueue->streamon() || !mInputQueue->streamon()) {
- ALOGE("Failed to start streaming on input and output queue");
- onError();
- return;
- }
- startDevicePoll();
- }
-
- // Queue buffers on output queue. These buffers will be used to store the encoded bitstream.
- while (mOutputQueue->freeBuffersCount() > 0) {
- if (!enqueueOutputBuffer()) return;
- }
-
- // Drain the encoder if requested.
- if (end_of_stream) {
- handleDrainRequest();
- return;
- }
-
- if (mEncodeRequests.empty()) {
- setState(State::WAITING_FOR_INPUT_FRAME);
- return;
- }
-
- // Schedule the next buffer to be encoded.
- mTaskRunner->PostTask(FROM_HERE,
- ::base::BindOnce(&V4L2Encoder::handleEncodeRequest, mWeakThis));
-}
-
-void V4L2Encoder::handleFlushRequest() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- // Stop the device poll thread.
- stopDevicePoll();
-
- // Stop streaming on the V4L2 device, which stops all currently queued encode operations and
- // releases all buffers currently in use by the device.
- for (auto& queue : {mInputQueue, mOutputQueue}) {
- if (queue && queue->isStreaming() && !queue->streamoff()) {
- ALOGE("Failed to stop streaming on the device queue");
- onError();
- }
- }
-
- // Clear all outstanding encode requests and references to input and output queue buffers.
- while (!mEncodeRequests.empty()) {
- mEncodeRequests.pop();
- }
- for (auto& buf : mInputBuffers) {
- buf = nullptr;
- }
- for (auto& buf : mOutputBuffers) {
- buf = nullptr;
- }
-
- // Streaming and polling on the V4L2 device input and output queues will be resumed once new
- // encode work is queued.
- if (mState != State::ERROR) {
- setState(State::WAITING_FOR_INPUT_FRAME);
- }
-}
-
-void V4L2Encoder::handleDrainRequest() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- if (mState == State::DRAINING || mState == State::ERROR) {
- return;
- }
-
- setState(State::DRAINING);
-
- // If we're not streaming we can consider the request completed immediately.
- if (!mInputQueue->isStreaming()) {
- onDrainDone(true);
- return;
- }
-
- struct v4l2_encoder_cmd cmd;
- memset(&cmd, 0, sizeof(v4l2_encoder_cmd));
- cmd.cmd = V4L2_ENC_CMD_STOP;
- if (mDevice->ioctl(VIDIOC_ENCODER_CMD, &cmd) != 0) {
- ALOGE("Failed to stop encoder");
- onDrainDone(false);
- return;
- }
- ALOGV("%s(): Sent STOP command to encoder", __func__);
-}
-
-void V4L2Encoder::onDrainDone(bool done) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mState == State::DRAINING || mState == State::ERROR);
-
- if (mState == State::ERROR) {
- return;
- }
-
- if (!done) {
- ALOGE("draining the encoder failed");
- mDrainDoneCb.Run(false);
- onError();
- return;
- }
-
- ALOGV("Draining done");
- mDrainDoneCb.Run(true);
-
- // Draining the encoder is done, we can now start encoding again.
- if (!mEncodeRequests.empty()) {
- setState(State::ENCODING);
- mTaskRunner->PostTask(FROM_HERE,
- ::base::BindOnce(&V4L2Encoder::handleEncodeRequest, mWeakThis));
- } else {
- setState(State::WAITING_FOR_INPUT_FRAME);
- }
-}
-
-bool V4L2Encoder::configureInputFormat(VideoPixelFormat inputFormat, uint32_t stride) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mState == State::UNINITIALIZED);
- ALOG_ASSERT(!mInputQueue->isStreaming());
- ALOG_ASSERT(!isEmpty(mVisibleSize));
-
- // First try to use the requested pixel format directly.
- std::optional<struct v4l2_format> format;
- auto fourcc = Fourcc::fromVideoPixelFormat(inputFormat, false);
- if (fourcc) {
- format = mInputQueue->setFormat(fourcc->toV4L2PixFmt(), mVisibleSize, 0, stride);
- }
-
- // If the device doesn't support the requested input format we'll try the device's preferred
- // input pixel formats and use a format convertor. We need to try all formats as some formats
- // might not be supported for the configured output format.
- if (!format) {
- std::vector<uint32_t> preferredFormats =
- mDevice->preferredInputFormat(V4L2Device::Type::kEncoder);
- for (uint32_t i = 0; !format && i < preferredFormats.size(); ++i) {
- format = mInputQueue->setFormat(preferredFormats[i], mVisibleSize, 0, stride);
- }
- }
-
- if (!format) {
- ALOGE("Failed to set input format to %s", videoPixelFormatToString(inputFormat).c_str());
- return false;
- }
-
- // Check whether the negotiated input format is valid. The coded size might be adjusted to match
- // encoder minimums, maximums and alignment requirements of the currently selected formats.
- auto layout = V4L2Device::v4L2FormatToVideoFrameLayout(*format);
- if (!layout) {
- ALOGE("Invalid input layout");
- return false;
- }
-
- mInputLayout = layout.value();
- if (!contains(Rect(mInputLayout->mCodedSize.width, mInputLayout->mCodedSize.height),
- Rect(mVisibleSize.width, mVisibleSize.height))) {
- ALOGE("Input size %s exceeds encoder capability, encoder can handle %s",
- toString(mVisibleSize).c_str(), toString(mInputLayout->mCodedSize).c_str());
- return false;
- }
-
- // Calculate the input coded size from the format.
- // TODO(dstaessens): How is this different from mInputLayout->coded_size()?
- mInputCodedSize = V4L2Device::allocatedSizeFromV4L2Format(*format);
-
- // Configuring the input format might cause the output buffer size to change.
- auto outputFormat = mOutputQueue->getFormat();
- if (!outputFormat.first) {
- ALOGE("Failed to get output format (errno: %i)", outputFormat.second);
- return false;
- }
- uint32_t AdjustedOutputBufferSize = outputFormat.first->fmt.pix_mp.plane_fmt[0].sizeimage;
- if (mOutputBufferSize != AdjustedOutputBufferSize) {
- mOutputBufferSize = AdjustedOutputBufferSize;
- ALOGV("Output buffer size adjusted to: %u", mOutputBufferSize);
- }
-
- // The coded input size might be different from the visible size due to alignment requirements,
- // So we need to specify the visible rectangle. Note that this rectangle might still be adjusted
- // due to hardware limitations.
- Rect visibleRectangle(mVisibleSize.width, mVisibleSize.height);
-
- struct v4l2_rect rect;
- memset(&rect, 0, sizeof(rect));
- rect.left = visibleRectangle.left;
- rect.top = visibleRectangle.top;
- rect.width = visibleRectangle.width();
- rect.height = visibleRectangle.height();
-
- // Try to adjust the visible rectangle using the VIDIOC_S_SELECTION command. If this is not
- // supported we'll try to use the VIDIOC_S_CROP command instead. The visible rectangle might be
- // adjusted to conform to hardware limitations (e.g. round to closest horizontal and vertical
- // offsets, width and height).
- struct v4l2_selection selection_arg;
- memset(&selection_arg, 0, sizeof(selection_arg));
- selection_arg.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- selection_arg.target = V4L2_SEL_TGT_CROP;
- selection_arg.r = rect;
- if (mDevice->ioctl(VIDIOC_S_SELECTION, &selection_arg) == 0) {
- visibleRectangle = Rect(selection_arg.r.left, selection_arg.r.top,
- selection_arg.r.left + selection_arg.r.width,
- selection_arg.r.top + selection_arg.r.height);
- } else {
- struct v4l2_crop crop;
- memset(&crop, 0, sizeof(v4l2_crop));
- crop.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- crop.c = rect;
- if (mDevice->ioctl(VIDIOC_S_CROP, &crop) != 0 ||
- mDevice->ioctl(VIDIOC_G_CROP, &crop) != 0) {
- ALOGE("Failed to crop to specified visible rectangle");
- return false;
- }
- visibleRectangle = Rect(crop.c.left, crop.c.top, crop.c.left + crop.c.width,
- crop.c.top + crop.c.height);
- }
-
- ALOGV("Input format set to %s (size: %s, adjusted size: %dx%d, coded size: %s)",
- videoPixelFormatToString(mInputLayout->mFormat).c_str(), toString(mVisibleSize).c_str(),
- visibleRectangle.width(), visibleRectangle.height(), toString(mInputCodedSize).c_str());
-
- mVisibleSize.set(visibleRectangle.width(), visibleRectangle.height());
- return true;
-}
-
-bool V4L2Encoder::configureOutputFormat(C2Config::profile_t outputProfile) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mState == State::UNINITIALIZED);
- ALOG_ASSERT(!mOutputQueue->isStreaming());
- ALOG_ASSERT(!isEmpty(mVisibleSize));
-
- auto format = mOutputQueue->setFormat(V4L2Device::C2ProfileToV4L2PixFmt(outputProfile, false),
- mVisibleSize, GetMaxOutputBufferSize(mVisibleSize));
- if (!format) {
- ALOGE("Failed to set output format to %s", profileToString(outputProfile));
- return false;
- }
-
- // The device might adjust the requested output buffer size to match hardware requirements.
- mOutputBufferSize = format->fmt.pix_mp.plane_fmt[0].sizeimage;
-
- ALOGV("Output format set to %s (buffer size: %u)", profileToString(outputProfile),
- mOutputBufferSize);
- return true;
-}
-
-bool V4L2Encoder::configureDevice(C2Config::profile_t outputProfile,
- std::optional<const uint8_t> outputH264Level) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- // Enable frame-level bitrate control. This is the only mandatory general control.
- if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
- {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE, 1)})) {
- ALOGW("Failed enabling bitrate control");
- // TODO(b/161508368): V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE is currently not supported yet,
- // assume the operation was successful for now.
- }
-
- // Additional optional controls:
- // - Enable macroblock-level bitrate control.
- // - Set GOP length to 0 to disable periodic key frames.
- mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG, {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE, 1),
- V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_GOP_SIZE, 0)});
-
- // All controls below are H.264-specific, so we can return here if the profile is not H.264.
- if (outputProfile >= C2Config::PROFILE_AVC_BASELINE &&
- outputProfile <= C2Config::PROFILE_AVC_ENHANCED_MULTIVIEW_DEPTH_HIGH) {
- return configureH264(outputProfile, outputH264Level);
- }
-
- return true;
-}
-
-bool V4L2Encoder::configureH264(C2Config::profile_t outputProfile,
- std::optional<const uint8_t> outputH264Level) {
- // When encoding H.264 we want to prepend SPS and PPS to each IDR for resilience. Some
- // devices support this through the V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR control.
- // Otherwise we have to cache the latest SPS and PPS and inject these manually.
- if (mDevice->isCtrlExposed(V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR)) {
- if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
- {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR, 1)})) {
- ALOGE("Failed to configure device to prepend SPS and PPS to each IDR");
- return false;
- }
- mInjectParamsBeforeIDR = false;
- ALOGV("Device supports prepending SPS and PPS to each IDR");
- } else {
- mInjectParamsBeforeIDR = true;
- ALOGV("Device doesn't support prepending SPS and PPS to IDR, injecting manually.");
- }
-
- std::vector<V4L2ExtCtrl> h264Ctrls;
-
- // No B-frames, for lowest decoding latency.
- h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_B_FRAMES, 0);
- // Quantization parameter maximum value (for variable bitrate control).
- h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 51);
-
- // Set H.264 profile.
- int32_t profile = V4L2Device::c2ProfileToV4L2H264Profile(outputProfile);
- if (profile < 0) {
- ALOGE("Trying to set invalid H.264 profile");
- return false;
- }
- h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_H264_PROFILE, profile);
-
- // Set H.264 output level. Use Level 4.0 as fallback default.
- int32_t h264Level =
- static_cast<int32_t>(outputH264Level.value_or(V4L2_MPEG_VIDEO_H264_LEVEL_4_0));
- h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_H264_LEVEL, h264Level);
-
- // Ask not to put SPS and PPS into separate bitstream buffers.
- h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_HEADER_MODE,
- V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME);
-
- // Ignore return value as these controls are optional.
- mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG, std::move(h264Ctrls));
-
- return true;
-}
-
-bool V4L2Encoder::configureBitrateMode(C2Config::bitrate_mode_t bitrateMode) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- v4l2_mpeg_video_bitrate_mode v4l2BitrateMode =
- V4L2Device::C2BitrateModeToV4L2BitrateMode(bitrateMode);
- if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
- {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_BITRATE_MODE, v4l2BitrateMode)})) {
- // TODO(b/190336806): Our stack doesn't support bitrate mode changes yet. We default to CBR
- // which is currently the only supported mode so we can safely ignore this for now.
- ALOGW("Setting bitrate mode to %u failed", v4l2BitrateMode);
- }
- return true;
-}
-
-bool V4L2Encoder::startDevicePoll() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- if (!mDevice->startPolling(::base::BindRepeating(&V4L2Encoder::serviceDeviceTask, mWeakThis),
- ::base::BindRepeating(&V4L2Encoder::onPollError, mWeakThis))) {
- ALOGE("Device poll thread failed to start");
- onError();
- return false;
- }
-
- ALOGV("Device poll started");
- return true;
-}
-
-bool V4L2Encoder::stopDevicePoll() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- if (!mDevice->stopPolling()) {
- ALOGE("Failed to stop polling on the device");
- onError();
- return false;
- }
-
- ALOGV("Device poll stopped");
- return true;
-}
-
-void V4L2Encoder::onPollError() {
- ALOGV("%s()", __func__);
- onError();
-}
-
-void V4L2Encoder::serviceDeviceTask(bool /*event*/) {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mState != State::UNINITIALIZED);
-
- if (mState == State::ERROR) {
- return;
- }
-
- // Dequeue completed input (VIDEO_OUTPUT) buffers, and recycle to the free list.
- while (mInputQueue->queuedBuffersCount() > 0) {
- if (!dequeueInputBuffer()) break;
- }
-
- // Dequeue completed output (VIDEO_CAPTURE) buffers, and recycle to the free list.
- while (mOutputQueue->queuedBuffersCount() > 0) {
- if (!dequeueOutputBuffer()) break;
- }
-
- ALOGV("%s() - done", __func__);
-}
-
-bool V4L2Encoder::enqueueInputBuffer(std::unique_ptr<InputFrame> frame) {
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mInputQueue->freeBuffersCount() > 0);
- ALOG_ASSERT(mState == State::ENCODING);
- ALOG_ASSERT(frame);
- ALOG_ASSERT(mInputLayout->mFormat == frame->pixelFormat());
- ALOG_ASSERT(mInputLayout->mPlanes.size() == frame->planes().size());
-
- auto format = frame->pixelFormat();
- auto planes = frame->planes();
- auto index = frame->index();
- auto timestamp = frame->timestamp();
-
- ALOGV("%s(): queuing input buffer (index: %" PRId64 ")", __func__, index);
-
- auto buffer = mInputQueue->getFreeBuffer();
- if (!buffer) {
- ALOGE("Failed to get free buffer from device input queue");
- return false;
- }
-
- // Mark the buffer with the frame's timestamp so we can identify the associated output buffers.
- buffer->setTimeStamp(
- {.tv_sec = static_cast<time_t>(timestamp / ::base::Time::kMicrosecondsPerSecond),
- .tv_usec = static_cast<time_t>(timestamp % ::base::Time::kMicrosecondsPerSecond)});
- size_t bufferId = buffer->bufferId();
-
- for (size_t i = 0; i < planes.size(); ++i) {
- // Single-buffer input format may have multiple color planes, so bytesUsed of the single
- // buffer should be sum of each color planes' size.
- size_t bytesUsed = 0;
- if (planes.size() == 1) {
- bytesUsed = allocationSize(format, mInputLayout->mCodedSize);
- } else {
- bytesUsed = ::base::checked_cast<size_t>(
- getArea(planeSize(format, i, mInputLayout->mCodedSize)).value());
- }
-
- // TODO(crbug.com/901264): The way to pass an offset within a DMA-buf is not defined
- // in V4L2 specification, so we abuse data_offset for now. Fix it when we have the
- // right interface, including any necessary validation and potential alignment.
- buffer->setPlaneDataOffset(i, planes[i].mOffset);
- bytesUsed += planes[i].mOffset;
- // Workaround: filling length should not be needed. This is a bug of videobuf2 library.
- buffer->setPlaneSize(i, mInputLayout->mPlanes[i].mSize + planes[i].mOffset);
- buffer->setPlaneBytesUsed(i, bytesUsed);
- }
-
- if (!std::move(*buffer).queueDMABuf(frame->fds())) {
- ALOGE("Failed to queue input buffer using QueueDMABuf");
- onError();
- return false;
- }
-
- ALOGV("Queued buffer in input queue (index: %" PRId64 ", timestamp: %" PRId64
- ", bufferId: %zu)",
- index, timestamp, bufferId);
-
- ALOG_ASSERT(!mInputBuffers[bufferId]);
- mInputBuffers[bufferId] = std::move(frame);
-
- return true;
-}
-
-bool V4L2Encoder::enqueueOutputBuffer() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mOutputQueue->freeBuffersCount() > 0);
-
- auto buffer = mOutputQueue->getFreeBuffer();
- if (!buffer) {
- ALOGE("Failed to get free buffer from device output queue");
- onError();
- return false;
- }
-
- std::unique_ptr<BitstreamBuffer> bitstreamBuffer;
- mFetchOutputBufferCb.Run(mOutputBufferSize, &bitstreamBuffer);
- if (!bitstreamBuffer) {
- ALOGE("Failed to fetch output block");
- onError();
- return false;
- }
-
- size_t bufferId = buffer->bufferId();
-
- std::vector<int> fds;
- fds.push_back(bitstreamBuffer->dmabuf->handle()->data[0]);
- if (!std::move(*buffer).queueDMABuf(fds)) {
- ALOGE("Failed to queue output buffer using QueueDMABuf");
- onError();
- return false;
- }
-
- ALOG_ASSERT(!mOutputBuffers[bufferId]);
- mOutputBuffers[bufferId] = std::move(bitstreamBuffer);
- ALOGV("%s(): Queued buffer in output queue (bufferId: %zu)", __func__, bufferId);
- return true;
-}
-
-bool V4L2Encoder::dequeueInputBuffer() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mState != State::UNINITIALIZED);
- ALOG_ASSERT(mInputQueue->queuedBuffersCount() > 0);
-
- if (mState == State::ERROR) {
- return false;
- }
-
- bool success;
- V4L2ReadableBufferRef buffer;
- std::tie(success, buffer) = mInputQueue->dequeueBuffer();
- if (!success) {
- ALOGE("Failed to dequeue buffer from input queue");
- onError();
- return false;
- }
- if (!buffer) {
- // No more buffers ready to be dequeued in input queue.
- return false;
- }
-
- uint64_t index = mInputBuffers[buffer->bufferId()]->index();
- int64_t timestamp = buffer->getTimeStamp().tv_usec +
- buffer->getTimeStamp().tv_sec * ::base::Time::kMicrosecondsPerSecond;
- ALOGV("Dequeued buffer from input queue (index: %" PRId64 ", timestamp: %" PRId64
- ", bufferId: %zu)",
- index, timestamp, buffer->bufferId());
-
- mInputBuffers[buffer->bufferId()] = nullptr;
-
- mInputBufferDoneCb.Run(index);
-
- // If we previously used up all input queue buffers we can start encoding again now.
- if ((mState == State::WAITING_FOR_V4L2_BUFFER) && !mEncodeRequests.empty()) {
- setState(State::ENCODING);
- mTaskRunner->PostTask(FROM_HERE,
- ::base::BindOnce(&V4L2Encoder::handleEncodeRequest, mWeakThis));
- }
-
- return true;
-}
-
-bool V4L2Encoder::dequeueOutputBuffer() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(mState != State::UNINITIALIZED);
- ALOG_ASSERT(mOutputQueue->queuedBuffersCount() > 0);
-
- if (mState == State::ERROR) {
- return false;
- }
-
- bool success;
- V4L2ReadableBufferRef buffer;
- std::tie(success, buffer) = mOutputQueue->dequeueBuffer();
- if (!success) {
- ALOGE("Failed to dequeue buffer from output queue");
- onError();
- return false;
- }
- if (!buffer) {
- // No more buffers ready to be dequeued in output queue.
- return false;
- }
-
- size_t encodedDataSize = buffer->getPlaneBytesUsed(0) - buffer->getPlaneDataOffset(0);
- ::base::TimeDelta timestamp = ::base::TimeDelta::FromMicroseconds(
- buffer->getTimeStamp().tv_usec +
- buffer->getTimeStamp().tv_sec * ::base::Time::kMicrosecondsPerSecond);
-
- ALOGV("Dequeued buffer from output queue (timestamp: %" PRId64
- ", bufferId: %zu, data size: %zu, EOS: %d)",
- timestamp.InMicroseconds(), buffer->bufferId(), encodedDataSize, buffer->isLast());
-
- if (!mOutputBuffers[buffer->bufferId()]) {
- ALOGE("Failed to find output block associated with output buffer");
- onError();
- return false;
- }
-
- std::unique_ptr<BitstreamBuffer> bitstreamBuffer =
- std::move(mOutputBuffers[buffer->bufferId()]);
- if (encodedDataSize > 0) {
- if (!mInjectParamsBeforeIDR) {
- // No need to inject SPS or PPS before IDR frames, we can just return the buffer as-is.
- mOutputBufferDoneCb.Run(encodedDataSize, timestamp.InMicroseconds(),
- buffer->isKeyframe(), std::move(bitstreamBuffer));
- } else if (!buffer->isKeyframe()) {
- // We need to inject SPS and PPS before IDR frames, but this frame is not a key frame.
- // We can return the buffer as-is, but need to update our SPS and PPS cache if required.
- C2ConstLinearBlock constBlock = bitstreamBuffer->dmabuf->share(
- bitstreamBuffer->dmabuf->offset(), encodedDataSize, C2Fence());
- C2ReadView readView = constBlock.map().get();
- extractSPSPPS(readView.data(), encodedDataSize, &mCachedSPS, &mCachedPPS);
- mOutputBufferDoneCb.Run(encodedDataSize, timestamp.InMicroseconds(),
- buffer->isKeyframe(), std::move(bitstreamBuffer));
- } else {
- // We need to inject our cached SPS and PPS NAL units to the IDR frame. It's possible
- // this frame already has SPS and PPS NAL units attached, in which case we only need to
- // update our cached SPS and PPS.
- C2ConstLinearBlock constBlock = bitstreamBuffer->dmabuf->share(
- bitstreamBuffer->dmabuf->offset(), encodedDataSize, C2Fence());
- C2ReadView readView = constBlock.map().get();
-
- // Allocate a new buffer to copy the data with prepended SPS and PPS into.
- std::unique_ptr<BitstreamBuffer> prependedBitstreamBuffer;
- mFetchOutputBufferCb.Run(mOutputBufferSize, &prependedBitstreamBuffer);
- if (!prependedBitstreamBuffer) {
- ALOGE("Failed to fetch output block");
- onError();
- return false;
- }
- C2WriteView writeView = prependedBitstreamBuffer->dmabuf->map().get();
-
- // If there is not enough space in the output buffer just return the original buffer.
- size_t newSize = prependSPSPPSToIDR(readView.data(), encodedDataSize, writeView.data(),
- writeView.size(), &mCachedSPS, &mCachedPPS);
- if (newSize > 0) {
- mOutputBufferDoneCb.Run(newSize, timestamp.InMicroseconds(), buffer->isKeyframe(),
- std::move(prependedBitstreamBuffer));
- } else {
- mOutputBufferDoneCb.Run(encodedDataSize, timestamp.InMicroseconds(),
- buffer->isKeyframe(), std::move(bitstreamBuffer));
- }
- }
- }
-
- // If the buffer is marked as last and we were flushing the encoder, flushing is now done.
- if ((mState == State::DRAINING) && buffer->isLast()) {
- onDrainDone(true);
- // Start the encoder again.
- struct v4l2_encoder_cmd cmd;
- memset(&cmd, 0, sizeof(v4l2_encoder_cmd));
- cmd.cmd = V4L2_ENC_CMD_START;
- if (mDevice->ioctl(VIDIOC_ENCODER_CMD, &cmd) != 0) {
- ALOGE("Failed to restart encoder after draining (V4L2_ENC_CMD_START)");
- onError();
- return false;
- }
- }
-
- // Queue a new output buffer to replace the one we dequeued.
- buffer = nullptr;
- enqueueOutputBuffer();
-
- return true;
-}
-
-bool V4L2Encoder::createInputBuffers() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(!mInputQueue->isStreaming());
- ALOG_ASSERT(mInputBuffers.empty());
-
- // No memory is allocated here, we just generate a list of buffers on the input queue, which
- // will hold memory handles to the real buffers.
- if (mInputQueue->allocateBuffers(kInputBufferCount, V4L2_MEMORY_DMABUF) < kInputBufferCount) {
- ALOGE("Failed to create V4L2 input buffers.");
- return false;
- }
-
- mInputBuffers.resize(mInputQueue->allocatedBuffersCount());
- return true;
-}
-
-bool V4L2Encoder::createOutputBuffers() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(!mOutputQueue->isStreaming());
- ALOG_ASSERT(mOutputBuffers.empty());
-
- // No memory is allocated here, we just generate a list of buffers on the output queue, which
- // will hold memory handles to the real buffers.
- if (mOutputQueue->allocateBuffers(kOutputBufferCount, V4L2_MEMORY_DMABUF) <
- kOutputBufferCount) {
- ALOGE("Failed to create V4L2 output buffers.");
- return false;
- }
-
- mOutputBuffers.resize(mOutputQueue->allocatedBuffersCount());
- return true;
-}
-
-void V4L2Encoder::destroyInputBuffers() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(!mInputQueue->isStreaming());
-
- if (!mInputQueue || mInputQueue->allocatedBuffersCount() == 0) return;
- mInputQueue->deallocateBuffers();
- mInputBuffers.clear();
-}
-
-void V4L2Encoder::destroyOutputBuffers() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
- ALOG_ASSERT(!mOutputQueue->isStreaming());
-
- if (!mOutputQueue || mOutputQueue->allocatedBuffersCount() == 0) return;
- mOutputQueue->deallocateBuffers();
- mOutputBuffers.clear();
-}
-
-void V4L2Encoder::onError() {
- ALOGV("%s()", __func__);
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- if (mState != State::ERROR) {
- setState(State::ERROR);
- mErrorCb.Run();
- }
-}
-
-void V4L2Encoder::setState(State state) {
- ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
-
- // Check whether the state change is valid.
- switch (state) {
- case State::UNINITIALIZED:
- break;
- case State::WAITING_FOR_INPUT_FRAME:
- ALOG_ASSERT(mState != State::ERROR);
- break;
- case State::WAITING_FOR_V4L2_BUFFER:
- ALOG_ASSERT(mState == State::ENCODING);
- break;
- case State::ENCODING:
- ALOG_ASSERT(mState == State::WAITING_FOR_INPUT_FRAME ||
- mState == State::WAITING_FOR_V4L2_BUFFER || mState == State::DRAINING);
- break;
- case State::DRAINING:
- ALOG_ASSERT(mState == State::ENCODING || mState == State::WAITING_FOR_INPUT_FRAME);
- break;
- case State::ERROR:
- break;
- }
-
- ALOGV("Changed encoder state from %s to %s", stateToString(mState), stateToString(state));
- mState = state;
-}
-
-const char* V4L2Encoder::stateToString(State state) {
- switch (state) {
- case State::UNINITIALIZED:
- return "UNINITIALIZED";
- case State::WAITING_FOR_INPUT_FRAME:
- return "WAITING_FOR_INPUT_FRAME";
- case State::WAITING_FOR_V4L2_BUFFER:
- return "WAITING_FOR_V4L2_BUFFER";
- case State::ENCODING:
- return "ENCODING";
- case State::DRAINING:
- return "DRAINING";
- case State::ERROR:
- return "ERROR";
- }
-}
-
-} // namespace android
diff --git a/components/VideoEncoder.cpp b/components/VideoEncoder.cpp
index e3e19c2..8f1044b 100644
--- a/components/VideoEncoder.cpp
+++ b/components/VideoEncoder.cpp
@@ -6,11 +6,12 @@
namespace android {
-VideoEncoder::InputFrame::InputFrame(std::vector<int>&& fds, std::vector<VideoFramePlane>&& planes,
+VideoEncoder::InputFrame::InputFrame(std::vector<int>&& fds,
+ const std::vector<VideoFramePlane>& planes,
VideoPixelFormat pixelFormat, uint64_t index,
int64_t timestamp)
: mFds(std::move(fds)),
- mPlanes(std::move(planes)),
+ mPlanes(planes),
mPixelFormat(pixelFormat),
mIndex(index),
mTimestamp(timestamp) {}
diff --git a/components/VideoFramePool.cpp b/components/VideoFramePool.cpp
index 4bf45f3..9927acf 100644
--- a/components/VideoFramePool.cpp
+++ b/components/VideoFramePool.cpp
@@ -179,7 +179,7 @@ void VideoFramePool::getVideoFrameTask() {
ALOG_ASSERT(mFetchTaskRunner->RunsTasksInCurrentSequence());
// Variables used to exponential backoff retry when buffer fetching times out.
- constexpr size_t kFetchRetryDelayInit = 64; // Initial delay: 64us
+ constexpr size_t kFetchRetryDelayInit = 256; // Initial delay: 256us
constexpr size_t kFetchRetryDelayMax = 16384; // Max delay: 16ms (1 frame at 60fps)
constexpr size_t kFenceWaitTimeoutNs = 16000000; // 16ms (1 frame at 60fps)
static size_t sNumRetries = 0;
@@ -190,7 +190,18 @@ void VideoFramePool::getVideoFrameTask() {
c2_status_t err = mBlockPool->fetchGraphicBlock(mSize.width, mSize.height,
static_cast<uint32_t>(mPixelFormat),
mMemoryUsage, &block, &fence);
- if (err == C2_BLOCKING) {
+ // C2_BLOCKING can be returned either based on the state of the block pool itself
+ // or the state of the underlying buffer queue. If the cause is the underlying
+ // buffer queue, then the block pool returns a null fence. Since a null fence is
+ // immediately ready, we need to delay instead of trying to wait on the fence, to
+ // avoid spinning.
+ //
+ // Unfortunately, a null fence is considered a valid fence, so the best we can do
+ // to detect a null fence is to assume that any fence that is immediately ready
+ // is the null fence. A false positive by racing with a real fence can result in
+ // an unnecessary delay, but the only alternative is to ignore fences altogether
+ // and always delay.
+ if (err == C2_BLOCKING && !fence.ready()) {
err = fence.wait(kFenceWaitTimeoutNs);
if (err == C2_OK) {
ALOGV("%s(): fence wait succeded, retrying now", __func__);
@@ -231,7 +242,7 @@ void VideoFramePool::getVideoFrameTask() {
FROM_HERE, ::base::BindOnce(&VideoFramePool::getVideoFrameTask, mFetchWeakThis),
::base::TimeDelta::FromMicroseconds(sDelay));
- sDelay = std::min(sDelay * 2, kFetchRetryDelayMax); // Exponential backoff
+ sDelay = std::min(sDelay * 4, kFetchRetryDelayMax); // Exponential backoff
sNumRetries++;
return;
}
diff --git a/components/include/v4l2_codec2/components/V4L2ComponentStore.h b/components/include/v4l2_codec2/components/ComponentStore.h
index bfec407..a759d4d 100644
--- a/components/include/v4l2_codec2/components/V4L2ComponentStore.h
+++ b/components/include/v4l2_codec2/components/ComponentStore.h
@@ -1,9 +1,9 @@
-// Copyright 2020 The Chromium Authors. All rights reserved.
+// Copyright 2023 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_COMPONENT_STORE_H
-#define ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_COMPONENT_STORE_H
+#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_COMPONENT_STORE_MIXIN_H
+#define ANDROID_V4L2_CODEC2_COMPONENTS_COMPONENT_STORE_MIXIN_H
#include <map>
#include <mutex>
@@ -15,10 +15,15 @@
namespace android {
-class V4L2ComponentStore : public C2ComponentStore {
+enum class VideoCodec;
+
+class ComponentStore : public C2ComponentStore {
public:
- static std::shared_ptr<C2ComponentStore> Create();
- ~V4L2ComponentStore();
+ using GetFactory = std::function<std::unique_ptr<C2ComponentFactory>(
+ const std::string& /* name */, std::shared_ptr<C2ReflectorHelper>)>;
+ class Builder;
+
+ virtual ~ComponentStore();
// C2ComponentStore implementation.
C2String getName() const override;
@@ -41,10 +46,21 @@ public:
std::vector<C2FieldSupportedValuesQuery>& fields) const override;
private:
- V4L2ComponentStore();
+ struct Declaration {
+ VideoCodec codec;
+ C2Component::kind_t kind;
+ GetFactory factory;
+ };
+
+ ComponentStore(C2String storeName);
+
+ ::C2ComponentFactory* getFactory(const C2String& name);
- ::C2ComponentFactory* GetFactory(const C2String& name);
- std::shared_ptr<const C2Component::Traits> GetTraits(const C2String& name);
+ std::shared_ptr<const C2Component::Traits> getTraits(const C2String& name);
+
+ C2String mStoreName;
+
+ std::map<std::string, Declaration> mDeclarations;
std::shared_ptr<C2ReflectorHelper> mReflector;
@@ -54,8 +70,25 @@ private:
std::mutex mCachedTraitsLock;
std::map<C2String, std::shared_ptr<const C2Component::Traits>> mCachedTraits
GUARDED_BY(mCachedTraitsLock);
+
+ friend class Builder;
+};
+
+class ComponentStore::Builder final {
+public:
+ Builder(C2String storeName);
+ ~Builder() = default;
+
+ Builder& decoder(std::string name, VideoCodec codec, GetFactory factory);
+
+ Builder& encoder(std::string name, VideoCodec codec, GetFactory factory);
+
+ std::shared_ptr<ComponentStore> build() &&;
+
+private:
+ std::unique_ptr<ComponentStore> mStore;
};
} // namespace android
-#endif // ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_COMPONENT_STORE_H
+#endif // ANDROID_V4L2_CODEC2_COMPONENTS_COMPONENT_STORE_MIXIN_H
diff --git a/components/include/v4l2_codec2/components/V4L2DecodeComponent.h b/components/include/v4l2_codec2/components/DecodeComponent.h
index 962f7d6..27905c7 100644
--- a/components/include/v4l2_codec2/components/V4L2DecodeComponent.h
+++ b/components/include/v4l2_codec2/components/DecodeComponent.h
@@ -1,9 +1,9 @@
-// Copyright 2020 The Chromium Authors. All rights reserved.
+// Copyright 2023 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_DECODE_COMPONENT_H
-#define ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_DECODE_COMPONENT_H
+#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_DECODE_COMPONENT_H
+#define ANDROID_V4L2_CODEC2_COMPONENTS_DECODE_COMPONENT_H
#include <atomic>
#include <memory>
@@ -17,22 +17,17 @@
#include <base/synchronization/waitable_event.h>
#include <base/threading/thread.h>
-#include <v4l2_codec2/components/V4L2DecodeInterface.h>
+#include <v4l2_codec2/components/DecodeInterface.h>
#include <v4l2_codec2/components/VideoDecoder.h>
#include <v4l2_codec2/components/VideoFramePool.h>
namespace android {
-class V4L2DecodeComponent : public C2Component,
- public std::enable_shared_from_this<V4L2DecodeComponent> {
+class DecodeComponent : public C2Component, public std::enable_shared_from_this<DecodeComponent> {
public:
- static std::shared_ptr<C2Component> create(const std::string& name, c2_node_id_t id,
- const std::shared_ptr<C2ReflectorHelper>& helper,
- C2ComponentFactory::ComponentDeleter deleter);
- V4L2DecodeComponent(const std::string& name, c2_node_id_t id,
- const std::shared_ptr<C2ReflectorHelper>& helper,
- const std::shared_ptr<V4L2DecodeInterface>& intfImpl);
- ~V4L2DecodeComponent() override;
+ DecodeComponent(uint32_t debugStreamId, const std::string& name, c2_node_id_t id,
+ const std::shared_ptr<DecodeInterface>& intfImpl);
+ virtual ~DecodeComponent() override;
// Implementation of C2Component.
c2_status_t start() override;
@@ -48,7 +43,7 @@ public:
c2_status_t drain_nb(drain_mode_t mode) override;
std::shared_ptr<C2ComponentInterface> intf() override;
-private:
+protected:
// The C2Component state machine.
enum class ComponentState {
STOPPED,
@@ -59,7 +54,7 @@ private:
static const char* ComponentStateToString(ComponentState state);
// Handle C2Component's public methods on |mDecoderTaskRunner|.
- void startTask(c2_status_t* status, ::base::WaitableEvent* done);
+ virtual void startTask(c2_status_t* status, ::base::WaitableEvent* done) = 0;
void stopTask();
void releaseTask();
void queueTask(std::unique_ptr<C2Work> work);
@@ -69,6 +64,11 @@ private:
// Try to process pending works at |mPendingWorks|. Paused when |mIsDraining| is set.
void pumpPendingWorks();
+
+ void processCSDWork(const int32_t bitstreamId, const C2Work* work);
+ void processWork(const int32_t bitstreamId, const C2Work* work);
+ void processWorkBuffer(const int32_t bitstreamId, const C2ConstLinearBlock& linearBlock);
+
// Get the buffer pool.
std::unique_ptr<VideoFramePool> getVideoFramePool(const ui::Size& size,
HalPixelFormat pixelFormat,
@@ -92,10 +92,11 @@ private:
// Report error when any error occurs.
void reportError(c2_status_t error);
- static std::atomic<int32_t> sConcurrentInstances;
+ // Identifier used for debugging purposes.
+ uint32_t mDebugStreamId;
// The pointer of component interface implementation.
- std::shared_ptr<V4L2DecodeInterface> mIntfImpl;
+ std::shared_ptr<DecodeInterface> mIntfImpl;
// The pointer of component interface.
const std::shared_ptr<C2ComponentInterface> mIntf;
// The pointer of component listener.
@@ -137,13 +138,13 @@ private:
// The device task runner and its sequence checker. We should interact with
// |mDevice| on this.
- ::base::Thread mDecoderThread{"V4L2DecodeComponentDecoderThread"};
+ ::base::Thread mDecoderThread{"DecodeComponentDecoderThread"};
scoped_refptr<::base::SequencedTaskRunner> mDecoderTaskRunner;
- ::base::WeakPtrFactory<V4L2DecodeComponent> mWeakThisFactory{this};
- ::base::WeakPtr<V4L2DecodeComponent> mWeakThis;
+ ::base::WeakPtrFactory<DecodeComponent> mWeakThisFactory{this};
+ ::base::WeakPtr<DecodeComponent> mWeakThis;
};
} // namespace android
-#endif // ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_DECODE_COMPONENT_H
+#endif // ANDROID_V4L2_CODEC2_COMPONENTS_DECODE_COMPONENT_H
diff --git a/components/include/v4l2_codec2/components/V4L2DecodeInterface.h b/components/include/v4l2_codec2/components/DecodeInterface.h
index f2ab898..7e513c5 100644
--- a/components/include/v4l2_codec2/components/V4L2DecodeInterface.h
+++ b/components/include/v4l2_codec2/components/DecodeInterface.h
@@ -1,9 +1,9 @@
-// Copyright 2020 The Chromium Authors. All rights reserved.
+// Copyright 2023 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_DECODE_INTERFACE_H
-#define ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_DECODE_INTERFACE_H
+#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_DECODE_INTERFACE_H
+#define ANDROID_V4L2_CODEC2_COMPONENTS_DECODE_INTERFACE_H
#include <memory>
#include <string>
@@ -12,16 +12,18 @@
#include <ui/Size.h>
#include <util/C2InterfaceHelper.h>
+#include <v4l2_codec2/common/Common.h>
#include <v4l2_codec2/common/VideoTypes.h>
namespace android {
-class V4L2DecodeInterface : public C2InterfaceHelper {
+class DecodeInterface : public C2InterfaceHelper {
public:
- V4L2DecodeInterface(const std::string& name, const std::shared_ptr<C2ReflectorHelper>& helper);
- V4L2DecodeInterface(const V4L2DecodeInterface&) = delete;
- V4L2DecodeInterface& operator=(const V4L2DecodeInterface&) = delete;
- ~V4L2DecodeInterface() = default;
+ DecodeInterface(const std::string& name, const std::shared_ptr<C2ReflectorHelper>& helper,
+ const SupportedCapabilities& caps);
+ DecodeInterface(const DecodeInterface&) = delete;
+ DecodeInterface& operator=(const DecodeInterface&) = delete;
+ ~DecodeInterface() = default;
// interfaces for the client component.
c2_status_t status() const { return mInitStatus; }
@@ -38,6 +40,7 @@ private:
// Configurable parameter setters.
static C2R ProfileLevelSetter(bool mayBlock, C2P<C2StreamProfileLevelInfo::input>& info);
static C2R SizeSetter(bool mayBlock, C2P<C2StreamPictureSizeInfo::output>& videoSize);
+ static C2R InputSizeSetter(bool mayBlock, C2P<C2StreamMaxBufferSizeInfo::input>& inputSize);
static C2R MaxInputBufferSizeCalculator(bool mayBlock,
C2P<C2StreamMaxBufferSizeInfo::input>& me,
const C2P<C2StreamPictureSizeInfo::output>& size);
@@ -66,6 +69,9 @@ private:
// buffer can be released by the component; only used for H264 because H264 may reorder the
// output frames.
std::shared_ptr<C2PortDelayTuning::output> mOutputDelay;
+ // The number of extra frames processed at one time by the component. Allows more input
+ // buffers to be simultaneously enqueued.
+ std::shared_ptr<C2PipelineDelayTuning> mPipelineDelay;
// The input codec profile and level. For now configuring this parameter is useless since
// the component always uses fixed codec profile to initialize accelerator. It is only used
// for the client to query supported profile and level values.
@@ -100,4 +106,4 @@ private:
} // namespace android
-#endif // ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_DECODE_INTERFACE_H
+#endif // ANDROID_V4L2_CODEC2_COMPONENTS_DECODE_INTERFACE_H
diff --git a/components/include/v4l2_codec2/components/V4L2EncodeComponent.h b/components/include/v4l2_codec2/components/EncodeComponent.h
index 0b150e4..81c8c6d 100644
--- a/components/include/v4l2_codec2/components/V4L2EncodeComponent.h
+++ b/components/include/v4l2_codec2/components/EncodeComponent.h
@@ -1,14 +1,15 @@
-// Copyright 2020 The Chromium Authors. All rights reserved.
+// Copyright 2023 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_ENCODE_COMPONENT_H
-#define ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_ENCODE_COMPONENT_H
+#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_ENCODE_COMPONENT_H
+#define ANDROID_V4L2_CODEC2_COMPONENTS_ENCODE_COMPONENT_H
#include <atomic>
#include <memory>
#include <optional>
#include <unordered_map>
+#include <vector>
#include <C2Component.h>
#include <C2ComponentFactory.h>
@@ -22,23 +23,24 @@
#include <base/synchronization/waitable_event.h>
#include <base/threading/thread.h>
#include <util/C2InterfaceHelper.h>
+#include <v4l2_codec2/common/Common.h>
+#include <v4l2_codec2/common/VideoPixelFormat.h>
namespace android {
struct BitstreamBuffer;
class FormatConverter;
class VideoEncoder;
-class V4L2EncodeInterface;
+class EncodeInterface;
-class V4L2EncodeComponent : public C2Component,
- public std::enable_shared_from_this<V4L2EncodeComponent> {
-public:
- // Create a new instance of the V4L2EncodeComponent.
- static std::shared_ptr<C2Component> create(C2String name, c2_node_id_t id,
- std::shared_ptr<C2ReflectorHelper> helper,
- C2ComponentFactory::ComponentDeleter deleter);
+std::optional<std::vector<VideoFramePlane>> getVideoFrameLayout(const C2ConstGraphicBlock& block,
+ VideoPixelFormat* format);
+
+std::optional<uint32_t> getVideoFrameStride(VideoPixelFormat format, ui::Size size);
- virtual ~V4L2EncodeComponent() override;
+class EncodeComponent : public C2Component, public std::enable_shared_from_this<EncodeComponent> {
+public:
+ virtual ~EncodeComponent() override;
// Implementation of the C2Component interface.
c2_status_t start() override;
@@ -54,7 +56,7 @@ public:
c2_blocking_t mayBlock) override;
std::shared_ptr<C2ComponentInterface> intf() override;
-private:
+protected:
// Possible component states.
enum class ComponentState {
UNLOADED, // Initial state of component.
@@ -63,11 +65,13 @@ private:
ERROR, // An error occurred.
};
- V4L2EncodeComponent(C2String name, c2_node_id_t id,
- std::shared_ptr<V4L2EncodeInterface> interface);
+ EncodeComponent(C2String name, c2_node_id_t id, std::shared_ptr<EncodeInterface> interface);
- V4L2EncodeComponent(const V4L2EncodeComponent&) = delete;
- V4L2EncodeComponent& operator=(const V4L2EncodeComponent&) = delete;
+ EncodeComponent(const EncodeComponent&) = delete;
+ EncodeComponent& operator=(const EncodeComponent&) = delete;
+
+ // Initialize the V4L2 device for encoding with the requested configuration.
+ virtual bool initializeEncoder() = 0;
// Initialize the encoder on the encoder thread.
void startTask(bool* success, ::base::WaitableEvent* done);
@@ -87,8 +91,6 @@ private:
// Set the component listener on the encoder thread.
void setListenerTask(const std::shared_ptr<Listener>& listener, ::base::WaitableEvent* done);
- // Initialize the V4L2 device for encoding with the requested configuration.
- bool initializeEncoder();
// Update the |mBitrate| and |mFramerate| currently configured on the V4L2 device, to match the
// values requested by the codec 2.0 framework.
bool updateEncodingParameters();
@@ -132,14 +134,12 @@ private:
// The underlying V4L2 encoder.
std::unique_ptr<VideoEncoder> mEncoder;
- // The number of concurrent encoder instances currently created.
- static std::atomic<int32_t> sConcurrentInstances;
// The component's registered name.
const C2String mName;
// The component's id, provided by the C2 framework upon initialization.
const c2_node_id_t mId = 0;
// The component's interface implementation.
- const std::shared_ptr<V4L2EncodeInterface> mInterface;
+ const std::shared_ptr<EncodeInterface> mInterface;
// Mutex used by the component to synchronize start/stop/reset/release calls, as the codec 2.0
// API can be accessed from any thread.
@@ -153,6 +153,11 @@ private:
// An input format convertor will be used if the device doesn't support the video's format.
std::unique_ptr<FormatConverter> mInputFormatConverter;
+ // Pixel format of frames sent to V4L2 encoder, determined when the first input frame is queued.
+ VideoPixelFormat mInputPixelFormat = VideoPixelFormat::UNKNOWN;
+ // Layout of frames sent to V4L2 encoder, determined when the first input frame is queued.
+ std::vector<VideoFramePlane> mInputLayout;
+
// The bitrate currently configured on the v4l2 device.
uint32_t mBitrate = 0;
// The bitrate mode currently configured on the v4l2 device.
@@ -175,15 +180,15 @@ private:
std::atomic<ComponentState> mComponentState;
// The encoder thread on which all interaction with the V4L2 device is performed.
- ::base::Thread mEncoderThread{"V4L2EncodeComponentThread"};
+ ::base::Thread mEncoderThread{"EncodeComponentThread"};
// The task runner on the encoder thread.
scoped_refptr<::base::SequencedTaskRunner> mEncoderTaskRunner;
// The WeakPtrFactory used to get weak pointers of this.
- ::base::WeakPtr<V4L2EncodeComponent> mWeakThis;
- ::base::WeakPtrFactory<V4L2EncodeComponent> mWeakThisFactory{this};
+ ::base::WeakPtr<EncodeComponent> mWeakThis;
+ ::base::WeakPtrFactory<EncodeComponent> mWeakThisFactory{this};
};
} // namespace android
-#endif // ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_ENCODE_COMPONENT_H
+#endif // ANDROID_V4L2_CODEC2_COMPONENTS_ENCODE_COMPONENT_H
diff --git a/components/include/v4l2_codec2/components/V4L2EncodeInterface.h b/components/include/v4l2_codec2/components/EncodeInterface.h
index fefebf0..e59f58c 100644
--- a/components/include/v4l2_codec2/components/V4L2EncodeInterface.h
+++ b/components/include/v4l2_codec2/components/EncodeInterface.h
@@ -1,9 +1,9 @@
-// Copyright 2020 The Chromium Authors. All rights reserved.
+// Copyright 2023 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_ENCODE_INTERFACE_H
-#define ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_ENCODE_INTERFACE_H
+#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_ENCODE_INTERFACE_H
+#define ANDROID_V4L2_CODEC2_COMPONENTS_ENCODE_INTERFACE_H
#include <optional>
#include <vector>
@@ -14,6 +14,7 @@
#include <ui/Size.h>
#include <util/C2InterfaceHelper.h>
+#include <v4l2_codec2/common/Common.h>
#include <v4l2_codec2/common/EncodeHelpers.h>
namespace media {
@@ -24,9 +25,10 @@ namespace android {
// Codec 2.0 interface describing the V4L2EncodeComponent. This interface is used by the codec 2.0
// framework to query the component's capabilities and request configuration changes.
-class V4L2EncodeInterface : public C2InterfaceHelper {
+class EncodeInterface : public C2InterfaceHelper {
public:
- V4L2EncodeInterface(const C2String& name, std::shared_ptr<C2ReflectorHelper> helper);
+ EncodeInterface(const C2String& name, std::shared_ptr<C2ReflectorHelper> helper,
+ const SupportedCapabilities& caps);
// Interfaces for the V4L2EncodeInterface
// Note: these getters are not thread-safe. For dynamic parameters, component should use
@@ -53,7 +55,7 @@ public:
void setFramerate(uint32_t framerate) { mFrameRate->value = framerate; }
protected:
- void Initialize(const C2String& name);
+ void Initialize(const C2String& name, const SupportedCapabilities& caps);
// Configurable parameter setters.
static C2R H264ProfileLevelSetter(bool mayBlock, C2P<C2StreamProfileLevelInfo::output>& info,
@@ -70,6 +72,10 @@ protected:
static C2R IntraRefreshPeriodSetter(bool mayBlock,
C2P<C2StreamIntraRefreshTuning::output>& period);
+ // Recorded lowest configured level
+ // Is static for the need to use H264ProfileLevelSetter as a setter
+ static C2Config::level_t lowestConfigLevel;
+
// Constant parameters
// The kind of the component; should be C2Component::KIND_ENCODER.
@@ -121,4 +127,4 @@ protected:
} // namespace android
-#endif // ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_ENCODE_INTERFACE_H
+#endif // ANDROID_V4L2_CODEC2_COMPONENTS_ENCODE_INTERFACE_H
diff --git a/components/include/v4l2_codec2/components/V4L2ComponentFactory.h b/components/include/v4l2_codec2/components/V4L2ComponentFactory.h
deleted file mode 100644
index fc6abea..0000000
--- a/components/include/v4l2_codec2/components/V4L2ComponentFactory.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2021 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_COMPONENT_FACTORY_H
-#define ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_COMPONENT_FACTORY_H
-
-#include <memory>
-#include <string>
-
-#include <C2ComponentFactory.h>
-#include <util/C2InterfaceHelper.h>
-
-namespace android {
-
-class V4L2ComponentFactory : public C2ComponentFactory {
-public:
- static std::unique_ptr<V4L2ComponentFactory> create(
- const std::string& componentName, std::shared_ptr<C2ReflectorHelper> reflector);
- V4L2ComponentFactory(const std::string& componentName, bool isEncoder,
- std::shared_ptr<C2ReflectorHelper> reflector);
- ~V4L2ComponentFactory() override = default;
-
- // Implementation of C2ComponentFactory.
- c2_status_t createComponent(c2_node_id_t id, std::shared_ptr<C2Component>* const component,
- ComponentDeleter deleter) override;
- c2_status_t createInterface(c2_node_id_t id,
- std::shared_ptr<C2ComponentInterface>* const interface,
- InterfaceDeleter deleter) override;
-
-private:
- const std::string mComponentName;
- const bool mIsEncoder;
- std::shared_ptr<C2ReflectorHelper> mReflector;
-};
-
-} // namespace android
-
-#endif // ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_COMPONENT_FACTORY_H
diff --git a/components/include/v4l2_codec2/components/V4L2Decoder.h b/components/include/v4l2_codec2/components/V4L2Decoder.h
deleted file mode 100644
index 2ecb3bd..0000000
--- a/components/include/v4l2_codec2/components/V4L2Decoder.h
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2020 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_DECODER_H
-#define ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_DECODER_H
-
-#include <stdint.h>
-
-#include <memory>
-#include <optional>
-
-#include <base/callback.h>
-#include <base/memory/weak_ptr.h>
-
-#include <ui/Rect.h>
-#include <ui/Size.h>
-#include <v4l2_codec2/common/V4L2Device.h>
-#include <v4l2_codec2/common/VideoTypes.h>
-#include <v4l2_codec2/components/VideoDecoder.h>
-#include <v4l2_codec2/components/VideoFrame.h>
-#include <v4l2_codec2/components/VideoFramePool.h>
-
-namespace android {
-
-class V4L2Decoder : public VideoDecoder {
-public:
- static std::unique_ptr<VideoDecoder> Create(
- const VideoCodec& codec, const size_t inputBufferSize, const size_t minNumOutputBuffers,
- GetPoolCB getPoolCB, OutputCB outputCb, ErrorCB errorCb,
- scoped_refptr<::base::SequencedTaskRunner> taskRunner);
- ~V4L2Decoder() override;
-
- void decode(std::unique_ptr<ConstBitstreamBuffer> buffer, DecodeCB decodeCb) override;
- void drain(DecodeCB drainCb) override;
- void flush() override;
-
-private:
- enum class State {
- Idle, // Not received any decode buffer after initialized, flushed, or drained.
- Decoding,
- Draining,
- Error,
- };
- static const char* StateToString(State state);
-
- struct DecodeRequest {
- DecodeRequest(std::unique_ptr<ConstBitstreamBuffer> buffer, DecodeCB decodeCb)
- : buffer(std::move(buffer)), decodeCb(std::move(decodeCb)) {}
- DecodeRequest(DecodeRequest&&) = default;
- ~DecodeRequest() = default;
-
- std::unique_ptr<ConstBitstreamBuffer> buffer; // nullptr means Drain
- DecodeCB decodeCb;
- };
-
- V4L2Decoder(scoped_refptr<::base::SequencedTaskRunner> taskRunner);
- bool start(const VideoCodec& codec, const size_t inputBufferSize,
- const size_t minNumOutputBuffers, GetPoolCB getPoolCb, OutputCB outputCb,
- ErrorCB errorCb);
- bool setupInputFormat(const uint32_t inputPixelFormat, const size_t inputBufferSize);
- void pumpDecodeRequest();
-
- void serviceDeviceTask(bool event);
- bool dequeueResolutionChangeEvent();
- bool changeResolution();
- bool setupOutputFormat(const ui::Size& size);
-
- void tryFetchVideoFrame();
- void onVideoFrameReady(std::optional<VideoFramePool::FrameWithBlockId> frameWithBlockId);
-
- std::optional<size_t> getNumOutputBuffers();
- std::optional<struct v4l2_format> getFormatInfo();
- Rect getVisibleRect(const ui::Size& codedSize);
- bool sendV4L2DecoderCmd(bool start);
-
- void setState(State newState);
- void onError();
-
- std::unique_ptr<VideoFramePool> mVideoFramePool;
-
- scoped_refptr<V4L2Device> mDevice;
- scoped_refptr<V4L2Queue> mInputQueue;
- scoped_refptr<V4L2Queue> mOutputQueue;
-
- std::queue<DecodeRequest> mDecodeRequests;
- std::map<int32_t, DecodeCB> mPendingDecodeCbs;
-
- size_t mMinNumOutputBuffers = 0;
- GetPoolCB mGetPoolCb;
- OutputCB mOutputCb;
- DecodeCB mDrainCb;
- ErrorCB mErrorCb;
-
- ui::Size mCodedSize;
- Rect mVisibleRect;
-
- std::map<size_t, std::unique_ptr<VideoFrame>> mFrameAtDevice;
-
- // Block IDs can be arbitrarily large, but we only have a limited number of
- // buffers. This maintains an association between a block ID and a specific
- // V4L2 buffer index.
- std::map<size_t, size_t> mBlockIdToV4L2Id;
-
- State mState = State::Idle;
-
- scoped_refptr<::base::SequencedTaskRunner> mTaskRunner;
-
- ::base::WeakPtr<V4L2Decoder> mWeakThis;
- ::base::WeakPtrFactory<V4L2Decoder> mWeakThisFactory{this};
-};
-
-} // namespace android
-
-#endif // ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_DECODER_H
diff --git a/components/include/v4l2_codec2/components/V4L2Encoder.h b/components/include/v4l2_codec2/components/V4L2Encoder.h
deleted file mode 100644
index d7b55c0..0000000
--- a/components/include/v4l2_codec2/components/V4L2Encoder.h
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright 2021 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_ENCODER_H
-#define ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_ENCODER_H
-
-#include <stdint.h>
-#include <memory>
-#include <optional>
-#include <queue>
-#include <vector>
-
-#include <base/memory/weak_ptr.h>
-#include <base/sequenced_task_runner.h>
-#include <ui/Size.h>
-
-#include <v4l2_codec2/common/Common.h>
-#include <v4l2_codec2/components/VideoEncoder.h>
-
-namespace android {
-
-struct BitstreamBuffer;
-struct VideoFramePlane;
-class V4L2Device;
-class V4L2Queue;
-
-class V4L2Encoder : public VideoEncoder {
-public:
- // Number of buffers on V4L2 device queues.
- static constexpr size_t kInputBufferCount = 2;
- static constexpr size_t kOutputBufferCount = 2;
-
- static std::unique_ptr<VideoEncoder> create(
- C2Config::profile_t profile, std::optional<uint8_t> level, const ui::Size& visibleSize,
- uint32_t stride, uint32_t keyFramePeriod, C2Config::bitrate_mode_t bitrateMode,
- uint32_t bitrate, std::optional<uint32_t> peakBitrate,
- FetchOutputBufferCB fetchOutputBufferCb, InputBufferDoneCB inputBufferDoneCb,
- OutputBufferDoneCB outputBufferDoneCb, DrainDoneCB drainDoneCb, ErrorCB errorCb,
- scoped_refptr<::base::SequencedTaskRunner> taskRunner);
- ~V4L2Encoder() override;
-
- bool encode(std::unique_ptr<InputFrame> frame) override;
- void drain() override;
- void flush() override;
-
- bool setBitrate(uint32_t bitrate) override;
- bool setPeakBitrate(uint32_t peakBitrate) override;
- bool setFramerate(uint32_t framerate) override;
- void requestKeyframe() override;
-
- VideoPixelFormat inputFormat() const override;
- const ui::Size& visibleSize() const override { return mVisibleSize; }
- const ui::Size& codedSize() const override { return mInputCodedSize; }
-
-private:
- // Possible encoder states.
- enum class State {
- UNINITIALIZED, // Not initialized yet or initialization failed.
- WAITING_FOR_INPUT_FRAME, // Waiting for frames to be queued.
- WAITING_FOR_V4L2_BUFFER, // Waiting for V4L2 input queue buffers.
- ENCODING, // Queuing input buffers.
- DRAINING, // Draining encoder.
- ERROR, // Encoder encountered an error.
- };
-
- // Contains a single encode request.
- struct EncodeRequest {
- EncodeRequest(std::unique_ptr<InputFrame> video_frame)
- : video_frame(std::move(video_frame)) {}
- ~EncodeRequest() = default;
- EncodeRequest(EncodeRequest&&) = default;
- EncodeRequest& operator=(EncodeRequest&&) = default;
-
- std::unique_ptr<InputFrame> video_frame;
- bool end_of_stream = false;
- };
-
- V4L2Encoder(scoped_refptr<::base::SequencedTaskRunner> taskRunner,
- FetchOutputBufferCB fetchOutputBufferCb, InputBufferDoneCB mInputBufferDoneCb,
- OutputBufferDoneCB mOutputBufferDoneCb, DrainDoneCB drainDoneCb, ErrorCB errorCb);
-
- // Initialize the V4L2 encoder for specified parameters.
- bool initialize(C2Config::profile_t outputProfile, std::optional<uint8_t> level,
- const ui::Size& visibleSize, uint32_t stride, uint32_t keyFramePeriod,
- C2Config::bitrate_mode_t bitrateMode, uint32_t bitrate,
- std::optional<uint32_t> peakBitrate);
-
- // Handle the next encode request on the queue.
- void handleEncodeRequest();
- // Handle a request to flush the encoder.
- void handleFlushRequest();
- // Handle a request to drain the encoder.
- void handleDrainRequest();
- // Called when draining the encoder has completed.
- void onDrainDone(bool done);
-
- // Configure input format on the V4L2 device.
- bool configureInputFormat(VideoPixelFormat inputFormat, uint32_t stride);
- // Configure output format on the V4L2 device.
- bool configureOutputFormat(C2Config::profile_t outputProfile);
- // Configure required and optional controls on the V4L2 device.
- bool configureDevice(C2Config::profile_t outputProfile,
- std::optional<const uint8_t> outputH264Level);
- // Configure required and optional H.264 controls on the V4L2 device.
- bool configureH264(C2Config::profile_t outputProfile,
- std::optional<const uint8_t> outputH264Level);
- // Configure the specified bitrate mode on the V4L2 device.
- bool configureBitrateMode(C2Config::bitrate_mode_t bitrateMode);
-
- // Attempt to start the V4L2 device poller.
- bool startDevicePoll();
- // Attempt to stop the V4L2 device poller.
- bool stopDevicePoll();
- // Called by the V4L2 device poller whenever an error occurred.
- void onPollError();
- // Service I/O on the V4L2 device, called by the V4L2 device poller.
- void serviceDeviceTask(bool event);
-
- // Enqueue an input buffer to be encoded on the device input queue. Returns whether the
- // operation was successful.
- bool enqueueInputBuffer(std::unique_ptr<InputFrame> frame);
- // Enqueue an output buffer to store the encoded bitstream on the device output queue. Returns
- // whether the operation was successful.
- bool enqueueOutputBuffer();
- // Dequeue an input buffer the V4L2 device has finished encoding on the device input queue.
- // Returns whether a buffer could be dequeued.
- bool dequeueInputBuffer();
- // Dequeue an output buffer containing the encoded bitstream from the device output queue.
- // Returns whether the operation was successful.
- bool dequeueOutputBuffer();
-
- // Create input buffers on the V4L2 device input queue.
- bool createInputBuffers();
- // Create output buffers on the V4L2 device output queue.
- bool createOutputBuffers();
- // Destroy the input buffers on the V4L2 device input queue.
- void destroyInputBuffers();
- // Destroy the output buffers on the V4L2 device output queue.
- void destroyOutputBuffers();
-
- // Notify the client an error occurred and switch to the error state.
- void onError();
-
- // Change the state of the encoder.
- void setState(State state);
- // Get the specified encoder |state| as string.
- static const char* stateToString(State state);
-
- // The list of currently queued encode requests.
- std::queue<EncodeRequest> mEncodeRequests;
-
- // The video stream's visible size.
- ui::Size mVisibleSize;
- // The video stream's coded size.
- ui::Size mInputCodedSize;
- // The input layout configured on the V4L2 device.
- std::optional<VideoFrameLayout> mInputLayout;
- // Required output buffer byte size.
- uint32_t mOutputBufferSize = 0;
-
- // How often we want to request the V4L2 device to create a key frame.
- uint32_t mKeyFramePeriod = 0;
- // Key frame counter, a key frame will be requested each time it reaches zero.
- uint32_t mKeyFrameCounter = 0;
-
- // Whether we need to manually cache and prepend SPS and PPS to IDR frames.
- bool mInjectParamsBeforeIDR = false;
- // The latest cached SPS and PPS (without H.264 start code).
- std::vector<uint8_t> mCachedSPS;
- std::vector<uint8_t> mCachedPPS;
-
- // The V4L2 device and associated queues used to interact with the device.
- scoped_refptr<V4L2Device> mDevice;
- scoped_refptr<V4L2Queue> mInputQueue;
- scoped_refptr<V4L2Queue> mOutputQueue;
-
- // List of frames associated with each buffer in the V4L2 device input queue.
- std::vector<std::unique_ptr<InputFrame>> mInputBuffers;
- // List of bitstream buffers associated with each buffer in the V4L2 device output queue.
- std::vector<std::unique_ptr<BitstreamBuffer>> mOutputBuffers;
-
- // Callbacks to be triggered on various events.
- FetchOutputBufferCB mFetchOutputBufferCb;
- InputBufferDoneCB mInputBufferDoneCb;
- OutputBufferDoneCB mOutputBufferDoneCb;
- DrainDoneCB mDrainDoneCb;
- ErrorCB mErrorCb;
-
- // The current state of the encoder.
- State mState = State::UNINITIALIZED;
-
- scoped_refptr<::base::SequencedTaskRunner> mTaskRunner;
-
- ::base::WeakPtr<V4L2Encoder> mWeakThis;
- ::base::WeakPtrFactory<V4L2Encoder> mWeakThisFactory{this};
-};
-
-} // namespace android
-
-#endif // ANDROID_V4L2_CODEC2_COMPONENTS_V4L2_ENCODER_H
diff --git a/components/include/v4l2_codec2/components/VideoEncoder.h b/components/include/v4l2_codec2/components/VideoEncoder.h
index 7e5a3c2..eeb63c2 100644
--- a/components/include/v4l2_codec2/components/VideoEncoder.h
+++ b/components/include/v4l2_codec2/components/VideoEncoder.h
@@ -22,13 +22,21 @@ struct BitstreamBuffer;
class VideoEncoder {
public:
+ // Number of buffers used to account for component delays.
+ static constexpr size_t kInputBufferCount = 2;
+ static constexpr size_t kOutputBufferCount = 2;
+ static constexpr VideoPixelFormat kInputPixelFormat = VideoPixelFormat::NV12;
+
+ // The peak bitrate as a multiple of the target bitrate, used when the bitrate mode is VBR.
+ static constexpr uint32_t kPeakBitrateMultiplier = 2u;
+
// The InputFrame class can be used to store raw video frames.
// Note: The InputFrame does not take ownership of the data. The file descriptor is not
// duplicated and the caller is responsible for keeping the data alive until the buffer
// is returned by an InputBufferDoneCB() call.
class InputFrame {
public:
- InputFrame(std::vector<int>&& fds, std::vector<VideoFramePlane>&& planes,
+ InputFrame(std::vector<int>&& fds, const std::vector<VideoFramePlane>& planes,
VideoPixelFormat pixelFormat, uint64_t index, int64_t timestamp);
~InputFrame() = default;