30 files changed, 1632 insertions(+), 452 deletions(-)
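The common thread in this change is the removal of the IGraphicBufferAlloc binder interface: rather than asking SurfaceFlinger to allocate gralloc buffers on its behalf, BufferQueue now constructs GraphicBuffer objects in-process and checks the result itself. A minimal sketch of the new allocation pattern, modeled on the BufferQueueProducer hunks below (the helper name and parameters are illustrative, not part of the patch):

    #include <stdint.h>
    #include <ui/GraphicBuffer.h>
    #include <ui/PixelFormat.h>
    #include <utils/Errors.h>

    using namespace android;

    // The GraphicBuffer constructor performs the gralloc allocation directly;
    // initCheck() reports whether it succeeded.
    sp<GraphicBuffer> allocateBuffer(uint32_t w, uint32_t h, PixelFormat format,
                                     uint32_t layerCount, uint64_t usage) {
        sp<GraphicBuffer> buffer = new GraphicBuffer(
                w, h, format, layerCount, usage /* producer usage */,
                usage /* consumer usage */, "example-requestor");
        status_t err = buffer->initCheck();
        if (err != NO_ERROR) {
            if (err == NO_MEMORY) {
                // Same diagnostic the deleted GraphicBufferAlloc used to emit.
                GraphicBuffer::dumpAllocationsToSystemLog();
            }
            buffer.clear();  // return null on failure
        }
        return buffer;
    }

This is essentially what BufferQueueProducer::dequeueBuffer() and allocateBuffers() now do inline, which is why the CREATE_GRAPHIC_BUFFER_ALLOC transaction code in ISurfaceComposer is retired to an UNUSED placeholder rather than renumbered.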
diff --git a/cmds/flatland/GLHelper.cpp b/cmds/flatland/GLHelper.cpp index 5c04f6cf19..dfc3e581fd 100644 --- a/cmds/flatland/GLHelper.cpp +++ b/cmds/flatland/GLHelper.cpp @@ -25,7 +25,6 @@ namespace android { GLHelper::GLHelper() : - mGraphicBufferAlloc(new GraphicBufferAlloc()), mDisplay(EGL_NO_DISPLAY), mContext(EGL_NO_CONTEXT), mDummySurface(EGL_NO_SURFACE), @@ -203,7 +202,7 @@ bool GLHelper::createNamedSurfaceTexture(GLuint name, uint32_t w, uint32_t h, sp<GLConsumer>* glConsumer, EGLSurface* surface) { sp<IGraphicBufferProducer> producer; sp<IGraphicBufferConsumer> consumer; - BufferQueue::createBufferQueue(&producer, &consumer, mGraphicBufferAlloc); + BufferQueue::createBufferQueue(&producer, &consumer); sp<GLConsumer> glc = new GLConsumer(consumer, name, GL_TEXTURE_EXTERNAL_OES, false, true); glc->setDefaultBufferSize(w, h); diff --git a/cmds/flatland/GLHelper.h b/cmds/flatland/GLHelper.h index 7a9e9e3ea2..d09463a9b8 100644 --- a/cmds/flatland/GLHelper.h +++ b/cmds/flatland/GLHelper.h @@ -14,7 +14,6 @@ * limitations under the License. */ -#include <gui/GraphicBufferAlloc.h> #include <gui/GLConsumer.h> #include <gui/Surface.h> #include <gui/SurfaceControl.h> @@ -75,8 +74,6 @@ private: bool setUpShaders(const ShaderDesc* shaderDescs, size_t numShaders); - sp<GraphicBufferAlloc> mGraphicBufferAlloc; - EGLDisplay mDisplay; EGLContext mContext; EGLSurface mDummySurface; diff --git a/cmds/flatland/Main.cpp b/cmds/flatland/Main.cpp index c47b0c8f4b..ec1e5432f1 100644 --- a/cmds/flatland/Main.cpp +++ b/cmds/flatland/Main.cpp @@ -16,7 +16,6 @@ #define ATRACE_TAG ATRACE_TAG_ALWAYS -#include <gui/GraphicBufferAlloc.h> #include <gui/Surface.h> #include <gui/SurfaceControl.h> #include <gui/GLConsumer.h> diff --git a/cmds/lshal/Lshal.cpp b/cmds/lshal/Lshal.cpp index 4c57361c92..33bc43c397 100644 --- a/cmds/lshal/Lshal.cpp +++ b/cmds/lshal/Lshal.cpp @@ -296,7 +296,7 @@ void Lshal::dumpVintf() const { continue; } - vintf::ManifestHal *hal = manifest.getHal(fqName.package()); + vintf::ManifestHal *hal = manifest.getAnyHal(fqName.package()); if (hal == nullptr) { if (!manifest.add(vintf::ManifestHal{ .format = vintf::HalFormat::HIDL, @@ -306,7 +306,7 @@ void Lshal::dumpVintf() const { mErr << "Warning: cannot add hal '" << fqInstanceName << "'" << std::endl; continue; } - hal = manifest.getHal(fqName.package()); + hal = manifest.getAnyHal(fqName.package()); } if (hal == nullptr) { mErr << "Warning: cannot get hal '" << fqInstanceName diff --git a/include/gui/BufferQueue.h b/include/gui/BufferQueue.h index c95c535363..bd62d85afb 100644 --- a/include/gui/BufferQueue.h +++ b/include/gui/BufferQueue.h @@ -23,10 +23,6 @@ #include <gui/IGraphicBufferProducer.h> #include <gui/IConsumerListener.h> -// These are only required to keep other parts of the framework with incomplete -// dependencies building successfully -#include <gui/IGraphicBufferAlloc.h> - namespace android { class BufferQueue { @@ -81,11 +77,9 @@ public: // needed gralloc buffers. 
static void createBufferQueue(sp<IGraphicBufferProducer>* outProducer, sp<IGraphicBufferConsumer>* outConsumer, - const sp<IGraphicBufferAlloc>& allocator = NULL, bool consumerIsSurfaceFlinger = false); -private: - BufferQueue(); // Create through createBufferQueue + BufferQueue() = delete; // Create through createBufferQueue }; // ---------------------------------------------------------------------------- diff --git a/include/gui/BufferQueueCore.h b/include/gui/BufferQueueCore.h index 91463401f0..cfe716ff39 100644 --- a/include/gui/BufferQueueCore.h +++ b/include/gui/BufferQueueCore.h @@ -51,7 +51,6 @@ namespace android { class IConsumerListener; -class IGraphicBufferAlloc; class IProducerListener; class BufferQueueCore : public virtual RefBase { @@ -79,9 +78,8 @@ public: typedef Vector<BufferItem> Fifo; // BufferQueueCore manages a pool of gralloc memory slots to be used by - // producers and consumers. allocator is used to allocate all the needed - // gralloc buffers. - BufferQueueCore(const sp<IGraphicBufferAlloc>& allocator = NULL); + // producers and consumers. + BufferQueueCore(); virtual ~BufferQueueCore(); private: @@ -143,10 +141,6 @@ private: void validateConsistencyLocked() const; #endif - // mAllocator is the connection to SurfaceFlinger that is used to allocate - // new GraphicBuffer objects. - sp<IGraphicBufferAlloc> mAllocator; - // mMutex is the mutex used to prevent concurrent access to the member // variables of BufferQueueCore objects. It must be locked whenever any // member variable is accessed. diff --git a/include/gui/GraphicBufferAlloc.h b/include/gui/GraphicBufferAlloc.h deleted file mode 100644 index 54c9829484..0000000000 --- a/include/gui/GraphicBufferAlloc.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_GUI_GRAPHIC_BUFFER_ALLOC_H -#define ANDROID_GUI_GRAPHIC_BUFFER_ALLOC_H - -#include <stdint.h> -#include <sys/types.h> - -#include <gui/IGraphicBufferAlloc.h> -#include <ui/PixelFormat.h> -#include <utils/Errors.h> - -namespace android { - -class GraphicBuffer; - -/* - * Concrete implementation of the IGraphicBufferAlloc interface. - * - * This can create GraphicBuffer instance across processes. This is mainly used - * by surfaceflinger. 
- */ - -class GraphicBufferAlloc : public BnGraphicBufferAlloc { -public: - GraphicBufferAlloc(); - virtual ~GraphicBufferAlloc(); - virtual sp<GraphicBuffer> createGraphicBuffer(uint32_t width, - uint32_t height, PixelFormat format, uint32_t layerCount, - uint64_t producerUsage, uint64_t consumerUsage, - std::string requestorName, status_t* error) override; -}; - - -} // namespace android - -#endif // ANDROID_GUI_GRAPHIC_BUFFER_ALLOC_H diff --git a/include/gui/IGraphicBufferAlloc.h b/include/gui/IGraphicBufferAlloc.h deleted file mode 100644 index 1e578cc260..0000000000 --- a/include/gui/IGraphicBufferAlloc.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ANDROID_GUI_IGRAPHIC_BUFFER_ALLOC_H -#define ANDROID_GUI_IGRAPHIC_BUFFER_ALLOC_H - -#include <stdint.h> -#include <sys/types.h> - -#include <binder/IInterface.h> -#include <ui/GraphicBuffer.h> -#include <ui/PixelFormat.h> -#include <utils/RefBase.h> - -#include <string> - -namespace android { -// ---------------------------------------------------------------------------- - -class IGraphicBufferAlloc : public IInterface -{ -public: - DECLARE_META_INTERFACE(GraphicBufferAlloc) - - /* Create a new GraphicBuffer for the client to use. 
- */ - virtual sp<GraphicBuffer> createGraphicBuffer(uint32_t w, uint32_t h, - PixelFormat format, uint32_t layerCount, uint64_t producerUsage, - uint64_t consumerUsage, std::string requestorName, - status_t* error) = 0; - - sp<GraphicBuffer> createGraphicBuffer(uint32_t w, uint32_t h, - PixelFormat format, uint32_t layerCount, uint32_t usage, - status_t* error) { - return createGraphicBuffer(w, h, format, layerCount, usage, - usage, "<Unknown>", error); - } - - sp<GraphicBuffer> createGraphicBuffer(uint32_t w, uint32_t h, - PixelFormat format, uint32_t layerCount, uint32_t usage, - std::string requestorName, status_t* error) { - return createGraphicBuffer(w, h, format, layerCount, usage, - usage, requestorName, error); - } - - sp<GraphicBuffer> createGraphicBuffer(uint32_t w, uint32_t h, - PixelFormat format, uint32_t layerCount, uint64_t producerUsage, - uint64_t consumerUsage, status_t* error) { - return createGraphicBuffer(w, h, format, layerCount, producerUsage, - consumerUsage, "<Unknown>", error); - } -}; - -// ---------------------------------------------------------------------------- - -class BnGraphicBufferAlloc : public BnInterface<IGraphicBufferAlloc> -{ -public: - virtual status_t onTransact(uint32_t code, - const Parcel& data, - Parcel* reply, - uint32_t flags = 0); -}; - -// ---------------------------------------------------------------------------- - -}; // namespace android - -#endif // ANDROID_GUI_IGRAPHIC_BUFFER_ALLOC_H diff --git a/include/gui/ISurfaceComposer.h b/include/gui/ISurfaceComposer.h index 9870ba0715..1112973587 100644 --- a/include/gui/ISurfaceComposer.h +++ b/include/gui/ISurfaceComposer.h @@ -41,7 +41,6 @@ struct DisplayInfo; struct DisplayStatInfo; class HdrCapabilities; class IDisplayEventConnection; -class IGraphicBufferAlloc; class IGraphicBufferProducer; class ISurfaceComposerClient; class Rect; @@ -89,10 +88,6 @@ public: virtual sp<ISurfaceComposerClient> createScopedConnection( const sp<IGraphicBufferProducer>& parent) = 0; - /* create a graphic buffer allocator - */ - virtual sp<IGraphicBufferAlloc> createGraphicBufferAlloc() = 0; - /* return an IDisplayEventConnection */ virtual sp<IDisplayEventConnection> createDisplayEventConnection() = 0; @@ -205,7 +200,7 @@ public: // Java by ActivityManagerService. BOOT_FINISHED = IBinder::FIRST_CALL_TRANSACTION, CREATE_CONNECTION, - CREATE_GRAPHIC_BUFFER_ALLOC, + UNUSED, // formerly CREATE_GRAPHIC_BUFFER_ALLOC CREATE_DISPLAY_EVENT_CONNECTION, CREATE_DISPLAY, DESTROY_DISPLAY, diff --git a/libs/binder/ProcessState.cpp b/libs/binder/ProcessState.cpp index 9ccf07ca2a..add5e744db 100644 --- a/libs/binder/ProcessState.cpp +++ b/libs/binder/ProcessState.cpp @@ -79,6 +79,11 @@ sp<ProcessState> ProcessState::initWithDriver(const char* driver) { Mutex::Autolock _l(gProcessMutex); if (gProcess != NULL) { + // Allow for initWithDriver to be called repeatedly with the same + // driver. 
+ if (!strcmp(gProcess->getDriverName().c_str(), driver)) { + return gProcess; + } LOG_ALWAYS_FATAL("ProcessState was already initialized."); } gProcess = new ProcessState(driver); diff --git a/libs/binder/tests/schd-dbg.cpp b/libs/binder/tests/schd-dbg.cpp index fe9e05a4f1..13f03b1ae0 100644 --- a/libs/binder/tests/schd-dbg.cpp +++ b/libs/binder/tests/schd-dbg.cpp @@ -40,7 +40,7 @@ vector<sp<IBinder> > workers; // GOOD_SYNC_MIN is considered as good #define GOOD_SYNC_MIN (0.6) -#define DUMP_PRICISION 3 +#define DUMP_PRESICION 2 string trace_path = "/sys/kernel/debug/tracing"; @@ -246,10 +246,11 @@ struct Results { double worst = (double)m_worst / 1.0E6; double average = (double)m_total_time / m_transactions / 1.0E6; // FIXME: libjson? - cout << std::setprecision(DUMP_PRICISION) << "{ \"avg\":" << setw(5) << left - << average << ", \"wst\":" << setw(5) << left << worst - << ", \"bst\":" << setw(5) << left << best << ", \"miss\":" << setw(5) - << left << m_miss << ", \"meetR\":" << setw(3) << left + int W = DUMP_PRESICION + 2; + cout << setprecision(DUMP_PRESICION) << "{ \"avg\":" << setw(W) << left + << average << ",\"wst\":" << setw(W) << left << worst + << ",\"bst\":" << setw(W) << left << best << ",\"miss\":" << left + << m_miss << ",\"meetR\":" << left + << setprecision(DUMP_PRESICION + 3) << (1.0 - (double)m_miss / m_transactions) << "}"; } }; @@ -272,8 +273,15 @@ static void parcel_fill(Parcel& data, int sz, int priority, int cpu) { } } +typedef struct { + void* result; + int target; +} thread_priv_t; + static void* thread_start(void* p) { - Results* results_fifo = (Results*)p; + thread_priv_t* priv = (thread_priv_t*)p; + int target = priv->target; + Results* results_fifo = (Results*)priv->result; Parcel data, reply; Tick sta, end; @@ -281,7 +289,7 @@ static void* thread_start(void* p) { thread_dump("fifo-caller"); sta = tickNow(); - status_t ret = workers[0]->transact(BINDER_NOP, data, &reply); + status_t ret = workers[target]->transact(BINDER_NOP, data, &reply); end = tickNow(); results_fifo->add_time(tickNano(sta, end)); @@ -291,16 +299,19 @@ } // create a fifo thread to transact and wait it to finished -static void thread_transaction(Results* results_fifo) { +static void thread_transaction(int target, Results* results_fifo) { + thread_priv_t thread_priv; void* dummy; pthread_t thread; pthread_attr_t attr; struct sched_param param; + thread_priv.target = target; + thread_priv.result = results_fifo; ASSERT(!pthread_attr_init(&attr)); ASSERT(!pthread_attr_setschedpolicy(&attr, SCHED_FIFO)); param.sched_priority = sched_get_priority_max(SCHED_FIFO); ASSERT(!pthread_attr_setschedparam(&attr, &param)); - ASSERT(!pthread_create(&thread, &attr, &thread_start, results_fifo)); + ASSERT(!pthread_create(&thread, &attr, &thread_start, &thread_priv)); ASSERT(!pthread_join(thread, &dummy)); } @@ -316,7 +327,9 @@ void worker_fx(int num, int no_process, int iterations, int payload_size, sp<IServiceManager> serviceMgr = defaultServiceManager(); sp<BinderWorkerService> service = new BinderWorkerService; serviceMgr->addService(generateServiceName(num), service); + // init done p.signal(); + // wait for kick-off p.wait(); // If client/server pairs, then half the workers are @@ -338,7 +351,7 @@ int target = num % server_count; // 1.
transaction by fifo thread - thread_transaction(&results_fifo); + thread_transaction(target, &results_fifo); parcel_fill(data, payload_size, thread_pri(), sched_getcpu()); thread_dump("other-caller"); @@ -356,6 +369,7 @@ void worker_fx(int num, int no_process, int iterations, int payload_size, p.wait(); p.send(&dummy); + // wait for kill p.wait(); // Client for each pair dump here if (is_client(num)) { @@ -367,10 +381,10 @@ void worker_fx(int num, int no_process, int iterations, int payload_size, << "\"S\":" << (no_trans - no_sync) << ",\"I\":" << no_trans << "," << "\"R\":" << sync_ratio << "," << endl; - cout << " \"other_ms\":"; + cout << " \"other_ms\":"; results_other.dump(); cout << "," << endl; - cout << " \"fifo_ms\": "; + cout << " \"fifo_ms\": "; results_fifo.dump(); cout << endl; cout << "}," << endl; @@ -463,12 +477,17 @@ int main(int argc, char** argv) { for (int i = 0; i < no_process; i++) { pipes.push_back(make_process(i, iterations, no_process, payload_size)); } + // wait for init done wait_all(pipes); + // kick-off iterations signal_all(pipes); + // wait for completion wait_all(pipes); + // start to send result signal_all(pipes); for (int i = 0; i < no_process; i++) { int status; + // kill pipes[i].signal(); wait(&status); // the exit status is number of transactions without priority inheritance diff --git a/libs/gui/Android.bp b/libs/gui/Android.bp index 90ab286d2f..5eafb2c173 100644 --- a/libs/gui/Android.bp +++ b/libs/gui/Android.bp @@ -79,11 +79,9 @@ cc_library_shared { "DisplayEventReceiver.cpp", "FrameTimestamps.cpp", "GLConsumer.cpp", - "GraphicBufferAlloc.cpp", "GuiConfig.cpp", "IDisplayEventConnection.cpp", "IConsumerListener.cpp", - "IGraphicBufferAlloc.cpp", "IGraphicBufferConsumer.cpp", "IGraphicBufferProducer.cpp", "IProducerListener.cpp", diff --git a/libs/gui/BufferQueue.cpp b/libs/gui/BufferQueue.cpp index 13692eb982..41512127f2 100644 --- a/libs/gui/BufferQueue.cpp +++ b/libs/gui/BufferQueue.cpp @@ -79,14 +79,13 @@ void BufferQueue::ProxyConsumerListener::addAndGetFrameTimestamps( void BufferQueue::createBufferQueue(sp<IGraphicBufferProducer>* outProducer, sp<IGraphicBufferConsumer>* outConsumer, - const sp<IGraphicBufferAlloc>& allocator, bool consumerIsSurfaceFlinger) { LOG_ALWAYS_FATAL_IF(outProducer == NULL, "BufferQueue: outProducer must not be NULL"); LOG_ALWAYS_FATAL_IF(outConsumer == NULL, "BufferQueue: outConsumer must not be NULL"); - sp<BufferQueueCore> core(new BufferQueueCore(allocator)); + sp<BufferQueueCore> core(new BufferQueueCore()); LOG_ALWAYS_FATAL_IF(core == NULL, "BufferQueue: failed to create BufferQueueCore"); diff --git a/libs/gui/BufferQueueCore.cpp b/libs/gui/BufferQueueCore.cpp index 566af90d8e..cd9425335d 100644 --- a/libs/gui/BufferQueueCore.cpp +++ b/libs/gui/BufferQueueCore.cpp @@ -33,9 +33,7 @@ #include <gui/BufferItem.h> #include <gui/BufferQueueCore.h> -#include <gui/GraphicBufferAlloc.h> #include <gui/IConsumerListener.h> -#include <gui/IGraphicBufferAlloc.h> #include <gui/IProducerListener.h> #include <gui/ISurfaceComposer.h> #include <private/gui/ComposerService.h> @@ -54,8 +52,7 @@ static uint64_t getUniqueId() { return id | counter++; } -BufferQueueCore::BufferQueueCore(const sp<IGraphicBufferAlloc>& allocator) : - mAllocator(allocator), +BufferQueueCore::BufferQueueCore() : mMutex(), mIsAbandoned(false), mConsumerControlledByApp(false), @@ -97,30 +94,6 @@ BufferQueueCore::BufferQueueCore(const sp<IGraphicBufferAlloc>& allocator) : mLastQueuedSlot(INVALID_BUFFER_SLOT), mUniqueId(getUniqueId()) { - if (allocator == 
NULL) { - -#ifdef HAVE_NO_SURFACE_FLINGER - // Without a SurfaceFlinger, allocate in-process. This only makes - // sense in systems with static SELinux configurations and no - // applications (since applications need dynamic SELinux policy). - mAllocator = new GraphicBufferAlloc(); -#else - // Run time check for headless, where we also allocate in-process. - char value[PROPERTY_VALUE_MAX]; - property_get("config.headless", value, "0"); - if (atoi(value) == 1) { - mAllocator = new GraphicBufferAlloc(); - } else { - sp<ISurfaceComposer> composer(ComposerService::getComposerService()); - mAllocator = composer->createGraphicBufferAlloc(); - } -#endif // HAVE_NO_SURFACE_FLINGER - - if (mAllocator == NULL) { - BQ_LOGE("createGraphicBufferAlloc failed"); - } - } - int numStartingBuffers = getMaxBufferCountLocked(); for (int s = 0; s < numStartingBuffers; s++) { mFreeSlots.insert(s); diff --git a/libs/gui/BufferQueueProducer.cpp b/libs/gui/BufferQueueProducer.cpp index aef231a252..a540ab9468 100644 --- a/libs/gui/BufferQueueProducer.cpp +++ b/libs/gui/BufferQueueProducer.cpp @@ -34,7 +34,6 @@ #include <gui/BufferQueueProducer.h> #include <gui/GLConsumer.h> #include <gui/IConsumerListener.h> -#include <gui/IGraphicBufferAlloc.h> #include <gui/IProducerListener.h> #include <utils/Log.h> @@ -454,8 +453,7 @@ status_t BufferQueueProducer::dequeueBuffer(int *outSlot, mSlots[found].mBufferState.dequeue(); if ((buffer == NULL) || - buffer->needsReallocation(width, height, format, BQ_LAYER_COUNT, - usage)) + buffer->needsReallocation(width, height, format, BQ_LAYER_COUNT, usage)) { mSlots[found].mAcquireCalled = false; mSlots[found].mGraphicBuffer = NULL; @@ -503,11 +501,13 @@ status_t BufferQueueProducer::dequeueBuffer(int *outSlot, } // Autolock scope if (returnFlags & BUFFER_NEEDS_REALLOCATION) { - status_t error; BQ_LOGV("dequeueBuffer: allocating a new buffer for slot %d", *outSlot); - sp<GraphicBuffer> graphicBuffer(mCore->mAllocator->createGraphicBuffer( - width, height, format, BQ_LAYER_COUNT, usage, - {mConsumerName.string(), mConsumerName.size()}, &error)); + sp<GraphicBuffer> graphicBuffer = new GraphicBuffer( + width, height, format, BQ_LAYER_COUNT, usage, usage, + {mConsumerName.string(), mConsumerName.size()}); + + status_t error = graphicBuffer->initCheck(); + { // Autolock scope Mutex::Autolock lock(mCore->mMutex); @@ -1337,11 +1337,12 @@ void BufferQueueProducer::allocateBuffers(uint32_t width, uint32_t height, Vector<sp<GraphicBuffer>> buffers; for (size_t i = 0; i < newBufferCount; ++i) { - status_t result = NO_ERROR; - sp<GraphicBuffer> graphicBuffer(mCore->mAllocator->createGraphicBuffer( + sp<GraphicBuffer> graphicBuffer = new GraphicBuffer( allocWidth, allocHeight, allocFormat, BQ_LAYER_COUNT, - allocUsage, {mConsumerName.string(), mConsumerName.size()}, - &result)); + allocUsage, allocUsage, {mConsumerName.string(), mConsumerName.size()}); + + status_t result = graphicBuffer->initCheck(); + if (result != NO_ERROR) { BQ_LOGE("allocateBuffers: failed to allocate buffer (%u x %u, format" " %u, usage %u)", width, height, format, usage); diff --git a/libs/gui/ConsumerBase.cpp b/libs/gui/ConsumerBase.cpp index 1783561928..5c6158c771 100644 --- a/libs/gui/ConsumerBase.cpp +++ b/libs/gui/ConsumerBase.cpp @@ -30,7 +30,6 @@ #include <cutils/atomic.h> #include <gui/BufferItem.h> -#include <gui/IGraphicBufferAlloc.h> #include <gui/ISurfaceComposer.h> #include <gui/SurfaceComposerClient.h> #include <gui/ConsumerBase.h> diff --git a/libs/gui/GLConsumer.cpp b/libs/gui/GLConsumer.cpp index 
55e0d264bb..c654f083b3 100644 --- a/libs/gui/GLConsumer.cpp +++ b/libs/gui/GLConsumer.cpp @@ -31,7 +31,6 @@ #include <gui/BufferItem.h> #include <gui/GLConsumer.h> -#include <gui/IGraphicBufferAlloc.h> #include <gui/ISurfaceComposer.h> #include <gui/SurfaceComposerClient.h> diff --git a/libs/gui/GraphicBufferAlloc.cpp b/libs/gui/GraphicBufferAlloc.cpp deleted file mode 100644 index cc7d403040..0000000000 --- a/libs/gui/GraphicBufferAlloc.cpp +++ /dev/null @@ -1,49 +0,0 @@ -/* - ** - ** Copyright 2012 The Android Open Source Project - ** - ** Licensed under the Apache License Version 2.0(the "License"); - ** you may not use this file except in compliance with the License. - ** You may obtain a copy of the License at - ** - ** http://www.apache.org/licenses/LICENSE-2.0 - ** - ** Unless required by applicable law or agreed to in writing software - ** distributed under the License is distributed on an "AS IS" BASIS - ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND either express or implied. - ** See the License for the specific language governing permissions and - ** limitations under the License. - */ - -#include <gui/GraphicBufferAlloc.h> - -#include <log/log.h> - - -namespace android { - -GraphicBufferAlloc::GraphicBufferAlloc() = default; -GraphicBufferAlloc::~GraphicBufferAlloc() = default; - -sp<GraphicBuffer> GraphicBufferAlloc::createGraphicBuffer(uint32_t width, - uint32_t height, PixelFormat format, uint32_t layerCount, - uint64_t producerUsage, uint64_t consumerUsage, - std::string requestorName, status_t* error) { - sp<GraphicBuffer> graphicBuffer(new GraphicBuffer( - width, height, format, layerCount, producerUsage, consumerUsage, - std::move(requestorName))); - status_t err = graphicBuffer->initCheck(); - *error = err; - if (err != 0 || graphicBuffer->handle == 0) { - if (err == NO_MEMORY) { - GraphicBuffer::dumpAllocationsToSystemLog(); - } - ALOGE("GraphicBufferAlloc::createGraphicBuffer(w=%u, h=%u, lc=%u) failed (%s), handle=%p", - width, height, layerCount, strerror(-err), - graphicBuffer->handle); - graphicBuffer.clear(); - } - return graphicBuffer; -} - -} // namespace android diff --git a/libs/gui/IGraphicBufferAlloc.cpp b/libs/gui/IGraphicBufferAlloc.cpp deleted file mode 100644 index 21a0dd5b3d..0000000000 --- a/libs/gui/IGraphicBufferAlloc.cpp +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -// tag as surfaceflinger -#define LOG_TAG "SurfaceFlinger" - -#include <stdint.h> -#include <sys/types.h> - -#include <binder/Parcel.h> - -#include <ui/GraphicBuffer.h> - -#include <gui/IGraphicBufferAlloc.h> - -// --------------------------------------------------------------------------- - -namespace android { - -enum { - CREATE_GRAPHIC_BUFFER = IBinder::FIRST_CALL_TRANSACTION, -}; - -class BpGraphicBufferAlloc : public BpInterface<IGraphicBufferAlloc> -{ -public: - explicit BpGraphicBufferAlloc(const sp<IBinder>& impl) - : BpInterface<IGraphicBufferAlloc>(impl) - { - } - - virtual ~BpGraphicBufferAlloc(); - - virtual sp<GraphicBuffer> createGraphicBuffer(uint32_t width, - uint32_t height, PixelFormat format, uint32_t layerCount, - uint64_t producerUsage, uint64_t consumerUsage, - std::string requestorName, status_t* error) { - Parcel data, reply; - data.writeInterfaceToken(IGraphicBufferAlloc::getInterfaceDescriptor()); - data.writeUint32(width); - data.writeUint32(height); - data.writeInt32(static_cast<int32_t>(format)); - data.writeUint32(layerCount); - data.writeUint64(producerUsage); - data.writeUint64(consumerUsage); - if (requestorName.empty()) { - requestorName += "[PID "; - requestorName += std::to_string(getpid()); - requestorName += ']'; - } - data.writeUtf8AsUtf16(requestorName); - remote()->transact(CREATE_GRAPHIC_BUFFER, data, &reply); - sp<GraphicBuffer> graphicBuffer; - status_t result = reply.readInt32(); - if (result == NO_ERROR) { - graphicBuffer = new GraphicBuffer(); - result = reply.read(*graphicBuffer); - if (result != NO_ERROR) { - graphicBuffer.clear(); - } - // reply.readStrongBinder(); - // here we don't even have to read the BufferReference from - // the parcel, it'll die with the parcel. - } - *error = result; - return graphicBuffer; - } -}; - -// Out-of-line virtual method definition to trigger vtable emission in this -// translation unit (see clang warning -Wweak-vtables) -BpGraphicBufferAlloc::~BpGraphicBufferAlloc() {} - -IMPLEMENT_META_INTERFACE(GraphicBufferAlloc, "android.ui.IGraphicBufferAlloc"); - -// ---------------------------------------------------------------------- - -status_t BnGraphicBufferAlloc::onTransact( - uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) -{ - // codes that don't require permission check - - // BufferReference just keeps a strong reference to a GraphicBuffer until it - // is destroyed (that is, until no local or remote process have a reference - // to it). - class BufferReference : public BBinder { - sp<GraphicBuffer> mBuffer; - public: - explicit BufferReference(const sp<GraphicBuffer>& buffer) : mBuffer(buffer) {} - }; - - - switch (code) { - case CREATE_GRAPHIC_BUFFER: { - CHECK_INTERFACE(IGraphicBufferAlloc, data, reply); - uint32_t width = data.readUint32(); - uint32_t height = data.readUint32(); - PixelFormat format = static_cast<PixelFormat>(data.readInt32()); - uint32_t layerCount = data.readUint32(); - uint64_t producerUsage = data.readUint64(); - uint64_t consumerUsage = data.readUint64(); - status_t error = NO_ERROR; - std::string requestorName; - data.readUtf8FromUtf16(&requestorName); - sp<GraphicBuffer> result = createGraphicBuffer(width, height, - format, layerCount, producerUsage, consumerUsage, - requestorName, &error); - reply->writeInt32(error); - if (result != 0) { - reply->write(*result); - // We add a BufferReference to this parcel to make sure the - // buffer stays alive until the GraphicBuffer object on - // the other side has been created. 
- // This is needed so that the buffer handle can be - // registered before the buffer is destroyed on implementations - // that do not use file-descriptors to track their buffers. - reply->writeStrongBinder( new BufferReference(result) ); - } - return NO_ERROR; - } - default: - return BBinder::onTransact(code, data, reply, flags); - } -} - -}; // namespace android diff --git a/libs/gui/ISurfaceComposer.cpp b/libs/gui/ISurfaceComposer.cpp index 4d2692fe62..2516fb8a2d 100644 --- a/libs/gui/ISurfaceComposer.cpp +++ b/libs/gui/ISurfaceComposer.cpp @@ -25,7 +25,6 @@ #include <binder/IServiceManager.h> #include <gui/IDisplayEventConnection.h> -#include <gui/IGraphicBufferAlloc.h> #include <gui/IGraphicBufferProducer.h> #include <gui/ISurfaceComposer.h> #include <gui/ISurfaceComposerClient.h> @@ -72,14 +71,6 @@ public: return interface_cast<ISurfaceComposerClient>(reply.readStrongBinder()); } - virtual sp<IGraphicBufferAlloc> createGraphicBufferAlloc() - { - Parcel data, reply; - data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor()); - remote()->transact(BnSurfaceComposer::CREATE_GRAPHIC_BUFFER_ALLOC, data, &reply); - return interface_cast<IGraphicBufferAlloc>(reply.readStrongBinder()); - } - virtual void setTransactionState( const Vector<ComposerState>& state, const Vector<DisplayState>& displays, @@ -505,12 +496,6 @@ status_t BnSurfaceComposer::onTransact( reply->writeStrongBinder(b); return NO_ERROR; } - case CREATE_GRAPHIC_BUFFER_ALLOC: { - CHECK_INTERFACE(ISurfaceComposer, data, reply); - sp<IBinder> b = IInterface::asBinder(createGraphicBufferAlloc()); - reply->writeStrongBinder(b); - return NO_ERROR; - } case SET_TRANSACTION_STATE: { CHECK_INTERFACE(ISurfaceComposer, data, reply); diff --git a/libs/gui/tests/Surface_test.cpp b/libs/gui/tests/Surface_test.cpp index cf3d1b27c7..08d6715a1c 100644 --- a/libs/gui/tests/Surface_test.cpp +++ b/libs/gui/tests/Surface_test.cpp @@ -377,9 +377,6 @@ public: const sp<IGraphicBufferProducer>& /* parent */) override { return nullptr; } - sp<IGraphicBufferAlloc> createGraphicBufferAlloc() override { - return nullptr; - } sp<IDisplayEventConnection> createDisplayEventConnection() override { return nullptr; } diff --git a/libs/nativewindow/AHardwareBuffer.cpp b/libs/nativewindow/AHardwareBuffer.cpp index 17b2a8693e..5812831811 100644 --- a/libs/nativewindow/AHardwareBuffer.cpp +++ b/libs/nativewindow/AHardwareBuffer.cpp @@ -70,7 +70,7 @@ int AHardwareBuffer_allocate(const AHardwareBuffer_Desc* desc, AHardwareBuffer** if (err == NO_MEMORY) { GraphicBuffer::dumpAllocationsToSystemLog(); } - ALOGE("GraphicBufferAlloc::createGraphicBuffer(w=%u, h=%u, lc=%u) failed (%s), handle=%p", + ALOGE("GraphicBuffer(w=%u, h=%u, lc=%u) failed (%s), handle=%p", desc->width, desc->height, desc->layers, strerror(-err), gbuffer->handle); return err; } diff --git a/libs/vr/libbroadcastring/Android.bp b/libs/vr/libbroadcastring/Android.bp new file mode 100644 index 0000000000..13af470a26 --- /dev/null +++ b/libs/vr/libbroadcastring/Android.bp @@ -0,0 +1,35 @@ +cc_library_static { + name: "libbroadcastring", + clang: true, + cflags: [ + "-Wall", + "-Wextra", + "-Werror", + ], + export_include_dirs: ["include"], + shared_libs: [ + "libbase", + ], + export_shared_lib_headers: [ + "libbase", + ], +} + +cc_test { + name: "broadcast_ring_tests", + clang: true, + cflags: [ + "-Wall", + "-Wextra", + "-Werror", + ], + srcs: [ + "broadcast_ring_test.cc", + ], + static_libs: [ + "libbroadcastring", + ], + shared_libs: [ + "libbase", + ], +} diff --git 
a/libs/vr/libbroadcastring/broadcast_ring_test.cc b/libs/vr/libbroadcastring/broadcast_ring_test.cc new file mode 100644 index 0000000000..dfdd4ef0db --- /dev/null +++ b/libs/vr/libbroadcastring/broadcast_ring_test.cc @@ -0,0 +1,866 @@ +#include "libbroadcastring/broadcast_ring.h" + +#include <stdlib.h> +#include <memory> +#include <thread> // NOLINT +#include <sys/mman.h> + +#include <gtest/gtest.h> + +namespace android { +namespace dvr { +namespace { + +template <uint32_t N> +struct alignas(8) Aligned { + char v[N]; +}; + +template <uint32_t N> +struct alignas(8) Sized { + Sized() { Clear(); } + explicit Sized(char c) { Fill(c); } + char v[sizeof(Aligned<N>)]; + void Clear() { memset(v, 0, sizeof(v)); } + void Fill(char c) { memset(v, c, sizeof(v)); } + static Sized Pattern(uint8_t c) { + Sized sized; + for (size_t i = 0; i < sizeof(v); ++i) { + sized.v[i] = static_cast<char>(c + i); + } + return sized; + } + bool operator==(const Sized& right) const { + static_assert(sizeof(*this) == sizeof(v), "Size mismatch"); + return !memcmp(v, right.v, sizeof(v)); + } + template <typename SmallerSized> + SmallerSized Truncate() const { + SmallerSized val; + static_assert(sizeof(val.v) <= sizeof(v), "Cannot truncate to larger size"); + memcpy(val.v, v, sizeof(val.v)); + return val; + } +}; + +char FillChar(int val) { return static_cast<char>(val); } + +struct FakeMmap { + explicit FakeMmap(size_t size) : size(size), data(new char[size]) {} + size_t size; + std::unique_ptr<char[]> data; + void* mmap() { return static_cast<void*>(data.get()); } +}; + +template <typename Ring> +FakeMmap CreateRing(Ring* ring, uint32_t count) { + FakeMmap mmap(Ring::MemorySize(count)); + *ring = Ring::Create(mmap.mmap(), mmap.size, count); + return mmap; +} + +template <typename RecordType, bool StaticSize = false, + uint32_t StaticCount = 0, uint32_t MaxReserved = 1, + uint32_t MinAvailable = 0> +struct Traits { + using Record = RecordType; + static constexpr bool kUseStaticRecordSize = StaticSize; + static constexpr uint32_t kStaticRecordCount = StaticCount; + static constexpr uint32_t kMaxReservedRecords = MaxReserved; + static constexpr uint32_t kMinAvailableRecords = MinAvailable; + static constexpr uint32_t kMinRecordCount = MaxReserved + MinAvailable; +}; + +template <typename Record, bool StaticSize = false, uint32_t MaxReserved = 1, + uint32_t MinAvailable = 7> +struct TraitsDynamic + : public Traits<Record, StaticSize, 0, MaxReserved, MinAvailable> { + using Ring = BroadcastRing<Record, TraitsDynamic>; + static uint32_t MinCount() { return MaxReserved + MinAvailable; } +}; + +template <typename Record, uint32_t StaticCount = 1, bool StaticSize = true, + uint32_t MaxReserved = 1, uint32_t MinAvailable = 0> +struct TraitsStatic + : public Traits<Record, true, StaticCount, MaxReserved, MinAvailable> { + using Ring = BroadcastRing<Record, TraitsStatic>; + static uint32_t MinCount() { return StaticCount; } +}; + +using Dynamic_8_NxM = TraitsDynamic<Sized<8>>; +using Dynamic_16_NxM = TraitsDynamic<Sized<16>>; +using Dynamic_32_NxM = TraitsDynamic<Sized<32>>; +using Dynamic_32_32xM = TraitsDynamic<Sized<32>, true>; +using Dynamic_16_NxM_1plus0 = TraitsDynamic<Sized<16>, false, 1, 0>; +using Dynamic_16_NxM_1plus1 = TraitsDynamic<Sized<16>, false, 1, 1>; +using Dynamic_16_NxM_5plus11 = TraitsDynamic<Sized<16>, false, 5, 11>; +using Dynamic_256_NxM_1plus0 = TraitsDynamic<Sized<256>, false, 1, 0>; + +using Static_8_8x1 = TraitsStatic<Sized<8>, 1>; +using Static_8_8x16 = TraitsStatic<Sized<8>, 16>; +using Static_16_16x8 = 
TraitsStatic<Sized<16>, 8>; +using Static_16_16x16 = TraitsStatic<Sized<16>, 16>; +using Static_16_16x32 = TraitsStatic<Sized<16>, 32>; +using Static_32_Nx8 = TraitsStatic<Sized<32>, 8, false>; + +using TraitsList = ::testing::Types<Dynamic_8_NxM, // + Dynamic_16_NxM, // + Dynamic_32_NxM, // + Dynamic_32_32xM, // + Dynamic_16_NxM_1plus0, // + Dynamic_16_NxM_1plus1, // + Dynamic_16_NxM_5plus11, // + Dynamic_256_NxM_1plus0, // + Static_8_8x1, // + Static_8_8x16, // + Static_16_16x8, // + Static_16_16x16, // + Static_16_16x32, // + Static_32_Nx8>; + +} // namespace + +template <typename T> +class BroadcastRingTest : public ::testing::Test {}; + +TYPED_TEST_CASE(BroadcastRingTest, TraitsList); + +TYPED_TEST(BroadcastRingTest, Geometry) { + using Record = typename TypeParam::Record; + using Ring = typename TypeParam::Ring; + Ring ring; + auto mmap = CreateRing(&ring, Ring::Traits::MinCount()); + EXPECT_EQ(Ring::Traits::MinCount(), ring.record_count()); + EXPECT_EQ(sizeof(Record), ring.record_size()); +} + +TYPED_TEST(BroadcastRingTest, PutGet) { + using Record = typename TypeParam::Record; + using Ring = typename TypeParam::Ring; + Ring ring; + auto mmap = CreateRing(&ring, Ring::Traits::MinCount()); + const uint32_t oldest_sequence_at_start = ring.GetOldestSequence(); + const uint32_t next_sequence_at_start = ring.GetNextSequence(); + { + uint32_t sequence = oldest_sequence_at_start; + Record record; + EXPECT_FALSE(ring.Get(&sequence, &record)); + EXPECT_EQ(oldest_sequence_at_start, sequence); + EXPECT_EQ(Record(), record); + } + const Record original_record(0x1a); + ring.Put(original_record); + { + uint32_t sequence = next_sequence_at_start; + Record record; + EXPECT_TRUE(ring.Get(&sequence, &record)); + EXPECT_EQ(next_sequence_at_start, sequence); + EXPECT_EQ(original_record, record); + } + { + uint32_t sequence = next_sequence_at_start + 1; + Record record; + EXPECT_FALSE(ring.Get(&sequence, &record)); + EXPECT_EQ(next_sequence_at_start + 1, sequence); + EXPECT_EQ(Record(), record); + } +} + +TYPED_TEST(BroadcastRingTest, FillOnce) { + using Record = typename TypeParam::Record; + using Ring = typename TypeParam::Ring; + Ring ring; + auto mmap = CreateRing(&ring, Ring::Traits::MinCount()); + const uint32_t next_sequence_at_start = ring.GetNextSequence(); + for (uint32_t i = 0; i < ring.record_count(); ++i) + ring.Put(Record(FillChar(i))); + for (uint32_t i = 0; i < ring.record_count(); ++i) { + const uint32_t expected_sequence = next_sequence_at_start + i; + const Record expected_record(FillChar(i)); + { + uint32_t sequence = ring.GetOldestSequence() + i; + Record record; + EXPECT_TRUE(ring.Get(&sequence, &record)); + EXPECT_EQ(expected_sequence, sequence); + EXPECT_EQ(expected_record, record); + } + } + { + uint32_t sequence = ring.GetOldestSequence() + ring.record_count(); + Record record; + EXPECT_FALSE(ring.Get(&sequence, &record)); + } +} + +TYPED_TEST(BroadcastRingTest, FillTwice) { + using Record = typename TypeParam::Record; + using Ring = typename TypeParam::Ring; + Ring ring; + auto mmap = CreateRing(&ring, Ring::Traits::MinCount()); + const uint32_t next_sequence_at_start = ring.GetNextSequence(); + for (uint32_t i = 0; i < 2 * ring.record_count(); ++i) { + const Record newest_record(FillChar(i)); + ring.Put(newest_record); + + const uint32_t newest_sequence = next_sequence_at_start + i; + const uint32_t records_available = std::min(i + 1, ring.record_count()); + const uint32_t oldest_sequence = newest_sequence - records_available + 1; + EXPECT_EQ(newest_sequence, 
ring.GetNewestSequence()); + EXPECT_EQ(oldest_sequence, ring.GetOldestSequence()); + EXPECT_EQ(newest_sequence + 1, ring.GetNextSequence()); + + for (uint32_t j = 0; j < records_available; ++j) { + const uint32_t sequence_jth_newest = newest_sequence - j; + const Record record_jth_newest(FillChar(i - j)); + + { + uint32_t sequence = sequence_jth_newest; + Record record; + EXPECT_TRUE(ring.Get(&sequence, &record)); + EXPECT_EQ(sequence_jth_newest, sequence); + EXPECT_EQ(record_jth_newest, record); + } + + { + uint32_t sequence = sequence_jth_newest; + Record record; + EXPECT_TRUE(ring.GetNewest(&sequence, &record)); + EXPECT_EQ(newest_sequence, sequence); + EXPECT_EQ(newest_record, record); + } + } + + const Record oldest_record( + FillChar(i + (oldest_sequence - newest_sequence))); + const uint32_t sequence_0th_overwritten = oldest_sequence - 1; + const uint32_t sequence_0th_future = newest_sequence + 1; + const uint32_t sequence_1st_future = newest_sequence + 2; + + { + uint32_t sequence = sequence_0th_overwritten; + Record record; + EXPECT_TRUE(ring.Get(&sequence, &record)); + EXPECT_EQ(oldest_sequence, sequence); + EXPECT_EQ(oldest_record, record); + } + + { + uint32_t sequence = sequence_0th_overwritten; + Record record; + EXPECT_TRUE(ring.GetNewest(&sequence, &record)); + EXPECT_EQ(newest_sequence, sequence); + EXPECT_EQ(newest_record, record); + } + + { + uint32_t sequence = sequence_0th_future; + Record record; + EXPECT_FALSE(ring.Get(&sequence, &record)); + EXPECT_EQ(sequence_0th_future, sequence); + EXPECT_EQ(Record(), record); + } + + { + uint32_t sequence = sequence_0th_future; + Record record; + EXPECT_FALSE(ring.GetNewest(&sequence, &record)); + EXPECT_EQ(sequence_0th_future, sequence); + EXPECT_EQ(Record(), record); + } + + { + uint32_t sequence = sequence_1st_future; + Record record; + EXPECT_TRUE(ring.Get(&sequence, &record)); + EXPECT_EQ(oldest_sequence, sequence); + EXPECT_EQ(oldest_record, record); + } + + { + uint32_t sequence = sequence_1st_future; + Record record; + EXPECT_TRUE(ring.GetNewest(&sequence, &record)); + EXPECT_EQ(newest_sequence, sequence); + EXPECT_EQ(newest_record, record); + } + } +} + +TYPED_TEST(BroadcastRingTest, Import) { + using Record = typename TypeParam::Record; + using Ring = typename TypeParam::Ring; + Ring ring; + auto mmap = CreateRing(&ring, Ring::Traits::MinCount()); + + const uint32_t sequence_0 = ring.GetNextSequence(); + const uint32_t sequence_1 = ring.GetNextSequence() + 1; + const Record record_0 = Record::Pattern(0x00); + const Record record_1 = Record::Pattern(0x80); + ring.Put(record_0); + ring.Put(record_1); + + { + Ring imported_ring; + bool import_ok; + std::tie(imported_ring, import_ok) = Ring::Import(mmap.mmap(), mmap.size); + EXPECT_TRUE(import_ok); + EXPECT_EQ(ring.record_size(), imported_ring.record_size()); + EXPECT_EQ(ring.record_count(), imported_ring.record_count()); + + if (ring.record_count() != 1) { + uint32_t sequence = sequence_0; + Record imported_record; + EXPECT_TRUE(imported_ring.Get(&sequence, &imported_record)); + EXPECT_EQ(sequence_0, sequence); + EXPECT_EQ(record_0, imported_record); + } + + { + uint32_t sequence = sequence_1; + Record imported_record; + EXPECT_TRUE(imported_ring.Get(&sequence, &imported_record)); + EXPECT_EQ(sequence_1, sequence); + EXPECT_EQ(record_1, imported_record); + } + } +} + +TEST(BroadcastRingTest, ShouldFailImportIfStaticSizeMismatch) { + using OriginalRing = typename Static_16_16x16::Ring; + using RecordSizeMismatchRing = typename Static_8_8x16::Ring; + using 
RecordCountMismatchRing = typename Static_16_16x8::Ring; + + OriginalRing original_ring; + auto mmap = CreateRing(&original_ring, OriginalRing::Traits::MinCount()); + + { + using ImportedRing = RecordSizeMismatchRing; + ImportedRing imported_ring; + bool import_ok; + std::tie(imported_ring, import_ok) = + ImportedRing::Import(mmap.mmap(), mmap.size); + EXPECT_FALSE(import_ok); + auto mmap_imported = + CreateRing(&imported_ring, ImportedRing::Traits::MinCount()); + EXPECT_NE(original_ring.record_size(), imported_ring.record_size()); + EXPECT_EQ(original_ring.record_count(), imported_ring.record_count()); + } + + { + using ImportedRing = RecordCountMismatchRing; + ImportedRing imported_ring; + bool import_ok; + std::tie(imported_ring, import_ok) = + ImportedRing::Import(mmap.mmap(), mmap.size); + EXPECT_FALSE(import_ok); + auto mmap_imported = + CreateRing(&imported_ring, ImportedRing::Traits::MinCount()); + EXPECT_EQ(original_ring.record_size(), imported_ring.record_size()); + EXPECT_NE(original_ring.record_count(), imported_ring.record_count()); + } +} + +TEST(BroadcastRingTest, ShouldFailImportIfDynamicSizeGrows) { + using OriginalRing = typename Dynamic_8_NxM::Ring; + using RecordSizeGrowsRing = typename Dynamic_16_NxM::Ring; + + OriginalRing original_ring; + auto mmap = CreateRing(&original_ring, OriginalRing::Traits::MinCount()); + + { + using ImportedRing = RecordSizeGrowsRing; + ImportedRing imported_ring; + bool import_ok; + std::tie(imported_ring, import_ok) = + ImportedRing::Import(mmap.mmap(), mmap.size); + EXPECT_FALSE(import_ok); + auto mmap_imported = + CreateRing(&imported_ring, ImportedRing::Traits::MinCount()); + EXPECT_LT(original_ring.record_size(), imported_ring.record_size()); + EXPECT_EQ(original_ring.record_count(), imported_ring.record_count()); + } +} + +TEST(BroadcastRingTest, ShouldFailImportIfCountTooSmall) { + using OriginalRing = typename Dynamic_16_NxM_1plus0::Ring; + using MinCountRing = typename Dynamic_16_NxM_1plus1::Ring; + + OriginalRing original_ring; + auto mmap = CreateRing(&original_ring, OriginalRing::Traits::MinCount()); + + { + using ImportedRing = MinCountRing; + ImportedRing imported_ring; + bool import_ok; + std::tie(imported_ring, import_ok) = + ImportedRing::Import(mmap.mmap(), mmap.size); + EXPECT_FALSE(import_ok); + auto mmap_imported = + CreateRing(&imported_ring, ImportedRing::Traits::MinCount()); + EXPECT_EQ(original_ring.record_size(), imported_ring.record_size()); + EXPECT_LT(original_ring.record_count(), imported_ring.record_count()); + } +} + +TEST(BroadcastRingTest, ShouldFailImportIfMmapTooSmall) { + using OriginalRing = typename Dynamic_16_NxM::Ring; + + OriginalRing original_ring; + auto mmap = CreateRing(&original_ring, OriginalRing::Traits::MinCount()); + + { + using ImportedRing = OriginalRing; + ImportedRing imported_ring; + bool import_ok; + const size_t kMinSize = + ImportedRing::MemorySize(original_ring.record_count()); + std::tie(imported_ring, import_ok) = ImportedRing::Import(mmap.mmap(), 0); + EXPECT_FALSE(import_ok); + std::tie(imported_ring, import_ok) = + ImportedRing::Import(mmap.mmap(), kMinSize - 1); + EXPECT_FALSE(import_ok); + std::tie(imported_ring, import_ok) = + ImportedRing::Import(mmap.mmap(), kMinSize); + EXPECT_TRUE(import_ok); + EXPECT_EQ(original_ring.record_size(), imported_ring.record_size()); + EXPECT_EQ(original_ring.record_count(), imported_ring.record_count()); + } +} + +TEST(BroadcastRingTest, ShouldImportIfDynamicSizeShrinks) { + using OriginalRing = typename Dynamic_16_NxM::Ring; + using 
RecordSizeShrinksRing = typename Dynamic_8_NxM::Ring; + + OriginalRing original_ring; + auto mmap = CreateRing(&original_ring, OriginalRing::Traits::MinCount()); + + using OriginalRecord = typename OriginalRing::Record; + const uint32_t original_sequence_0 = original_ring.GetNextSequence(); + const uint32_t original_sequence_1 = original_ring.GetNextSequence() + 1; + const OriginalRecord original_record_0 = OriginalRecord::Pattern(0x00); + const OriginalRecord original_record_1 = OriginalRecord::Pattern(0x80); + original_ring.Put(original_record_0); + original_ring.Put(original_record_1); + + { + using ImportedRing = RecordSizeShrinksRing; + using ImportedRecord = typename ImportedRing::Record; + ImportedRing imported_ring; + bool import_ok; + std::tie(imported_ring, import_ok) = + ImportedRing::Import(mmap.mmap(), mmap.size); + EXPECT_TRUE(import_ok); + EXPECT_EQ(original_ring.record_size(), imported_ring.record_size()); + EXPECT_EQ(original_ring.record_count(), imported_ring.record_count()); + EXPECT_GT(sizeof(OriginalRecord), sizeof(ImportedRecord)); + + { + uint32_t sequence = original_sequence_0; + ImportedRecord shrunk_record; + EXPECT_TRUE(imported_ring.Get(&sequence, &shrunk_record)); + EXPECT_EQ(original_sequence_0, sequence); + EXPECT_EQ(original_record_0.Truncate<ImportedRecord>(), shrunk_record); + } + + { + uint32_t sequence = original_sequence_1; + ImportedRecord shrunk_record; + EXPECT_TRUE(imported_ring.Get(&sequence, &shrunk_record)); + EXPECT_EQ(original_sequence_1, sequence); + EXPECT_EQ(original_record_1.Truncate<ImportedRecord>(), shrunk_record); + } + } +} + +TEST(BroadcastRingTest, ShouldImportIfCompatibleDynamicToStatic) { + using OriginalRing = typename Dynamic_16_NxM::Ring; + using ImportedRing = typename Static_16_16x16::Ring; + using OriginalRecord = typename OriginalRing::Record; + using ImportedRecord = typename ImportedRing::Record; + using StaticRing = ImportedRing; + + OriginalRing original_ring; + auto mmap = CreateRing(&original_ring, StaticRing::Traits::MinCount()); + + const uint32_t original_sequence_0 = original_ring.GetNextSequence(); + const uint32_t original_sequence_1 = original_ring.GetNextSequence() + 1; + const OriginalRecord original_record_0 = OriginalRecord::Pattern(0x00); + const OriginalRecord original_record_1 = OriginalRecord::Pattern(0x80); + original_ring.Put(original_record_0); + original_ring.Put(original_record_1); + + { + ImportedRing imported_ring; + bool import_ok; + std::tie(imported_ring, import_ok) = + ImportedRing::Import(mmap.mmap(), mmap.size); + EXPECT_TRUE(import_ok); + EXPECT_EQ(original_ring.record_size(), imported_ring.record_size()); + EXPECT_EQ(original_ring.record_count(), imported_ring.record_count()); + + { + uint32_t sequence = original_sequence_0; + ImportedRecord imported_record; + EXPECT_TRUE(imported_ring.Get(&sequence, &imported_record)); + EXPECT_EQ(original_sequence_0, sequence); + EXPECT_EQ(original_record_0, imported_record); + } + + { + uint32_t sequence = original_sequence_1; + ImportedRecord imported_record; + EXPECT_TRUE(imported_ring.Get(&sequence, &imported_record)); + EXPECT_EQ(original_sequence_1, sequence); + EXPECT_EQ(original_record_1, imported_record); + } + } +} + +TEST(BroadcastRingTest, ShouldImportIfCompatibleStaticToDynamic) { + using OriginalRing = typename Static_16_16x16::Ring; + using ImportedRing = typename Dynamic_16_NxM::Ring; + using OriginalRecord = typename OriginalRing::Record; + using ImportedRecord = typename ImportedRing::Record; + using StaticRing = OriginalRing; + + 
OriginalRing original_ring; + auto mmap = CreateRing(&original_ring, StaticRing::Traits::MinCount()); + + const uint32_t original_sequence_0 = original_ring.GetNextSequence(); + const uint32_t original_sequence_1 = original_ring.GetNextSequence() + 1; + const OriginalRecord original_record_0 = OriginalRecord::Pattern(0x00); + const OriginalRecord original_record_1 = OriginalRecord::Pattern(0x80); + original_ring.Put(original_record_0); + original_ring.Put(original_record_1); + + { + ImportedRing imported_ring; + bool import_ok; + std::tie(imported_ring, import_ok) = + ImportedRing::Import(mmap.mmap(), mmap.size); + EXPECT_TRUE(import_ok); + EXPECT_EQ(original_ring.record_size(), imported_ring.record_size()); + EXPECT_EQ(original_ring.record_count(), imported_ring.record_count()); + + { + uint32_t sequence = original_sequence_0; + ImportedRecord imported_record; + EXPECT_TRUE(imported_ring.Get(&sequence, &imported_record)); + EXPECT_EQ(original_sequence_0, sequence); + EXPECT_EQ(original_record_0, imported_record); + } + + { + uint32_t sequence = original_sequence_1; + ImportedRecord imported_record; + EXPECT_TRUE(imported_ring.Get(&sequence, &imported_record)); + EXPECT_EQ(original_sequence_1, sequence); + EXPECT_EQ(original_record_1, imported_record); + } + } +} + +TEST(BroadcastRingTest, ShouldImportIfReadonlyMmap) { + using Ring = Dynamic_32_NxM::Ring; + using Record = Ring::Record; + + uint32_t record_count = Ring::Traits::MinCount(); + size_t ring_size = Ring::MemorySize(record_count); + + size_t page_size = sysconf(_SC_PAGESIZE); + size_t mmap_size = (ring_size + (page_size - 1)) & ~(page_size - 1); + ASSERT_GE(mmap_size, ring_size); + + void* mmap_base = mmap(nullptr, mmap_size, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + ASSERT_NE(MAP_FAILED, mmap_base); + + Ring ring = Ring::Create(mmap_base, mmap_size, record_count); + for (uint32_t i = 0; i < record_count; ++i) ring.Put(Record(FillChar(i))); + + ASSERT_EQ(0, mprotect(mmap_base, mmap_size, PROT_READ)); + + { + Ring imported_ring; + bool import_ok; + std::tie(imported_ring, import_ok) = Ring::Import(mmap_base, mmap_size); + EXPECT_TRUE(import_ok); + EXPECT_EQ(ring.record_size(), imported_ring.record_size()); + EXPECT_EQ(ring.record_count(), imported_ring.record_count()); + + uint32_t oldest_sequence = imported_ring.GetOldestSequence(); + for (uint32_t i = 0; i < record_count; ++i) { + uint32_t sequence = oldest_sequence + i; + Record record; + EXPECT_TRUE(imported_ring.Get(&sequence, &record)); + EXPECT_EQ(Record(FillChar(i)), record); + } + } + + ASSERT_EQ(0, munmap(mmap_base, mmap_size)); +} + +TEST(BroadcastRingTest, ShouldDieIfPutReadonlyMmap) { + using Ring = Dynamic_32_NxM::Ring; + using Record = Ring::Record; + + uint32_t record_count = Ring::Traits::MinCount(); + size_t ring_size = Ring::MemorySize(record_count); + + size_t page_size = sysconf(_SC_PAGESIZE); + size_t mmap_size = (ring_size + (page_size - 1)) & ~(page_size - 1); + ASSERT_GE(mmap_size, ring_size); + + void* mmap_base = mmap(nullptr, mmap_size, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + ASSERT_NE(MAP_FAILED, mmap_base); + + Ring ring = Ring::Create(mmap_base, mmap_size, record_count); + for (uint32_t i = 0; i < record_count; ++i) ring.Put(Record(FillChar(i))); + + ASSERT_EQ(0, mprotect(mmap_base, mmap_size, PROT_READ)); + + EXPECT_DEATH_IF_SUPPORTED({ ring.Put(Record(7)); }, ""); + + ASSERT_EQ(0, munmap(mmap_base, mmap_size)); +} + +TEST(BroadcastRingTest, ShouldDieIfCreationMmapTooSmall) { + using Ring = 
Dynamic_32_NxM::Ring; + using Record = Ring::Record; + + uint32_t record_count = Ring::Traits::MinCount(); + size_t ring_size = Ring::MemorySize(record_count); + FakeMmap mmap(ring_size); + + EXPECT_DEATH_IF_SUPPORTED({ + Ring ring = Ring::Create(mmap.mmap(), ring_size - 1, record_count); + }, ""); + + Ring ring = Ring::Create(mmap.mmap(), ring_size, record_count); + + ring.Put(Record(3)); + + { + uint32_t sequence = ring.GetNewestSequence(); + Record record; + EXPECT_TRUE(ring.Get(&sequence, &record)); + EXPECT_EQ(Record(3), record); + } +} + +TEST(BroadcastRingTest, ShouldDieIfCreationMmapMisaligned) { + using Ring = Static_8_8x1::Ring; + using Record = Ring::Record; + + constexpr int kAlign = Ring::mmap_alignment(); + constexpr int kMisalign = kAlign / 2; + size_t ring_size = Ring::MemorySize(); + std::unique_ptr<char[]> buf(new char[ring_size + kMisalign]); + + EXPECT_DEATH_IF_SUPPORTED( + { Ring ring = Ring::Create(buf.get() + kMisalign, ring_size); }, ""); + + Ring ring = Ring::Create(buf.get(), ring_size); + + ring.Put(Record(3)); + + { + uint32_t sequence = ring.GetNewestSequence(); + Record record; + EXPECT_TRUE(ring.Get(&sequence, &record)); + EXPECT_EQ(Record(3), record); + } +} + +template <typename Ring> +std::unique_ptr<std::thread> CopyTask(std::atomic<bool>* quit, void* in_base, + size_t in_size, void* out_base, + size_t out_size) { + return std::unique_ptr<std::thread>( + new std::thread([quit, in_base, in_size, out_base, out_size]() { + using Record = typename Ring::Record; + + bool import_ok; + Ring in_ring; + Ring out_ring; + std::tie(in_ring, import_ok) = Ring::Import(in_base, in_size); + ASSERT_TRUE(import_ok); + std::tie(out_ring, import_ok) = Ring::Import(out_base, out_size); + ASSERT_TRUE(import_ok); + + uint32_t sequence = in_ring.GetOldestSequence(); + while (!std::atomic_load_explicit(quit, std::memory_order_relaxed)) { + Record record; + if (in_ring.Get(&sequence, &record)) { + out_ring.Put(record); + sequence++; + } + } + })); +} + +TEST(BroadcastRingTest, ThreadedCopySingle) { + using Ring = Dynamic_32_NxM::Ring; + using Record = Ring::Record; + Ring in_ring; + auto in_mmap = CreateRing(&in_ring, Ring::Traits::MinCount()); + + Ring out_ring; + auto out_mmap = CreateRing(&out_ring, Ring::Traits::MinCount()); + + std::atomic<bool> quit(false); + std::unique_ptr<std::thread> copy_task = CopyTask<Ring>( + &quit, out_mmap.mmap(), out_mmap.size, in_mmap.mmap(), in_mmap.size); + + const Record out_record(0x1c); + out_ring.Put(out_record); + + uint32_t in_sequence = in_ring.GetOldestSequence(); + Record in_record; + while (!in_ring.Get(&in_sequence, &in_record)) { + // Do nothing. 
+ } + + EXPECT_EQ(out_record, in_record); + std::atomic_store_explicit(&quit, true, std::memory_order_relaxed); + copy_task->join(); +} + +TEST(BroadcastRingTest, ThreadedCopyLossless) { + using Ring = Dynamic_32_NxM::Ring; + using Record = Ring::Record; + Ring in_ring; + auto in_mmap = CreateRing(&in_ring, Ring::Traits::MinCount()); + + Ring out_ring; + auto out_mmap = CreateRing(&out_ring, Ring::Traits::MinCount()); + + std::atomic<bool> quit(false); + std::unique_ptr<std::thread> copy_task = CopyTask<Ring>( + &quit, out_mmap.mmap(), out_mmap.size, in_mmap.mmap(), in_mmap.size); + + constexpr uint32_t kRecordsToProcess = 10000; + uint32_t out_records = 0; + uint32_t in_records = 0; + uint32_t in_sequence = in_ring.GetNextSequence(); + while (out_records < kRecordsToProcess || in_records < kRecordsToProcess) { + if (out_records < kRecordsToProcess && + out_records - in_records < out_ring.record_count()) { + const Record out_record(FillChar(out_records)); + out_ring.Put(out_record); + out_records++; + } + + Record in_record; + while (in_ring.Get(&in_sequence, &in_record)) { + EXPECT_EQ(Record(FillChar(in_records)), in_record); + in_records++; + in_sequence++; + } + } + + EXPECT_EQ(kRecordsToProcess, out_records); + EXPECT_EQ(kRecordsToProcess, in_records); + + std::atomic_store_explicit(&quit, true, std::memory_order_relaxed); + copy_task->join(); +} + +TEST(BroadcastRingTest, ThreadedCopyLossy) { + using Ring = Dynamic_32_NxM::Ring; + using Record = Ring::Record; + Ring in_ring; + auto in_mmap = CreateRing(&in_ring, Ring::Traits::MinCount()); + + Ring out_ring; + auto out_mmap = CreateRing(&out_ring, Ring::Traits::MinCount()); + + std::atomic<bool> quit(false); + std::unique_ptr<std::thread> copy_task = CopyTask<Ring>( + &quit, out_mmap.mmap(), out_mmap.size, in_mmap.mmap(), in_mmap.size); + + constexpr uint32_t kRecordsToProcess = 100000; + uint32_t out_records = 0; + uint32_t in_records = 0; + uint32_t in_sequence = in_ring.GetNextSequence(); + while (out_records < kRecordsToProcess) { + const Record out_record(FillChar(out_records)); + out_ring.Put(out_record); + out_records++; + + Record in_record; + if (in_ring.GetNewest(&in_sequence, &in_record)) { + EXPECT_EQ(Record(in_record.v[0]), in_record); + in_records++; + in_sequence++; + } + } + + EXPECT_EQ(kRecordsToProcess, out_records); + EXPECT_GE(kRecordsToProcess, in_records); + + std::atomic_store_explicit(&quit, true, std::memory_order_relaxed); + copy_task->join(); +} + +template <typename Ring> +std::unique_ptr<std::thread> CheckFillTask(std::atomic<bool>* quit, + void* in_base, size_t in_size) { + return std::unique_ptr<std::thread>( + new std::thread([quit, in_base, in_size]() { + using Record = typename Ring::Record; + + bool import_ok; + Ring in_ring; + std::tie(in_ring, import_ok) = Ring::Import(in_base, in_size); + ASSERT_TRUE(import_ok); + + uint32_t sequence = in_ring.GetOldestSequence(); + while (!std::atomic_load_explicit(quit, std::memory_order_relaxed)) { + Record record; + if (in_ring.Get(&sequence, &record)) { + ASSERT_EQ(Record(record.v[0]), record); + sequence++; + } + } + })); +} + +template <typename Ring> +void ThreadedOverwriteTorture() { + using Record = typename Ring::Record; + + // Maximize overwrites by having few records. 
+ const int kMinRecordCount = 1; + const int kMaxRecordCount = 4; + + for (int count = kMinRecordCount; count <= kMaxRecordCount; count *= 2) { + Ring out_ring; + auto out_mmap = CreateRing(&out_ring, count); + + std::atomic<bool> quit(false); + std::unique_ptr<std::thread> check_task = + CheckFillTask<Ring>(&quit, out_mmap.mmap(), out_mmap.size); + + constexpr int kIterations = 10000; + for (int i = 0; i < kIterations; ++i) { + const Record record(FillChar(i)); + out_ring.Put(record); + } + + std::atomic_store_explicit(&quit, true, std::memory_order_relaxed); + check_task->join(); + } +} + +TEST(BroadcastRingTest, ThreadedOverwriteTortureSmall) { + ThreadedOverwriteTorture<Dynamic_16_NxM_1plus0::Ring>(); +} + +TEST(BroadcastRingTest, ThreadedOverwriteTortureLarge) { + ThreadedOverwriteTorture<Dynamic_256_NxM_1plus0::Ring>(); +} + +} // namespace dvr +} // namespace android diff --git a/libs/vr/libbroadcastring/include/libbroadcastring/broadcast_ring.h b/libs/vr/libbroadcastring/include/libbroadcastring/broadcast_ring.h new file mode 100644 index 0000000000..69cb64826e --- /dev/null +++ b/libs/vr/libbroadcastring/include/libbroadcastring/broadcast_ring.h @@ -0,0 +1,668 @@ +#ifndef ANDROID_DVR_BROADCAST_RING_H_ +#define ANDROID_DVR_BROADCAST_RING_H_ + +#include <inttypes.h> +#include <stddef.h> +#include <stdio.h> +#include <atomic> +#include <limits> +#include <tuple> +#include <type_traits> +#include <utility> + +#include "android-base/logging.h" + +#if ATOMIC_LONG_LOCK_FREE != 2 || ATOMIC_INT_LOCK_FREE != 2 +#error "This file requires lock free atomic uint32_t and long" +#endif + +namespace android { +namespace dvr { + +struct DefaultRingTraits { + // Set this to false to allow compatibly expanding the record size. + static constexpr bool kUseStaticRecordSize = false; + + // Set this to a nonzero value to fix the number of records in the ring. + static constexpr uint32_t kStaticRecordCount = 0; + + // Set this to the max number of records that can be written simultaneously. + static constexpr uint32_t kMaxReservedRecords = 1; + + // Set this to the min number of records that must be readable. + static constexpr uint32_t kMinAvailableRecords = 1; +}; + +// Nonblocking ring suitable for concurrent single-writer, multi-reader access. +// +// Readers never block the writer and thus this is a nondeterministically lossy +// transport in the absence of external synchronization. Don't use this as a +// transport when deterministic behavior is required. +// +// Readers may have a read-only mapping; each reader's state is a single local +// sequence number. +// +// The implementation takes care to avoid data races on record access. +// Inconsistent data can only be returned if at least 2^32 records are written +// during the read-side critical section. +// +// In addition, both readers and the writer are careful to avoid accesses +// outside the bounds of the mmap area passed in during initialization even if +// there is a misbehaving or malicious task with write access to the mmap area. +// +// When dynamic record size is enabled, readers use the record size in the ring +// header when indexing the ring, so that it is possible to extend the record +// type without breaking the read-side ABI. +// +// Avoid calling Put() in a tight loop; there should be significantly more time +// between successive puts than it takes to read one record from memory to +// ensure Get() completes quickly. 
This requirement should not be difficult to +// achieve for most practical uses; 4kB puts at 10,000Hz is well below the +// scaling limit on current mobile chips. +// +// Example Writer Usage: +// +// using Record = MyRecordType; +// using Ring = BroadcastRing<Record>; +// +// uint32_t record_count = kMyDesiredCount; +// uint32_t ring_size = Ring::MemorySize(record_count); +// +// size_t page_size = sysconf(_SC_PAGESIZE); +// uint32_t mmap_size = (ring_size + (page_size - 1)) & ~(page_size - 1); +// +// // Allocate & map via your preferred mechanism, e.g. +// int fd = open("/dev/shm/ring_test", O_CREAT|O_RDWR|O_CLOEXEC, 0600); +// CHECK(fd >= 0); +// CHECK(!ftruncate(fd, ring_size)); +// void *mmap_base = mmap(nullptr, mmap_size, PROT_READ|PROT_WRITE, +// MAP_SHARED, fd, 0); +// CHECK(mmap_base != MAP_FAILED); +// close(fd); +// +// Ring ring = Ring::Create(mmap_base, mmap_size, record_count); +// +// while (!done) +// ring.Put(BuildNextRecordBlocking()); +// +// CHECK(!munmap(mmap_base, mmap_size)); +// +// Example Reader Usage: +// +// using Record = MyRecordType; +// using Ring = BroadcastRing<Record>; +// +// // Map via your preferred mechanism, e.g. +// int fd = open("/dev/shm/ring_test", O_RDONLY|O_CLOEXEC); +// CHECK(fd >= 0); +// struct stat st; +// CHECK(!fstat(fd, &st)); +// size_t mmap_size = st.st_size; +// void *mmap_base = mmap(nullptr, mmap_size, PROT_READ, +// MAP_SHARED, fd, 0); +// CHECK(mmap_base != MAP_FAILED); +// close(fd); +// +// Ring ring; +// bool import_ok; +// std::tie(ring, import_ok) = Ring::Import(mmap_base, mmap_size); +// CHECK(import_ok); +// +// uint32_t sequence; +// +// // Choose starting point (using "0" is unpredictable but not dangerous) +// sequence = ring.GetOldestSequence(); // The oldest available +// sequence = ring.GetNewestSequence(); // The newest available +// sequence = ring.GetNextSequence(); // The next one produced +// +// while (!done) { +// Record record; +// +// if (you_want_to_process_all_available_records) { +// while (ring.Get(&sequence, &record)) { +// ProcessRecord(sequence, record); +// sequence++; +// } +// } else if (you_want_to_skip_to_the_newest_record) { +// if (ring.GetNewest(&sequence, &record)) { +// ProcessRecord(sequence, record); +// sequence++; +// } +// } +// +// DoSomethingExpensiveOrBlocking(); +// } +// +// CHECK(!munmap(mmap_base, mmap_size)); +// +template <typename RecordType, typename BaseTraits = DefaultRingTraits> +class BroadcastRing { + public: + using Record = RecordType; + struct Traits : public BaseTraits { + // Must have enough space for writers, plus enough space for readers. + static constexpr int kMinRecordCount = + BaseTraits::kMaxReservedRecords + BaseTraits::kMinAvailableRecords; + + // Count of zero means dynamic, non-zero means static. + static constexpr bool kUseStaticRecordCount = + (BaseTraits::kStaticRecordCount != 0); + + // If both record size and count are static then the overall size is too. + static constexpr bool kIsStaticSize = + BaseTraits::kUseStaticRecordSize && kUseStaticRecordCount; + }; + + static constexpr bool IsPowerOfTwo(uint32_t size) { + return (size & (size - 1)) == 0; + } + + // Sanity check the options provided in Traits. 
+  static_assert(Traits::kMinRecordCount >= 1, "Min record count too small");
+  static_assert(!Traits::kUseStaticRecordCount ||
+                    Traits::kStaticRecordCount >= Traits::kMinRecordCount,
+                "Static record count is too small");
+  static_assert(!Traits::kStaticRecordCount ||
+                    IsPowerOfTwo(Traits::kStaticRecordCount),
+                "Static record count is not a power of two");
+  static_assert(std::is_standard_layout<Record>::value,
+                "Record type must be standard layout");
+
+  BroadcastRing() {}
+
+  // Creates a new ring at |mmap| with |record_count| records.
+  //
+  // There must be at least |MemorySize(record_count)| bytes of space already
+  // allocated at |mmap|. The ring does not take ownership.
+  //
+  // Use this function for dynamically sized rings.
+  static BroadcastRing Create(void* mmap, size_t mmap_size,
+                              uint32_t record_count) {
+    BroadcastRing ring(mmap);
+    CHECK(ring.ValidateGeometry(mmap_size, sizeof(Record), record_count));
+    ring.InitializeHeader(sizeof(Record), record_count);
+    return ring;
+  }
+
+  // Creates a new ring at |mmap|.
+  //
+  // There must be at least |MemorySize()| bytes of space already allocated at
+  // |mmap|. The ring does not take ownership.
+  //
+  // Use this function for statically sized rings.
+  static BroadcastRing Create(void* mmap, size_t mmap_size) {
+    static_assert(Traits::kUseStaticRecordCount,
+                  "Wrong Create() function called for dynamic record count");
+    return Create(mmap, mmap_size, Traits::kStaticRecordCount);
+  }
+
+  // Imports an existing ring at |mmap|.
+  //
+  // Import may fail if the ring parameters in the mmap header are not
+  // sensible. In this case the returned boolean is false; make sure to check
+  // this value.
+  static std::tuple<BroadcastRing, bool> Import(void* mmap, size_t mmap_size) {
+    BroadcastRing ring(mmap);
+    uint32_t record_size = 0;
+    uint32_t record_count = 0;
+    if (mmap_size >= sizeof(Header)) {
+      record_size = std::atomic_load_explicit(&ring.header_mmap()->record_size,
+                                              std::memory_order_relaxed);
+      record_count = std::atomic_load_explicit(
+          &ring.header_mmap()->record_count, std::memory_order_relaxed);
+    }
+    bool ok = ring.ValidateGeometry(mmap_size, record_size, record_count);
+    return std::make_tuple(ring, ok);
+  }
+
+  ~BroadcastRing() {}
+
+  // Calculates the space necessary for a ring of size |record_count|.
+  //
+  // Use this function for dynamically sized rings.
+  static constexpr size_t MemorySize(uint32_t record_count) {
+    return sizeof(Header) + sizeof(Record) * record_count;
+  }
+
+  // Calculates the space necessary for a statically sized ring.
+  //
+  // Use this function for statically sized rings.
+  static constexpr size_t MemorySize() {
+    static_assert(
+        Traits::kUseStaticRecordCount,
+        "Wrong MemorySize() function called for dynamic record count");
+    return MemorySize(Traits::kStaticRecordCount);
+  }
+
+  // Writes a record to the ring.
+  //
+  // The oldest record is overwritten if the ring is already full.
+  void Put(const Record& record) {
+    const int kRecordCount = 1;
+    Reserve(kRecordCount);
+    Geometry geometry = GetGeometry();
+    PutRecordInternal(&record, record_mmap_writer(geometry.tail_index));
+    Publish(kRecordCount);
+  }
+
+  // Gets sequence number of the oldest currently available record.
+  uint32_t GetOldestSequence() const {
+    return std::atomic_load_explicit(&header_mmap()->head,
+                                     std::memory_order_relaxed);
+  }
+
+  // Gets sequence number of the first future record.
+  //
+  // If the returned value is passed to Get() and there is no concurrent Put(),
+  // Get() will return false.
+  uint32_t GetNextSequence() const {
+    return std::atomic_load_explicit(&header_mmap()->tail,
+                                     std::memory_order_relaxed);
+  }
+
+  // Gets sequence number of the newest currently available record.
+  uint32_t GetNewestSequence() const { return GetNextSequence() - 1; }
+
+  // Copies the oldest available record with sequence at least |*sequence| to
+  // |record|.
+  //
+  // Returns false if there is no recent enough record available.
+  //
+  // Updates |*sequence| with the sequence number of the record returned. To
+  // get the following record, increment this number by one.
+  //
+  // This function synchronizes with two other operations:
+  //
+  //   (1) Load-Acquire of |tail|
+  //
+  //       Together with the store-release in Publish(), this load-acquire
+  //       ensures each store to a record in PutRecordInternal() happens-before
+  //       any corresponding load in GetRecordInternal().
+  //
+  //       i.e. the stores for the records with sequence numbers < |tail| have
+  //       completed from our perspective
+  //
+  //   (2) Acquire Fence between record access & final load of |head|
+  //
+  //       Together with the release fence in Reserve(), this ensures that if
+  //       GetRecordInternal() loads a value stored in some execution of
+  //       PutRecordInternal(), then the store of |head| in the Reserve() that
+  //       preceded it happens-before our final load of |head|.
+  //
+  //       i.e. if we read a record with sequence number >= |final_head| then
+  //       no later store to that record has completed from our perspective
+  bool Get(uint32_t* sequence /*inout*/, Record* record /*out*/) const {
+    for (;;) {
+      uint32_t tail = std::atomic_load_explicit(&header_mmap()->tail,
+                                                std::memory_order_acquire);
+      uint32_t head = std::atomic_load_explicit(&header_mmap()->head,
+                                                std::memory_order_relaxed);
+
+      if (tail - head > record_count())
+        continue;  // Concurrent modification; re-try.
+
+      if (*sequence - head > tail - head)
+        *sequence = head;  // Out of window, skip forward to first available.
+
+      if (*sequence == tail) return false;  // No new records available.
+
+      Geometry geometry =
+          CalculateGeometry(record_count(), record_size(), *sequence, tail);
+
+      // Compute address explicitly in case record_size > sizeof(Record).
+      RecordStorage* record_storage = record_mmap_reader(geometry.head_index);
+
+      GetRecordInternal(record_storage, record);
+
+      // NB: It is not sufficient to change this to a load-acquire of |head|.
+      std::atomic_thread_fence(std::memory_order_acquire);
+
+      uint32_t final_head = std::atomic_load_explicit(
+          &header_mmap()->head, std::memory_order_relaxed);
+
+      if (final_head - head > *sequence - head)
+        continue;  // Concurrent modification; re-try.
+
+      // Note: Combining the above 4 comparisons gives:
+      // 0 <= final_head - head <= sequence - head < tail - head <= record_count
+      //
+      // We can also write this as:
+      //   head <=* final_head <=* sequence <* tail <=* head + record_count
+      //
+      // where <* orders by difference from head: x <* y if x - head < y - head.
+      // This agrees with the order of sequence updates during "put" operations.
+      return true;
+    }
+  }
+
+  // Copies the newest available record with sequence at least |*sequence| to
+  // |record|.
+  //
+  // Returns false if there is no recent enough record available.
+  //
+  // Updates |*sequence| with the sequence number of the record returned. To
+  // get the following record, increment this number by one.
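+  //
+  // (If |*sequence| is already one past the newest available sequence, there
+  // is nothing new to read and false is returned without copying a record.)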
+  bool GetNewest(uint32_t* sequence, Record* record) const {
+    uint32_t newest_sequence = GetNewestSequence();
+    if (*sequence == newest_sequence + 1) return false;
+    *sequence = newest_sequence;
+    return Get(sequence, record);
+  }
+
+  uint32_t record_count() const { return record_count_internal(); }
+  uint32_t record_size() const { return record_size_internal(); }
+  static constexpr uint32_t mmap_alignment() { return alignof(Mmap); }
+
+ private:
+  struct Header {
+    // Record size for reading out of the ring. Writers always write the full
+    // length; readers may need to read a prefix of each record.
+    std::atomic<uint32_t> record_size;
+
+    // Number of records in the ring.
+    std::atomic<uint32_t> record_count;
+
+    // Readable region is [head % record_count, tail % record_count).
+    //
+    // The region in [tail % record_count, head % record_count) was either
+    // never populated or is being updated.
+    //
+    // These are sequence numbers, not indexes; indexes should be computed
+    // with a modulus.
+    //
+    // To ensure consistency:
+    //
+    //   (1) The writer advances |head| past any records it is about to
+    //       overwrite before writing to them, and advances |tail| after they
+    //       are written.
+    //   (2) Readers check |tail| before reading data and |head| after,
+    //       making sure to discard any data that was written to concurrently.
+    std::atomic<uint32_t> head;
+    std::atomic<uint32_t> tail;
+  };
+
+  // Store using the standard word size.
+  using StorageType = long;  // NOLINT
+
+  // Always require 8 byte alignment so that the same record sizes are legal on
+  // 32 and 64 bit builds.
+  static constexpr size_t kRecordAlignment = 8;
+  static_assert(kRecordAlignment % sizeof(StorageType) == 0,
+                "Bad record alignment");
+
+  struct RecordStorage {
+    // This is accessed with relaxed atomics to prevent data races on the
+    // contained data, which would be undefined behavior.
+    std::atomic<StorageType> data[sizeof(Record) / sizeof(StorageType)];
+  };
+
+  static_assert(sizeof(StorageType) *
+                        std::extent<decltype(RecordStorage::data)>() ==
+                    sizeof(Record),
+                "Record length must be a multiple of sizeof(StorageType)");
+
+  struct Geometry {
+    // Static geometry.
+    uint32_t record_count;
+    uint32_t record_size;
+
+    // Copy of atomic sequence counts.
+    uint32_t head;
+    uint32_t tail;
+
+    // First index of readable region.
+    uint32_t head_index;
+
+    // First index of writable region.
+    uint32_t tail_index;
+
+    // Number of records in readable region.
+    uint32_t count;
+
+    // Number of records in writable region.
+    uint32_t space;
+  };
+
+  // Mmap area layout.
+  //
+  // Readers should not index directly into |records| as this is not valid when
+  // dynamic record sizes are used; use record_mmap_reader() instead.
+  struct Mmap {
+    Header header;
+    RecordStorage records[];
+  };
+
+  static_assert(std::is_standard_layout<Mmap>::value,
+                "Mmap must be standard layout");
+  static_assert(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t),
+                "Lockless atomics contain extra state");
+  static_assert(sizeof(std::atomic<StorageType>) == sizeof(StorageType),
+                "Lockless atomics contain extra state");
+
+  explicit BroadcastRing(void* mmap) {
+    CHECK_EQ(0U, reinterpret_cast<uintptr_t>(mmap) % alignof(Mmap));
+    data_.mmap = reinterpret_cast<Mmap*>(mmap);
+  }
+
+  // Initializes the mmap area header for a new ring.
+  void InitializeHeader(uint32_t record_size, uint32_t record_count) {
+    constexpr uint32_t kInitialSequence = -256;  // Force an early wrap.
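+    // Note: -256 stored in a uint32_t is 0xFFFFFF00, so the counters wrap
+    // past zero after 256 puts. Sequence comparisons are all computed as
+    // unsigned differences, which remain correct across the wrap; starting
+    // near the wrap point exercises that arithmetic early and often.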
+    std::atomic_store_explicit(&header_mmap()->record_size, record_size,
+                               std::memory_order_relaxed);
+    std::atomic_store_explicit(&header_mmap()->record_count, record_count,
+                               std::memory_order_relaxed);
+    std::atomic_store_explicit(&header_mmap()->head, kInitialSequence,
+                               std::memory_order_relaxed);
+    std::atomic_store_explicit(&header_mmap()->tail, kInitialSequence,
+                               std::memory_order_relaxed);
+  }
+
+  // Validates ring geometry.
+  //
+  // Ring geometry is validated carefully on import and then cached. This
+  // allows us to avoid out-of-range accesses even if the parameters in the
+  // header are later changed.
+  bool ValidateGeometry(size_t mmap_size, uint32_t header_record_size,
+                        uint32_t header_record_count) {
+    set_record_size(header_record_size);
+    set_record_count(header_record_count);
+
+    if (record_size() != header_record_size) return false;
+    if (record_count() != header_record_count) return false;
+    if (record_count() < Traits::kMinRecordCount) return false;
+    if (record_size() < sizeof(Record)) return false;
+    if (record_size() % kRecordAlignment != 0) return false;
+    if (!IsPowerOfTwo(record_count())) return false;
+
+    size_t memory_size = record_count() * record_size();
+    if (memory_size / record_size() != record_count()) return false;
+    if (memory_size + sizeof(Header) < memory_size) return false;
+    if (memory_size + sizeof(Header) > mmap_size) return false;
+
+    return true;
+  }
+
+  // Copies a record into the ring.
+  //
+  // This is done with relaxed atomics because otherwise it is racy according
+  // to the C++ memory model. This is very low overhead once optimized.
+  static inline void PutRecordInternal(const Record* in, RecordStorage* out) {
+    StorageType data[sizeof(Record) / sizeof(StorageType)];
+    memcpy(data, in, sizeof(*in));
+    for (size_t i = 0; i < std::extent<decltype(data)>(); ++i) {
+      std::atomic_store_explicit(&out->data[i], data[i],
+                                 std::memory_order_relaxed);
+    }
+  }
+
+  // Copies a record out of the ring.
+  //
+  // This is done with relaxed atomics because otherwise it is racy according
+  // to the C++ memory model. This is very low overhead once optimized.
+  static inline void GetRecordInternal(RecordStorage* in, Record* out) {
+    StorageType data[sizeof(Record) / sizeof(StorageType)];
+    for (size_t i = 0; i < std::extent<decltype(data)>(); ++i) {
+      data[i] =
+          std::atomic_load_explicit(&in->data[i], std::memory_order_relaxed);
+    }
+    memcpy(out, &data, sizeof(*out));
+  }
+
+  // Converts a record's sequence number into a storage index.
+  static uint32_t SequenceToIndex(uint32_t sequence, uint32_t record_count) {
+    return sequence & (record_count - 1);
+  }
+
+  // Computes readable & writable ranges from ring parameters.
+  static Geometry CalculateGeometry(uint32_t record_count,
+                                    uint32_t record_size, uint32_t head,
+                                    uint32_t tail) {
+    Geometry geometry;
+    geometry.record_count = record_count;
+    geometry.record_size = record_size;
+    DCHECK_EQ(0U, geometry.record_size % kRecordAlignment);
+    geometry.head = head;
+    geometry.tail = tail;
+    geometry.head_index = SequenceToIndex(head, record_count);
+    geometry.tail_index = SequenceToIndex(tail, record_count);
+    geometry.count = geometry.tail - geometry.head;
+    DCHECK_LE(geometry.count, record_count);
+    geometry.space = geometry.record_count - geometry.count;
+    return geometry;
+  }
+
+  // Gets the current ring readable & writable regions.
+  //
+  // This is always safe from the writing thread since it is the only thread
+  // allowed to update the header.
+ Geometry GetGeometry() const { + return CalculateGeometry( + record_count(), record_size(), + std::atomic_load_explicit(&header_mmap()->head, + std::memory_order_relaxed), + std::atomic_load_explicit(&header_mmap()->tail, + std::memory_order_relaxed)); + } + + // Makes space for at least |reserve_count| records. + // + // There is nothing to prevent overwriting records that have concurrent + // readers. We do however ensure that this situation can be detected: the + // fence ensures the |head| update will be the first update seen by readers, + // and readers check this value after reading and discard data that may have + // been concurrently modified. + void Reserve(uint32_t reserve_count) { + Geometry geometry = GetGeometry(); + DCHECK_LE(reserve_count, Traits::kMaxReservedRecords); + uint32_t needed = + (geometry.space >= reserve_count ? 0 : reserve_count - geometry.space); + + std::atomic_store_explicit(&header_mmap()->head, geometry.head + needed, + std::memory_order_relaxed); + + // NB: It is not sufficient to change this to a store-release of |head|. + std::atomic_thread_fence(std::memory_order_release); + } + + // Makes |publish_count| records visible to readers. + // + // Space must have been reserved by a previous call to Reserve(). + void Publish(uint32_t publish_count) { + Geometry geometry = GetGeometry(); + DCHECK_LE(publish_count, geometry.space); + std::atomic_store_explicit(&header_mmap()->tail, + geometry.tail + publish_count, + std::memory_order_release); + } + + // Helpers to compute addresses in mmap area. + Mmap* mmap() const { return data_.mmap; } + Header* header_mmap() const { return &data_.mmap->header; } + RecordStorage* record_mmap_writer(uint32_t index) const { + DCHECK_EQ(sizeof(Record), record_size()); + return &data_.mmap->records[index]; + } + RecordStorage* record_mmap_reader(uint32_t index) const { + if (Traits::kUseStaticRecordSize) { + return &data_.mmap->records[index]; + } else { + // Calculate the location of a record in the ring without assuming that + // sizeof(Record) == record_size. + return reinterpret_cast<RecordStorage*>( + reinterpret_cast<char*>(data_.mmap->records) + index * record_size()); + } + } + + // The following horrifying template gunk enables us to store just the mmap + // base pointer for compile-time statically sized rings. Dynamically sized + // rings also store the validated copy of the record size & count. + // + // This boils down to: use a compile time constant if available, and otherwise + // load the value that was validated on import from a member variable. 
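+  //
+  // For example, with hypothetical fully static traits such as:
+  //
+  //   struct FixedTraits : public DefaultRingTraits {
+  //     static constexpr bool kUseStaticRecordSize = true;
+  //     static constexpr uint32_t kStaticRecordCount = 8;
+  //   };
+  //
+  // record_size_internal() and record_count_internal() reduce to compile-time
+  // constants, and data_ below holds nothing but the mmap base pointer.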
+ template <typename T = Traits> + typename std::enable_if<T::kUseStaticRecordSize, uint32_t>::type + record_size_internal() const { + return sizeof(Record); + } + + template <typename T = Traits> + typename std::enable_if<!T::kUseStaticRecordSize, uint32_t>::type + record_size_internal() const { + return data_.record_size; + } + + template <typename T = Traits> + typename std::enable_if<T::kUseStaticRecordSize, void>::type set_record_size( + uint32_t /*record_size*/) {} + + template <typename T = Traits> + typename std::enable_if<!T::kUseStaticRecordSize, void>::type set_record_size( + uint32_t record_size) { + data_.record_size = record_size; + } + + template <typename T = Traits> + typename std::enable_if<T::kUseStaticRecordCount, uint32_t>::type + record_count_internal() const { + return Traits::kStaticRecordCount; + } + + template <typename T = Traits> + typename std::enable_if<!T::kUseStaticRecordCount, uint32_t>::type + record_count_internal() const { + return data_.record_count; + } + + template <typename T = Traits> + typename std::enable_if<T::kUseStaticRecordCount, void>::type + set_record_count(uint32_t /*record_count*/) const {} + + template <typename T = Traits> + typename std::enable_if<!T::kUseStaticRecordCount, void>::type + set_record_count(uint32_t record_count) { + data_.record_count = record_count; + } + + // Data we need to store for statically sized rings. + struct DataStaticSize { + Mmap* mmap = nullptr; + }; + + // Data we need to store for dynamically sized rings. + struct DataDynamicSize { + Mmap* mmap = nullptr; + + // These are cached to make sure misbehaving writers cannot cause + // out-of-bounds memory accesses by updating the values in the mmap header. + uint32_t record_size = 0; + uint32_t record_count = 0; + }; + + using DataStaticOrDynamic = + typename std::conditional<Traits::kIsStaticSize, DataStaticSize, + DataDynamicSize>::type; + + DataStaticOrDynamic data_; +}; + +} // namespace dvr +} // namespace android + +#endif // ANDROID_DVR_BROADCAST_RING_H_ diff --git a/services/surfaceflinger/DisplayHardware/FramebufferSurface.cpp b/services/surfaceflinger/DisplayHardware/FramebufferSurface.cpp index 1b598f856a..5b869e12c7 100644 --- a/services/surfaceflinger/DisplayHardware/FramebufferSurface.cpp +++ b/services/surfaceflinger/DisplayHardware/FramebufferSurface.cpp @@ -32,7 +32,6 @@ #include <hardware/hardware.h> #include <gui/BufferItem.h> #include <gui/BufferQueue.h> -#include <gui/GraphicBufferAlloc.h> #include <gui/Surface.h> #include <ui/GraphicBuffer.h> diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp index 027fae6ccb..5e5efb7f57 100644 --- a/services/surfaceflinger/Layer.cpp +++ b/services/surfaceflinger/Layer.cpp @@ -167,7 +167,7 @@ void Layer::onFirstRef() { // Creates a custom BufferQueue for SurfaceFlingerConsumer to use sp<IGraphicBufferProducer> producer; sp<IGraphicBufferConsumer> consumer; - BufferQueue::createBufferQueue(&producer, &consumer, nullptr, true); + BufferQueue::createBufferQueue(&producer, &consumer, true); mProducer = new MonitoredProducer(producer, mFlinger, this); mSurfaceFlingerConsumer = new SurfaceFlingerConsumer(consumer, mTextureName, this); mSurfaceFlingerConsumer->setConsumerUsageBits(getEffectiveUsage(0)); diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp index c72f8e0db6..09a4124396 100644 --- a/services/surfaceflinger/SurfaceFlinger.cpp +++ b/services/surfaceflinger/SurfaceFlinger.cpp @@ -45,7 +45,6 @@ #include <gui/GuiConfig.h> 
#include <gui/IDisplayEventConnection.h> #include <gui/Surface.h> -#include <gui/GraphicBufferAlloc.h> #include <ui/GraphicBufferAllocator.h> #include <ui/PixelFormat.h> @@ -340,12 +339,6 @@ sp<IBinder> SurfaceFlinger::getBuiltInDisplay(int32_t id) { return mBuiltinDisplays[id]; } -sp<IGraphicBufferAlloc> SurfaceFlinger::createGraphicBufferAlloc() -{ - sp<GraphicBufferAlloc> gba(new GraphicBufferAlloc()); - return gba; -} - void SurfaceFlinger::bootFinished() { if (mStartBootAnimThread->join() != NO_ERROR) { @@ -1171,8 +1164,7 @@ void SurfaceFlinger::onHotplugReceived(int32_t disp, bool connected) { sp<IGraphicBufferProducer> producer; sp<IGraphicBufferConsumer> consumer; - BufferQueue::createBufferQueue(&producer, &consumer, - new GraphicBufferAlloc()); + BufferQueue::createBufferQueue(&producer, &consumer); sp<FramebufferSurface> fbs = new FramebufferSurface(*mHwc, DisplayDevice::DISPLAY_PRIMARY, consumer); @@ -1994,8 +1986,7 @@ void SurfaceFlinger::handleTransactionLocked(uint32_t transactionFlags) sp<IGraphicBufferProducer> producer; sp<IGraphicBufferProducer> bqProducer; sp<IGraphicBufferConsumer> bqConsumer; - BufferQueue::createBufferQueue(&bqProducer, &bqConsumer, - new GraphicBufferAlloc()); + BufferQueue::createBufferQueue(&bqProducer, &bqConsumer); int32_t hwcId = -1; if (state.isVirtualDisplay()) { diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h index afb64ed304..b17a5f22d0 100644 --- a/services/surfaceflinger/SurfaceFlinger.h +++ b/services/surfaceflinger/SurfaceFlinger.h @@ -76,7 +76,6 @@ namespace android { class Client; class DisplayEventConnection; class EventThread; -class IGraphicBufferAlloc; class Layer; class LayerDim; class Surface; @@ -257,7 +256,6 @@ private: */ virtual sp<ISurfaceComposerClient> createConnection(); virtual sp<ISurfaceComposerClient> createScopedConnection(const sp<IGraphicBufferProducer>& gbp); - virtual sp<IGraphicBufferAlloc> createGraphicBufferAlloc(); virtual sp<IBinder> createDisplay(const String8& displayName, bool secure); virtual void destroyDisplay(const sp<IBinder>& display); virtual sp<IBinder> getBuiltInDisplay(int32_t id); diff --git a/services/surfaceflinger/SurfaceFlinger_hwc1.cpp b/services/surfaceflinger/SurfaceFlinger_hwc1.cpp index 28dac11f46..59c8b76331 100644 --- a/services/surfaceflinger/SurfaceFlinger_hwc1.cpp +++ b/services/surfaceflinger/SurfaceFlinger_hwc1.cpp @@ -42,7 +42,6 @@ #include <gui/GuiConfig.h> #include <gui/IDisplayEventConnection.h> #include <gui/Surface.h> -#include <gui/GraphicBufferAlloc.h> #include <ui/GraphicBufferAllocator.h> #include <ui/HdrCapabilities.h> @@ -319,12 +318,6 @@ sp<IBinder> SurfaceFlinger::getBuiltInDisplay(int32_t id) { return mBuiltinDisplays[id]; } -sp<IGraphicBufferAlloc> SurfaceFlinger::createGraphicBufferAlloc() -{ - sp<GraphicBufferAlloc> gba(new GraphicBufferAlloc()); - return gba; -} - void SurfaceFlinger::bootFinished() { if (mStartBootAnimThread->join() != NO_ERROR) { @@ -564,8 +557,7 @@ void SurfaceFlinger::init() { sp<IGraphicBufferProducer> producer; sp<IGraphicBufferConsumer> consumer; - BufferQueue::createBufferQueue(&producer, &consumer, - new GraphicBufferAlloc()); + BufferQueue::createBufferQueue(&producer, &consumer); sp<FramebufferSurface> fbs = new FramebufferSurface(*mHwc, i, consumer); @@ -1705,8 +1697,7 @@ void SurfaceFlinger::handleTransactionLocked(uint32_t transactionFlags) sp<IGraphicBufferProducer> producer; sp<IGraphicBufferProducer> bqProducer; sp<IGraphicBufferConsumer> bqConsumer; - 
BufferQueue::createBufferQueue(&bqProducer, &bqConsumer,
-                new GraphicBufferAlloc());
+        BufferQueue::createBufferQueue(&bqProducer, &bqConsumer);
 
         int32_t hwcDisplayId = -1;
         if (state.isVirtualDisplay()) {
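For reference, a minimal sketch of the call pattern after this change. The function and variable names here are illustrative only; the bool is the consumerIsSurfaceFlinger flag, which moves from fourth to third position now that the allocator parameter is gone (compare the Layer.cpp hunk above):

#include <gui/BufferQueue.h>

using namespace android;

static void CreateQueues() {
    sp<IGraphicBufferProducer> producer;
    sp<IGraphicBufferConsumer> consumer;

    // Ordinary clients: two-argument form, no allocator.
    BufferQueue::createBufferQueue(&producer, &consumer);

    // SurfaceFlinger-owned consumers: the flag that used to follow the
    // allocator is now passed directly as the third argument.
    BufferQueue::createBufferQueue(&producer, &consumer, true);
}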