54 files changed, 1862 insertions, 1556 deletions
diff --git a/cmds/atrace/atrace.rc b/cmds/atrace/atrace.rc index d950b7c287..d6ca0bf01d 100644 --- a/cmds/atrace/atrace.rc +++ b/cmds/atrace/atrace.rc @@ -91,7 +91,12 @@ on late-init chmod 0666 /sys/kernel/tracing/events/sync/enable chmod 0666 /sys/kernel/debug/tracing/events/fence/enable chmod 0666 /sys/kernel/tracing/events/fence/enable - + chmod 0666 /sys/kernel/debug/tracing/events/kmem/rss_stat/enable + chmod 0666 /sys/kernel/tracing/events/kmem/rss_stat/enable + chmod 0666 /sys/kernel/debug/tracing/events/kmem/ion_heap_grow/enable + chmod 0666 /sys/kernel/tracing/events/kmem/ion_heap_grow/enable + chmod 0666 /sys/kernel/debug/tracing/events/kmem/ion_heap_shrink/enable + chmod 0666 /sys/kernel/tracing/events/kmem/ion_heap_shrink/enable # disk chmod 0666 /sys/kernel/tracing/events/f2fs/f2fs_get_data_block/enable diff --git a/cmds/dumpstate/dumpstate.cpp b/cmds/dumpstate/dumpstate.cpp index 0b9bca013c..904c0e924e 100644 --- a/cmds/dumpstate/dumpstate.cpp +++ b/cmds/dumpstate/dumpstate.cpp @@ -1374,6 +1374,12 @@ static void dumpstate() { printf("========================================================\n"); printf("== dumpstate: done (id %d)\n", ds.id_); printf("========================================================\n"); + + printf("========================================================\n"); + printf("== Obtaining statsd metadata\n"); + printf("========================================================\n"); + // This differs from the usual dumpsys stats, which is the stats report data. + RunDumpsys("STATSDSTATS", {"stats", "--metadata"}); } /* Dumps state for the default case. Returns true if everything went fine. */ diff --git a/cmds/installd/Android.bp b/cmds/installd/Android.bp index 9d0d8ba8b7..2e9701f8b0 100644 --- a/cmds/installd/Android.bp +++ b/cmds/installd/Android.bp @@ -14,6 +14,7 @@ cc_defaults { "CacheItem.cpp", "CacheTracker.cpp", "InstalldNativeService.cpp", + "QuotaUtils.cpp", "dexopt.cpp", "globals.cpp", "utils.cpp", @@ -33,6 +34,21 @@ cc_defaults { "libutils", ], + product_variables: { + arc: { + exclude_srcs: [ + "QuotaUtils.cpp", + ], + static_libs: [ + "libarcdiskquota", + "arc_services_aidl", + ], + cflags: [ + "-DUSE_ARC", + ], + }, + }, + clang: true, tidy: true, @@ -59,6 +75,26 @@ cc_library_static { aidl: { export_aidl_headers: true, }, + + product_variables: { + arc: { + exclude_srcs: [ + "QuotaUtils.cpp", + ], + static_libs: [ + "libarcdiskquota", + "arc_services_aidl", + ], + cflags: [ + "-DUSE_ARC", + ], + }, + }, +} + +cc_library_headers { + name: "libinstalld_headers", + export_include_dirs: ["."], } // @@ -73,6 +109,21 @@ cc_binary { static_libs: ["libdiskusage"], init_rc: ["installd.rc"], + + product_variables: { + arc: { + exclude_srcs: [ + "QuotaUtils.cpp", + ], + static_libs: [ + "libarcdiskquota", + "arc_services_aidl", + ], + cflags: [ + "-DUSE_ARC", + ], + }, + }, } // OTA chroot tool diff --git a/cmds/installd/CacheTracker.cpp b/cmds/installd/CacheTracker.cpp index a7242c35cf..8b868fb584 100644 --- a/cmds/installd/CacheTracker.cpp +++ b/cmds/installd/CacheTracker.cpp @@ -19,13 +19,13 @@ #include "CacheTracker.h" #include <fts.h> -#include <sys/quota.h> #include <sys/xattr.h> #include <utils/Trace.h> #include <android-base/logging.h> #include <android-base/stringprintf.h> +#include "QuotaUtils.h" #include "utils.h" using android::base::StringPrintf; @@ -33,9 +33,13 @@ using android::base::StringPrintf; namespace android { namespace installd { -CacheTracker::CacheTracker(userid_t userId, appid_t appId, const std::string& quotaDevice) : - 
cacheUsed(0), cacheQuota(0), mUserId(userId), mAppId(appId), mQuotaDevice(quotaDevice), - mItemsLoaded(false) { +CacheTracker::CacheTracker(userid_t userId, appid_t appId, const std::string& uuid) + : cacheUsed(0), + cacheQuota(0), + mUserId(userId), + mAppId(appId), + mItemsLoaded(false), + mUuid(uuid) { } CacheTracker::~CacheTracker() { @@ -72,26 +76,18 @@ void CacheTracker::loadStats() { bool CacheTracker::loadQuotaStats() { int cacheGid = multiuser_get_cache_gid(mUserId, mAppId); int extCacheGid = multiuser_get_ext_cache_gid(mUserId, mAppId); - if (!mQuotaDevice.empty() && cacheGid != -1 && extCacheGid != -1) { - struct dqblk dq; - if (quotactl(QCMD(Q_GETQUOTA, GRPQUOTA), mQuotaDevice.c_str(), cacheGid, - reinterpret_cast<char*>(&dq)) != 0) { - if (errno != ESRCH) { - PLOG(ERROR) << "Failed to quotactl " << mQuotaDevice << " for GID " << cacheGid; - } - return false; + if (IsQuotaSupported(mUuid) && cacheGid != -1 && extCacheGid != -1) { + int64_t space; + if ((space = GetOccupiedSpaceForGid(mUuid, cacheGid)) != -1) { + cacheUsed += space; } else { - cacheUsed += dq.dqb_curspace; + return false; } - if (quotactl(QCMD(Q_GETQUOTA, GRPQUOTA), mQuotaDevice.c_str(), extCacheGid, - reinterpret_cast<char*>(&dq)) != 0) { - if (errno != ESRCH) { - PLOG(ERROR) << "Failed to quotactl " << mQuotaDevice << " for GID " << cacheGid; - } - return false; + if ((space = GetOccupiedSpaceForGid(mUuid, extCacheGid)) != -1) { + cacheUsed += space; } else { - cacheUsed += dq.dqb_curspace; + return false; } return true; } else { diff --git a/cmds/installd/CacheTracker.h b/cmds/installd/CacheTracker.h index 44359b4970..b0527e7b3f 100644 --- a/cmds/installd/CacheTracker.h +++ b/cmds/installd/CacheTracker.h @@ -39,7 +39,7 @@ namespace installd { */ class CacheTracker { public: - CacheTracker(userid_t userId, appid_t appId, const std::string& quotaDevice); + CacheTracker(userid_t userId, appid_t appId, const std::string& uuid); ~CacheTracker(); std::string toString(); @@ -61,8 +61,8 @@ public: private: userid_t mUserId; appid_t mAppId; - std::string mQuotaDevice; bool mItemsLoaded; + const std::string& mUuid; std::vector<std::string> mDataPaths; diff --git a/cmds/installd/InstalldNativeService.cpp b/cmds/installd/InstalldNativeService.cpp index a475fcc4b8..81055d854e 100644 --- a/cmds/installd/InstalldNativeService.cpp +++ b/cmds/installd/InstalldNativeService.cpp @@ -31,7 +31,6 @@ #include <sys/file.h> #include <sys/ioctl.h> #include <sys/mman.h> -#include <sys/quota.h> #include <sys/resource.h> #include <sys/stat.h> #include <sys/statvfs.h> @@ -64,6 +63,7 @@ #include "CacheTracker.h" #include "MatchExtensionGen.h" +#include "QuotaUtils.h" #ifndef LOG_TAG #define LOG_TAG "installd" @@ -257,11 +257,6 @@ status_t InstalldNativeService::dump(int fd, const Vector<String16> & /* args */ for (const auto& n : mStorageMounts) { out << " " << n.first << " = " << n.second << endl; } - - out << endl << "Quota reverse mounts:" << endl; - for (const auto& n : mQuotaReverseMounts) { - out << " " << n.first << " = " << n.second << endl; - } } { @@ -988,9 +983,9 @@ binder::Status InstalldNativeService::freeCache(const std::unique_ptr<std::strin CHECK_ARGUMENT_UUID(uuid); std::lock_guard<std::recursive_mutex> lock(mLock); + auto uuidString = uuid ? *uuid : ""; const char* uuid_ = uuid ? 
uuid->c_str() : nullptr; auto data_path = create_data_path(uuid_); - auto device = findQuotaDeviceForUuid(uuid); auto noop = (flags & FLAG_FREE_CACHE_NOOP); int64_t free = data_disk_free(data_path); @@ -1037,7 +1032,7 @@ binder::Status InstalldNativeService::freeCache(const std::unique_ptr<std::strin search->second->addDataPath(p->fts_path); } else { auto tracker = std::shared_ptr<CacheTracker>(new CacheTracker( - multiuser_get_user_id(uid), multiuser_get_app_id(uid), device)); + multiuser_get_user_id(uid), multiuser_get_app_id(uid), uuidString)); tracker->addDataPath(p->fts_path); { std::lock_guard<std::recursive_mutex> lock(mQuotasLock); @@ -1202,53 +1197,26 @@ static std::string toString(std::vector<int64_t> values) { } #endif -static void collectQuotaStats(const std::string& device, int32_t userId, +static void collectQuotaStats(const std::string& uuid, int32_t userId, int32_t appId, struct stats* stats, struct stats* extStats) { - if (device.empty()) return; - - struct dqblk dq; - + int64_t space; if (stats != nullptr) { uid_t uid = multiuser_get_uid(userId, appId); - if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), device.c_str(), uid, - reinterpret_cast<char*>(&dq)) != 0) { - if (errno != ESRCH) { - PLOG(ERROR) << "Failed to quotactl " << device << " for UID " << uid; - } - } else { -#if MEASURE_DEBUG - LOG(DEBUG) << "quotactl() for UID " << uid << " " << dq.dqb_curspace; -#endif - stats->dataSize += dq.dqb_curspace; + if ((space = GetOccupiedSpaceForUid(uuid, uid)) != -1) { + stats->dataSize += space; } int cacheGid = multiuser_get_cache_gid(userId, appId); if (cacheGid != -1) { - if (quotactl(QCMD(Q_GETQUOTA, GRPQUOTA), device.c_str(), cacheGid, - reinterpret_cast<char*>(&dq)) != 0) { - if (errno != ESRCH) { - PLOG(ERROR) << "Failed to quotactl " << device << " for GID " << cacheGid; - } - } else { -#if MEASURE_DEBUG - LOG(DEBUG) << "quotactl() for GID " << cacheGid << " " << dq.dqb_curspace; -#endif - stats->cacheSize += dq.dqb_curspace; + if ((space = GetOccupiedSpaceForGid(uuid, cacheGid)) != -1) { + stats->cacheSize += space; } } int sharedGid = multiuser_get_shared_gid(0, appId); if (sharedGid != -1) { - if (quotactl(QCMD(Q_GETQUOTA, GRPQUOTA), device.c_str(), sharedGid, - reinterpret_cast<char*>(&dq)) != 0) { - if (errno != ESRCH) { - PLOG(ERROR) << "Failed to quotactl " << device << " for GID " << sharedGid; - } - } else { -#if MEASURE_DEBUG - LOG(DEBUG) << "quotactl() for GID " << sharedGid << " " << dq.dqb_curspace; -#endif - stats->codeSize += dq.dqb_curspace; + if ((space = GetOccupiedSpaceForGid(uuid, sharedGid)) != -1) { + stats->codeSize += space; } } } @@ -1256,32 +1224,16 @@ static void collectQuotaStats(const std::string& device, int32_t userId, if (extStats != nullptr) { int extGid = multiuser_get_ext_gid(userId, appId); if (extGid != -1) { - if (quotactl(QCMD(Q_GETQUOTA, GRPQUOTA), device.c_str(), extGid, - reinterpret_cast<char*>(&dq)) != 0) { - if (errno != ESRCH) { - PLOG(ERROR) << "Failed to quotactl " << device << " for GID " << extGid; - } - } else { -#if MEASURE_DEBUG - LOG(DEBUG) << "quotactl() for GID " << extGid << " " << dq.dqb_curspace; -#endif - extStats->dataSize += dq.dqb_curspace; + if ((space = GetOccupiedSpaceForGid(uuid, extGid)) != -1) { + extStats->dataSize += space; } } int extCacheGid = multiuser_get_ext_cache_gid(userId, appId); if (extCacheGid != -1) { - if (quotactl(QCMD(Q_GETQUOTA, GRPQUOTA), device.c_str(), extCacheGid, - reinterpret_cast<char*>(&dq)) != 0) { - if (errno != ESRCH) { - PLOG(ERROR) << "Failed to quotactl " << device << " for 
GID " << extCacheGid; - } - } else { -#if MEASURE_DEBUG - LOG(DEBUG) << "quotactl() for GID " << extCacheGid << " " << dq.dqb_curspace; -#endif - extStats->dataSize += dq.dqb_curspace; - extStats->cacheSize += dq.dqb_curspace; + if ((space = GetOccupiedSpaceForGid(uuid, extCacheGid)) != -1) { + extStats->dataSize += space; + extStats->cacheSize += space; } } } @@ -1445,10 +1397,10 @@ binder::Status InstalldNativeService::getAppSize(const std::unique_ptr<std::stri memset(&stats, 0, sizeof(stats)); memset(&extStats, 0, sizeof(extStats)); + auto uuidString = uuid ? *uuid : ""; const char* uuid_ = uuid ? uuid->c_str() : nullptr; - auto device = findQuotaDeviceForUuid(uuid); - if (device.empty()) { + if (!IsQuotaSupported(uuidString)) { flags &= ~FLAG_USE_QUOTA; } @@ -1468,7 +1420,7 @@ binder::Status InstalldNativeService::getAppSize(const std::unique_ptr<std::stri ATRACE_END(); ATRACE_BEGIN("quota"); - collectQuotaStats(device, userId, appId, &stats, &extStats); + collectQuotaStats(uuidString, userId, appId, &stats, &extStats); ATRACE_END(); } else { ATRACE_BEGIN("code"); @@ -1553,27 +1505,19 @@ binder::Status InstalldNativeService::getUserSize(const std::unique_ptr<std::str memset(&stats, 0, sizeof(stats)); memset(&extStats, 0, sizeof(extStats)); + auto uuidString = uuid ? *uuid : ""; const char* uuid_ = uuid ? uuid->c_str() : nullptr; - auto device = findQuotaDeviceForUuid(uuid); - if (device.empty()) { + if (!IsQuotaSupported(uuidString)) { flags &= ~FLAG_USE_QUOTA; } if (flags & FLAG_USE_QUOTA) { - struct dqblk dq; + int64_t space; ATRACE_BEGIN("obb"); - if (quotactl(QCMD(Q_GETQUOTA, GRPQUOTA), device.c_str(), AID_MEDIA_OBB, - reinterpret_cast<char*>(&dq)) != 0) { - if (errno != ESRCH) { - PLOG(ERROR) << "Failed to quotactl " << device << " for GID " << AID_MEDIA_OBB; - } - } else { -#if MEASURE_DEBUG - LOG(DEBUG) << "quotactl() for GID " << AID_MEDIA_OBB << " " << dq.dqb_curspace; -#endif - extStats.codeSize += dq.dqb_curspace; + if ((space = GetOccupiedSpaceForGid(uuidString, AID_MEDIA_OBB)) != -1) { + extStats.codeSize += space; } ATRACE_END(); @@ -1599,16 +1543,8 @@ binder::Status InstalldNativeService::getUserSize(const std::unique_ptr<std::str ATRACE_BEGIN("external"); uid_t uid = multiuser_get_uid(userId, AID_MEDIA_RW); - if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), device.c_str(), uid, - reinterpret_cast<char*>(&dq)) != 0) { - if (errno != ESRCH) { - PLOG(ERROR) << "Failed to quotactl " << device << " for UID " << uid; - } - } else { -#if MEASURE_DEBUG - LOG(DEBUG) << "quotactl() for UID " << uid << " " << dq.dqb_curspace; -#endif - extStats.dataSize += dq.dqb_curspace; + if ((space = GetOccupiedSpaceForUid(uuidString, uid)) != -1) { + extStats.dataSize += space; } ATRACE_END(); @@ -1625,7 +1561,7 @@ binder::Status InstalldNativeService::getUserSize(const std::unique_ptr<std::str int64_t dataSize = extStats.dataSize; for (auto appId : appIds) { if (appId >= AID_APP_START) { - collectQuotaStats(device, userId, appId, &stats, &extStats); + collectQuotaStats(uuidString, userId, appId, &stats, &extStats); #if MEASURE_DEBUG // Sleep to make sure we don't lose logs @@ -1707,6 +1643,7 @@ binder::Status InstalldNativeService::getExternalSize(const std::unique_ptr<std: LOG(INFO) << "Measuring external " << userId; #endif + auto uuidString = uuid ? *uuid : ""; const char* uuid_ = uuid ? 
uuid->c_str() : nullptr; int64_t totalSize = 0; @@ -1716,58 +1653,33 @@ binder::Status InstalldNativeService::getExternalSize(const std::unique_ptr<std: int64_t appSize = 0; int64_t obbSize = 0; - auto device = findQuotaDeviceForUuid(uuid); - if (device.empty()) { + if (!IsQuotaSupported(uuidString)) { flags &= ~FLAG_USE_QUOTA; } if (flags & FLAG_USE_QUOTA) { - struct dqblk dq; + int64_t space; ATRACE_BEGIN("quota"); uid_t uid = multiuser_get_uid(userId, AID_MEDIA_RW); - if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), device.c_str(), uid, - reinterpret_cast<char*>(&dq)) != 0) { - if (errno != ESRCH) { - PLOG(ERROR) << "Failed to quotactl " << device << " for UID " << uid; - } - } else { -#if MEASURE_DEBUG - LOG(DEBUG) << "quotactl() for UID " << uid << " " << dq.dqb_curspace; -#endif - totalSize = dq.dqb_curspace; + if ((space = GetOccupiedSpaceForUid(uuidString, uid)) != -1) { + totalSize = space; } gid_t audioGid = multiuser_get_uid(userId, AID_MEDIA_AUDIO); - if (quotactl(QCMD(Q_GETQUOTA, GRPQUOTA), device.c_str(), audioGid, - reinterpret_cast<char*>(&dq)) == 0) { -#if MEASURE_DEBUG - LOG(DEBUG) << "quotactl() for GID " << audioGid << " " << dq.dqb_curspace; -#endif - audioSize = dq.dqb_curspace; + if ((space = GetOccupiedSpaceForGid(uuidString, audioGid)) != -1) { + audioSize = space; } gid_t videoGid = multiuser_get_uid(userId, AID_MEDIA_VIDEO); - if (quotactl(QCMD(Q_GETQUOTA, GRPQUOTA), device.c_str(), videoGid, - reinterpret_cast<char*>(&dq)) == 0) { -#if MEASURE_DEBUG - LOG(DEBUG) << "quotactl() for GID " << videoGid << " " << dq.dqb_curspace; -#endif - videoSize = dq.dqb_curspace; + if ((space = GetOccupiedSpaceForGid(uuidString, videoGid)) != -1) { + videoSize = space; } gid_t imageGid = multiuser_get_uid(userId, AID_MEDIA_IMAGE); - if (quotactl(QCMD(Q_GETQUOTA, GRPQUOTA), device.c_str(), imageGid, - reinterpret_cast<char*>(&dq)) == 0) { -#if MEASURE_DEBUG - LOG(DEBUG) << "quotactl() for GID " << imageGid << " " << dq.dqb_curspace; -#endif - imageSize = dq.dqb_curspace; + if ((space = GetOccupiedSpaceForGid(uuidString, imageGid)) != -1) { + imageSize = space; } - if (quotactl(QCMD(Q_GETQUOTA, GRPQUOTA), device.c_str(), AID_MEDIA_OBB, - reinterpret_cast<char*>(&dq)) == 0) { -#if MEASURE_DEBUG - LOG(DEBUG) << "quotactl() for GID " << AID_MEDIA_OBB << " " << dq.dqb_curspace; -#endif - obbSize = dq.dqb_curspace; + if ((space = GetOccupiedSpaceForGid(uuidString, AID_MEDIA_OBB)) != -1) { + obbSize = space; } ATRACE_END(); @@ -1776,7 +1688,7 @@ binder::Status InstalldNativeService::getExternalSize(const std::unique_ptr<std: memset(&extStats, 0, sizeof(extStats)); for (auto appId : appIds) { if (appId >= AID_APP_START) { - collectQuotaStats(device, userId, appId, nullptr, &extStats); + collectQuotaStats(uuidString, userId, appId, nullptr, &extStats); } } appSize = extStats.dataSize; @@ -2564,7 +2476,12 @@ binder::Status InstalldNativeService::invalidateMounts() { std::lock_guard<std::recursive_mutex> lock(mMountsLock); mStorageMounts.clear(); - mQuotaReverseMounts.clear(); + +#if !BYPASS_QUOTA + if (!InvalidateQuotaMounts()) { + return error("Failed to read mounts"); + } +#endif std::ifstream in("/proc/mounts"); if (!in.is_open()) { @@ -2585,17 +2502,6 @@ binder::Status InstalldNativeService::invalidateMounts() { mStorageMounts[source] = target; } #endif - -#if !BYPASS_QUOTA - if (source.compare(0, 11, "/dev/block/") == 0) { - struct dqblk dq; - if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), source.c_str(), 0, - reinterpret_cast<char*>(&dq)) == 0) { - LOG(DEBUG) << "Found quota mount " << source << " 
at " << target; - mQuotaReverseMounts[target] = source; - } - } -#endif } return ok(); } @@ -2613,16 +2519,10 @@ std::string InstalldNativeService::findDataMediaPath( return StringPrintf("%s/%u", resolved.c_str(), userid); } -std::string InstalldNativeService::findQuotaDeviceForUuid( - const std::unique_ptr<std::string>& uuid) { - std::lock_guard<std::recursive_mutex> lock(mMountsLock); - auto path = create_data_path(uuid ? uuid->c_str() : nullptr); - return mQuotaReverseMounts[path]; -} - binder::Status InstalldNativeService::isQuotaSupported( - const std::unique_ptr<std::string>& volumeUuid, bool* _aidl_return) { - *_aidl_return = !findQuotaDeviceForUuid(volumeUuid).empty(); + const std::unique_ptr<std::string>& uuid, bool* _aidl_return) { + auto uuidString = uuid ? *uuid : ""; + *_aidl_return = IsQuotaSupported(uuidString); return ok(); } diff --git a/cmds/installd/InstalldNativeService.h b/cmds/installd/InstalldNativeService.h index cebd3f90d3..367f2c1547 100644 --- a/cmds/installd/InstalldNativeService.h +++ b/cmds/installd/InstalldNativeService.h @@ -150,14 +150,11 @@ private: /* Map of all storage mounts from source to target */ std::unordered_map<std::string, std::string> mStorageMounts; - /* Map of all quota mounts from target to source */ - std::unordered_map<std::string, std::string> mQuotaReverseMounts; /* Map from UID to cache quota size */ std::unordered_map<uid_t, int64_t> mCacheQuotas; std::string findDataMediaPath(const std::unique_ptr<std::string>& uuid, userid_t userid); - std::string findQuotaDeviceForUuid(const std::unique_ptr<std::string>& uuid); }; } // namespace installd diff --git a/cmds/installd/QuotaUtils.cpp b/cmds/installd/QuotaUtils.cpp new file mode 100644 index 0000000000..b238dd36e3 --- /dev/null +++ b/cmds/installd/QuotaUtils.cpp @@ -0,0 +1,122 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "QuotaUtils.h" + +#include <fstream> +#include <unordered_map> + +#include <sys/quota.h> + +#include <android-base/logging.h> + +#include "utils.h" + +namespace android { +namespace installd { + +namespace { + +std::recursive_mutex mMountsLock; + +/* Map of all quota mounts from target to source */ +std::unordered_map<std::string, std::string> mQuotaReverseMounts; + +std::string& FindQuotaDeviceForUuid(const std::string& uuid) { + std::lock_guard<std::recursive_mutex> lock(mMountsLock); + auto path = create_data_path(uuid.empty() ? 
nullptr : uuid.c_str()); + return mQuotaReverseMounts[path]; +} + +} // namespace + +bool InvalidateQuotaMounts() { + std::lock_guard<std::recursive_mutex> lock(mMountsLock); + + mQuotaReverseMounts.clear(); + + std::ifstream in("/proc/mounts"); + if (!in.is_open()) { + return false; + } + + std::string source; + std::string target; + std::string ignored; + while (!in.eof()) { + std::getline(in, source, ' '); + std::getline(in, target, ' '); + std::getline(in, ignored); + + if (source.compare(0, 11, "/dev/block/") == 0) { + struct dqblk dq; + if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), source.c_str(), 0, + reinterpret_cast<char*>(&dq)) == 0) { + LOG(DEBUG) << "Found quota mount " << source << " at " << target; + mQuotaReverseMounts[target] = source; + } + } + } + return true; +} + +bool IsQuotaSupported(const std::string& uuid) { + return !FindQuotaDeviceForUuid(uuid).empty(); +} + +int64_t GetOccupiedSpaceForUid(const std::string& uuid, uid_t uid) { + const std::string device = FindQuotaDeviceForUuid(uuid); + if (device == "") { + return -1; + } + struct dqblk dq; + if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), device.c_str(), uid, + reinterpret_cast<char*>(&dq)) != 0) { + if (errno != ESRCH) { + PLOG(ERROR) << "Failed to quotactl " << device << " for UID " << uid; + } + return -1; + } else { +#if MEASURE_DEBUG + LOG(DEBUG) << "quotactl() for UID " << uid << " " << dq.dqb_curspace; +#endif + return dq.dqb_curspace; + } +} + +int64_t GetOccupiedSpaceForGid(const std::string& uuid, gid_t gid) { + const std::string device = FindQuotaDeviceForUuid(uuid); + if (device == "") { + return -1; + } + struct dqblk dq; + if (quotactl(QCMD(Q_GETQUOTA, GRPQUOTA), device.c_str(), gid, + reinterpret_cast<char*>(&dq)) != 0) { + if (errno != ESRCH) { + PLOG(ERROR) << "Failed to quotactl " << device << " for GID " << gid; + } + return -1; + } else { +#if MEASURE_DEBUG + LOG(DEBUG) << "quotactl() for GID " << gid << " " << dq.dqb_curspace; +#endif + return dq.dqb_curspace; + } + +} + +} // namespace installd +} // namespace android diff --git a/cmds/installd/QuotaUtils.h b/cmds/installd/QuotaUtils.h new file mode 100644 index 0000000000..9ad170fcbb --- /dev/null +++ b/cmds/installd/QuotaUtils.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_INSTALLD_QUOTA_UTILS_H_ +#define ANDROID_INSTALLD_QUOTA_UTILS_H_ + +#include <memory> +#include <string> + +namespace android { +namespace installd { + +/* Clear and recompute the reverse mounts map */ +bool InvalidateQuotaMounts(); + +/* Whether quota is supported in the device with the given uuid */ +bool IsQuotaSupported(const std::string& uuid); + +/* Get the current occupied space in bytes for a uid or -1 if fails */ +int64_t GetOccupiedSpaceForUid(const std::string& uuid, uid_t uid); + +/* Get the current occupied space in bytes for a gid or -1 if fails */ +int64_t GetOccupiedSpaceForGid(const std::string& uuid, gid_t gid); + +} // namespace installd +} // namespace android + +#endif // ANDROID_INSTALLD_QUOTA_UTILS_H_ diff --git a/cmds/installd/dexopt.cpp b/cmds/installd/dexopt.cpp index b6038f917f..90cadb4084 100644 --- a/cmds/installd/dexopt.cpp +++ b/cmds/installd/dexopt.cpp @@ -54,6 +54,8 @@ #include "utils.h" using android::base::EndsWith; +using android::base::GetBoolProperty; +using android::base::GetProperty; using android::base::ReadFully; using android::base::StringPrintf; using android::base::WriteFully; @@ -181,36 +183,11 @@ bool clear_primary_current_profile(const std::string& package_name, const std::s return clear_current_profile(package_name, location, user, /*is_secondary_dex*/false); } -static int split_count(const char *str) -{ - char *ctx; - int count = 0; - char buf[kPropertyValueMax]; - - strlcpy(buf, str, sizeof(buf)); - char *pBuf = buf; - - while(strtok_r(pBuf, " ", &ctx) != nullptr) { - count++; - pBuf = nullptr; - } - - return count; -} - -static int split(char *buf, const char **argv) -{ - char *ctx; - int count = 0; - char *tok; - char *pBuf = buf; - - while((tok = strtok_r(pBuf, " ", &ctx)) != nullptr) { - argv[count++] = tok; - pBuf = nullptr; - } - - return count; +static std::vector<std::string> SplitBySpaces(const std::string& str) { + if (str.empty()) { + return {}; + } + return android::base::Split(str, " "); } static const char* get_location_from_path(const char* path) { @@ -224,6 +201,34 @@ static const char* get_location_from_path(const char* path) { } } +// Automatically adds binary and null terminator arg. +static inline void ExecVWithArgs(const char* bin, const std::vector<std::string>& args) { + std::vector<const char*> argv = {bin}; + for (const std::string& arg : args) { + argv.push_back(arg.c_str()); + } + // Add null terminator. 
+ argv.push_back(nullptr); + execv(bin, (char * const *)&argv[0]); +} + +static inline void AddArgIfNonEmpty(const std::string& arg, std::vector<std::string>* args) { + DCHECK(args != nullptr); + if (!arg.empty()) { + args->push_back(arg); + } +} + +static std::string MapPropertyToArg(const std::string& property, + const std::string& format, + const std::string& default_value = "") { + std::string prop = GetProperty(property, default_value); + if (!prop.empty()) { + return StringPrintf(format.c_str(), prop.c_str()); + } + return ""; +} + [[ noreturn ]] static void run_dex2oat(int zip_fd, int oat_fd, int input_vdex_fd, int output_vdex_fd, int image_fd, const char* input_file_name, const char* output_file_name, int swap_fd, @@ -231,83 +236,51 @@ static void run_dex2oat(int zip_fd, int oat_fd, int input_vdex_fd, int output_vd bool debuggable, bool post_bootcomplete, bool background_job_compile, int profile_fd, const char* class_loader_context, int target_sdk_version, bool enable_hidden_api_checks, bool generate_compact_dex, int dex_metadata_fd, const char* compilation_reason) { - static const unsigned int MAX_INSTRUCTION_SET_LEN = 7; - - if (strlen(instruction_set) >= MAX_INSTRUCTION_SET_LEN) { - LOG(ERROR) << "Instruction set '" << instruction_set << "' longer than max length of " - << MAX_INSTRUCTION_SET_LEN; - exit(DexoptReturnCodes::kInstructionSetLength); - } - // Get the relative path to the input file. const char* relative_input_file_name = get_location_from_path(input_file_name); - char dex2oat_Xms_flag[kPropertyValueMax]; - bool have_dex2oat_Xms_flag = get_property("dalvik.vm.dex2oat-Xms", dex2oat_Xms_flag, nullptr) > 0; - - char dex2oat_Xmx_flag[kPropertyValueMax]; - bool have_dex2oat_Xmx_flag = get_property("dalvik.vm.dex2oat-Xmx", dex2oat_Xmx_flag, nullptr) > 0; + std::string dex2oat_Xms_arg = MapPropertyToArg("dalvik.vm.dex2oat-Xms", "-Xms%s"); + std::string dex2oat_Xmx_arg = MapPropertyToArg("dalvik.vm.dex2oat-Xmx", "-Xmx%s"); - char dex2oat_threads_buf[kPropertyValueMax]; - bool have_dex2oat_threads_flag = get_property(post_bootcomplete - ? "dalvik.vm.dex2oat-threads" - : "dalvik.vm.boot-dex2oat-threads", - dex2oat_threads_buf, - nullptr) > 0; - char dex2oat_threads_arg[kPropertyValueMax + 2]; - if (have_dex2oat_threads_flag) { - sprintf(dex2oat_threads_arg, "-j%s", dex2oat_threads_buf); - } + const char* threads_property = post_bootcomplete + ? 
"dalvik.vm.dex2oat-threads" + : "dalvik.vm.boot-dex2oat-threads"; + std::string dex2oat_threads_arg = MapPropertyToArg(threads_property, "-j%s"); - char dex2oat_isa_features_key[kPropertyKeyMax]; - sprintf(dex2oat_isa_features_key, "dalvik.vm.isa.%s.features", instruction_set); - char dex2oat_isa_features[kPropertyValueMax]; - bool have_dex2oat_isa_features = get_property(dex2oat_isa_features_key, - dex2oat_isa_features, nullptr) > 0; + const std::string dex2oat_isa_features_key = + StringPrintf("dalvik.vm.isa.%s.features", instruction_set); + std::string instruction_set_features_arg = + MapPropertyToArg(dex2oat_isa_features_key, "--instruction-set-features=%s"); - char dex2oat_isa_variant_key[kPropertyKeyMax]; - sprintf(dex2oat_isa_variant_key, "dalvik.vm.isa.%s.variant", instruction_set); - char dex2oat_isa_variant[kPropertyValueMax]; - bool have_dex2oat_isa_variant = get_property(dex2oat_isa_variant_key, - dex2oat_isa_variant, nullptr) > 0; + const std::string dex2oat_isa_variant_key = + StringPrintf("dalvik.vm.isa.%s.variant", instruction_set); + std::string instruction_set_variant_arg = + MapPropertyToArg(dex2oat_isa_variant_key, "--instruction-set-variant=%s"); const char *dex2oat_norelocation = "-Xnorelocate"; - bool have_dex2oat_relocation_skip_flag = false; - char dex2oat_flags[kPropertyValueMax]; - int dex2oat_flags_count = get_property("dalvik.vm.dex2oat-flags", - dex2oat_flags, nullptr) <= 0 ? 0 : split_count(dex2oat_flags); - ALOGV("dalvik.vm.dex2oat-flags=%s\n", dex2oat_flags); + const std::string dex2oat_flags = GetProperty("dalvik.vm.dex2oat-flags", ""); + std::vector<std::string> dex2oat_flags_args = SplitBySpaces(dex2oat_flags); + ALOGV("dalvik.vm.dex2oat-flags=%s\n", dex2oat_flags.c_str()); // If we are booting without the real /data, don't spend time compiling. 
- char vold_decrypt[kPropertyValueMax]; - bool have_vold_decrypt = get_property("vold.decrypt", vold_decrypt, "") > 0; - bool skip_compilation = (have_vold_decrypt && - (strcmp(vold_decrypt, "trigger_restart_min_framework") == 0 || - (strcmp(vold_decrypt, "1") == 0))); - - bool generate_debug_info = property_get_bool("debug.generate-debug-info", false); - const bool resolve_startup_strings = - property_get_bool("dalvik.vm.dex2oat-resolve-startup-strings", false); - - char app_image_format[kPropertyValueMax]; - char image_format_arg[strlen("--image-format=") + kPropertyValueMax]; - bool have_app_image_format = - image_fd >= 0 && get_property("dalvik.vm.appimageformat", app_image_format, nullptr) > 0; - if (have_app_image_format) { - sprintf(image_format_arg, "--image-format=%s", app_image_format); - } - - char dex2oat_large_app_threshold[kPropertyValueMax]; - bool have_dex2oat_large_app_threshold = - get_property("dalvik.vm.dex2oat-very-large", dex2oat_large_app_threshold, nullptr) > 0; - char dex2oat_large_app_threshold_arg[strlen("--very-large-app-threshold=") + kPropertyValueMax]; - if (have_dex2oat_large_app_threshold) { - sprintf(dex2oat_large_app_threshold_arg, - "--very-large-app-threshold=%s", - dex2oat_large_app_threshold); + std::string vold_decrypt = GetProperty("vold.decrypt", ""); + bool skip_compilation = vold_decrypt == "trigger_restart_min_framework" || + vold_decrypt == "1"; + + const std::string resolve_startup_string_arg = + MapPropertyToArg("dalvik.vm.dex2oat-resolve-startup-strings", + "--resolve-startup-const-strings=%s"); + const bool generate_debug_info = GetBoolProperty("debug.generate-debug-info", false); + + std::string image_format_arg; + if (image_fd >= 0) { + image_format_arg = MapPropertyToArg("dalvik.vm.appimageformat", "--image-format=%s"); } + std::string dex2oat_large_app_threshold_arg = + MapPropertyToArg("dalvik.vm.dex2oat-very-large", "--very-large-app-threshold=%s"); + // If the runtime was requested to use libartd.so, we'll run dex2oatd, otherwise dex2oat. const char* dex2oat_bin = "/system/bin/dex2oat"; constexpr const char* kDex2oatDebugPath = "/system/bin/dex2oatd"; @@ -323,113 +296,65 @@ static void run_dex2oat(int zip_fd, int oat_fd, int input_vdex_fd, int output_vd android::base::GetBoolProperty(kMinidebugInfoSystemProperty, kMinidebugInfoSystemPropertyDefault); - static const char* RUNTIME_ARG = "--runtime-arg"; - - static const int MAX_INT_LEN = 12; // '-'+10dig+'\0' -OR- 0x+8dig - // clang FORTIFY doesn't let us use strlen in constant array bounds, so we // use arraysize instead. 
- char zip_fd_arg[arraysize("--zip-fd=") + MAX_INT_LEN]; - char zip_location_arg[arraysize("--zip-location=") + PKG_PATH_MAX]; - char input_vdex_fd_arg[arraysize("--input-vdex-fd=") + MAX_INT_LEN]; - char output_vdex_fd_arg[arraysize("--output-vdex-fd=") + MAX_INT_LEN]; - char oat_fd_arg[arraysize("--oat-fd=") + MAX_INT_LEN]; - char oat_location_arg[arraysize("--oat-location=") + PKG_PATH_MAX]; - char instruction_set_arg[arraysize("--instruction-set=") + MAX_INSTRUCTION_SET_LEN]; - char instruction_set_variant_arg[arraysize("--instruction-set-variant=") + kPropertyValueMax]; - char instruction_set_features_arg[arraysize("--instruction-set-features=") + kPropertyValueMax]; - char dex2oat_Xms_arg[arraysize("-Xms") + kPropertyValueMax]; - char dex2oat_Xmx_arg[arraysize("-Xmx") + kPropertyValueMax]; - char dex2oat_compiler_filter_arg[arraysize("--compiler-filter=") + kPropertyValueMax]; - bool have_dex2oat_swap_fd = false; - char dex2oat_swap_fd[arraysize("--swap-fd=") + MAX_INT_LEN]; - bool have_dex2oat_image_fd = false; - char dex2oat_image_fd[arraysize("--app-image-fd=") + MAX_INT_LEN]; - size_t class_loader_context_size = arraysize("--class-loader-context=") + PKG_PATH_MAX; - char target_sdk_version_arg[arraysize("-Xtarget-sdk-version:") + MAX_INT_LEN]; - char class_loader_context_arg[class_loader_context_size]; - if (class_loader_context != nullptr) { - snprintf(class_loader_context_arg, class_loader_context_size, "--class-loader-context=%s", - class_loader_context); - } - - sprintf(zip_fd_arg, "--zip-fd=%d", zip_fd); - sprintf(zip_location_arg, "--zip-location=%s", relative_input_file_name); - sprintf(input_vdex_fd_arg, "--input-vdex-fd=%d", input_vdex_fd); - sprintf(output_vdex_fd_arg, "--output-vdex-fd=%d", output_vdex_fd); - sprintf(oat_fd_arg, "--oat-fd=%d", oat_fd); - sprintf(oat_location_arg, "--oat-location=%s", output_file_name); - sprintf(instruction_set_arg, "--instruction-set=%s", instruction_set); - sprintf(instruction_set_variant_arg, "--instruction-set-variant=%s", dex2oat_isa_variant); - sprintf(instruction_set_features_arg, "--instruction-set-features=%s", dex2oat_isa_features); - if (swap_fd >= 0) { - have_dex2oat_swap_fd = true; - sprintf(dex2oat_swap_fd, "--swap-fd=%d", swap_fd); + std::string zip_fd_arg = StringPrintf("--zip-fd=%d", zip_fd); + std::string zip_location_arg = StringPrintf("--zip-location=%s", relative_input_file_name); + std::string input_vdex_fd_arg = StringPrintf("--input-vdex-fd=%d", input_vdex_fd); + std::string output_vdex_fd_arg = StringPrintf("--output-vdex-fd=%d", output_vdex_fd); + std::string oat_fd_arg = StringPrintf("--oat-fd=%d", oat_fd); + std::string oat_location_arg = StringPrintf("--oat-location=%s", output_file_name); + std::string instruction_set_arg = StringPrintf("--instruction-set=%s", instruction_set); + std::string dex2oat_compiler_filter_arg; + std::string dex2oat_swap_fd; + std::string dex2oat_image_fd; + std::string target_sdk_version_arg; + if (target_sdk_version != 0) { + StringPrintf("-Xtarget-sdk-version:%d", target_sdk_version); } - if (image_fd >= 0) { - have_dex2oat_image_fd = true; - sprintf(dex2oat_image_fd, "--app-image-fd=%d", image_fd); + std::string class_loader_context_arg; + if (class_loader_context != nullptr) { + class_loader_context_arg = StringPrintf("--class-loader-context=%s", class_loader_context); } - if (have_dex2oat_Xms_flag) { - sprintf(dex2oat_Xms_arg, "-Xms%s", dex2oat_Xms_flag); + if (swap_fd >= 0) { + dex2oat_swap_fd = StringPrintf("--swap-fd=%d", swap_fd); } - if (have_dex2oat_Xmx_flag) { - 
sprintf(dex2oat_Xmx_arg, "-Xmx%s", dex2oat_Xmx_flag); + if (image_fd >= 0) { + dex2oat_image_fd = StringPrintf("--app-image-fd=%d", image_fd); } - sprintf(target_sdk_version_arg, "-Xtarget-sdk-version:%d", target_sdk_version); // Compute compiler filter. - - bool have_dex2oat_compiler_filter_flag = false; + bool have_dex2oat_relocation_skip_flag = false; if (skip_compilation) { - strlcpy(dex2oat_compiler_filter_arg, "--compiler-filter=extract", - sizeof(dex2oat_compiler_filter_arg)); - have_dex2oat_compiler_filter_flag = true; + dex2oat_compiler_filter_arg = "--compiler-filter=extract"; have_dex2oat_relocation_skip_flag = true; } else if (compiler_filter != nullptr) { - if (strlen(compiler_filter) + strlen("--compiler-filter=") < - arraysize(dex2oat_compiler_filter_arg)) { - sprintf(dex2oat_compiler_filter_arg, "--compiler-filter=%s", compiler_filter); - have_dex2oat_compiler_filter_flag = true; - } else { - ALOGW("Compiler filter name '%s' is too large (max characters is %zu)", - compiler_filter, - kPropertyValueMax); - } + dex2oat_compiler_filter_arg = StringPrintf("--compiler-filter=%s", compiler_filter); } - if (!have_dex2oat_compiler_filter_flag) { - char dex2oat_compiler_filter_flag[kPropertyValueMax]; - have_dex2oat_compiler_filter_flag = get_property("dalvik.vm.dex2oat-filter", - dex2oat_compiler_filter_flag, nullptr) > 0; - if (have_dex2oat_compiler_filter_flag) { - sprintf(dex2oat_compiler_filter_arg, - "--compiler-filter=%s", - dex2oat_compiler_filter_flag); - } + if (dex2oat_compiler_filter_arg.empty()) { + dex2oat_compiler_filter_arg = MapPropertyToArg("dalvik.vm.dex2oat-filter", + "--compiler-filter=%s"); } // Check whether all apps should be compiled debuggable. if (!debuggable) { - char prop_buf[kPropertyValueMax]; - debuggable = - (get_property("dalvik.vm.always_debuggable", prop_buf, "0") > 0) && - (prop_buf[0] == '1'); + debuggable = GetProperty("dalvik.vm.always_debuggable", "") == "1"; } - char profile_arg[strlen("--profile-file-fd=") + MAX_INT_LEN]; + std::string profile_arg; if (profile_fd != -1) { - sprintf(profile_arg, "--profile-file-fd=%d", profile_fd); + profile_arg = StringPrintf("--profile-file-fd=%d", profile_fd); } // Get the directory of the apk to pass as a base classpath directory. - char base_dir[arraysize("--classpath-dir=") + PKG_PATH_MAX]; + std::string base_dir; std::string apk_dir(input_file_name); unsigned long dir_index = apk_dir.rfind('/'); bool has_base_dir = dir_index != std::string::npos; if (has_base_dir) { apk_dir = apk_dir.substr(0, dir_index); - sprintf(base_dir, "--classpath-dir=%s", apk_dir.c_str()); + base_dir = StringPrintf("--classpath-dir=%s", apk_dir.c_str()); } std::string dex_metadata_fd_arg = "--dm-fd=" + std::to_string(dex_metadata_fd); @@ -444,121 +369,69 @@ static void run_dex2oat(int zip_fd, int oat_fd, int input_vdex_fd, int output_vd // supported. const bool disable_cdex = !generate_compact_dex || (input_vdex_fd == output_vdex_fd); - const char* argv[10 // program name, mandatory arguments and the final NULL - + (have_dex2oat_isa_variant ? 1 : 0) - + (have_dex2oat_isa_features ? 1 : 0) - + (have_dex2oat_Xms_flag ? 2 : 0) - + (have_dex2oat_Xmx_flag ? 2 : 0) - + (have_dex2oat_compiler_filter_flag ? 1 : 0) - + (have_dex2oat_threads_flag ? 1 : 0) - + (have_dex2oat_swap_fd ? 1 : 0) - + (have_dex2oat_image_fd ? 1 : 0) - + (have_dex2oat_relocation_skip_flag ? 2 : 0) - + (generate_debug_info ? 1 : 0) - + (debuggable ? 1 : 0) - + (have_app_image_format ? 1 : 0) - + dex2oat_flags_count - + (profile_fd == -1 ? 
0 : 1) - + (class_loader_context != nullptr ? 1 : 0) - + (has_base_dir ? 1 : 0) - + (have_dex2oat_large_app_threshold ? 1 : 0) - + (disable_cdex ? 1 : 0) - + (generate_minidebug_info ? 1 : 0) - + (target_sdk_version != 0 ? 2 : 0) - + (enable_hidden_api_checks ? 2 : 0) - + (dex_metadata_fd > -1 ? 1 : 0) - + (compilation_reason != nullptr ? 1 : 0)]; - int i = 0; - argv[i++] = dex2oat_bin; - argv[i++] = zip_fd_arg; - argv[i++] = zip_location_arg; - argv[i++] = input_vdex_fd_arg; - argv[i++] = output_vdex_fd_arg; - argv[i++] = oat_fd_arg; - argv[i++] = oat_location_arg; - argv[i++] = instruction_set_arg; - argv[i++] = resolve_startup_strings ? "--resolve-startup-const-strings=true" : - "--resolve-startup-const-strings=false"; - if (have_dex2oat_isa_variant) { - argv[i++] = instruction_set_variant_arg; - } - if (have_dex2oat_isa_features) { - argv[i++] = instruction_set_features_arg; - } - if (have_dex2oat_Xms_flag) { - argv[i++] = RUNTIME_ARG; - argv[i++] = dex2oat_Xms_arg; - } - if (have_dex2oat_Xmx_flag) { - argv[i++] = RUNTIME_ARG; - argv[i++] = dex2oat_Xmx_arg; - } - if (have_dex2oat_compiler_filter_flag) { - argv[i++] = dex2oat_compiler_filter_arg; - } - if (have_dex2oat_threads_flag) { - argv[i++] = dex2oat_threads_arg; - } - if (have_dex2oat_swap_fd) { - argv[i++] = dex2oat_swap_fd; - } - if (have_dex2oat_image_fd) { - argv[i++] = dex2oat_image_fd; + std::vector<std::string> args = { + zip_fd_arg, + zip_location_arg, + input_vdex_fd_arg, + output_vdex_fd_arg, + oat_fd_arg, + oat_location_arg, + instruction_set_arg, + }; + auto add_runtime_arg = [&](const std::string& arg) { + args.push_back("--runtime-arg"); + args.push_back(arg); + }; + + AddArgIfNonEmpty(instruction_set_variant_arg, &args); + AddArgIfNonEmpty(instruction_set_features_arg, &args); + if (!dex2oat_Xms_arg.empty()) { + add_runtime_arg(dex2oat_Xms_arg); + } + if (!dex2oat_Xmx_arg.empty()) { + add_runtime_arg(dex2oat_Xmx_arg); } + AddArgIfNonEmpty(resolve_startup_string_arg, &args); + AddArgIfNonEmpty(dex2oat_compiler_filter_arg, &args); + AddArgIfNonEmpty(dex2oat_threads_arg, &args); + AddArgIfNonEmpty(dex2oat_swap_fd, &args); + AddArgIfNonEmpty(dex2oat_image_fd, &args); + if (generate_debug_info) { - argv[i++] = "--generate-debug-info"; + args.push_back("--generate-debug-info"); } if (debuggable) { - argv[i++] = "--debuggable"; - } - if (have_app_image_format) { - argv[i++] = image_format_arg; - } - if (have_dex2oat_large_app_threshold) { - argv[i++] = dex2oat_large_app_threshold_arg; - } - if (dex2oat_flags_count) { - i += split(dex2oat_flags, argv + i); + args.push_back("--debuggable"); } + AddArgIfNonEmpty(image_format_arg, &args); + AddArgIfNonEmpty(dex2oat_large_app_threshold_arg, &args); + args.insert(args.end(), dex2oat_flags_args.begin(), dex2oat_flags_args.end()); if (have_dex2oat_relocation_skip_flag) { - argv[i++] = RUNTIME_ARG; - argv[i++] = dex2oat_norelocation; - } - if (profile_fd != -1) { - argv[i++] = profile_arg; - } - if (has_base_dir) { - argv[i++] = base_dir; - } - if (class_loader_context != nullptr) { - argv[i++] = class_loader_context_arg; + add_runtime_arg(dex2oat_norelocation); } + AddArgIfNonEmpty(profile_arg, &args); + AddArgIfNonEmpty(base_dir, &args); + AddArgIfNonEmpty(class_loader_context_arg, &args); if (generate_minidebug_info) { - argv[i++] = kMinidebugDex2oatFlag; + args.push_back(kMinidebugDex2oatFlag); } if (disable_cdex) { - argv[i++] = kDisableCompactDexFlag; - } - if (target_sdk_version != 0) { - argv[i++] = RUNTIME_ARG; - argv[i++] = target_sdk_version_arg; + 
args.push_back(kDisableCompactDexFlag); } + AddArgIfNonEmpty(target_sdk_version_arg, &args); if (enable_hidden_api_checks) { - argv[i++] = RUNTIME_ARG; - argv[i++] = "-Xhidden-api-checks"; + add_runtime_arg("-Xhidden-api-checks"); } if (dex_metadata_fd > -1) { - argv[i++] = dex_metadata_fd_arg.c_str(); + args.push_back(dex_metadata_fd_arg); } - if(compilation_reason != nullptr) { - argv[i++] = compilation_reason_arg.c_str(); - } + AddArgIfNonEmpty(compilation_reason_arg, &args); + // Do not add after dex2oat_flags, they should override others for debugging. - argv[i] = nullptr; - execv(dex2oat_bin, (char * const *)argv); + ExecVWithArgs(dex2oat_bin, args); PLOG(ERROR) << "execv(" << dex2oat_bin << ") failed"; exit(DexoptReturnCodes::kDex2oatExec); } @@ -584,13 +457,9 @@ static bool ShouldUseSwapFileForDexopt() { } // Check the "override" property. If it exists, return value == "true". - char dex2oat_prop_buf[kPropertyValueMax]; - if (get_property("dalvik.vm.dex2oat-swap", dex2oat_prop_buf, "") > 0) { - if (strcmp(dex2oat_prop_buf, "true") == 0) { - return true; - } else { - return false; - } + std::string dex2oat_prop_buf = GetProperty("dalvik.vm.dex2oat-swap", ""); + if (!dex2oat_prop_buf.empty()) { + return dex2oat_prop_buf == "true"; } // Shortcut for default value. This is an implementation optimization for the process sketched @@ -600,8 +469,7 @@ static bool ShouldUseSwapFileForDexopt() { return true; } - bool is_low_mem = property_get_bool("ro.config.low_ram", false); - if (is_low_mem) { + if (GetBoolProperty("ro.config.low_ram", false)) { return true; } @@ -755,50 +623,33 @@ static void run_profman(const std::vector<unique_fd>& profile_fds, CHECK(apk_fds != nullptr); CHECK_EQ(1u, apk_fds->size()); } - std::vector<std::string> profile_args(profile_fds.size()); - for (size_t k = 0; k < profile_fds.size(); k++) { - profile_args[k] = "--profile-file-fd=" + std::to_string(profile_fds[k].get()); + std::vector<std::string> args; + args.push_back("--reference-profile-file-fd=" + std::to_string(reference_profile_fd.get())); + + for (const unique_fd& fd : profile_fds) { + args.push_back("--profile-file-fd=" + std::to_string(fd.get())); } - std::string reference_profile_arg = "--reference-profile-file-fd=" - + std::to_string(reference_profile_fd.get()); - std::vector<std::string> apk_args; if (apk_fds != nullptr) { - for (size_t k = 0; k < apk_fds->size(); k++) { - apk_args.push_back("--apk-fd=" + std::to_string((*apk_fds)[k].get())); + for (const unique_fd& fd : *apk_fds) { + args.push_back("--apk-fd=" + std::to_string(fd.get())); } } std::vector<std::string> dex_location_args; if (dex_locations != nullptr) { - for (size_t k = 0; k < dex_locations->size(); k++) { - dex_location_args.push_back("--dex-location=" + (*dex_locations)[k]); + for (const std::string& dex_location : *dex_locations) { + args.push_back("--dex-location=" + dex_location); } } - // program name, reference profile fd, the final NULL and the profile fds - const char* argv[3 + profile_args.size() + apk_args.size() - + dex_location_args.size() + (copy_and_update ? 
1 : 0)]; - int i = 0; - argv[i++] = profman_bin; - argv[i++] = reference_profile_arg.c_str(); - for (size_t k = 0; k < profile_args.size(); k++) { - argv[i++] = profile_args[k].c_str(); - } - for (size_t k = 0; k < apk_args.size(); k++) { - argv[i++] = apk_args[k].c_str(); - } - for (size_t k = 0; k < dex_location_args.size(); k++) { - argv[i++] = dex_location_args[k].c_str(); - } if (copy_and_update) { - argv[i++] = "--copy-and-update-profile-key"; + args.push_back("--copy-and-update-profile-key"); } // Do not add after dex2oat_flags, they should override others for debugging. - argv[i] = nullptr; - execv(profman_bin, (char * const *)argv); + ExecVWithArgs(profman_bin, args); PLOG(ERROR) << "execv(" << profman_bin << ") failed"; exit(DexoptReturnCodes::kProfmanExec); /* only get here on exec failure */ } @@ -931,7 +782,6 @@ static void run_profman_dump(const std::vector<unique_fd>& profile_fds, const unique_fd& output_fd) { std::vector<std::string> profman_args; static const char* PROFMAN_BIN = "/system/bin/profman"; - profman_args.push_back(PROFMAN_BIN); profman_args.push_back("--dump-only"); profman_args.push_back(StringPrintf("--dump-output-to-fd=%d", output_fd.get())); if (reference_profile_fd != -1) { @@ -947,14 +797,8 @@ static void run_profman_dump(const std::vector<unique_fd>& profile_fds, for (size_t i = 0; i < apk_fds.size(); i++) { profman_args.push_back(StringPrintf("--apk-fd=%d", apk_fds[i].get())); } - const char **argv = new const char*[profman_args.size() + 1]; - size_t i = 0; - for (const std::string& profman_arg : profman_args) { - argv[i++] = profman_arg.c_str(); - } - argv[i] = nullptr; - execv(PROFMAN_BIN, (char * const *)argv); + ExecVWithArgs(PROFMAN_BIN, profman_args); PLOG(ERROR) << "execv(" << PROFMAN_BIN << ") failed"; exit(DexoptReturnCodes::kProfmanExec); /* only get here on exec failure */ } @@ -1310,10 +1154,8 @@ Dex2oatFileWrapper maybe_open_app_image(const char* out_oat_path, if (!generate_app_image) { return Dex2oatFileWrapper(); } - char app_image_format[kPropertyValueMax]; - bool have_app_image_format = - get_property("dalvik.vm.appimageformat", app_image_format, nullptr) > 0; - if (!have_app_image_format) { + std::string app_image_format = GetProperty("dalvik.vm.appimageformat", ""); + if (app_image_format.empty()) { return Dex2oatFileWrapper(); } // Recreate is true since we do not want to modify a mapped image. If the app is @@ -1583,13 +1425,6 @@ static void exec_dexoptanalyzer(const std::string& dex_file, int vdex_fd, int oa is_debug_runtime() ? "/system/bin/dexoptanalyzerd" : "/system/bin/dexoptanalyzer"; - static const unsigned int MAX_INSTRUCTION_SET_LEN = 7; - - if (instruction_set.size() >= MAX_INSTRUCTION_SET_LEN) { - LOG(ERROR) << "Instruction set " << instruction_set - << " longer than max length of " << MAX_INSTRUCTION_SET_LEN; - return; - } std::string dex_file_arg = "--dex-file=" + dex_file; std::string oat_fd_arg = "--oat-fd=" + std::to_string(oat_fd); @@ -1604,38 +1439,30 @@ static void exec_dexoptanalyzer(const std::string& dex_file, int vdex_fd, int oa class_loader_context_arg += class_loader_context; } - // program name, dex file, isa, filter, the final NULL - const int argc = 6 + - (profile_was_updated ? 1 : 0) + - (vdex_fd >= 0 ? 1 : 0) + - (oat_fd >= 0 ? 1 : 0) + - (downgrade ? 1 : 0) + - (class_loader_context != nullptr ? 
1 : 0); - const char* argv[argc]; - int i = 0; - argv[i++] = dexoptanalyzer_bin; - argv[i++] = dex_file_arg.c_str(); - argv[i++] = isa_arg.c_str(); - argv[i++] = compiler_filter_arg.c_str(); + // program name, dex file, isa, filter + std::vector<std::string> args = { + dex_file_arg, + isa_arg, + compiler_filter_arg, + }; if (oat_fd >= 0) { - argv[i++] = oat_fd_arg.c_str(); + args.push_back(oat_fd_arg); } if (vdex_fd >= 0) { - argv[i++] = vdex_fd_arg.c_str(); + args.push_back(vdex_fd_arg); } - argv[i++] = zip_fd_arg.c_str(); + args.push_back(zip_fd_arg.c_str()); if (profile_was_updated) { - argv[i++] = assume_profile_changed; + args.push_back(assume_profile_changed); } if (downgrade) { - argv[i++] = downgrade_flag; + args.push_back(downgrade_flag); } if (class_loader_context != nullptr) { - argv[i++] = class_loader_context_arg.c_str(); + args.push_back(class_loader_context_arg.c_str()); } - argv[i] = nullptr; - execv(dexoptanalyzer_bin, (char * const *)argv); + ExecVWithArgs(dexoptanalyzer_bin, args); ALOGE("execv(%s) failed: %s\n", dexoptanalyzer_bin, strerror(errno)); } @@ -2425,18 +2252,14 @@ static bool move_ab_path(const std::string& b_path, const std::string& a_path) { bool move_ab(const char* apk_path, const char* instruction_set, const char* oat_dir) { // Get the current slot suffix. No suffix, no A/B. - std::string slot_suffix; - { - char buf[kPropertyValueMax]; - if (get_property("ro.boot.slot_suffix", buf, nullptr) <= 0) { - return false; - } - slot_suffix = buf; + const std::string slot_suffix = GetProperty("ro.boot.slot_suffix", ""); + if (slot_suffix.empty()) { + return false; + } - if (!ValidateTargetSlotSuffix(slot_suffix)) { - LOG(ERROR) << "Target slot suffix not legal: " << slot_suffix; - return false; - } + if (!ValidateTargetSlotSuffix(slot_suffix)) { + LOG(ERROR) << "Target slot suffix not legal: " << slot_suffix; + return false; } // Validate other inputs. diff --git a/cmds/installd/tests/installd_dexopt_test.cpp b/cmds/installd/tests/installd_dexopt_test.cpp index f216c53be4..79e6859ce7 100644 --- a/cmds/installd/tests/installd_dexopt_test.cpp +++ b/cmds/installd/tests/installd_dexopt_test.cpp @@ -143,6 +143,20 @@ static const char kDexFile[] = "AAAACADojmFLPcugSwoBAAAUAgAACwAYAAAAAAAAAAAAoIEAAAAAY2xhc3Nlcy5kZXhVVAUAA/Ns" "+ll1eAsAAQQj5QIABIgTAABQSwUGAAAAAAEAAQBRAAAATwEAAAAA"; +class DexoptTestEnvTest : public testing::Test { +}; + +TEST_F(DexoptTestEnvTest, CheckSelinux) { + ASSERT_EQ(1, is_selinux_enabled()); + + // Crude cutout for virtual devices. 
+#if !defined(__i386__) && !defined(__x86_64__) + constexpr bool kIsX86 = false; +#else + constexpr bool kIsX86 = true; +#endif + ASSERT_TRUE(1 == security_getenforce() || kIsX86 || true /* b/119032200 */); +} class DexoptTest : public testing::Test { protected: diff --git a/cmds/installd/utils.cpp b/cmds/installd/utils.cpp index 6b9cf0d526..74ad1841a5 100644 --- a/cmds/installd/utils.cpp +++ b/cmds/installd/utils.cpp @@ -43,6 +43,7 @@ #define DEBUG_XATTRS 0 using android::base::EndsWith; +using android::base::Fdopendir; using android::base::StringPrintf; using android::base::unique_fd; @@ -1036,7 +1037,7 @@ static bool collect_profiles(DIR* d, continue; } - DIR* subdir = fdopendir(subdir_fd); + DIR* subdir = Fdopendir(std::move(subdir_fd)); if (subdir == nullptr) { PLOG(WARNING) << "Could not open dir path " << local_path; result = false; diff --git a/libs/binder/ndk/Android.bp b/libs/binder/ndk/Android.bp index 5461b4f719..d16502feab 100644 --- a/libs/binder/ndk/Android.bp +++ b/libs/binder/ndk/Android.bp @@ -38,6 +38,8 @@ cc_library { "libbinder", "libutils", ], + + version_script: "libbinder_ndk.map.txt", } ndk_headers { diff --git a/libs/binder/ndk/ibinder.cpp b/libs/binder/ndk/ibinder.cpp index 896c5c13b2..e1c2009eec 100644 --- a/libs/binder/ndk/ibinder.cpp +++ b/libs/binder/ndk/ibinder.cpp @@ -302,7 +302,7 @@ AIBinder* AIBinder_new(const AIBinder_Class* clazz, void* args) { bool AIBinder_isRemote(const AIBinder* binder) { if (binder == nullptr) { - return true; + return false; } return binder->isRemote(); diff --git a/libs/binder/ndk/include_ndk/android/binder_parcel.h b/libs/binder/ndk/include_ndk/android/binder_parcel.h index 4e0132b536..a303f4a33d 100644 --- a/libs/binder/ndk/include_ndk/android/binder_parcel.h +++ b/libs/binder/ndk/include_ndk/android/binder_parcel.h @@ -50,80 +50,120 @@ typedef struct AParcel AParcel; */ void AParcel_delete(AParcel* parcel) __INTRODUCED_IN(29); +/** + * This is called to allocate a buffer for a C-style string (null-terminated). The returned buffer + * should be at least length bytes. This includes space for a null terminator. length will always be + * strictly less than or equal to the maximum size that can be held in a size_t and will always be + * greater than 0. + * + * See also AParcel_readString. + * + * If allocation fails, null should be returned. + */ +typedef char* (*AParcel_stringAllocator)(void* stringData, size_t length); + +/** + * This is called to allocate an array of size 'length'. + * + * See also AParcel_readStringArray + */ +typedef bool (*AParcel_stringArrayAllocator)(void* arrayData, size_t length); + +/** + * This is called to allocate a string inside of an array that was allocated by an + * AParcel_stringArrayAllocator. + * + * The index returned will always be within the range [0, length of arrayData). The returned buffer + * should be at least length bytes. This includes space for a null-terminator. length will always be + * strictly less than or equal to the maximum size that can be held in a size_t and will always be + * greater than 0. + * + * See also AParcel_readStringArray + */ +typedef char* (*AParcel_stringArrayElementAllocator)(void* arrayData, size_t index, size_t length); + +/** + * This returns the length and buffer of an array at a specific index in an arrayData object. 
+ * + * See also AParcel_writeStringArray + */ +typedef const char* (*AParcel_stringArrayElementGetter)(const void* arrayData, size_t index, + size_t* outLength); + // @START-PRIMITIVE-VECTOR-GETTERS /** * This is called to get the underlying data from an arrayData object. * - * The implementation of this function should allocate a contiguous array of length length and + * The implementation of this function should allocate a contiguous array of size 'length' and * return that underlying buffer to be filled out. If there is an error or length is 0, null may be * returned. * * See also AParcel_readInt32Array */ -typedef int32_t* (*AParcel_int32Allocator)(void* arrayData, size_t length); +typedef int32_t* (*AParcel_int32ArrayAllocator)(void* arrayData, size_t length); /** * This is called to get the underlying data from an arrayData object. * - * The implementation of this function should allocate a contiguous array of length length and + * The implementation of this function should allocate a contiguous array of size 'length' and * return that underlying buffer to be filled out. If there is an error or length is 0, null may be * returned. * * See also AParcel_readUint32Array */ -typedef uint32_t* (*AParcel_uint32Allocator)(void* arrayData, size_t length); +typedef uint32_t* (*AParcel_uint32ArrayAllocator)(void* arrayData, size_t length); /** * This is called to get the underlying data from an arrayData object. * - * The implementation of this function should allocate a contiguous array of length length and + * The implementation of this function should allocate a contiguous array of size 'length' and * return that underlying buffer to be filled out. If there is an error or length is 0, null may be * returned. * * See also AParcel_readInt64Array */ -typedef int64_t* (*AParcel_int64Allocator)(void* arrayData, size_t length); +typedef int64_t* (*AParcel_int64ArrayAllocator)(void* arrayData, size_t length); /** * This is called to get the underlying data from an arrayData object. * - * The implementation of this function should allocate a contiguous array of length length and + * The implementation of this function should allocate a contiguous array of size 'length' and * return that underlying buffer to be filled out. If there is an error or length is 0, null may be * returned. * * See also AParcel_readUint64Array */ -typedef uint64_t* (*AParcel_uint64Allocator)(void* arrayData, size_t length); +typedef uint64_t* (*AParcel_uint64ArrayAllocator)(void* arrayData, size_t length); /** * This is called to get the underlying data from an arrayData object. * - * The implementation of this function should allocate a contiguous array of length length and + * The implementation of this function should allocate a contiguous array of size 'length' and * return that underlying buffer to be filled out. If there is an error or length is 0, null may be * returned. * * See also AParcel_readFloatArray */ -typedef float* (*AParcel_floatAllocator)(void* arrayData, size_t length); +typedef float* (*AParcel_floatArrayAllocator)(void* arrayData, size_t length); /** * This is called to get the underlying data from an arrayData object. * - * The implementation of this function should allocate a contiguous array of length length and + * The implementation of this function should allocate a contiguous array of size 'length' and * return that underlying buffer to be filled out. If there is an error or length is 0, null may be * returned. 
* * See also AParcel_readDoubleArray */ -typedef double* (*AParcel_doubleAllocator)(void* arrayData, size_t length); +typedef double* (*AParcel_doubleArrayAllocator)(void* arrayData, size_t length); /** - * This allocates an array of length length inside of arrayData and returns whether or not there was + * This allocates an array of size 'length' inside of arrayData and returns whether or not there was * a success. * * See also AParcel_readBoolArray */ -typedef bool (*AParcel_boolAllocator)(void* arrayData, size_t length); +typedef bool (*AParcel_boolArrayAllocator)(void* arrayData, size_t length); /** * This is called to get the underlying data from an arrayData object at index. @@ -142,38 +182,28 @@ typedef void (*AParcel_boolArraySetter)(void* arrayData, size_t index, bool valu /** * This is called to get the underlying data from an arrayData object. * - * The implementation of this function should allocate a contiguous array of length length and + * The implementation of this function should allocate a contiguous array of size 'length' and * return that underlying buffer to be filled out. If there is an error or length is 0, null may be * returned. * * See also AParcel_readCharArray */ -typedef char16_t* (*AParcel_charAllocator)(void* arrayData, size_t length); +typedef char16_t* (*AParcel_charArrayAllocator)(void* arrayData, size_t length); /** * This is called to get the underlying data from an arrayData object. * - * The implementation of this function should allocate a contiguous array of length length and + * The implementation of this function should allocate a contiguous array of size 'length' and * return that underlying buffer to be filled out. If there is an error or length is 0, null may be * returned. * * See also AParcel_readByteArray */ -typedef int8_t* (*AParcel_byteAllocator)(void* arrayData, size_t length); +typedef int8_t* (*AParcel_byteArrayAllocator)(void* arrayData, size_t length); // @END-PRIMITIVE-VECTOR-GETTERS /** - * This is called to allocate a buffer for a C-style string (null-terminated). The buffer should be - * of length length which includes space for the null-terminator. - * - * See also AParcel_readString. - * - * If allocation fails, null should be returned. - */ -typedef char* (*AParcel_stringAllocator)(void* stringData, size_t length); - -/** * Writes an AIBinder to the next location in a non-null parcel. Can be null. */ binder_status_t AParcel_writeStrongBinder(AParcel* parcel, AIBinder* binder) __INTRODUCED_IN(29); @@ -229,20 +259,45 @@ binder_status_t AParcel_readStatusHeader(const AParcel* parcel, AStatus** status __INTRODUCED_IN(29); /** - * Writes string value to the next location in a non-null parcel. + * Writes utf-8 string value to the next location in a non-null parcel. */ binder_status_t AParcel_writeString(AParcel* parcel, const char* string, size_t length) __INTRODUCED_IN(29); /** - * Reads and allocates string value from the next location in a non-null parcel. + * Reads and allocates utf-8 string value from the next location in a non-null parcel. * * Data is passed to the string allocator once the string size is known. This size includes the * space for the null-terminator of this string. This allocator returns a buffer which is used as * the output buffer from this read. 
*/ -binder_status_t AParcel_readString(const AParcel* parcel, AParcel_stringAllocator allocator, - void* stringData) __INTRODUCED_IN(29); +binder_status_t AParcel_readString(const AParcel* parcel, void* stringData, + AParcel_stringAllocator allocator) __INTRODUCED_IN(29); + +/** + * Writes utf-8 string array data to the next location in a non-null parcel. + * + * length is the length of the array. AParcel_stringArrayElementGetter will be called for all + * indices in range [0, length) with the arrayData provided here. The string length and buffer + * returned from this function will be used to fill out the data from the parcel. + */ +binder_status_t AParcel_writeStringArray(AParcel* parcel, const void* arrayData, size_t length, + AParcel_stringArrayElementGetter getter) + __INTRODUCED_IN(29); + +/** + * Reads and allocates utf-8 string array value from the next location in a non-null parcel. + * + * First, AParcel_stringArrayAllocator will be called with the size of the array to be read where + * length is the length of the array to be read from the parcel. Then, for each index i in [0, + * length), AParcel_stringArrayElementAllocator will be called with the length of the string to be + * read from the parcel. The resultant buffer from each of these calls will be filled according to + * the contents of the string that is read. + */ +binder_status_t AParcel_readStringArray(const AParcel* parcel, void* arrayData, + AParcel_stringArrayAllocator allocator, + AParcel_stringArrayElementAllocator elementAllocator) + __INTRODUCED_IN(29); // @START-PRIMITIVE-READ-WRITE /** @@ -377,9 +432,8 @@ binder_status_t AParcel_writeDoubleArray(AParcel* parcel, const double* value, s * getter(arrayData, i) will be called for each i in [0, length) in order to get the underlying * values to write to the parcel. */ -binder_status_t AParcel_writeBoolArray(AParcel* parcel, const void* arrayData, - AParcel_boolArrayGetter getter, size_t length) - __INTRODUCED_IN(29); +binder_status_t AParcel_writeBoolArray(AParcel* parcel, const void* arrayData, size_t length, + AParcel_boolArrayGetter getter) __INTRODUCED_IN(29); /** * Writes an array of char16_t to the next location in a non-null parcel. @@ -401,7 +455,7 @@ binder_status_t AParcel_writeByteArray(AParcel* parcel, const int8_t* value, siz * corresponding data */ binder_status_t AParcel_readInt32Array(const AParcel* parcel, void* arrayData, - AParcel_int32Allocator allocator) __INTRODUCED_IN(29); + AParcel_int32ArrayAllocator allocator) __INTRODUCED_IN(29); /** * Reads an array of uint32_t from the next location in a non-null parcel. @@ -411,7 +465,7 @@ binder_status_t AParcel_readInt32Array(const AParcel* parcel, void* arrayData, * corresponding data */ binder_status_t AParcel_readUint32Array(const AParcel* parcel, void* arrayData, - AParcel_uint32Allocator allocator) __INTRODUCED_IN(29); + AParcel_uint32ArrayAllocator allocator) __INTRODUCED_IN(29); /** * Reads an array of int64_t from the next location in a non-null parcel. @@ -421,7 +475,7 @@ binder_status_t AParcel_readUint32Array(const AParcel* parcel, void* arrayData, * corresponding data */ binder_status_t AParcel_readInt64Array(const AParcel* parcel, void* arrayData, - AParcel_int64Allocator allocator) __INTRODUCED_IN(29); + AParcel_int64ArrayAllocator allocator) __INTRODUCED_IN(29); /** * Reads an array of uint64_t from the next location in a non-null parcel. 
@@ -431,7 +485,7 @@ binder_status_t AParcel_readInt64Array(const AParcel* parcel, void* arrayData, * corresponding data */ binder_status_t AParcel_readUint64Array(const AParcel* parcel, void* arrayData, - AParcel_uint64Allocator allocator) __INTRODUCED_IN(29); + AParcel_uint64ArrayAllocator allocator) __INTRODUCED_IN(29); /** * Reads an array of float from the next location in a non-null parcel. @@ -441,7 +495,7 @@ binder_status_t AParcel_readUint64Array(const AParcel* parcel, void* arrayData, * corresponding data */ binder_status_t AParcel_readFloatArray(const AParcel* parcel, void* arrayData, - AParcel_floatAllocator allocator) __INTRODUCED_IN(29); + AParcel_floatArrayAllocator allocator) __INTRODUCED_IN(29); /** * Reads an array of double from the next location in a non-null parcel. @@ -451,7 +505,7 @@ binder_status_t AParcel_readFloatArray(const AParcel* parcel, void* arrayData, * corresponding data */ binder_status_t AParcel_readDoubleArray(const AParcel* parcel, void* arrayData, - AParcel_doubleAllocator allocator) __INTRODUCED_IN(29); + AParcel_doubleArrayAllocator allocator) __INTRODUCED_IN(29); /** * Reads an array of bool from the next location in a non-null parcel. @@ -460,7 +514,7 @@ binder_status_t AParcel_readDoubleArray(const AParcel* parcel, void* arrayData, * setter(arrayData, i, x) will be called where x is the value at the associated index. */ binder_status_t AParcel_readBoolArray(const AParcel* parcel, void* arrayData, - AParcel_boolAllocator allocator, + AParcel_boolArrayAllocator allocator, AParcel_boolArraySetter setter) __INTRODUCED_IN(29); /** @@ -471,7 +525,7 @@ binder_status_t AParcel_readBoolArray(const AParcel* parcel, void* arrayData, * corresponding data */ binder_status_t AParcel_readCharArray(const AParcel* parcel, void* arrayData, - AParcel_charAllocator allocator) __INTRODUCED_IN(29); + AParcel_charArrayAllocator allocator) __INTRODUCED_IN(29); /** * Reads an array of int8_t from the next location in a non-null parcel. @@ -481,7 +535,7 @@ binder_status_t AParcel_readCharArray(const AParcel* parcel, void* arrayData, * corresponding data */ binder_status_t AParcel_readByteArray(const AParcel* parcel, void* arrayData, - AParcel_byteAllocator allocator) __INTRODUCED_IN(29); + AParcel_byteArrayAllocator allocator) __INTRODUCED_IN(29); // @END-PRIMITIVE-READ-WRITE diff --git a/libs/binder/ndk/include_ndk/android/binder_parcel_utils.h b/libs/binder/ndk/include_ndk/android/binder_parcel_utils.h index 6e41a7f90e..3fcb1216db 100644 --- a/libs/binder/ndk/include_ndk/android/binder_parcel_utils.h +++ b/libs/binder/ndk/include_ndk/android/binder_parcel_utils.h @@ -36,7 +36,7 @@ namespace ndk { /** - * This retrieves and allocates a vector to length length and returns the underlying buffer. + * This retrieves and allocates a vector to size 'length' and returns the underlying buffer. */ template <typename T> static inline T* AParcel_stdVectorAllocator(void* vectorData, size_t length) { @@ -48,10 +48,18 @@ static inline T* AParcel_stdVectorAllocator(void* vectorData, size_t length) { } /** - * This allocates a vector to length length and returns whether the allocation is successful. + * This allocates a vector to size 'length' and returns whether the allocation is successful. + * + * See also AParcel_stdVectorAllocator. Types used with this allocator have their sizes defined + * externally with respect to the NDK, and that size information is not passed into the NDK. + * Instead, it is used in cases where callbacks are used. 
+ * + * See AParcel_readVector(const AParcel* parcel, std::vector<bool>) + * See AParcel_readVector(const AParcel* parcel, std::vector<std::string>) */ -static inline bool AParcel_stdVectorBoolAllocator(void* vectorData, size_t length) { - std::vector<bool>* vec = static_cast<std::vector<bool>*>(vectorData); +template <typename T> +static inline bool AParcel_stdVectorExternalAllocator(void* vectorData, size_t length) { + std::vector<T>* vec = static_cast<std::vector<T>*>(vectorData); if (length > vec->max_size()) return false; vec->resize(length); @@ -173,8 +181,8 @@ inline binder_status_t AParcel_readVector(const AParcel* parcel, std::vector<dou * Writes a vector of bool to the next location in a non-null parcel. */ inline binder_status_t AParcel_writeVector(AParcel* parcel, const std::vector<bool>& vec) { - return AParcel_writeBoolArray(parcel, static_cast<const void*>(&vec), - AParcel_stdVectorGetter<bool>, vec.size()); + return AParcel_writeBoolArray(parcel, static_cast<const void*>(&vec), vec.size(), + AParcel_stdVectorGetter<bool>); } /** @@ -182,7 +190,7 @@ inline binder_status_t AParcel_writeVector(AParcel* parcel, const std::vector<bo */ inline binder_status_t AParcel_readVector(const AParcel* parcel, std::vector<bool>* vec) { void* vectorData = static_cast<void*>(vec); - return AParcel_readBoolArray(parcel, vectorData, AParcel_stdVectorBoolAllocator, + return AParcel_readBoolArray(parcel, vectorData, AParcel_stdVectorExternalAllocator<bool>, AParcel_stdVectorSetter<bool>); } @@ -229,6 +237,32 @@ static inline char* AParcel_stdStringAllocator(void* stringData, size_t length) } /** + * Allocates a std::string inside of a std::vector<std::string> at index index to size 'length'. + */ +static inline char* AParcel_stdVectorStringElementAllocator(void* vectorData, size_t index, + size_t length) { + std::vector<std::string>* vec = static_cast<std::vector<std::string>*>(vectorData); + + std::string& element = vec->at(index); + element.resize(length - 1); + return &element[0]; +} + +/** + * This gets the length and buffer of a std::string inside of a std::vector<std::string> at index + * index. + */ +static inline const char* AParcel_stdVectorStringElementGetter(const void* vectorData, size_t index, + size_t* outLength) { + const std::vector<std::string>* vec = static_cast<const std::vector<std::string>*>(vectorData); + + const std::string& element = vec->at(index); + + *outLength = element.size(); + return element.c_str(); +} + +/** * Convenience API for writing a std::string. 
*/ static inline binder_status_t AParcel_writeString(AParcel* parcel, const std::string& str) { @@ -240,7 +274,28 @@ static inline binder_status_t AParcel_writeString(AParcel* parcel, const std::st */ static inline binder_status_t AParcel_readString(const AParcel* parcel, std::string* str) { void* stringData = static_cast<void*>(str); - return AParcel_readString(parcel, AParcel_stdStringAllocator, stringData); + return AParcel_readString(parcel, stringData, AParcel_stdStringAllocator); +} + +/** + * Convenience API for writing a std::vector<std::string> + */ +static inline binder_status_t AParcel_writeVector(AParcel* parcel, + const std::vector<std::string>& vec) { + const void* vectorData = static_cast<const void*>(&vec); + return AParcel_writeStringArray(parcel, vectorData, vec.size(), + AParcel_stdVectorStringElementGetter); +} + +/** + * Convenience API for reading a std::vector<std::string> + */ +static inline binder_status_t AParcel_readVector(const AParcel* parcel, + std::vector<std::string>* vec) { + void* vectorData = static_cast<void*>(vec); + return AParcel_readStringArray(parcel, vectorData, + AParcel_stdVectorExternalAllocator<std::string>, + AParcel_stdVectorStringElementAllocator); } template <typename T> diff --git a/libs/binder/ndk/libbinder_ndk.map.txt b/libs/binder/ndk/libbinder_ndk.map.txt index ec6587a7e4..d2c1a3dda8 100644 --- a/libs/binder/ndk/libbinder_ndk.map.txt +++ b/libs/binder/ndk/libbinder_ndk.map.txt @@ -41,6 +41,7 @@ LIBBINDER_NDK { # introduced=29 AParcel_readParcelFileDescriptor; AParcel_readStatusHeader; AParcel_readString; + AParcel_readStringArray; AParcel_readStrongBinder; AParcel_readUint32; AParcel_readUint32Array; @@ -63,6 +64,7 @@ LIBBINDER_NDK { # introduced=29 AParcel_writeParcelFileDescriptor; AParcel_writeStatusHeader; AParcel_writeString; + AParcel_writeStringArray; AParcel_writeStrongBinder; AParcel_writeUint32; AParcel_writeUint32Array; diff --git a/libs/binder/ndk/parcel.cpp b/libs/binder/ndk/parcel.cpp index 77c0558f58..3b44a622ce 100644 --- a/libs/binder/ndk/parcel.cpp +++ b/libs/binder/ndk/parcel.cpp @@ -142,8 +142,8 @@ binder_status_t ReadArray<char16_t>(const AParcel* parcel, void* arrayData, } template <typename T> -binder_status_t WriteArray(AParcel* parcel, const void* arrayData, ArrayGetter<T> getter, - size_t length, status_t (Parcel::*write)(T)) { +binder_status_t WriteArray(AParcel* parcel, const void* arrayData, size_t length, + ArrayGetter<T> getter, status_t (Parcel::*write)(T)) { if (length > std::numeric_limits<int32_t>::max()) return STATUS_BAD_VALUE; Parcel* rawParcel = parcel->get(); @@ -273,8 +273,8 @@ binder_status_t AParcel_writeString(AParcel* parcel, const char* string, size_t return STATUS_OK; } -binder_status_t AParcel_readString(const AParcel* parcel, AParcel_stringAllocator allocator, - void* stringData) { +binder_status_t AParcel_readString(const AParcel* parcel, void* stringData, + AParcel_stringAllocator allocator) { size_t len16; const char16_t* str16 = parcel->get()->readString16Inplace(&len16); @@ -291,7 +291,7 @@ binder_status_t AParcel_readString(const AParcel* parcel, AParcel_stringAllocato len8 = utf16_to_utf8_length(str16, len16) + 1; } - if (len8 <= 0 || len8 >= std::numeric_limits<int32_t>::max()) { + if (len8 <= 0 || len8 > std::numeric_limits<int32_t>::max()) { LOG(WARNING) << __func__ << ": Invalid string length: " << len8; return STATUS_BAD_VALUE; } @@ -308,6 +308,68 @@ binder_status_t AParcel_readString(const AParcel* parcel, AParcel_stringAllocato return STATUS_OK; } +binder_status_t 
AParcel_writeStringArray(AParcel* parcel, const void* arrayData, size_t length, + AParcel_stringArrayElementGetter getter) { + if (length > std::numeric_limits<int32_t>::max()) return STATUS_BAD_VALUE; + + Parcel* rawParcel = parcel->get(); + + status_t status = rawParcel->writeInt32(static_cast<int32_t>(length)); + if (status != STATUS_OK) return PruneStatusT(status); + + for (size_t i = 0; i < length; i++) { + size_t length = 0; + const char* str = getter(arrayData, i, &length); + if (str == nullptr) return STATUS_BAD_VALUE; + + binder_status_t status = AParcel_writeString(parcel, str, length); + if (status != STATUS_OK) return status; + } + + return STATUS_OK; +} + +// This implements AParcel_stringAllocator for a string using an array, index, and element +// allocator. +struct StringArrayElementAllocationAdapter { + void* arrayData; // stringData from the NDK + size_t index; // index into the string array + AParcel_stringArrayElementAllocator elementAllocator; + + static char* Allocator(void* stringData, size_t length) { + StringArrayElementAllocationAdapter* adapter = + static_cast<StringArrayElementAllocationAdapter*>(stringData); + return adapter->elementAllocator(adapter->arrayData, adapter->index, length); + } +}; + +binder_status_t AParcel_readStringArray(const AParcel* parcel, void* arrayData, + AParcel_stringArrayAllocator allocator, + AParcel_stringArrayElementAllocator elementAllocator) { + const Parcel* rawParcel = parcel->get(); + + int32_t length; + status_t status = rawParcel->readInt32(&length); + + if (status != STATUS_OK) return PruneStatusT(status); + if (length < 0) return STATUS_UNEXPECTED_NULL; + + if (!allocator(arrayData, length)) return STATUS_NO_MEMORY; + + StringArrayElementAllocationAdapter adapter{ + .arrayData = arrayData, + .index = 0, + .elementAllocator = elementAllocator, + }; + + for (; adapter.index < length; adapter.index++) { + AParcel_readString(parcel, static_cast<void*>(&adapter), + StringArrayElementAllocationAdapter::Allocator); + } + + return STATUS_OK; +} + // See gen_parcel_helper.py. These auto-generated read/write methods use the same types for // libbinder and this library. 
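On the caller's side, the string-array entry points implemented above are normally reached through the std::vector<std::string> convenience wrappers from binder_parcel_utils.h rather than by hand-writing allocators. A minimal sketch, assuming an AParcel pair obtained from an AIBinder transaction (the function name and parcel variables are illustrative only):

    #include <android/binder_parcel.h>
    #include <android/binder_parcel_utils.h>
    #include <android/binder_status.h>

    #include <string>
    #include <vector>

    // Round-trips a vector of UTF-8 strings. The write side lands in
    // AParcel_writeStringArray via AParcel_stdVectorStringElementGetter; the
    // read side lands in AParcel_readStringArray via
    // AParcel_stdVectorExternalAllocator<std::string> and
    // AParcel_stdVectorStringElementAllocator.
    binder_status_t roundTripStrings(AParcel* out, const AParcel* in,
                                     std::vector<std::string>* result) {
        const std::vector<std::string> names = {"alpha", "beta", "gamma"};

        binder_status_t status = ndk::AParcel_writeVector(out, names);
        if (status != STATUS_OK) return status;

        return ndk::AParcel_readVector(in, result);
    }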
// @START @@ -425,9 +487,9 @@ binder_status_t AParcel_writeDoubleArray(AParcel* parcel, const double* value, s return WriteArray<double>(parcel, value, length); } -binder_status_t AParcel_writeBoolArray(AParcel* parcel, const void* arrayData, - AParcel_boolArrayGetter getter, size_t length) { - return WriteArray<bool>(parcel, arrayData, getter, length, &Parcel::writeBool); +binder_status_t AParcel_writeBoolArray(AParcel* parcel, const void* arrayData, size_t length, + AParcel_boolArrayGetter getter) { + return WriteArray<bool>(parcel, arrayData, length, getter, &Parcel::writeBool); } binder_status_t AParcel_writeCharArray(AParcel* parcel, const char16_t* value, size_t length) { @@ -439,48 +501,48 @@ binder_status_t AParcel_writeByteArray(AParcel* parcel, const int8_t* value, siz } binder_status_t AParcel_readInt32Array(const AParcel* parcel, void* arrayData, - AParcel_int32Allocator allocator) { + AParcel_int32ArrayAllocator allocator) { return ReadArray<int32_t>(parcel, arrayData, allocator); } binder_status_t AParcel_readUint32Array(const AParcel* parcel, void* arrayData, - AParcel_uint32Allocator allocator) { + AParcel_uint32ArrayAllocator allocator) { return ReadArray<uint32_t>(parcel, arrayData, allocator); } binder_status_t AParcel_readInt64Array(const AParcel* parcel, void* arrayData, - AParcel_int64Allocator allocator) { + AParcel_int64ArrayAllocator allocator) { return ReadArray<int64_t>(parcel, arrayData, allocator); } binder_status_t AParcel_readUint64Array(const AParcel* parcel, void* arrayData, - AParcel_uint64Allocator allocator) { + AParcel_uint64ArrayAllocator allocator) { return ReadArray<uint64_t>(parcel, arrayData, allocator); } binder_status_t AParcel_readFloatArray(const AParcel* parcel, void* arrayData, - AParcel_floatAllocator allocator) { + AParcel_floatArrayAllocator allocator) { return ReadArray<float>(parcel, arrayData, allocator); } binder_status_t AParcel_readDoubleArray(const AParcel* parcel, void* arrayData, - AParcel_doubleAllocator allocator) { + AParcel_doubleArrayAllocator allocator) { return ReadArray<double>(parcel, arrayData, allocator); } binder_status_t AParcel_readBoolArray(const AParcel* parcel, void* arrayData, - AParcel_boolAllocator allocator, + AParcel_boolArrayAllocator allocator, AParcel_boolArraySetter setter) { return ReadArray<bool>(parcel, arrayData, allocator, setter, &Parcel::readBool); } binder_status_t AParcel_readCharArray(const AParcel* parcel, void* arrayData, - AParcel_charAllocator allocator) { + AParcel_charArrayAllocator allocator) { return ReadArray<char16_t>(parcel, arrayData, allocator); } binder_status_t AParcel_readByteArray(const AParcel* parcel, void* arrayData, - AParcel_byteAllocator allocator) { + AParcel_byteArrayAllocator allocator) { return ReadArray<int8_t>(parcel, arrayData, allocator); } diff --git a/libs/binder/ndk/scripts/gen_parcel_helper.py b/libs/binder/ndk/scripts/gen_parcel_helper.py index 86cc57e9b7..0e10220496 100755 --- a/libs/binder/ndk/scripts/gen_parcel_helper.py +++ b/libs/binder/ndk/scripts/gen_parcel_helper.py @@ -89,10 +89,10 @@ def main(): for pretty, cpp in data_types: nca = pretty in non_contiguously_addressable - arg_type = "const " + cpp + "* value" - if nca: arg_type = "const void* arrayData, AParcel_" + pretty.lower() + "ArrayGetter getter" + arg_types = "const " + cpp + "* value, size_t length" + if nca: arg_types = "const void* arrayData, size_t length, AParcel_" + pretty.lower() + "ArrayGetter getter" args = "value, length" - if nca: args = "arrayData, getter, length, &Parcel::write" + 
pretty + if nca: args = "arrayData, length, getter, &Parcel::write" + pretty header += "/**\n" header += " * Writes an array of " + cpp + " to the next location in a non-null parcel.\n" @@ -101,8 +101,8 @@ def main(): header += " * getter(arrayData, i) will be called for each i in [0, length) in order to get the underlying values to write " header += "to the parcel.\n" header += " */\n" - header += "binder_status_t AParcel_write" + pretty + "Array(AParcel* parcel, " + arg_type + ", size_t length) __INTRODUCED_IN(29);\n\n" - source += "binder_status_t AParcel_write" + pretty + "Array(AParcel* parcel, " + arg_type + ", size_t length) {\n" + header += "binder_status_t AParcel_write" + pretty + "Array(AParcel* parcel, " + arg_types + ") __INTRODUCED_IN(29);\n\n" + source += "binder_status_t AParcel_write" + pretty + "Array(AParcel* parcel, " + arg_types + ") {\n" source += " return WriteArray<" + cpp + ">(parcel, " + args + ");\n"; source += "}\n\n" @@ -111,13 +111,13 @@ def main(): read_func = "AParcel_read" + pretty + "Array" write_func = "AParcel_write" + pretty + "Array" - allocator_type = "AParcel_" + pretty.lower() + "Allocator" + allocator_type = "AParcel_" + pretty.lower() + "ArrayAllocator" getter_type = "AParcel_" + pretty.lower() + "ArrayGetter" setter_type = "AParcel_" + pretty.lower() + "ArraySetter" if nca: pre_header += "/**\n" - pre_header += " * This allocates an array of length length inside of arrayData and returns whether or not there was " + pre_header += " * This allocates an array of size 'length' inside of arrayData and returns whether or not there was " pre_header += "a success.\n" pre_header += " *\n" pre_header += " * See also " + read_func + "\n" @@ -141,7 +141,7 @@ def main(): pre_header += "/**\n" pre_header += " * This is called to get the underlying data from an arrayData object.\n" pre_header += " *\n" - pre_header += " * The implementation of this function should allocate a contiguous array of length length and " + pre_header += " * The implementation of this function should allocate a contiguous array of size 'length' and " pre_header += "return that underlying buffer to be filled out. 
If there is an error or length is 0, null may be " pre_header += "returned.\n" pre_header += " *\n" @@ -178,9 +178,9 @@ def main(): cpp_helper += " * Writes a vector of " + cpp + " to the next location in a non-null parcel.\n" cpp_helper += " */\n" cpp_helper += "inline binder_status_t AParcel_writeVector(AParcel* parcel, const std::vector<" + cpp + ">& vec) {\n" - write_args = "vec.data()" - if nca: write_args = "static_cast<const void*>(&vec), AParcel_stdVectorGetter<" + cpp + ">" - cpp_helper += " return AParcel_write" + pretty + "Array(parcel, " + write_args + ", vec.size());\n" + write_args = "vec.data(), vec.size()" + if nca: write_args = "static_cast<const void*>(&vec), vec.size(), AParcel_stdVectorGetter<" + cpp + ">" + cpp_helper += " return AParcel_write" + pretty + "Array(parcel, " + write_args + ");\n" cpp_helper += "}\n\n" cpp_helper += "/**\n" @@ -192,7 +192,7 @@ def main(): read_args += ["parcel"] read_args += ["vectorData"] if nca: - read_args += ["AParcel_stdVectorBoolAllocator"] + read_args += ["AParcel_stdVectorExternalAllocator<bool>"] read_args += ["AParcel_stdVectorSetter<" + cpp + ">"] else: read_args += ["AParcel_stdVectorAllocator<" + cpp + ">"] diff --git a/libs/graphicsenv/GraphicsEnv.cpp b/libs/graphicsenv/GraphicsEnv.cpp index 8661401021..97b4828e01 100644 --- a/libs/graphicsenv/GraphicsEnv.cpp +++ b/libs/graphicsenv/GraphicsEnv.cpp @@ -153,10 +153,18 @@ const std::string& GraphicsEnv::getDebugLayers() { return mDebugLayers; } +const std::string& GraphicsEnv::getDebugLayersGLES() { + return mDebugLayersGLES; +} + void GraphicsEnv::setDebugLayers(const std::string layers) { mDebugLayers = layers; } +void GraphicsEnv::setDebugLayersGLES(const std::string layers) { + mDebugLayersGLES = layers; +} + android_namespace_t* GraphicsEnv::getDriverNamespace() { static std::once_flag once; std::call_once(once, [this]() { diff --git a/libs/graphicsenv/include/graphicsenv/GraphicsEnv.h b/libs/graphicsenv/include/graphicsenv/GraphicsEnv.h index 10c2549227..528c260653 100644 --- a/libs/graphicsenv/include/graphicsenv/GraphicsEnv.h +++ b/libs/graphicsenv/include/graphicsenv/GraphicsEnv.h @@ -61,7 +61,9 @@ public: const std::string& getLayerPaths(); void setDebugLayers(const std::string layers); + void setDebugLayersGLES(const std::string layers); const std::string& getDebugLayers(); + const std::string& getDebugLayersGLES(); private: GraphicsEnv() = default; @@ -74,6 +76,7 @@ private: long mAngleRulesOffset; long mAngleRulesLength; std::string mDebugLayers; + std::string mDebugLayersGLES; std::string mLayerPaths; android_namespace_t* mDriverNamespace = nullptr; android_namespace_t* mAngleNamespace = nullptr; diff --git a/libs/renderengine/Android.bp b/libs/renderengine/Android.bp index 674659c63f..7efc8bd64d 100644 --- a/libs/renderengine/Android.bp +++ b/libs/renderengine/Android.bp @@ -1,4 +1,3 @@ -// TODO(b/112585051) Add to VNDK once moved to libs/ cc_defaults { name: "renderengine_defaults", cflags: [ @@ -60,8 +59,11 @@ filegroup { cc_library_static { name: "librenderengine", defaults: ["librenderengine_defaults"], + vendor_available: true, + vndk: { + enabled: true, + }, double_loadable: true, - clang: true, cflags: [ "-fvisibility=hidden", diff --git a/libs/ui/include_vndk/ui/Transform.h b/libs/ui/include_vndk/ui/Transform.h new file mode 120000 index 0000000000..60633c2ef5 --- /dev/null +++ b/libs/ui/include_vndk/ui/Transform.h @@ -0,0 +1 @@ +../../include/ui/Transform.h
\ No newline at end of file diff --git a/opengl/libs/EGL/Loader.cpp b/opengl/libs/EGL/Loader.cpp index 3d1eb14d06..a150db1b7d 100644 --- a/opengl/libs/EGL/Loader.cpp +++ b/opengl/libs/EGL/Loader.cpp @@ -539,7 +539,6 @@ static void* load_angle(const char* kind, android_namespace_t* ns, egl_connectio if (strcmp(kind, "EGL") != 0 && strcmp(kind, "GLESv2") != 0 && strcmp(kind, "GLESv1_CM") != 0) return nullptr; - void* so = nullptr; std::string name; char prop[PROPERTY_VALUE_MAX]; @@ -573,7 +572,7 @@ static void* load_angle(const char* kind, android_namespace_t* ns, egl_connectio bool use_version0_API = false; bool use_version1_API = false; fpANGLEGetUtilityAPI ANGLEGetUtilityAPI = - (fpANGLEGetUtilityAPI)dlsym(so, "ANGLEGetUtilityAPI"); + (fpANGLEGetUtilityAPI)dlsym(cnx->featureSo, "ANGLEGetUtilityAPI"); if (ANGLEGetUtilityAPI) { unsigned int versionToUse = 1; if ((ANGLEGetUtilityAPI)(&versionToUse)) { @@ -585,12 +584,13 @@ static void* load_angle(const char* kind, android_namespace_t* ns, egl_connectio } } else { use_version0_API = true; + ALOGV("Cannot find ANGLEGetUtilityAPI in library"); } if (use_version1_API) { // Use the new version 1 API to determine if the // application should use the ANGLE or the native driver. fpAndroidUseANGLEForApplication AndroidUseANGLEForApplication = - (fpAndroidUseANGLEForApplication)dlsym(so, "AndroidUseANGLEForApplication"); + (fpAndroidUseANGLEForApplication)dlsym(cnx->featureSo, "AndroidUseANGLEForApplication"); if (AndroidUseANGLEForApplication) { use_angle = (AndroidUseANGLEForApplication)(rules_fd, rules_offset, rules_length, app_name_str.c_str(), @@ -602,7 +602,7 @@ static void* load_angle(const char* kind, android_namespace_t* ns, egl_connectio // Use the old version 0 API to determine if the // application should use the ANGLE or the native driver. 
fpANGLEUseForApplication ANGLEUseForApplication = - (fpANGLEUseForApplication)dlsym(so, "ANGLEUseForApplication"); + (fpANGLEUseForApplication)dlsym(cnx->featureSo, "ANGLEUseForApplication"); if (ANGLEUseForApplication) { ANGLEPreference app_preference = getAnglePref(android::GraphicsEnv::getInstance().getAngleAppPref()); @@ -621,6 +621,7 @@ static void* load_angle(const char* kind, android_namespace_t* ns, egl_connectio } cnx->angleDecided = true; } + void* so = nullptr; if (use_angle) { so = load_angle_from_namespace(kind, ns); } diff --git a/opengl/libs/EGL/egl_display.cpp b/opengl/libs/EGL/egl_display.cpp index 7cf58b4eee..a0659730c4 100644 --- a/opengl/libs/EGL/egl_display.cpp +++ b/opengl/libs/EGL/egl_display.cpp @@ -265,7 +265,12 @@ EGLDisplay egl_display_t::getPlatformDisplay(EGLNativeDisplayType display, if (cnx->egl.eglGetPlatformDisplay) { dpy = cnx->egl.eglGetPlatformDisplay(EGL_PLATFORM_ANDROID_KHR, display, attrib_list); - } else { + } + + // It is possible that eglGetPlatformDisplay does not have a + // working implementation for Android platform; in that case, + // one last fallback to eglGetDisplay + if(dpy == EGL_NO_DISPLAY) { if (attrib_list) { ALOGW("getPlatformDisplay: unexpected attribute list, attributes ignored"); } diff --git a/opengl/libs/EGL/egl_layers.cpp b/opengl/libs/EGL/egl_layers.cpp index e4906e08c4..dd8fbfcb7d 100644 --- a/opengl/libs/EGL/egl_layers.cpp +++ b/opengl/libs/EGL/egl_layers.cpp @@ -144,8 +144,8 @@ const char kSystemLayerLibraryDir[] = "/data/local/debug/gles"; std::string LayerLoader::GetDebugLayers() { // Layers can be specified at the Java level in GraphicsEnvironemnt - // gpu_debug_layers = layer1:layer2:layerN - std::string debug_layers = android::GraphicsEnv::getInstance().getDebugLayers(); + // gpu_debug_layers_gles = layer1:layer2:layerN + std::string debug_layers = android::GraphicsEnv::getInstance().getDebugLayersGLES(); if (debug_layers.empty()) { // Only check system properties if Java settings are empty diff --git a/opengl/libs/EGL/egl_platform_entries.cpp b/opengl/libs/EGL/egl_platform_entries.cpp index 1daa4d236e..547a6690b0 100644 --- a/opengl/libs/EGL/egl_platform_entries.cpp +++ b/opengl/libs/EGL/egl_platform_entries.cpp @@ -485,58 +485,29 @@ static EGLint getReportedColorSpace(EGLint colorspace) { } // Returns a list of color spaces understood by the vendor EGL driver. -static std::vector<EGLint> getDriverColorSpaces(egl_display_ptr dp, - android_pixel_format format) { +static std::vector<EGLint> getDriverColorSpaces(egl_display_ptr dp) { std::vector<EGLint> colorSpaces; - if (!dp->hasColorSpaceSupport) return colorSpaces; - - // OpenGL drivers only support sRGB encoding with 8-bit formats. - // RGB_888 is never returned by getNativePixelFormat, but is included for completeness. - const bool formatSupportsSRGBEncoding = - format == HAL_PIXEL_FORMAT_RGBA_8888 || format == HAL_PIXEL_FORMAT_RGBX_8888 || - format == HAL_PIXEL_FORMAT_RGB_888; - const bool formatIsFloatingPoint = format == HAL_PIXEL_FORMAT_RGBA_FP16; - - if (formatSupportsSRGBEncoding) { - // sRGB and linear are always supported when color space support is present. - colorSpaces.push_back(EGL_GL_COLORSPACE_SRGB_KHR); - colorSpaces.push_back(EGL_GL_COLORSPACE_LINEAR_KHR); - // DCI-P3 uses the sRGB transfer function, so it's only relevant for 8-bit formats. 
- if (findExtension(dp->disp.queryString.extensions, - "EGL_EXT_gl_colorspace_display_p3")) { - colorSpaces.push_back(EGL_GL_COLORSPACE_DISPLAY_P3_EXT); - } - } - // According to the spec, scRGB is only supported for floating point formats. - // For non-linear scRGB, the application is responsible for applying the - // transfer function. - if (formatIsFloatingPoint) { - if (findExtension(dp->disp.queryString.extensions, - "EGL_EXT_gl_colorspace_scrgb")) { - colorSpaces.push_back(EGL_GL_COLORSPACE_SCRGB_EXT); - } - if (findExtension(dp->disp.queryString.extensions, - "EGL_EXT_gl_colorspace_scrgb_linear")) { - colorSpaces.push_back(EGL_GL_COLORSPACE_SCRGB_LINEAR_EXT); - } - } + // sRGB and linear are always supported when color space support is present. + colorSpaces.push_back(EGL_GL_COLORSPACE_SRGB_KHR); + colorSpaces.push_back(EGL_GL_COLORSPACE_LINEAR_KHR); - // BT2020 can be used with any pixel format. PQ encoding must be applied by the - // application and does not affect the behavior of OpenGL. - if (findExtension(dp->disp.queryString.extensions, - "EGL_EXT_gl_colorspace_bt2020_linear")) { + if (findExtension(dp->disp.queryString.extensions, "EGL_EXT_gl_colorspace_display_p3")) { + colorSpaces.push_back(EGL_GL_COLORSPACE_DISPLAY_P3_EXT); + } + if (findExtension(dp->disp.queryString.extensions, "EGL_EXT_gl_colorspace_scrgb")) { + colorSpaces.push_back(EGL_GL_COLORSPACE_SCRGB_EXT); + } + if (findExtension(dp->disp.queryString.extensions, "EGL_EXT_gl_colorspace_scrgb_linear")) { + colorSpaces.push_back(EGL_GL_COLORSPACE_SCRGB_LINEAR_EXT); + } + if (findExtension(dp->disp.queryString.extensions, "EGL_EXT_gl_colorspace_bt2020_linear")) { colorSpaces.push_back(EGL_GL_COLORSPACE_BT2020_LINEAR_EXT); } - if (findExtension(dp->disp.queryString.extensions, - "EGL_EXT_gl_colorspace_bt2020_pq")) { + if (findExtension(dp->disp.queryString.extensions, "EGL_EXT_gl_colorspace_bt2020_pq")) { colorSpaces.push_back(EGL_GL_COLORSPACE_BT2020_PQ_EXT); } - - // Linear DCI-P3 simply uses different primaries than standard RGB and thus - // can be used with any pixel format. - if (findExtension(dp->disp.queryString.extensions, - "EGL_EXT_gl_colorspace_display_p3_linear")) { + if (findExtension(dp->disp.queryString.extensions, "EGL_EXT_gl_colorspace_display_p3_linear")) { colorSpaces.push_back(EGL_GL_COLORSPACE_DISPLAY_P3_LINEAR_EXT); } return colorSpaces; @@ -547,18 +518,32 @@ static std::vector<EGLint> getDriverColorSpaces(egl_display_ptr dp, // unmodified. template <typename AttrType> static EGLBoolean processAttributes(egl_display_ptr dp, ANativeWindow* window, - android_pixel_format format, const AttrType* attrib_list, - EGLint* colorSpace, std::vector<AttrType>* strippedAttribList) { + const AttrType* attrib_list, EGLint* colorSpace, + std::vector<AttrType>* strippedAttribList) { for (const AttrType* attr = attrib_list; attr && attr[0] != EGL_NONE; attr += 2) { bool copyAttribute = true; if (attr[0] == EGL_GL_COLORSPACE_KHR) { - // Fail immediately if the driver doesn't have color space support at all. - if (!dp->hasColorSpaceSupport) return false; + switch (attr[1]) { + case EGL_GL_COLORSPACE_LINEAR_KHR: + case EGL_GL_COLORSPACE_SRGB_KHR: + case EGL_GL_COLORSPACE_DISPLAY_P3_EXT: + case EGL_GL_COLORSPACE_SCRGB_LINEAR_EXT: + case EGL_GL_COLORSPACE_SCRGB_EXT: + case EGL_GL_COLORSPACE_BT2020_LINEAR_EXT: + case EGL_GL_COLORSPACE_BT2020_PQ_EXT: + case EGL_GL_COLORSPACE_DISPLAY_P3_LINEAR_EXT: + // Fail immediately if the driver doesn't have color space support at all. 
+ if (!dp->hasColorSpaceSupport) return setError(EGL_BAD_ATTRIBUTE, EGL_FALSE); + break; + default: + // BAD_ATTRIBUTE if attr is not any of the EGL_GL_COLORSPACE_* + return setError(EGL_BAD_ATTRIBUTE, EGL_FALSE); + } *colorSpace = static_cast<EGLint>(attr[1]); // Strip the attribute if the driver doesn't understand it. copyAttribute = false; - std::vector<EGLint> driverColorSpaces = getDriverColorSpaces(dp, format); + std::vector<EGLint> driverColorSpaces = getDriverColorSpaces(dp); for (auto driverColorSpace : driverColorSpaces) { if (static_cast<EGLint>(attr[1]) == driverColorSpace) { copyAttribute = true; @@ -585,10 +570,8 @@ static EGLBoolean processAttributes(egl_display_ptr dp, ANativeWindow* window, // If the passed color space has wide color gamut, check whether the target native window // supports wide color. - const bool colorSpaceIsNarrow = - *colorSpace == EGL_GL_COLORSPACE_SRGB_KHR || - *colorSpace == EGL_GL_COLORSPACE_LINEAR_KHR || - *colorSpace == EGL_UNKNOWN; + const bool colorSpaceIsNarrow = *colorSpace == EGL_GL_COLORSPACE_SRGB_KHR || + *colorSpace == EGL_GL_COLORSPACE_LINEAR_KHR || *colorSpace == EGL_UNKNOWN; if (window && !colorSpaceIsNarrow) { bool windowSupportsWideColor = true; // Ordinarily we'd put a call to native_window_get_wide_color_support @@ -606,12 +589,12 @@ static EGLBoolean processAttributes(egl_display_ptr dp, ANativeWindow* window, ALOGE("processAttributes: invalid window (win=%p) " "failed (%#x) (already connected to another API?)", window, err); - return false; + return setError(EGL_BAD_NATIVE_WINDOW, EGL_FALSE); } if (!windowSupportsWideColor) { // Application has asked for a wide-color colorspace but // wide-color support isn't available on the display the window is on. - return false; + return setError(EGL_BAD_MATCH, EGL_FALSE); } } return true; @@ -695,8 +678,7 @@ EGLBoolean sendSurfaceMetadata(egl_surface_t* s) { native_window_set_buffers_smpte2086_metadata(s->getNativeWindow(), &smpteMetadata); s->resetSmpte2086Metadata(); if (err != 0) { - ALOGE("error setting native window smpte2086 metadata: %s (%d)", - strerror(-err), err); + ALOGE("error setting native window smpte2086 metadata: %s (%d)", strerror(-err), err); return EGL_FALSE; } } @@ -706,8 +688,7 @@ EGLBoolean sendSurfaceMetadata(egl_surface_t* s) { native_window_set_buffers_cta861_3_metadata(s->getNativeWindow(), &cta8613Metadata); s->resetCta8613Metadata(); if (err != 0) { - ALOGE("error setting native window CTS 861.3 metadata: %s (%d)", - strerror(-err), err); + ALOGE("error setting native window CTS 861.3 metadata: %s (%d)", strerror(-err), err); return EGL_FALSE; } } @@ -749,10 +730,12 @@ EGLSurface eglCreateWindowSurfaceTmpl(egl_display_ptr dp, egl_connection_t* cnx, // now select correct colorspace and dataspace based on user's attribute list EGLint colorSpace = EGL_UNKNOWN; std::vector<AttrType> strippedAttribList; - if (!processAttributes<AttrType>(dp, window, format, attrib_list, &colorSpace, - &strippedAttribList)) { + if (!processAttributes<AttrType>(dp, window, attrib_list, &colorSpace, &strippedAttribList)) { ALOGE("error invalid colorspace: %d", colorSpace); - return setError(EGL_BAD_ATTRIBUTE, EGL_NO_SURFACE); + if (cnx->angleBackend != EGL_PLATFORM_ANGLE_TYPE_VULKAN_ANGLE) { + native_window_api_disconnect(window, NATIVE_WINDOW_API_EGL); + } + return EGL_NO_SURFACE; } attrib_list = strippedAttribList.data(); @@ -765,13 +748,14 @@ EGLSurface eglCreateWindowSurfaceTmpl(egl_display_ptr dp, egl_connection_t* cnx, } android_dataspace dataSpace = 
dataSpaceFromEGLColorSpace(colorSpace); - if (dataSpace != HAL_DATASPACE_UNKNOWN) { - err = native_window_set_buffers_data_space(window, dataSpace); - if (err != 0) { - ALOGE("error setting native window pixel dataSpace: %s (%d)", strerror(-err), err); - native_window_api_disconnect(window, NATIVE_WINDOW_API_EGL); - return setError(EGL_BAD_NATIVE_WINDOW, EGL_NO_SURFACE); - } + // Set dataSpace even if it could be HAL_DATASPACE_UNKNOWN. + // HAL_DATASPACE_UNKNOWN is the default value, but it may have changed + // at this point. + err = native_window_set_buffers_data_space(window, dataSpace); + if (err != 0) { + ALOGE("error setting native window pixel dataSpace: %s (%d)", strerror(-err), err); + native_window_api_disconnect(window, NATIVE_WINDOW_API_EGL); + return setError(EGL_BAD_NATIVE_WINDOW, EGL_NO_SURFACE); } } @@ -873,9 +857,8 @@ EGLSurface eglCreatePixmapSurfaceImpl(EGLDisplay dpy, EGLConfig /*config*/, return EGL_NO_SURFACE; } -EGLSurface eglCreatePbufferSurfaceImpl( EGLDisplay dpy, EGLConfig config, - const EGLint *attrib_list) -{ +EGLSurface eglCreatePbufferSurfaceImpl(EGLDisplay dpy, EGLConfig config, + const EGLint* attrib_list) { egl_connection_t* cnx = nullptr; egl_display_ptr dp = validate_display_connection(dpy, cnx); if (dp) { @@ -886,35 +869,30 @@ EGLSurface eglCreatePbufferSurfaceImpl( EGLDisplay dpy, EGLConfig config, // Select correct colorspace based on user's attribute list EGLint colorSpace = EGL_UNKNOWN; std::vector<EGLint> strippedAttribList; - if (!processAttributes(dp, nullptr, format, attrib_list, &colorSpace, - &strippedAttribList)) { + if (!processAttributes(dp, nullptr, attrib_list, &colorSpace, &strippedAttribList)) { ALOGE("error invalid colorspace: %d", colorSpace); - return setError(EGL_BAD_ATTRIBUTE, EGL_NO_SURFACE); + return EGL_NO_SURFACE; } attrib_list = strippedAttribList.data(); - EGLSurface surface = cnx->egl.eglCreatePbufferSurface( - dp->disp.dpy, config, attrib_list); + EGLSurface surface = cnx->egl.eglCreatePbufferSurface(dp->disp.dpy, config, attrib_list); if (surface != EGL_NO_SURFACE) { - egl_surface_t* s = - new egl_surface_t(dp.get(), config, nullptr, surface, - getReportedColorSpace(colorSpace), cnx); + egl_surface_t* s = new egl_surface_t(dp.get(), config, nullptr, surface, + getReportedColorSpace(colorSpace), cnx); return s; } } return EGL_NO_SURFACE; } -EGLBoolean eglDestroySurfaceImpl(EGLDisplay dpy, EGLSurface surface) -{ +EGLBoolean eglDestroySurfaceImpl(EGLDisplay dpy, EGLSurface surface) { const egl_display_ptr dp = validate_display(dpy); if (!dp) return EGL_FALSE; SurfaceRef _s(dp.get(), surface); - if (!_s.get()) - return setError(EGL_BAD_SURFACE, (EGLBoolean)EGL_FALSE); + if (!_s.get()) return setError(EGL_BAD_SURFACE, (EGLBoolean)EGL_FALSE); - egl_surface_t * const s = get_surface(surface); + egl_surface_t* const s = get_surface(surface); EGLBoolean result = s->cnx->egl.eglDestroySurface(dp->disp.dpy, s->surface); if (result == EGL_TRUE) { _s.terminate(); @@ -922,17 +900,15 @@ EGLBoolean eglDestroySurfaceImpl(EGLDisplay dpy, EGLSurface surface) return result; } -EGLBoolean eglQuerySurfaceImpl( EGLDisplay dpy, EGLSurface surface, - EGLint attribute, EGLint *value) -{ +EGLBoolean eglQuerySurfaceImpl(EGLDisplay dpy, EGLSurface surface, EGLint attribute, + EGLint* value) { const egl_display_ptr dp = validate_display(dpy); if (!dp) return EGL_FALSE; SurfaceRef _s(dp.get(), surface); - if (!_s.get()) - return setError(EGL_BAD_SURFACE, (EGLBoolean)EGL_FALSE); + if (!_s.get()) return setError(EGL_BAD_SURFACE, (EGLBoolean)EGL_FALSE); - 
egl_surface_t const * const s = get_surface(surface); + egl_surface_t const* const s = get_surface(surface); if (s->getColorSpaceAttribute(attribute, value)) { return EGL_TRUE; } else if (s->getSmpte2086Attribute(attribute, value)) { diff --git a/opengl/libs/libEGL.map.txt b/opengl/libs/libEGL.map.txt index fa26e33f39..b2d795745f 100644 --- a/opengl/libs/libEGL.map.txt +++ b/opengl/libs/libEGL.map.txt @@ -3,23 +3,30 @@ LIBEGL { eglBindAPI; eglBindTexImage; eglChooseConfig; + eglClientWaitSync; # introduced=29 eglClientWaitSyncKHR; # introduced-arm=18 introduced-arm64=21 introduced-mips=18 introduced-mips64=21 introduced-x86=18 introduced-x86_64=21 eglCopyBuffers; eglCreateContext; + eglCreateImage; # introduced=29 eglCreateImageKHR; eglCreateNativeClientBufferANDROID; # introduced=24 eglCreatePbufferFromClientBuffer; eglCreatePbufferSurface; eglCreatePixmapSurface; + eglCreatePlatformPixmapSurface; # introduced=29 + eglCreatePlatformWindowSurface; # introduced=29 eglCreateStreamFromFileDescriptorKHR; # introduced=23 eglCreateStreamKHR; # introduced=23 eglCreateStreamProducerSurfaceKHR; # introduced=23 + eglCreateSync; # introduced=29 eglCreateSyncKHR; # introduced-arm=18 introduced-arm64=21 introduced-mips=18 introduced-mips64=21 introduced-x86=18 introduced-x86_64=21 eglCreateWindowSurface; eglDestroyContext; + eglDestroyImage; # introduced=29 eglDestroyImageKHR; eglDestroyStreamKHR; # introduced=23 eglDestroySurface; + eglDestroySync; # introduced=29 eglDestroySyncKHR; # introduced-arm=18 introduced-arm64=21 introduced-mips=18 introduced-mips64=21 introduced-x86=18 introduced-x86_64=21 eglDupNativeFenceFDANDROID; # vndk eglGetConfigAttrib; @@ -30,8 +37,10 @@ LIBEGL { eglGetDisplay; eglGetError; eglGetNativeClientBufferANDROID; # introduced=26 + eglGetPlatformDisplay; # introduced=29 eglGetProcAddress; eglGetStreamFileDescriptorKHR; # introduced=23 + eglGetSyncAttrib; # introduced=29 eglGetSyncAttribKHR; # introduced-arm=18 introduced-arm64=21 introduced-mips=18 introduced-mips64=21 introduced-x86=18 introduced-x86_64=21 eglGetSystemTimeFrequencyNV; # introduced-arm=14 introduced-arm64=21 introduced-mips=14 introduced-mips64=21 introduced-x86=14 introduced-x86_64=21 eglGetSystemTimeNV; # introduced-arm=14 introduced-arm64=21 introduced-mips=14 introduced-mips64=21 introduced-x86=14 introduced-x86_64=21 @@ -64,6 +73,7 @@ LIBEGL { eglWaitClient; eglWaitGL; eglWaitNative; + eglWaitSync; # introduced=29 eglWaitSyncKHR; # introduced-arm=18 introduced-arm64=21 introduced-mips=18 introduced-mips64=21 introduced-x86=18 introduced-x86_64=21 local: *; diff --git a/services/bufferhub/Android.bp b/services/bufferhub/Android.bp index a9af22fd83..d03d833fd7 100644 --- a/services/bufferhub/Android.bp +++ b/services/bufferhub/Android.bp @@ -23,6 +23,13 @@ cc_library_shared { ], srcs: [ "BufferHubService.cpp", + "BufferNode.cpp", + ], + header_libs: [ + "libbufferhub_headers", + "libdvr_headers", + "libnativewindow_headers", + "libpdx_headers", ], shared_libs: [ "android.frameworks.bufferhub@1.0", @@ -30,6 +37,7 @@ cc_library_shared { "libhidltransport", "libhwbinder", "liblog", + "libui", "libutils", ], export_include_dirs: [ @@ -49,6 +57,7 @@ cc_binary { "libhidltransport", "libhwbinder", "liblog", + "libui", "libutils", ], cflags: [ diff --git a/services/bufferhub/BufferNode.cpp b/services/bufferhub/BufferNode.cpp new file mode 100644 index 0000000000..62583a666d --- /dev/null +++ b/services/bufferhub/BufferNode.cpp @@ -0,0 +1,100 @@ +#include <errno.h> + +#include <bufferhub/BufferNode.h> +#include 
<private/dvr/buffer_hub_defs.h> +#include <ui/GraphicBufferAllocator.h> + +namespace android { +namespace frameworks { +namespace bufferhub { +namespace V1_0 { +namespace implementation { + +void BufferNode::InitializeMetadata() { + // Using placement new here to reuse shared memory instead of new allocation + // Initialize the atomic variables to zero. + dvr::BufferHubDefs::MetadataHeader* metadata_header = metadata_.metadata_header(); + buffer_state_ = new (&metadata_header->buffer_state) std::atomic<uint64_t>(0); + fence_state_ = new (&metadata_header->fence_state) std::atomic<uint64_t>(0); + active_clients_bit_mask_ = + new (&metadata_header->active_clients_bit_mask) std::atomic<uint64_t>(0); +} + +// Allocates a new BufferNode. +BufferNode::BufferNode(uint32_t width, uint32_t height, uint32_t layer_count, uint32_t format, + uint64_t usage, size_t user_metadata_size) { + uint32_t out_stride = 0; + // graphicBufferId is not used in GraphicBufferAllocator::allocate + // TODO(b/112338294) After move to the service folder, stop using the + // hardcoded service name "bufferhub". + int ret = GraphicBufferAllocator::get().allocate(width, height, format, layer_count, usage, + const_cast<const native_handle_t**>( + &buffer_handle_), + &out_stride, + /*graphicBufferId=*/0, + /*requestor=*/"bufferhub"); + + if (ret != OK || buffer_handle_ == nullptr) { + ALOGE("BufferNode::BufferNode: Failed to allocate buffer: %s", strerror(-ret)); + return; + } + + buffer_desc_.width = width; + buffer_desc_.height = height; + buffer_desc_.layers = layer_count; + buffer_desc_.format = format; + buffer_desc_.usage = usage; + buffer_desc_.stride = out_stride; + + metadata_ = BufferHubMetadata::Create(user_metadata_size); + if (!metadata_.IsValid()) { + ALOGE("BufferNode::BufferNode: Failed to allocate metadata."); + return; + } + InitializeMetadata(); +} + +// Free the handle +BufferNode::~BufferNode() { + if (buffer_handle_ != nullptr) { + status_t ret = GraphicBufferAllocator::get().free(buffer_handle_); + if (ret != OK) { + ALOGE("BufferNode::~BufferNode: Failed to free handle; Got error: %d", ret); + } + } +} + +uint64_t BufferNode::GetActiveClientsBitMask() const { + return active_clients_bit_mask_->load(std::memory_order_acquire); +} + +uint64_t BufferNode::AddNewActiveClientsBitToMask() { + uint64_t current_active_clients_bit_mask = GetActiveClientsBitMask(); + uint64_t client_state_mask = 0ULL; + uint64_t updated_active_clients_bit_mask = 0ULL; + do { + client_state_mask = dvr::BufferHubDefs::FindNextAvailableClientStateMask( + current_active_clients_bit_mask); + if (client_state_mask == 0ULL) { + ALOGE("BufferNode::AddNewActiveClientsBitToMask: reached the maximum " + "mumber of channels per buffer node: 32."); + errno = E2BIG; + return 0ULL; + } + updated_active_clients_bit_mask = current_active_clients_bit_mask | client_state_mask; + } while (!(active_clients_bit_mask_->compare_exchange_weak(current_active_clients_bit_mask, + updated_active_clients_bit_mask, + std::memory_order_acq_rel, + std::memory_order_acquire))); + return client_state_mask; +} + +void BufferNode::RemoveClientsBitFromMask(const uint64_t& value) { + active_clients_bit_mask_->fetch_and(~value); +} + +} // namespace implementation +} // namespace V1_0 +} // namespace bufferhub +} // namespace frameworks +} // namespace android diff --git a/services/bufferhub/include/bufferhub/BufferNode.h b/services/bufferhub/include/bufferhub/BufferNode.h new file mode 100644 index 0000000000..ffeacaccc3 --- /dev/null +++ 
b/services/bufferhub/include/bufferhub/BufferNode.h @@ -0,0 +1,84 @@ +#ifndef ANDROID_FRAMEWORKS_BUFFERHUB_V1_0_BUFFER_NODE_H_ +#define ANDROID_FRAMEWORKS_BUFFERHUB_V1_0_BUFFER_NODE_H_ + +#include <android/hardware_buffer.h> +#include <ui/BufferHubMetadata.h> + +namespace android { +namespace frameworks { +namespace bufferhub { +namespace V1_0 { +namespace implementation { + +class BufferNode { +public: + // Allocates a new BufferNode. + BufferNode(uint32_t width, uint32_t height, uint32_t layer_count, uint32_t format, + uint64_t usage, size_t user_metadata_size); + + ~BufferNode(); + + // Returns whether the object holds a valid metadata. + bool IsValid() const { return metadata_.IsValid(); } + + size_t user_metadata_size() const { return metadata_.user_metadata_size(); } + + // Accessors of the buffer description and handle + const native_handle_t* buffer_handle() const { return buffer_handle_; } + const AHardwareBuffer_Desc& buffer_desc() const { return buffer_desc_; } + + // Accessors of metadata. + const BufferHubMetadata& metadata() const { return metadata_; } + + // Gets the current value of active_clients_bit_mask in metadata_ with + // std::memory_order_acquire, so that all previous releases of + // active_clients_bit_mask from all threads will be returned here. + uint64_t GetActiveClientsBitMask() const; + + // Find and add a new client_state_mask to active_clients_bit_mask in + // metadata_. + // Return the new client_state_mask that is added to active_clients_bit_mask. + // Return 0ULL if there are already 32 bp clients of the buffer. + uint64_t AddNewActiveClientsBitToMask(); + + // Removes the value from active_clients_bit_mask in metadata_ with + // std::memory_order_release, so that the change will be visible to any + // acquire of active_clients_bit_mask_ in any threads after the succeed of + // this operation. + void RemoveClientsBitFromMask(const uint64_t& value); + +private: + // Helper method for constructors to initialize atomic metadata header + // variables in shared memory. + void InitializeMetadata(); + + // Gralloc buffer handles. + native_handle_t* buffer_handle_; + AHardwareBuffer_Desc buffer_desc_; + + // Metadata in shared memory. + BufferHubMetadata metadata_; + + // The following variables are atomic variables in metadata_ that are visible + // to Bn object and Bp objects. Please find more info in + // BufferHubDefs::MetadataHeader. + + // buffer_state_ tracks the state of the buffer. Buffer can be in one of these + // four states: gained, posted, acquired, released. + std::atomic<uint64_t>* buffer_state_ = nullptr; + + // TODO(b/112012161): add comments to fence_state_. + std::atomic<uint64_t>* fence_state_ = nullptr; + + // active_clients_bit_mask_ tracks all the bp clients of the buffer. It is the + // union of all client_state_mask of all bp clients. 
+ std::atomic<uint64_t>* active_clients_bit_mask_ = nullptr; +}; + +} // namespace implementation +} // namespace V1_0 +} // namespace bufferhub +} // namespace frameworks +} // namespace android + +#endif // ANDROID_FRAMEWORKS_BUFFERHUB_V1_0_BUFFER_NODE_H_ diff --git a/services/bufferhub/tests/Android.bp b/services/bufferhub/tests/Android.bp new file mode 100644 index 0000000000..cef31f6ba9 --- /dev/null +++ b/services/bufferhub/tests/Android.bp @@ -0,0 +1,24 @@ +cc_test { + name: "BufferNode_test", + srcs: ["BufferNode_test.cpp"], + cflags: [ + "-DLOG_TAG=\"BufferNode_test\"", + "-DTRACE=0", + "-DATRACE_TAG=ATRACE_TAG_GRAPHICS", + ], + header_libs: [ + "libbufferhub_headers", + "libdvr_headers", + "libnativewindow_headers", + "libpdx_headers", + ], + shared_libs: [ + "libbufferhubservice", + "libui", + ], + static_libs: [ + "libgmock", + ], + // TODO(b/117568153): Temporarily opt out using libcrt. + no_libcrt: true, +}
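The class comments above spell out an acquire/release contract on active_clients_bit_mask_: RemoveClientsBitFromMask is documented as a release and GetActiveClientsBitMask as an acquire. A minimal sketch of the intended cross-thread pairing, assuming a shared BufferNode and a client bit previously obtained from AddNewActiveClientsBitToMask (the helper names below are illustrative):

    #include <cstdint>

    #include <bufferhub/BufferNode.h>

    using android::frameworks::bufferhub::V1_0::implementation::BufferNode;

    // A client that is done with the buffer clears its bit. Per the documented
    // release semantics, writes this client made to the shared metadata before
    // the call are published along with the cleared bit.
    void retireClient(BufferNode* node, uint64_t clientBit) {
        node->RemoveClientsBitFromMask(clientBit);
    }

    // Another thread polls the mask. GetActiveClientsBitMask() is documented as
    // an acquire load, so once the bit reads as cleared, the retiring client's
    // earlier writes to the shared metadata are visible here.
    bool clientRetired(const BufferNode& node, uint64_t clientBit) {
        return (node.GetActiveClientsBitMask() & clientBit) == 0;
    }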
\ No newline at end of file diff --git a/services/bufferhub/tests/BufferNode_test.cpp b/services/bufferhub/tests/BufferNode_test.cpp new file mode 100644 index 0000000000..df31d78b89 --- /dev/null +++ b/services/bufferhub/tests/BufferNode_test.cpp @@ -0,0 +1,110 @@ +#include <bufferhub/BufferNode.h> +#include <errno.h> +#include <gmock/gmock.h> +#include <gtest/gtest.h> +#include <ui/GraphicBufferMapper.h> + +namespace android { +namespace frameworks { +namespace bufferhub { +namespace V1_0 { +namespace implementation { + +namespace { + +using testing::NotNull; + +const uint32_t kWidth = 640; +const uint32_t kHeight = 480; +const uint32_t kLayerCount = 1; +const uint32_t kFormat = 1; +const uint64_t kUsage = 0; +const size_t kUserMetadataSize = 0; +const size_t kMaxClientsCount = dvr::BufferHubDefs::kMaxNumberOfClients; + +class BufferNodeTest : public ::testing::Test { +protected: + void SetUp() override { + buffer_node = + new BufferNode(kWidth, kHeight, kLayerCount, kFormat, kUsage, kUserMetadataSize); + ASSERT_TRUE(buffer_node->IsValid()); + } + + void TearDown() override { + if (buffer_node != nullptr) { + delete buffer_node; + } + } + + BufferNode* buffer_node = nullptr; +}; + +TEST_F(BufferNodeTest, TestCreateBufferNode) { + EXPECT_EQ(buffer_node->user_metadata_size(), kUserMetadataSize); + // Test the handle just allocated is good (i.e. able to be imported) + GraphicBufferMapper& mapper = GraphicBufferMapper::get(); + const native_handle_t* outHandle; + status_t ret = + mapper.importBuffer(buffer_node->buffer_handle(), buffer_node->buffer_desc().width, + buffer_node->buffer_desc().height, + buffer_node->buffer_desc().layers, + buffer_node->buffer_desc().format, buffer_node->buffer_desc().usage, + buffer_node->buffer_desc().stride, &outHandle); + EXPECT_EQ(ret, OK); + EXPECT_THAT(outHandle, NotNull()); +} + +TEST_F(BufferNodeTest, TestAddNewActiveClientsBitToMask_twoNewClients) { + uint64_t new_client_state_mask_1 = buffer_node->AddNewActiveClientsBitToMask(); + EXPECT_EQ(buffer_node->GetActiveClientsBitMask(), new_client_state_mask_1); + + // Request and add a new client_state_mask again. + // Active clients bit mask should be the union of the two new + // client_state_masks. + uint64_t new_client_state_mask_2 = buffer_node->AddNewActiveClientsBitToMask(); + EXPECT_EQ(buffer_node->GetActiveClientsBitMask(), + new_client_state_mask_1 | new_client_state_mask_2); +} + +TEST_F(BufferNodeTest, TestAddNewActiveClientsBitToMask_32NewClients) { + uint64_t new_client_state_mask = 0ULL; + uint64_t current_mask = 0ULL; + uint64_t expected_mask = 0ULL; + + for (int i = 0; i < kMaxClientsCount; ++i) { + new_client_state_mask = buffer_node->AddNewActiveClientsBitToMask(); + EXPECT_NE(new_client_state_mask, 0); + EXPECT_FALSE(new_client_state_mask & current_mask); + expected_mask = current_mask | new_client_state_mask; + current_mask = buffer_node->GetActiveClientsBitMask(); + EXPECT_EQ(current_mask, expected_mask); + } + + // Method should fail upon requesting for more than maximum allowable clients. 
+ new_client_state_mask = buffer_node->AddNewActiveClientsBitToMask(); + EXPECT_EQ(new_client_state_mask, 0ULL); + EXPECT_EQ(errno, E2BIG); +} + +TEST_F(BufferNodeTest, TestRemoveActiveClientsBitFromMask) { + buffer_node->AddNewActiveClientsBitToMask(); + uint64_t current_mask = buffer_node->GetActiveClientsBitMask(); + uint64_t new_client_state_mask = buffer_node->AddNewActiveClientsBitToMask(); + EXPECT_NE(buffer_node->GetActiveClientsBitMask(), current_mask); + + buffer_node->RemoveClientsBitFromMask(new_client_state_mask); + EXPECT_EQ(buffer_node->GetActiveClientsBitMask(), current_mask); + + // Remove the test_mask again to the active client bit mask should not modify + // the value of active clients bit mask. + buffer_node->RemoveClientsBitFromMask(new_client_state_mask); + EXPECT_EQ(buffer_node->GetActiveClientsBitMask(), current_mask); +} + +} // namespace + +} // namespace implementation +} // namespace V1_0 +} // namespace bufferhub +} // namespace frameworks +} // namespace android diff --git a/services/surfaceflinger/BufferQueueLayer.cpp b/services/surfaceflinger/BufferQueueLayer.cpp index e592a8bf98..c130bc5105 100644 --- a/services/surfaceflinger/BufferQueueLayer.cpp +++ b/services/surfaceflinger/BufferQueueLayer.cpp @@ -23,7 +23,9 @@ namespace android { BufferQueueLayer::BufferQueueLayer(const LayerCreationArgs& args) : BufferLayer(args) {} -BufferQueueLayer::~BufferQueueLayer() = default; +BufferQueueLayer::~BufferQueueLayer() { + mConsumer->abandon(); +} // ----------------------------------------------------------------------- // Interface implementation for Layer @@ -33,10 +35,6 @@ void BufferQueueLayer::onLayerDisplayed(const sp<Fence>& releaseFence) { mConsumer->setReleaseFence(releaseFence); } -void BufferQueueLayer::abandon() { - mConsumer->abandon(); -} - void BufferQueueLayer::setTransformHint(uint32_t orientation) const { mConsumer->setTransformHint(orientation); } @@ -380,7 +378,17 @@ void BufferQueueLayer::onFrameAvailable(const BufferItem& item) { mFlinger->mInterceptor->saveBufferUpdate(this, item.mGraphicBuffer->getWidth(), item.mGraphicBuffer->getHeight(), item.mFrameNumber); - mFlinger->signalLayerUpdate(); + + // If this layer is orphaned, then we run a fake vsync pulse so that + // dequeueBuffer doesn't block indefinitely. 
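For context on the hunk that follows: dequeueBuffer blocks once the client owns or has queued every slot in the BufferQueue and nothing on the consumer side acquires and releases them, so an orphaned layer that will never be latched again would leave its client stuck forever. The change below fakes one consumer cycle (latch, short sleep, release) so slots keep circulating. A rough stand-alone analogy of that blocking behaviour, in plain C++ with made-up names (SlotPool, drainOnce) rather than anything from libgui:

#include <condition_variable>
#include <mutex>

// Toy stand-in for a BufferQueue's free-slot accounting (not the real libgui code).
class SlotPool {
public:
    explicit SlotPool(int slots) : free_(slots) {}

    // Producer side: roughly what dequeueBuffer does -- block until a slot is free.
    void dequeue() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return free_ > 0; });
        --free_;
    }

    // Consumer side: acquiring and then releasing a queued buffer frees a slot.
    void drainOnce() {
        std::lock_guard<std::mutex> lock(mutex_);
        ++free_;
        cv_.notify_one();
    }

private:
    std::mutex mutex_;
    std::condition_variable cv_;
    int free_;
};

int main() {
    SlotPool pool(2);
    pool.dequeue();
    pool.dequeue();   // pool exhausted: a third dequeue() would now block forever...
    pool.drainOnce(); // ...unless something plays consumer, as the hunk below does.
    pool.dequeue();   // now succeeds
}

The usleep(16000) in the patch is roughly one 60 Hz vsync interval, so the fake pulse paces the producer about the way a real display refresh would.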
+ if (isRemovedFromCurrentState()) { + bool ignored = false; + latchBuffer(ignored, systemTime(), Fence::NO_FENCE); + usleep(16000); + releasePendingBuffer(systemTime()); + } else { + mFlinger->signalLayerUpdate(); + } } void BufferQueueLayer::onFrameReplaced(const BufferItem& item) { diff --git a/services/surfaceflinger/BufferQueueLayer.h b/services/surfaceflinger/BufferQueueLayer.h index abe0bc7c0a..c9ebe042b8 100644 --- a/services/surfaceflinger/BufferQueueLayer.h +++ b/services/surfaceflinger/BufferQueueLayer.h @@ -40,8 +40,6 @@ public: public: void onLayerDisplayed(const sp<Fence>& releaseFence) override; - void abandon() override; - void setTransformHint(uint32_t orientation) const override; std::vector<OccupancyTracker::Segment> getOccupancyHistory(bool forceFlush) override; diff --git a/services/surfaceflinger/BufferStateLayer.cpp b/services/surfaceflinger/BufferStateLayer.cpp index 5df8ade34e..73098bf2aa 100644 --- a/services/surfaceflinger/BufferStateLayer.cpp +++ b/services/surfaceflinger/BufferStateLayer.cpp @@ -470,8 +470,7 @@ status_t BufferStateLayer::updateTexImage(bool& /*recomputeVisibleRegions*/, nse } // TODO(marissaw): properly support mTimeStats - mTimeStats.setLayerName(layerID, getName().c_str()); - mTimeStats.setPostTime(layerID, getFrameNumber(), latchTime); + mTimeStats.setPostTime(layerID, getFrameNumber(), getName().c_str(), latchTime); mTimeStats.setAcquireFence(layerID, getFrameNumber(), getCurrentFenceTime()); mTimeStats.setLatchTime(layerID, getFrameNumber(), latchTime); diff --git a/services/surfaceflinger/DisplayDevice.cpp b/services/surfaceflinger/DisplayDevice.cpp index 6f645df73f..91b18c9fd7 100644 --- a/services/surfaceflinger/DisplayDevice.cpp +++ b/services/surfaceflinger/DisplayDevice.cpp @@ -28,6 +28,7 @@ #include <string.h> #include <math.h> +#include <android-base/stringprintf.h> #include <android/hardware/configstore/1.0/ISurfaceFlingerConfigs.h> #include <configstore/Utils.h> #include <cutils/properties.h> @@ -646,14 +647,21 @@ uint32_t DisplayDevice::getPrimaryDisplayOrientationTransform() { return sPrimaryDisplayOrientation; } +std::string DisplayDevice::getDebugName() const { + const auto id = mId >= 0 ? base::StringPrintf("%d, ", mId) : std::string(); + return base::StringPrintf("DisplayDevice{%s%s%s\"%s\"}", id.c_str(), + isPrimary() ? "primary, " : "", isVirtual() ? 
"virtual, " : "", + mDisplayName.c_str()); +} + void DisplayDevice::dump(String8& result) const { const ui::Transform& tr(mGlobalTransform); ANativeWindow* const window = mNativeWindow.get(); - result.appendFormat("+ DisplayDevice: %s\n", mDisplayName.c_str()); - result.appendFormat(" type=%x, ID=%d, layerStack=%u, (%4dx%4d), ANativeWindow=%p " + result.appendFormat("+ %s\n", getDebugName().c_str()); + result.appendFormat(" layerStack=%u, (%4dx%4d), ANativeWindow=%p " "(%d:%d:%d:%d), orient=%2d (type=%08x), " "flips=%u, isSecure=%d, powerMode=%d, activeConfig=%d, numLayers=%zu\n", - mType, mId, mLayerStack, mDisplayWidth, mDisplayHeight, window, + mLayerStack, mDisplayWidth, mDisplayHeight, window, mSurface->queryRedSize(), mSurface->queryGreenSize(), mSurface->queryBlueSize(), mSurface->queryAlphaSize(), mOrientation, tr.getType(), getPageFlipCount(), mIsSecure, mPowerMode, mActiveConfig, @@ -693,7 +701,7 @@ void DisplayDevice::addColorMode( const Dataspace dataspace = colorModeToDataspace(mode); const Dataspace hwcDataspace = colorModeToDataspace(hwcColorMode); - ALOGV("DisplayDevice %d/%d: map (%s, %s) to (%s, %s, %s)", mType, mId, + ALOGV("%s: map (%s, %s) to (%s, %s, %s)", getDebugName().c_str(), dataspaceDetails(static_cast<android_dataspace_t>(dataspace)).c_str(), decodeRenderIntent(intent).c_str(), dataspaceDetails(static_cast<android_dataspace_t>(hwcDataspace)).c_str(), diff --git a/services/surfaceflinger/DisplayDevice.h b/services/surfaceflinger/DisplayDevice.h index 918f7dec89..152d0eca51 100644 --- a/services/surfaceflinger/DisplayDevice.h +++ b/services/surfaceflinger/DisplayDevice.h @@ -203,6 +203,7 @@ public: * Debugging */ uint32_t getPageFlipCount() const; + std::string getDebugName() const; void dump(String8& result) const; private: diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp index 88c3c8a018..f29dfc0f1b 100644 --- a/services/surfaceflinger/Layer.cpp +++ b/services/surfaceflinger/Layer.cpp @@ -111,6 +111,8 @@ Layer::Layer(const LayerCreationArgs& args) args.flinger->getCompositorTiming(&compositorTiming); mFrameEventHistory.initializeCompositorTiming(compositorTiming); mFrameTracker.setDisplayRefreshPeriod(compositorTiming.interval); + + mFlinger->onLayerCreated(); } Layer::~Layer() { @@ -119,13 +121,11 @@ Layer::~Layer() { c->detachLayer(this); } - for (auto& point : mRemoteSyncPoints) { - point->setTransactionApplied(); - } - for (auto& point : mLocalSyncPoints) { - point->setFrameAvailable(); - } mFrameTracker.logAndResetStats(mName); + + destroyAllHwcLayers(); + + mFlinger->onLayerDestroyed(); } // --------------------------------------------------------------------------- @@ -140,10 +140,9 @@ Layer::~Layer() { void Layer::onLayerDisplayed(const sp<Fence>& /*releaseFence*/) {} void Layer::onRemovedFromCurrentState() { - // the layer is removed from SF mCurrentState to mLayersPendingRemoval - - mPendingRemoval = true; + mRemovedFromCurrentState = true; + // the layer is removed from SF mCurrentState to mLayersPendingRemoval if (mCurrentState.zOrderRelativeOf != nullptr) { sp<Layer> strongRelative = mCurrentState.zOrderRelativeOf.promote(); if (strongRelative != nullptr) { @@ -153,19 +152,26 @@ void Layer::onRemovedFromCurrentState() { mCurrentState.zOrderRelativeOf = nullptr; } - for (const auto& child : mCurrentChildren) { - child->onRemovedFromCurrentState(); + // Since we are no longer reachable from CurrentState SurfaceFlinger + // will no longer invoke doTransaction for us, and so we will + // never finish applying 
transactions. We signal the sync point + // now so that another layer will not become indefinitely + // blocked. + for (auto& point: mRemoteSyncPoints) { + point->setTransactionApplied(); } -} + mRemoteSyncPoints.clear(); -void Layer::onRemoved() { - // the layer is removed from SF mLayersPendingRemoval - abandon(); - - destroyAllHwcLayers(); + { + Mutex::Autolock syncLock(mLocalSyncPointMutex); + for (auto& point : mLocalSyncPoints) { + point->setFrameAvailable(); + } + mLocalSyncPoints.clear(); + } for (const auto& child : mCurrentChildren) { - child->onRemoved(); + child->onRemovedFromCurrentState(); } } @@ -228,6 +234,10 @@ void Layer::destroyAllHwcLayers() { } LOG_ALWAYS_FATAL_IF(!getBE().mHwcLayers.empty(), "All hardware composer layers should have been destroyed"); + + for (const sp<Layer>& child : mDrawingChildren) { + child->destroyAllHwcLayers(); + } } Rect Layer::getContentCrop() const { @@ -752,6 +762,9 @@ bool Layer::addSyncPoint(const std::shared_ptr<SyncPoint>& point) { // relevant frame return false; } + if (isRemovedFromCurrentState()) { + return false; + } Mutex::Autolock lock(mLocalSyncPointMutex); mLocalSyncPoints.push_back(point); @@ -825,7 +838,9 @@ void Layer::pushPendingState() { // If this transaction is waiting on the receipt of a frame, generate a sync // point and send it to the remote layer. - if (mCurrentState.barrierLayer_legacy != nullptr) { + // We don't allow installing sync points after we are removed from the current state + // as we won't be able to signal our end. + if (mCurrentState.barrierLayer_legacy != nullptr && !isRemovedFromCurrentState()) { sp<Layer> barrierLayer = mCurrentState.barrierLayer_legacy.promote(); if (barrierLayer == nullptr) { ALOGE("[%s] Unable to promote barrier Layer.", mName.string()); @@ -1476,9 +1491,8 @@ void Layer::onDisconnect() { void Layer::addAndGetFrameTimestamps(const NewFrameEventsEntry* newTimestamps, FrameEventHistoryDelta* outDelta) { if (newTimestamps) { - const int32_t layerID = getSequence(); - mTimeStats.setLayerName(layerID, getName().c_str()); - mTimeStats.setPostTime(layerID, newTimestamps->frameNumber, newTimestamps->postedTime); + mTimeStats.setPostTime(getSequence(), newTimestamps->frameNumber, getName().c_str(), + newTimestamps->postedTime); } Mutex::Autolock lock(mFrameEventHistoryMutex); @@ -1995,6 +2009,10 @@ void Layer::writeToProto(LayerProto* layerInfo, int32_t displayId) { } } +bool Layer::isRemovedFromCurrentState() const { + return mRemovedFromCurrentState; +} + // --------------------------------------------------------------------------- }; // namespace android diff --git a/services/surfaceflinger/Layer.h b/services/surfaceflinger/Layer.h index 5d05f0530b..12671ff51d 100644 --- a/services/surfaceflinger/Layer.h +++ b/services/surfaceflinger/Layer.h @@ -346,7 +346,7 @@ public: virtual bool isCreatedFromMainThread() const { return false; } - bool isPendingRemoval() const { return mPendingRemoval; } + bool isRemovedFromCurrentState() const; void writeToProto(LayerProto* layerInfo, LayerVector::StateSet stateSet = LayerVector::StateSet::Drawing); @@ -394,8 +394,6 @@ public: */ virtual void onLayerDisplayed(const sp<Fence>& releaseFence); - virtual void abandon() {} - virtual bool shouldPresentNow(nsecs_t /*expectedPresentTime*/) const { return false; } virtual void setTransformHint(uint32_t /*orientation*/) const { } @@ -475,12 +473,6 @@ public: */ void onRemovedFromCurrentState(); - /* - * called with the state lock from the main thread when the layer is - * removed from the pending removal list 
- */ - void onRemoved(); - // Updates the transform hint in our SurfaceFlingerConsumer to match // the current orientation of the display device. void updateTransformHint(const sp<const DisplayDevice>& display) const; @@ -595,12 +587,12 @@ protected: */ class LayerCleaner { sp<SurfaceFlinger> mFlinger; - wp<Layer> mLayer; + sp<Layer> mLayer; protected: ~LayerCleaner() { // destroy client resources - mFlinger->onLayerDestroyed(mLayer); + mFlinger->onHandleDestroyed(mLayer); } public: @@ -702,6 +694,8 @@ public: virtual PixelFormat getPixelFormat() const { return PIXEL_FORMAT_NONE; } bool getPremultipledAlpha() const; + bool mPendingHWCDestroy{false}; + protected: // ----------------------------------------------------------------------- bool usingRelativeZ(LayerVector::StateSet stateSet); @@ -745,7 +739,7 @@ protected: // Whether filtering is needed b/c of the drawingstate bool mNeedsFiltering{false}; - bool mPendingRemoval{false}; + std::atomic<bool> mRemovedFromCurrentState{false}; // page-flip thread (currently main thread) bool mProtectedByApp{false}; // application requires protected path to external sink diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp index 968fcd6acb..dec08fd629 100644 --- a/services/surfaceflinger/SurfaceFlinger.cpp +++ b/services/surfaceflinger/SurfaceFlinger.cpp @@ -1572,8 +1572,7 @@ void SurfaceFlinger::handleMessageRefresh() { getBE().mEndOfFrameCompositionInfo = std::move(getBE().mCompositionInfo); for (const auto& [token, display] : mDisplays) { - const auto displayId = display->getId(); - for (auto& compositionInfo : getBE().mEndOfFrameCompositionInfo[displayId]) { + for (auto& compositionInfo : getBE().mEndOfFrameCompositionInfo[token]) { compositionInfo.hwc.hwcLayer = nullptr; } } @@ -1670,16 +1669,16 @@ void SurfaceFlinger::calculateWorkingSet() { mDrawingState.colorMatrixChanged = false; for (const auto& [token, display] : mDisplays) { - const auto displayId = display->getId(); - getBE().mCompositionInfo[displayId].clear(); + getBE().mCompositionInfo[token].clear(); + for (auto& layer : display->getVisibleLayersSortedByZ()) { - auto displayId = display->getId(); + const auto displayId = display->getId(); layer->getBE().compositionInfo.compositionType = layer->getCompositionType(displayId); if (!layer->setHwcLayer(displayId)) { ALOGV("Need to create HWCLayer for %s", layer->getName().string()); } layer->getBE().compositionInfo.hwc.displayId = displayId; - getBE().mCompositionInfo[displayId].push_back(layer->getBE().compositionInfo); + getBE().mCompositionInfo[token].push_back(layer->getBE().compositionInfo); layer->getBE().compositionInfo.hwc.hwcLayer = nullptr; } } @@ -1687,7 +1686,6 @@ void SurfaceFlinger::calculateWorkingSet() { void SurfaceFlinger::doDebugFlashRegions(const sp<DisplayDevice>& display, bool repaintEverything) { - const auto displayId = display->getId(); // is debugging enabled if (CC_LIKELY(!mDebugRegion)) return; @@ -1713,14 +1711,7 @@ void SurfaceFlinger::doDebugFlashRegions(const sp<DisplayDevice>& display, bool usleep(mDebugRegion * 1000); } - if (display->isPoweredOn()) { - status_t result = display->prepareFrame( - *getBE().mHwc, getBE().mCompositionInfo[displayId]); - ALOGE_IF(result != NO_ERROR, - "prepareFrame for display %d failed:" - " %d (%s)", - display->getId(), result, strerror(-result)); - } + prepareFrame(display); } void SurfaceFlinger::doTracing(const char* where) { @@ -2132,13 +2123,12 @@ void SurfaceFlinger::beginFrame(const sp<DisplayDevice>& display) void 
SurfaceFlinger::prepareFrame(const sp<DisplayDevice>& display) { - const auto displayId = display->getId(); if (!display->isPoweredOn()) { return; } - status_t result = display->prepareFrame( - *getBE().mHwc, getBE().mCompositionInfo[displayId]); + status_t result = display->prepareFrame(getHwComposer(), + getBE().mCompositionInfo[display->getDisplayToken()]); ALOGE_IF(result != NO_ERROR, "prepareFrame for display %d failed:" " %d (%s)", @@ -2739,7 +2729,15 @@ void SurfaceFlinger::commitTransaction() for (const auto& l : mLayersPendingRemoval) { recordBufferingStats(l->getName().string(), l->getOccupancyHistory(true)); - l->onRemoved(); + + // We need to release the HWC layers when the Layer is removed + // from the current state otherwise the HWC layer just continues + // showing at its last configured state until we eventually + // abandon the buffer queue. + if (l->isRemovedFromCurrentState()) { + l->destroyAllHwcLayers(); + l->releasePendingBuffer(systemTime()); + } } mLayersPendingRemoval.clear(); } @@ -3172,7 +3170,7 @@ status_t SurfaceFlinger::addClientLayer(const sp<Client>& client, if (parent == nullptr) { mCurrentState.layersSortedByZ.add(lbc); } else { - if (parent->isPendingRemoval()) { + if (parent->isRemovedFromCurrentState()) { ALOGE("addClientLayer called with a removed parent"); return NAME_NOT_FOUND; } @@ -3188,7 +3186,6 @@ status_t SurfaceFlinger::addClientLayer(const sp<Client>& client, mMaxGraphicBufferProducerListSize, mNumLayers); } mLayersAdded = true; - mNumLayers++; } // attach this layer to the client @@ -3202,52 +3199,22 @@ status_t SurfaceFlinger::removeLayer(const sp<Layer>& layer, bool topLevelOnly) return removeLayerLocked(mStateLock, layer, topLevelOnly); } -status_t SurfaceFlinger::removeLayerLocked(const Mutex&, const sp<Layer>& layer, +status_t SurfaceFlinger::removeLayerLocked(const Mutex& lock, const sp<Layer>& layer, bool topLevelOnly) { - if (layer->isPendingRemoval()) { - return NO_ERROR; - } - const auto& p = layer->getParent(); ssize_t index; if (p != nullptr) { if (topLevelOnly) { return NO_ERROR; } - - sp<Layer> ancestor = p; - while (ancestor->getParent() != nullptr) { - ancestor = ancestor->getParent(); - } - if (mCurrentState.layersSortedByZ.indexOf(ancestor) < 0) { - ALOGE("removeLayer called with a layer whose parent has been removed"); - return NAME_NOT_FOUND; - } - index = p->removeChild(layer); } else { index = mCurrentState.layersSortedByZ.remove(layer); } - // As a matter of normal operation, the LayerCleaner will produce a second - // attempt to remove the surface. The Layer will be kept alive in mDrawingState - // so we will succeed in promoting it, but it's already been removed - // from mCurrentState. As long as we can find it in mDrawingState we have no problem - // otherwise something has gone wrong and we are leaking the layer. - if (index < 0 && mDrawingState.layersSortedByZ.indexOf(layer) < 0) { - ALOGE("Failed to find layer (%s) in layer parent (%s).", - layer->getName().string(), - (p != nullptr) ? 
p->getName().string() : "no-parent"); - return BAD_VALUE; - } else if (index < 0) { - return NO_ERROR; - } - layer->onRemovedFromCurrentState(); - mLayersPendingRemoval.add(layer); - mLayersRemoved = true; - mNumLayers -= 1 + layer->getChildrenCount(); - setTransactionFlags(eTransactionNeeded); + + markLayerPendingRemovalLocked(lock, layer); return NO_ERROR; } @@ -3448,11 +3415,6 @@ uint32_t SurfaceFlinger::setClientStateLocked(const ComposerState& composerState return 0; } - if (layer->isPendingRemoval()) { - ALOGW("Attempting to set client state on removed layer: %s", layer->getName().string()); - return 0; - } - uint32_t flags = 0; const uint32_t what = s.what; @@ -3656,11 +3618,6 @@ void SurfaceFlinger::setDestroyStateLocked(const ComposerState& composerState) { return; } - if (layer->isPendingRemoval()) { - ALOGW("Attempting to destroy on removed layer: %s", layer->getName().string()); - return; - } - if (state.what & layer_state_t::eDestroySurface) { removeLayerLocked(mStateLock, layer); } @@ -3834,17 +3791,16 @@ status_t SurfaceFlinger::onLayerRemoved(const sp<Client>& client, const sp<IBind return err; } -status_t SurfaceFlinger::onLayerDestroyed(const wp<Layer>& layer) +void SurfaceFlinger::markLayerPendingRemovalLocked(const Mutex&, const sp<Layer>& layer) { + mLayersPendingRemoval.add(layer); + mLayersRemoved = true; + setTransactionFlags(eTransactionNeeded); +} + +void SurfaceFlinger::onHandleDestroyed(const sp<Layer>& layer) { - // called by ~LayerCleaner() when all references to the IBinder (handle) - // are gone - sp<Layer> l = layer.promote(); - if (l == nullptr) { - // The layer has already been removed, carry on - return NO_ERROR; - } - // If we have a parent, then we can continue to live as long as it does. - return removeLayer(l, true); + Mutex::Autolock lock(mStateLock); + markLayerPendingRemovalLocked(mStateLock, layer); } // --------------------------------------------------------------------------- @@ -4394,17 +4350,13 @@ void SurfaceFlinger::dumpFrameCompositionInfo(String8& result) const { std::string stringResult; for (const auto& [token, display] : mDisplays) { - const auto displayId = display->getId(); - if (displayId == DisplayDevice::DISPLAY_ID_INVALID) { + const auto it = getBE().mEndOfFrameCompositionInfo.find(token); + if (it == getBE().mEndOfFrameCompositionInfo.end()) { continue; } - const auto& compositionInfoIt = getBE().mEndOfFrameCompositionInfo.find(displayId); - if (compositionInfoIt == getBE().mEndOfFrameCompositionInfo.end()) { - break; - } - const auto& compositionInfoList = compositionInfoIt->second; - stringResult += base::StringPrintf("Display: %d\n", displayId); + const auto& compositionInfoList = it->second; + stringResult += base::StringPrintf("%s\n", display->getDebugName().c_str()); stringResult += base::StringPrintf("numComponents: %zu\n", compositionInfoList.size()); for (const auto& compositionInfo : compositionInfoList) { compositionInfo.dump(stringResult, nullptr); @@ -5193,7 +5145,7 @@ status_t SurfaceFlinger::captureLayers(const sp<IBinder>& layerHandleBinder, auto layerHandle = reinterpret_cast<Layer::Handle*>(layerHandleBinder.get()); auto parent = layerHandle->owner.promote(); - if (parent == nullptr || parent->isPendingRemoval()) { + if (parent == nullptr || parent->isRemovedFromCurrentState()) { ALOGE("captureLayers called with a removed parent"); return NAME_NOT_FOUND; } diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h index f670f0b5d3..51168a6724 100644 --- 
a/services/surfaceflinger/SurfaceFlinger.h +++ b/services/surfaceflinger/SurfaceFlinger.h @@ -222,8 +222,8 @@ public: // instances. Each hardware composer instance gets a different sequence id. int32_t mComposerSequenceId; - std::unordered_map<int32_t, std::vector<CompositionInfo>> mCompositionInfo; - std::unordered_map<int32_t, std::vector<CompositionInfo>> mEndOfFrameCompositionInfo; + std::map<wp<IBinder>, std::vector<CompositionInfo>> mCompositionInfo; + std::map<wp<IBinder>, std::vector<CompositionInfo>> mEndOfFrameCompositionInfo; }; @@ -364,6 +364,9 @@ public: bool authenticateSurfaceTextureLocked( const sp<IGraphicBufferProducer>& bufferProducer) const; + inline void onLayerCreated() { mNumLayers++; } + inline void onLayerDestroyed() { mNumLayers--; } + private: friend class Client; friend class DisplayEventConnection; @@ -575,10 +578,12 @@ private: // ISurfaceComposerClient::destroySurface() status_t onLayerRemoved(const sp<Client>& client, const sp<IBinder>& handle); + void markLayerPendingRemovalLocked(const Mutex& /* mStateLock */, const sp<Layer>& layer); + // called when all clients have released all their references to // this layer meaning it is entirely safe to destroy all // resources associated to this layer. - status_t onLayerDestroyed(const wp<Layer>& layer); + void onHandleDestroyed(const sp<Layer>& layer); // remove a layer from SurfaceFlinger immediately status_t removeLayer(const sp<Layer>& layer, bool topLevelOnly = false); diff --git a/services/surfaceflinger/TimeStats/TimeStats.cpp b/services/surfaceflinger/TimeStats/TimeStats.cpp index c219afd85d..ace7c1b454 100644 --- a/services/surfaceflinger/TimeStats/TimeStats.cpp +++ b/services/surfaceflinger/TimeStats/TimeStats.cpp @@ -241,25 +241,18 @@ static bool layerNameIsValid(const std::string& layerName) { return std::regex_match(layerName.begin(), layerName.end(), layerNameRegex); } -void TimeStats::setLayerName(int32_t layerID, const std::string& layerName) { +void TimeStats::setPostTime(int32_t layerID, uint64_t frameNumber, const std::string& layerName, + nsecs_t postTime) { if (!mEnabled.load()) return; ATRACE_CALL(); - ALOGV("[%d]-[%s]", layerID, layerName.c_str()); + ALOGV("[%d]-[%" PRIu64 "]-[%s]-PostTime[%" PRId64 "]", layerID, frameNumber, layerName.c_str(), + postTime); std::lock_guard<std::mutex> lock(mMutex); if (!mTimeStatsTracker.count(layerID) && layerNameIsValid(layerName)) { mTimeStatsTracker[layerID].layerName = layerName; } -} - -void TimeStats::setPostTime(int32_t layerID, uint64_t frameNumber, nsecs_t postTime) { - if (!mEnabled.load()) return; - - ATRACE_CALL(); - ALOGV("[%d]-[%" PRIu64 "]-PostTime[%" PRId64 "]", layerID, frameNumber, postTime); - - std::lock_guard<std::mutex> lock(mMutex); if (!mTimeStatsTracker.count(layerID)) return; LayerRecord& layerRecord = mTimeStatsTracker[layerID]; if (layerRecord.timeRecords.size() == MAX_NUM_TIME_RECORDS) { diff --git a/services/surfaceflinger/TimeStats/TimeStats.h b/services/surfaceflinger/TimeStats/TimeStats.h index d1e554cbd2..184bf40967 100644 --- a/services/surfaceflinger/TimeStats/TimeStats.h +++ b/services/surfaceflinger/TimeStats/TimeStats.h @@ -86,8 +86,8 @@ public: void incrementMissedFrames(); void incrementClientCompositionFrames(); - void setLayerName(int32_t layerID, const std::string& layerName); - void setPostTime(int32_t layerID, uint64_t frameNumber, nsecs_t postTime); + void setPostTime(int32_t layerID, uint64_t frameNumber, const std::string& layerName, + nsecs_t postTime); void setLatchTime(int32_t layerID, uint64_t 
frameNumber, nsecs_t latchTime); void setDesiredTime(int32_t layerID, uint64_t frameNumber, nsecs_t desiredTime); void setAcquireTime(int32_t layerID, uint64_t frameNumber, nsecs_t acquireTime); diff --git a/services/surfaceflinger/tests/Transaction_test.cpp b/services/surfaceflinger/tests/Transaction_test.cpp index edb82d0005..94b33ac2d5 100644 --- a/services/surfaceflinger/tests/Transaction_test.cpp +++ b/services/surfaceflinger/tests/Transaction_test.cpp @@ -2614,6 +2614,37 @@ TEST_F(ChildLayerTest, ReparentChildren) { } } +TEST_F(ChildLayerTest, ChildrenSurviveParentDestruction) { + sp<SurfaceControl> mGrandChild = + mClient->createSurface(String8("Grand Child"), 10, 10, + PIXEL_FORMAT_RGBA_8888, 0, mChild.get()); + fillSurfaceRGBA8(mGrandChild, 111, 111, 111); + + { + SCOPED_TRACE("Grandchild visible"); + ScreenCapture::captureScreen(&mCapture); + mCapture->checkPixel(64, 64, 111, 111, 111); + } + + mChild->clear(); + + { + SCOPED_TRACE("After destroying child"); + ScreenCapture::captureScreen(&mCapture); + mCapture->expectFGColor(64, 64); + } + + asTransaction([&](Transaction& t) { + t.reparent(mGrandChild, mFGSurfaceControl->getHandle()); + }); + + { + SCOPED_TRACE("After reparenting grandchild"); + ScreenCapture::captureScreen(&mCapture); + mCapture->checkPixel(64, 64, 111, 111, 111); + } +} + TEST_F(ChildLayerTest, DetachChildrenSameClient) { asTransaction([&](Transaction& t) { t.show(mChild); diff --git a/services/vr/bufferhubd/Android.bp b/services/vr/bufferhubd/Android.bp index ea9bb1fe43..b5e6bb4a38 100644 --- a/services/vr/bufferhubd/Android.bp +++ b/services/vr/bufferhubd/Android.bp @@ -15,6 +15,7 @@ sharedLibraries = [ "libbase", "libbinder", + "libbufferhubservice", "libcutils", "libgtest_prod", "libgui", @@ -32,7 +33,6 @@ cc_library_static { "buffer_client.cpp", "buffer_hub.cpp", "buffer_hub_binder.cpp", - "buffer_node.cpp", "consumer_channel.cpp", "consumer_queue_channel.cpp", "IBufferHub.cpp", @@ -45,7 +45,10 @@ cc_library_static { "-DATRACE_TAG=ATRACE_TAG_GRAPHICS", ], export_include_dirs: ["include"], - header_libs: ["libdvr_headers"], + header_libs: [ + "libdvr_headers", + "libnativewindow_headers", + ], shared_libs: sharedLibraries, static_libs: [ "libbufferhub", diff --git a/services/vr/bufferhubd/buffer_channel.cpp b/services/vr/bufferhubd/buffer_channel.cpp index a2fa0f1180..589b31a285 100644 --- a/services/vr/bufferhubd/buffer_channel.cpp +++ b/services/vr/bufferhubd/buffer_channel.cpp @@ -49,9 +49,9 @@ BufferChannel::~BufferChannel() { BufferHubChannel::BufferInfo BufferChannel::GetBufferInfo() const { return BufferInfo( - buffer_id(), /*consumer_count=*/0, buffer_node_->buffer().width(), - buffer_node_->buffer().height(), buffer_node_->buffer().layer_count(), - buffer_node_->buffer().format(), buffer_node_->buffer().usage(), + buffer_id(), /*consumer_count=*/0, buffer_node_->buffer_desc().width, + buffer_node_->buffer_desc().height, buffer_node_->buffer_desc().layers, + buffer_node_->buffer_desc().format, buffer_node_->buffer_desc().usage, /*pending_count=*/0, /*state=*/0, /*signaled_mask=*/0, /*index=*/0); } @@ -85,17 +85,17 @@ Status<BufferTraits<BorrowedHandle>> BufferChannel::OnImport( // TODO(b/112057680) Move away from the GraphicBuffer-based IonBuffer. 
return BufferTraits<BorrowedHandle>{ - /*buffer_handle=*/buffer_node_->buffer().handle(), + /*buffer_handle=*/buffer_node_->buffer_handle(), /*metadata_handle=*/buffer_node_->metadata().ashmem_handle().Borrow(), /*id=*/buffer_id(), /*client_state_mask=*/client_state_mask_, /*metadata_size=*/buffer_node_->metadata().metadata_size(), - /*width=*/buffer_node_->buffer().width(), - /*height=*/buffer_node_->buffer().height(), - /*layer_count=*/buffer_node_->buffer().layer_count(), - /*format=*/buffer_node_->buffer().format(), - /*usage=*/buffer_node_->buffer().usage(), - /*stride=*/buffer_node_->buffer().stride(), + /*width=*/buffer_node_->buffer_desc().width, + /*height=*/buffer_node_->buffer_desc().height, + /*layer_count=*/buffer_node_->buffer_desc().layers, + /*format=*/buffer_node_->buffer_desc().format, + /*usage=*/buffer_node_->buffer_desc().usage, + /*stride=*/buffer_node_->buffer_desc().stride, /*acquire_fence_fd=*/BorrowedHandle{}, /*released_fence_fd=*/BorrowedHandle{}}; } diff --git a/services/vr/bufferhubd/buffer_node.cpp b/services/vr/bufferhubd/buffer_node.cpp deleted file mode 100644 index 1eba4ae264..0000000000 --- a/services/vr/bufferhubd/buffer_node.cpp +++ /dev/null @@ -1,69 +0,0 @@ -#include <errno.h> -#include <private/dvr/buffer_hub_defs.h> -#include <private/dvr/buffer_node.h> - -namespace android { -namespace dvr { - -void BufferNode::InitializeMetadata() { - // Using placement new here to reuse shared memory instead of new allocation - // Initialize the atomic variables to zero. - BufferHubDefs::MetadataHeader* metadata_header = metadata_.metadata_header(); - buffer_state_ = new (&metadata_header->buffer_state) std::atomic<uint64_t>(0); - fence_state_ = new (&metadata_header->fence_state) std::atomic<uint64_t>(0); - active_clients_bit_mask_ = - new (&metadata_header->active_clients_bit_mask) std::atomic<uint64_t>(0); -} - -// Allocates a new BufferNode. 
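The InitializeMetadata() just removed relies on placement new because the atomics live inside an already-mapped ashmem region: the storage exists, so only the object lifetime needs to be started (and the value zeroed) at a fixed offset. A self-contained sketch of that idiom, using a plain aligned buffer as a stand-in for the shared-memory metadata header:

#include <atomic>
#include <cstdint>
#include <iostream>
#include <new>

int main() {
    // Pretend this buffer is the mmap()ed ashmem region backing the metadata header.
    static_assert(sizeof(std::atomic<uint64_t>) == sizeof(uint64_t),
                  "atomic must fit the raw slot it is constructed over");
    alignas(std::atomic<uint64_t>) unsigned char region[2 * sizeof(std::atomic<uint64_t>)] = {};

    // Placement new: construct the atomics in place and zero-initialize them,
    // reusing the mapped storage instead of allocating anything on the heap.
    auto* buffer_state = new (region) std::atomic<uint64_t>(0);
    auto* active_clients =
            new (region + sizeof(std::atomic<uint64_t>)) std::atomic<uint64_t>(0);

    active_clients->fetch_or(1, std::memory_order_release);
    std::cout << buffer_state->load() << " " << active_clients->load() << "\n";
}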
-BufferNode::BufferNode(uint32_t width, uint32_t height, uint32_t layer_count, - uint32_t format, uint64_t usage, - size_t user_metadata_size) { - if (int ret = buffer_.Alloc(width, height, layer_count, format, usage)) { - ALOGE( - "DetachedBufferChannel::DetachedBufferChannel: Failed to allocate " - "buffer: %s", - strerror(-ret)); - return; - } - - metadata_ = BufferHubMetadata::Create(user_metadata_size); - if (!metadata_.IsValid()) { - ALOGE("BufferNode::BufferNode: Failed to allocate metadata."); - return; - } - InitializeMetadata(); -} - -uint64_t BufferNode::GetActiveClientsBitMask() const { - return active_clients_bit_mask_->load(std::memory_order_acquire); -} - -uint64_t BufferNode::AddNewActiveClientsBitToMask() { - uint64_t current_active_clients_bit_mask = GetActiveClientsBitMask(); - uint64_t client_state_mask = 0ULL; - uint64_t updated_active_clients_bit_mask = 0ULL; - do { - client_state_mask = BufferHubDefs::FindNextAvailableClientStateMask( - current_active_clients_bit_mask); - if (client_state_mask == 0ULL) { - ALOGE( - "BufferNode::AddNewActiveClientsBitToMask: reached the maximum " - "mumber of channels per buffer node: 32."); - errno = E2BIG; - return 0ULL; - } - updated_active_clients_bit_mask = - current_active_clients_bit_mask | client_state_mask; - } while (!(active_clients_bit_mask_->compare_exchange_weak( - current_active_clients_bit_mask, updated_active_clients_bit_mask, - std::memory_order_acq_rel, std::memory_order_acquire))); - return client_state_mask; -} - -void BufferNode::RemoveClientsBitFromMask(const uint64_t& value) { - active_clients_bit_mask_->fetch_and(~value); -} - -} // namespace dvr -} // namespace android diff --git a/services/vr/bufferhubd/include/private/dvr/buffer_node.h b/services/vr/bufferhubd/include/private/dvr/buffer_node.h index 4f356f0c1c..997aeda917 100644 --- a/services/vr/bufferhubd/include/private/dvr/buffer_node.h +++ b/services/vr/bufferhubd/include/private/dvr/buffer_node.h @@ -1,73 +1,14 @@ #ifndef ANDROID_DVR_BUFFERHUBD_BUFFER_NODE_H_ #define ANDROID_DVR_BUFFERHUBD_BUFFER_NODE_H_ +// TODO(b/118891412) Remove this file -#include <private/dvr/ion_buffer.h> -#include <ui/BufferHubMetadata.h> +#include <bufferhub/BufferNode.h> namespace android { namespace dvr { -class BufferNode { - public: - // Allocates a new BufferNode. - BufferNode(uint32_t width, uint32_t height, uint32_t layer_count, - uint32_t format, uint64_t usage, size_t user_metadata_size); - - // Returns whether the object holds a valid graphic buffer. - bool IsValid() const { return buffer_.IsValid() && metadata_.IsValid(); } - - size_t user_metadata_size() const { return metadata_.user_metadata_size(); } - - // Accessors of the IonBuffer. - IonBuffer& buffer() { return buffer_; } - const IonBuffer& buffer() const { return buffer_; } - - // Accessors of metadata. - const BufferHubMetadata& metadata() const { return metadata_; } - - // Gets the current value of active_clients_bit_mask in metadata_ with - // std::memory_order_acquire, so that all previous releases of - // active_clients_bit_mask from all threads will be returned here. - uint64_t GetActiveClientsBitMask() const; - - // Find and add a new client_state_mask to active_clients_bit_mask in - // metadata_. - // Return the new client_state_mask that is added to active_clients_bit_mask. - // Return 0ULL if there are already 32 bp clients of the buffer. 
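The claim/release logic deleted above (it now lives in frameworks/bufferhub's BufferNode, exercised by the new BufferNode_test.cpp earlier in this patch) is a small lock-free protocol: pick the next free client bit, try to publish it with compare_exchange_weak, retry if another thread won the race, and drop it later with fetch_and. A distilled sketch using only <atomic>; FindNextFreeBit below is a simplified stand-in for BufferHubDefs::FindNextAvailableClientStateMask:

#include <atomic>
#include <cassert>
#include <cstdint>

// Returns the lowest bit not yet set in mask, or 0 if all 64 bits are taken.
uint64_t FindNextFreeBit(uint64_t mask) {
    uint64_t bit = 1ULL;
    while (bit != 0 && (mask & bit)) bit <<= 1;
    return bit;
}

// Claims a free client bit; returns 0 if the mask is already full.
uint64_t ClaimClientBit(std::atomic<uint64_t>& active) {
    uint64_t current = active.load(std::memory_order_acquire);
    uint64_t bit;
    do {
        bit = FindNextFreeBit(current);
        if (bit == 0) return 0;  // no free slot left
        // On failure, compare_exchange_weak refreshes `current` with the value
        // another thread published, and the loop recomputes a candidate bit.
    } while (!active.compare_exchange_weak(current, current | bit,
                                           std::memory_order_acq_rel,
                                           std::memory_order_acquire));
    return bit;
}

// Releases a previously claimed bit.
void ReleaseClientBit(std::atomic<uint64_t>& active, uint64_t bit) {
    active.fetch_and(~bit, std::memory_order_release);
}

int main() {
    std::atomic<uint64_t> active{0};
    uint64_t a = ClaimClientBit(active);
    uint64_t b = ClaimClientBit(active);
    assert(a != 0 && b != 0 && a != b);
    ReleaseClientBit(active, b);
    assert(active.load() == a);
}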
- uint64_t AddNewActiveClientsBitToMask(); - - // Removes the value from active_clients_bit_mask in metadata_ with - // std::memory_order_release, so that the change will be visible to any - // acquire of active_clients_bit_mask_ in any threads after the succeed of - // this operation. - void RemoveClientsBitFromMask(const uint64_t& value); - - private: - // Helper method for constructors to initialize atomic metadata header - // variables in shared memory. - void InitializeMetadata(); - - // Gralloc buffer handles. - IonBuffer buffer_; - - // Metadata in shared memory. - BufferHubMetadata metadata_; - - // The following variables are atomic variables in metadata_ that are visible - // to Bn object and Bp objects. Please find more info in - // BufferHubDefs::MetadataHeader. - - // buffer_state_ tracks the state of the buffer. Buffer can be in one of these - // four states: gained, posted, acquired, released. - std::atomic<uint64_t>* buffer_state_ = nullptr; - - // TODO(b/112012161): add comments to fence_state_. - std::atomic<uint64_t>* fence_state_ = nullptr; - - // active_clients_bit_mask_ tracks all the bp clients of the buffer. It is the - // union of all client_state_mask of all bp clients. - std::atomic<uint64_t>* active_clients_bit_mask_ = nullptr; -}; +typedef android::frameworks::bufferhub::V1_0::implementation::BufferNode + BufferNode; } // namespace dvr } // namespace android diff --git a/services/vr/bufferhubd/tests/Android.bp b/services/vr/bufferhubd/tests/Android.bp index a80691f114..a611268440 100644 --- a/services/vr/bufferhubd/tests/Android.bp +++ b/services/vr/bufferhubd/tests/Android.bp @@ -24,30 +24,3 @@ cc_test { // TODO(b/117568153): Temporarily opt out using libcrt. no_libcrt: true, } - -cc_test { - name: "buffer_node-test", - srcs: ["buffer_node-test.cpp"], - cflags: [ - "-DLOG_TAG=\"buffer_node-test\"", - "-DTRACE=0", - "-DATRACE_TAG=ATRACE_TAG_GRAPHICS", - ], - header_libs: ["libdvr_headers"], - static_libs: [ - "libbufferhub", - "libbufferhubd", - "libgmock", - ], - shared_libs: [ - "libbase", - "libbinder", - "liblog", - "libpdx_default_transport", - "libui", - "libutils", - ], - // TODO(b/117568153): Temporarily opt out using libcrt. 
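The rewritten buffer_node.h above keeps legacy android::dvr callers compiling by aliasing the old name to the relocated class instead of porting every call site in this change (hence the TODO(b/118891412) to delete the shim later). A minimal illustration of that transitional-alias technique, with invented namespaces (newhome, legacy) in place of the real ones:

#include <iostream>

namespace newhome {
// The class after the move (think frameworks::bufferhub::...::BufferNode).
class BufferishNode {
public:
    int id() const { return 7; }
};
}  // namespace newhome

namespace legacy {
// Shim header content: re-export the relocated type under its old name so
// existing legacy::BufferishNode users keep building unchanged.
typedef newhome::BufferishNode BufferishNode;  // the patch uses a typedef; a using-alias is equivalent
}  // namespace legacy

int main() {
    legacy::BufferishNode node;  // old spelling, new implementation
    std::cout << node.id() << "\n";
}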
- no_libcrt: true, -} - diff --git a/services/vr/bufferhubd/tests/buffer_node-test.cpp b/services/vr/bufferhubd/tests/buffer_node-test.cpp deleted file mode 100644 index 6671e97a5c..0000000000 --- a/services/vr/bufferhubd/tests/buffer_node-test.cpp +++ /dev/null @@ -1,92 +0,0 @@ -#include <errno.h> -#include <gmock/gmock.h> -#include <gtest/gtest.h> -#include <private/dvr/buffer_node.h> - -namespace android { -namespace dvr { - -namespace { - -const uint32_t kWidth = 640; -const uint32_t kHeight = 480; -const uint32_t kLayerCount = 1; -const uint32_t kFormat = 1; -const uint64_t kUsage = 0; -const size_t kUserMetadataSize = 0; -const size_t kMaxClientsCount = BufferHubDefs::kMaxNumberOfClients; - -class BufferNodeTest : public ::testing::Test { - protected: - void SetUp() override { - buffer_node = new BufferNode(kWidth, kHeight, kLayerCount, kFormat, kUsage, - kUserMetadataSize); - ASSERT_TRUE(buffer_node->IsValid()); - } - - void TearDown() override { - if (buffer_node != nullptr) { - delete buffer_node; - } - } - - BufferNode* buffer_node = nullptr; -}; - -TEST_F(BufferNodeTest, TestCreateBufferNode) { - EXPECT_EQ(buffer_node->user_metadata_size(), kUserMetadataSize); -} - -TEST_F(BufferNodeTest, TestAddNewActiveClientsBitToMask_twoNewClients) { - uint64_t new_client_state_mask_1 = - buffer_node->AddNewActiveClientsBitToMask(); - EXPECT_EQ(buffer_node->GetActiveClientsBitMask(), new_client_state_mask_1); - - // Request and add a new client_state_mask again. - // Active clients bit mask should be the union of the two new - // client_state_masks. - uint64_t new_client_state_mask_2 = - buffer_node->AddNewActiveClientsBitToMask(); - EXPECT_EQ(buffer_node->GetActiveClientsBitMask(), - new_client_state_mask_1 | new_client_state_mask_2); -} - -TEST_F(BufferNodeTest, TestAddNewActiveClientsBitToMask_32NewClients) { - uint64_t new_client_state_mask = 0ULL; - uint64_t current_mask = 0ULL; - uint64_t expected_mask = 0ULL; - - for (int i = 0; i < kMaxClientsCount; ++i) { - new_client_state_mask = buffer_node->AddNewActiveClientsBitToMask(); - EXPECT_NE(new_client_state_mask, 0); - EXPECT_FALSE(new_client_state_mask & current_mask); - expected_mask = current_mask | new_client_state_mask; - current_mask = buffer_node->GetActiveClientsBitMask(); - EXPECT_EQ(current_mask, expected_mask); - } - - // Method should fail upon requesting for more than maximum allowable clients. - new_client_state_mask = buffer_node->AddNewActiveClientsBitToMask(); - EXPECT_EQ(new_client_state_mask, 0ULL); - EXPECT_EQ(errno, E2BIG); -} - -TEST_F(BufferNodeTest, TestRemoveActiveClientsBitFromMask) { - buffer_node->AddNewActiveClientsBitToMask(); - uint64_t current_mask = buffer_node->GetActiveClientsBitMask(); - uint64_t new_client_state_mask = buffer_node->AddNewActiveClientsBitToMask(); - EXPECT_NE(buffer_node->GetActiveClientsBitMask(), current_mask); - - buffer_node->RemoveClientsBitFromMask(new_client_state_mask); - EXPECT_EQ(buffer_node->GetActiveClientsBitMask(), current_mask); - - // Remove the test_mask again to the active client bit mask should not modify - // the value of active clients bit mask. 
- buffer_node->RemoveClientsBitFromMask(new_client_state_mask); - EXPECT_EQ(buffer_node->GetActiveClientsBitMask(), current_mask); -} - -} // namespace - -} // namespace dvr -} // namespace android diff --git a/vulkan/api/vulkan.api b/vulkan/api/vulkan.api index e5b9b47fe3..8f76606980 100644 --- a/vulkan/api/vulkan.api +++ b/vulkan/api/vulkan.api @@ -28,7 +28,7 @@ import platform "platform.api" // API version (major.minor.patch) define VERSION_MAJOR 1 define VERSION_MINOR 1 -define VERSION_PATCH 90 +define VERSION_PATCH 91 // API limits define VK_MAX_PHYSICAL_DEVICE_NAME_SIZE 256 @@ -85,9 +85,7 @@ define NULL_HANDLE 0 @extension("VK_KHR_wayland_surface") define VK_KHR_WAYLAND_SURFACE_SPEC_VERSION 6 @extension("VK_KHR_wayland_surface") define VK_KHR_WAYLAND_SURFACE_NAME "VK_KHR_wayland_surface" -// 8 -@extension("VK_KHR_mir_surface") define VK_KHR_MIR_SURFACE_SPEC_VERSION 4 -@extension("VK_KHR_mir_surface") define VK_KHR_MIR_SURFACE_NAME "VK_KHR_mir_surface" +// 8 - VK_KHR_mir_surface removed // 9 @extension("VK_KHR_android_surface") define VK_KHR_ANDROID_SURFACE_SPEC_VERSION 6 @@ -526,8 +524,8 @@ define NULL_HANDLE 0 @extension("VK_NV_shading_rate_image") define VK_NV_SHADING_RATE_IMAGE_EXTENSION_NAME "VK_NV_shading_rate_image" // 166 -@extension("VK_NVX_raytracing") define VK_NVX_RAYTRACING_SPEC_VERSION 1 -@extension("VK_NVX_raytracing") define VK_NVX_RAYTRACING_EXTENSION_NAME "VK_NVX_raytracing" +@extension("VK_NV_raytracing") define VK_NV_RAYTRACING_SPEC_VERSION 2 +@extension("VK_NV_raytracing") define VK_NV_RAYTRACING_EXTENSION_NAME "VK_NV_raytracing" // 167 @extension("VK_NV_representative_fragment_test") define VK_NV_REPRESENTATIVE_FRAGMENT_TEST_SPEC_VERSION 1 @@ -565,6 +563,10 @@ define NULL_HANDLE 0 @extension("VK_AMD_shader_core_properties") define VK_AMD_SHADER_CORE_PROPERTIES_SPEC_VERSION 1 @extension("VK_AMD_shader_core_properties") define VK_AMD_SHADER_CORE_PROPERTIES_EXTENSION_NAME "VK_AMD_shader_core_properties" +// 190 +@extension("VK_AMD_memory_overallocation_behavior") define VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_SPEC_VERSION 1 +@extension("VK_AMD_memory_overallocation_behavior") define VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_EXTENSION_NAME "VK_AMD_memory_overallocation_behavior" + // 191 @extension("VK_EXT_vertex_attribute_divisor") define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_SPEC_VERSION 2 @extension("VK_EXT_vertex_attribute_divisor") define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME "VK_EXT_vertex_attribute_divisor" @@ -693,7 +695,7 @@ type u32 VkSampleMask @extension("VK_EXT_validation_cache") @nonDispatchHandle type u64 VkValidationCacheEXT // 166 -@extension("VK_NVX_raytracing") @nonDispatchHandle type u64 VkAccelerationStructureNVX +@extension("VK_NV_raytracing") @nonDispatchHandle type u64 VkAccelerationStructureNV ///////////// // Enums // @@ -794,8 +796,8 @@ enum VkDescriptorType { //@extension("VK_EXT_inline_uniform_block") // 139 VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT = 1000138000, - //@extension("VK_NVX_raytracing") // 166 - VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NVX = 1000165000, + //@extension("VK_NV_raytracing") // 166 + VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000, } enum VkQueryType { @@ -806,8 +808,8 @@ enum VkQueryType { //@extension("VK_EXT_transform_feedback") // 29 VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT = 1000028004, - //@extension("VK_NVX_raytracing") // 166 - VK_QUERY_TYPE_COMPACTED_SIZE_NVX = 1000165000, + //@extension("VK_NV_raytracing") // 166 + VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV = 1000165000, } enum 
VkBorderColor { @@ -823,8 +825,8 @@ enum VkPipelineBindPoint { VK_PIPELINE_BIND_POINT_GRAPHICS = 0x00000000, VK_PIPELINE_BIND_POINT_COMPUTE = 0x00000001, - //@extension("VK_NVX_raytracing") // 166 - VK_PIPELINE_BIND_POINT_RAYTRACING_NVX = 1000165000, + //@extension("VK_NV_raytracing") // 166 + VK_PIPELINE_BIND_POINT_RAY_TRACING_NV = 1000165000, } enum VkPrimitiveTopology { @@ -849,6 +851,9 @@ enum VkSharingMode { enum VkIndexType { VK_INDEX_TYPE_UINT16 = 0x00000000, VK_INDEX_TYPE_UINT32 = 0x00000001, + + //@extension("VK_NV_raytracing") // 166 + VK_INDEX_TYPE_NONE_NV = 1000165000, } enum VkFilter { @@ -1447,9 +1452,6 @@ enum VkStructureType { //@extension("VK_KHR_wayland_surface") // 7 VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR = 1000006000, - //@extension("VK_KHR_mir_surface") // 8 - VK_STRUCTURE_TYPE_MIR_SURFACE_CREATE_INFO_KHR = 1000007000, - //@extension("VK_KHR_android_surface") // 9 VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR = 1000008000, @@ -1786,18 +1788,18 @@ enum VkStructureType { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV = 1000164002, VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV = 1000164005, - //@extension("VK_NVX_raytracing") // 166 - VK_STRUCTURE_TYPE_RAYTRACING_PIPELINE_CREATE_INFO_NVX = 1000165000, - VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NVX = 1000165001, - VK_STRUCTURE_TYPE_GEOMETRY_INSTANCE_NVX = 1000165002, - VK_STRUCTURE_TYPE_GEOMETRY_NVX = 1000165003, - VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NVX = 1000165004, - VK_STRUCTURE_TYPE_GEOMETRY_AABB_NVX = 1000165005, - VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NVX = 1000165006, - VK_STRUCTURE_TYPE_DESCRIPTOR_ACCELERATION_STRUCTURE_INFO_NVX = 1000165007, - VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NVX = 1000165008, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAYTRACING_PROPERTIES_NVX = 1000165009, - VK_STRUCTURE_TYPE_HIT_SHADER_MODULE_CREATE_INFO_NVX = 1000165010, + //@extension("VK_NV_raytracing") // 166 + VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV = 1000165000, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV = 1000165001, + VK_STRUCTURE_TYPE_GEOMETRY_NV = 1000165003, + VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV = 1000165004, + VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV = 1000165005, + VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV = 1000165006, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV = 1000165007, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV = 1000165008, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV = 1000165009, + VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV = 1000165011, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV = 1000165012, //@extension("VK_NV_representative_fragment_test") // 167 VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV = 1000166000, @@ -1830,6 +1832,9 @@ enum VkStructureType { //@extension("VK_AMD_shader_core_properties") // 186 VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD = 1000185000, + //@extension("VK_AMD_memory_overallocation_behavior") // 190 + VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD = 1000189000, + //@extension("VK_EXT_vertex_attribute_divisor") // 191 VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT = 1000190000, VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT = 1000190001, @@ -2011,8 +2016,8 @@ enum VkObjectType { //@extension("VK_EXT_validation_cache") // 161 
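One practical consequence of the VK_NVX_raytracing to VK_NV_raytracing rename running through this file is that applications have to probe for the new extension name string before touching any NV-suffixed entry point. A sketch of that probe using only core Vulkan calls; the literal "VK_NV_raytracing" is the name as defined in this revision of vulkan.api (later upstream revisions spell it VK_NV_ray_tracing), and error handling is trimmed for brevity:

#include <vulkan/vulkan.h>
#include <cstring>
#include <iostream>
#include <vector>

// Returns true if the physical device advertises the renamed raytracing extension.
static bool HasNvRaytracing(VkPhysicalDevice gpu) {
    uint32_t count = 0;
    vkEnumerateDeviceExtensionProperties(gpu, nullptr, &count, nullptr);
    std::vector<VkExtensionProperties> props(count);
    vkEnumerateDeviceExtensionProperties(gpu, nullptr, &count, props.data());
    for (const VkExtensionProperties& p : props) {
        if (std::strcmp(p.extensionName, "VK_NV_raytracing") == 0) return true;
    }
    return false;
}

int main() {
    VkInstanceCreateInfo instanceInfo = {};
    instanceInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
    VkInstance instance = VK_NULL_HANDLE;
    if (vkCreateInstance(&instanceInfo, nullptr, &instance) != VK_SUCCESS) return 1;

    uint32_t gpuCount = 0;
    vkEnumeratePhysicalDevices(instance, &gpuCount, nullptr);
    std::vector<VkPhysicalDevice> gpus(gpuCount);
    vkEnumeratePhysicalDevices(instance, &gpuCount, gpus.data());

    for (VkPhysicalDevice gpu : gpus) {
        std::cout << (HasNvRaytracing(gpu) ? "NV raytracing available\n"
                                           : "no NV raytracing\n");
    }

    vkDestroyInstance(instance, nullptr);
}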
VK_OBJECT_TYPE_VALIDATION_CACHE_EXT = 1000160000, - //@extension("VK_NVX_raytracing") // 166 - VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NVX = 1000165000, + //@extension("VK_NV_raytracing") // 166 + VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000, } @@ -2135,8 +2140,8 @@ enum VkDebugReportObjectTypeEXT { //@extension("VK_KHR_sampler_ycbcr_conversion") // 157 VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT = 1000156000, - //@extension("VK_NVX_raytracing") // 166 - VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NVX_EXT = 1000165000, + //@extension("VK_NV_raytracing") // 166 + VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT = 1000165000, } @extension("VK_AMD_rasterization_order") // 19 @@ -2311,22 +2316,36 @@ enum VkCoarseSampleOrderTypeNV { VK_COARSE_SAMPLE_ORDER_TYPE_SAMPLE_MAJOR_NV = 3, } -@extension("VK_NVX_raytracing") // 166 -enum VkGeometryTypeNVX { - VK_GEOMETRY_TYPE_TRIANGLES_NVX = 0, - VK_GEOMETRY_TYPE_AABBS_NVX = 1, +@extension("VK_NV_raytracing") // 166 +enum VkRayTracingShaderGroupTypeNV { + VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV = 0, + VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV = 1, + VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV = 2, +} + +@extension("VK_NV_raytracing") // 166 +enum VkGeometryTypeNV { + VK_GEOMETRY_TYPE_TRIANGLES_NV = 0, + VK_GEOMETRY_TYPE_AABBS_NV = 1, +} + +@extension("VK_NV_raytracing") // 166 +enum VkAccelerationStructureTypeNV { + VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV = 0, + VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV = 1, } -@extension("VK_NVX_raytracing") // 166 -enum VkAccelerationStructureTypeNVX { - VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NVX = 0, - VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NVX = 1, +@extension("VK_NV_raytracing") // 166 +enum VkCopyAccelerationStructureModeNV { + VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV = 0, + VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV = 1, } -@extension("VK_NVX_raytracing") // 166 -enum VkCopyAccelerationStructureModeNVX { - VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NVX = 0, - VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NVX = 1, +@extension("VK_NV_raytracing") // 166 +enum VkAccelerationStructureMemoryRequirementsTypeNV { + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV = 0, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV = 1, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV = 2, } @extension("VK_EXT_global_priority") // 175 @@ -2345,6 +2364,13 @@ enum VkTimeDomainEXT { VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT = 3, } +@extension("VK_AMD_memory_overallocation_behavior") // 190 +enum VkMemoryOverallocationBehaviorAMD { + VK_MEMORY_OVERALLOCATION_BEHAVIOR_DEFAULT_AMD = 0, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_ALLOWED_AMD = 1, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD = 2, +} + @extension("VK_KHR_driver_properties") // 197 enum VkDriverIdKHR { VK_DRIVER_ID_AMD_PROPRIETARY_KHR = 1, @@ -2433,9 +2459,9 @@ bitfield VkAccessFlagBits { //@extension("VK_NV_shading_rate_image") // 165 VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV = 0x00800000, - //@extension("VK_NVX_raytracing") // 166 - VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NVX = 0x00200000, - VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NVX = 0x00400000, + //@extension("VK_NV_raytracing") // 166 + VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV = 0x00200000, + VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV = 0x00400000, //@extension("VK_EXT_transform_feedback") // 29 VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 0x02000000, @@ -2459,8 
+2485,8 @@ bitfield VkBufferUsageFlagBits { //@extension("VK_EXT_conditional_rendering") // 82 VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00000200, - //@extension("VK_NVX_raytracing") // 166 - VK_BUFFER_USAGE_RAYTRACING_BIT_NVX = 0x00000400, + //@extension("VK_NV_raytracing") // 166 + VK_BUFFER_USAGE_RAY_TRACING_BIT_NV = 0x00000400, //@extension("VK_EXT_transform_feedback") // 29 VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT = 0x00000800, @@ -2491,13 +2517,13 @@ bitfield VkShaderStageFlagBits { VK_SHADER_STAGE_ALL = 0x7FFFFFFF, - //@extension("VK_NVX_raytracing") // 166 - VK_SHADER_STAGE_RAYGEN_BIT_NVX = 0x00000100, - VK_SHADER_STAGE_ANY_HIT_BIT_NVX = 0x00000200, - VK_SHADER_STAGE_CLOSEST_HIT_BIT_NVX = 0x00000400, - VK_SHADER_STAGE_MISS_BIT_NVX = 0x00000800, - VK_SHADER_STAGE_INTERSECTION_BIT_NVX = 0x00001000, - VK_SHADER_STAGE_CALLABLE_BIT_NVX = 0x00002000, + //@extension("VK_NV_raytracing") // 166 + VK_SHADER_STAGE_RAYGEN_BIT_NV = 0x00000100, + VK_SHADER_STAGE_ANY_HIT_BIT_NV = 0x00000200, + VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV = 0x00000400, + VK_SHADER_STAGE_MISS_BIT_NV = 0x00000800, + VK_SHADER_STAGE_INTERSECTION_BIT_NV = 0x00001000, + VK_SHADER_STAGE_CALLABLE_BIT_NV = 0x00002000, //@extension("VK_NV_mesh_shader") // 203 VK_SHADER_STAGE_TASK_BIT_NV = 0x00000040, @@ -2595,8 +2621,8 @@ bitfield VkPipelineCreateFlagBits { VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR = 0x00000008, VK_PIPELINE_CREATE_DISPATCH_BASE_KHR = 0x00000010, - //@extension("VK_NVX_raytracing") // 166 - VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NVX = 0x00000020, + //@extension("VK_NV_raytracing") // 166 + VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV = 0x00000020, } /// Color component flags @@ -2791,8 +2817,9 @@ bitfield VkPipelineStageFlagBits { //@extension("VK_NV_shading_rate_image") // 165 VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV = 0x00400000, - //@extension("VK_NVX_raytracing") // 166 - VK_PIPELINE_STAGE_RAYTRACING_BIT_NVX = 0x00200000, + //@extension("VK_NV_raytracing") // 166 + VK_PIPELINE_STAGE_RAY_TRACING_BIT_NV = 0x00200000, + VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV = 0x02000000, //@extension("VK_NV_mesh_shader") // 203 VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV = 0x00080000, @@ -3194,12 +3221,6 @@ type VkFlags VkWaylandSurfaceCreateFlagsKHR //bitfield VkWaylandSurfaceCreateFlagBitsKHR { //} -@extension("VK_KHR_mir_surface") // 8 -type VkFlags VkMirSurfaceCreateFlagsKHR -//@extension("VK_KHR_mir_surface") // 8 -//bitfield VkMirSurfaceCreateFlagBitsKHR { -//} - @extension("VK_KHR_android_surface") // 9 type VkFlags VkAndroidSurfaceCreateFlagsKHR //@extension("VK_KHR_android_surface") // 9 @@ -3484,33 +3505,33 @@ bitfield VkDescriptorBindingFlagBitsEXT { VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT = 0x00000008, } -@extension("VK_NVX_raytracing") // 166 -type VkFlags VkGeometryFlagsNVX -@extension("VK_NVX_raytracing") // 166 -bitfield VkGeometryFlagBitsNVX { - VK_GEOMETRY_OPAQUE_BIT_NVX = 0x00000001, - VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_NVX = 0x00000002, +@extension("VK_NV_raytracing") // 166 +type VkFlags VkGeometryFlagsNV +@extension("VK_NV_raytracing") // 166 +bitfield VkGeometryFlagBitsNV { + VK_GEOMETRY_OPAQUE_BIT_NV = 0x00000001, + VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_NV = 0x00000002, } -@extension("VK_NVX_raytracing") // 166 -type VkFlags VkGeometryInstanceFlagsNVX -@extension("VK_NVX_raytracing") // 166 -bitfield VkGeometryInstanceFlagBitsNVX { - VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NVX = 0x00000001, - 
VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_FLIP_WINDING_BIT_NVX = 0x00000002, - VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_NVX = 0x00000004, - VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_NVX = 0x00000008, +@extension("VK_NV_raytracing") // 166 +type VkFlags VkGeometryInstanceFlagsNV +@extension("VK_NV_raytracing") // 166 +bitfield VkGeometryInstanceFlagBitsNV { + VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV = 0x00000001, + VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_NV = 0x00000002, + VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_NV = 0x00000004, + VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_NV = 0x00000008, } -@extension("VK_NVX_raytracing") // 166 -type VkFlags VkBuildAccelerationStructureFlagsNVX -@extension("VK_NVX_raytracing") // 166 -bitfield VkBuildAccelerationStructureFlagBitsNVX { - VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NVX = 0x00000001, - VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NVX = 0x00000002, - VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NVX = 0x00000004, - VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NVX = 0x00000008, - VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_NVX = 0x00000010, +@extension("VK_NV_raytracing") // 166 +type VkFlags VkBuildAccelerationStructureFlagsNV +@extension("VK_NV_raytracing") // 166 +bitfield VkBuildAccelerationStructureFlagBitsNV { + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV = 0x00000001, + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV = 0x00000002, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NV = 0x00000004, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NV = 0x00000008, + VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_NV = 0x00000010, } @extension("VK_FUCHSIA_imagepipe_surface") // 215 @@ -5330,15 +5351,6 @@ class VkWaylandSurfaceCreateInfoKHR { platform.wl_surface* surface } -@extension("VK_KHR_mir_surface") // 8 -class VkMirSurfaceCreateInfoKHR { - VkStructureType sType - const void* pNext - VkMirSurfaceCreateFlagsKHR flags - platform.MirConnection* connection - platform.MirSurface* mirSurface -} - @extension("VK_KHR_android_surface") // 9 class VkAndroidSurfaceCreateInfoKHR { VkStructureType sType @@ -7249,22 +7261,34 @@ class VkPipelineViewportCoarseSampleOrderStateCreateInfoNV { const VkCoarseSampleOrderCustomNV* pCustomSampleOrders } -@extension("VK_NVX_raytracing") // 166 -class VkRaytracingPipelineCreateInfoNVX { - VkStructureType sType - const void* pNext - VkPipelineCreateFlags flags - u32 stageCount - const VkPipelineShaderStageCreateInfo* pStages - const u32* pGroupNumbers - u32 maxRecursionDepth - VkPipelineLayout layout - VkPipeline basePipelineHandle - s32 basePipelineIndex -} - -@extension("VK_NVX_raytracing") // 166 -class VkGeometryTrianglesNVX { +@extension("VK_NV_raytracing") // 166 +class VkRayTracingShaderGroupCreateInfoNV { + VkStructureType sType + const void* pNext + VkRayTracingShaderGroupTypeNV type + u32 generalShader + u32 closestHitShader + u32 anyHitShader + u32 intersectionShader +} + +@extension("VK_NV_raytracing") // 166 +class VkRayTracingPipelineCreateInfoNV { + VkStructureType sType + const void* pNext + VkPipelineCreateFlags flags + u32 stageCount + const VkPipelineShaderStageCreateInfo* pStages + u32 groupCount + const VkRayTracingShaderGroupCreateInfoNV* pGroups + u32 maxRecursionDepth + VkPipelineLayout layout + VkPipeline basePipelineHandle + s32 basePipelineIndex +} + +@extension("VK_NV_raytracing") // 166 +class VkGeometryTrianglesNV { VkStructureType sType const void* pNext VkBuffer vertexData @@ -7280,8 +7304,8 @@ class 
VkGeometryTrianglesNVX { VkDeviceSize transformOffset } -@extension("VK_NVX_raytracing") // 166 -class VkGeometryAABBNVX { +@extension("VK_NV_raytracing") // 166 +class VkGeometryAABBNV { VkStructureType sType const void* pNext VkBuffer aabbData @@ -7290,66 +7314,79 @@ class VkGeometryAABBNVX { VkDeviceSize offset } -@extension("VK_NVX_raytracing") // 166 -class VkGeometryDataNVX { - VkGeometryTrianglesNVX triangles - VkGeometryAABBNVX aabbs +@extension("VK_NV_raytracing") // 166 +class VkGeometryDataNV { + VkGeometryTrianglesNV triangles + VkGeometryAABBNV aabbs } -@extension("VK_NVX_raytracing") // 166 -class VkGeometryNVX { +@extension("VK_NV_raytracing") // 166 +class VkGeometryNV { VkStructureType sType const void* pNext - VkGeometryTypeNVX geometryType - VkGeometryDataNVX geometry - VkGeometryFlagsNVX flags + VkGeometryTypeNV geometryType + VkGeometryDataNV geometry + VkGeometryFlagsNV flags } -@extension("VK_NVX_raytracing") // 166 -class VkAccelerationStructureCreateInfoNVX { +@extension("VK_NV_raytracing") // 166 +class VkAccelerationStructureInfoNV { VkStructureType sType const void* pNext - VkAccelerationStructureTypeNVX type - VkBuildAccelerationStructureFlagsNVX flags - VkDeviceSize compactedSize + VkAccelerationStructureTypeNV type + VkBuildAccelerationStructureFlagsNV flags u32 instanceCount u32 geometryCount - const VkGeometryNVX* pGeometries + const VkGeometryNV* pGeometries } -@extension("VK_NVX_raytracing") // 166 -class VkBindAccelerationStructureMemoryInfoNVX { +@extension("VK_NV_raytracing") // 166 +class VkAccelerationStructureCreateInfoNV { VkStructureType sType const void* pNext - VkAccelerationStructureNVX accelerationStructure + VkDeviceSize compactedSize + VkAccelerationStructureInfoNV info +} + +@extension("VK_NV_raytracing") // 166 +class VkBindAccelerationStructureMemoryInfoNV { + VkStructureType sType + const void* pNext + VkAccelerationStructureNV accelerationStructure VkDeviceMemory memory VkDeviceSize memoryOffset u32 deviceIndexCount const u32* pDeviceIndices } -@extension("VK_NVX_raytracing") // 166 -class VkDescriptorAccelerationStructureInfoNVX { +@extension("VK_NV_raytracing") // 166 +class VkDescriptorAccelerationStructureInfoNV { VkStructureType sType const void* pNext u32 accelerationStructureCount - const VkAccelerationStructureNVX* pAccelerationStructures + const VkAccelerationStructureNV* pAccelerationStructures } -@extension("VK_NVX_raytracing") // 166 -class VkAccelerationStructureMemoryRequirementsInfoNVX { +@extension("VK_NV_raytracing") // 166 +class VkAccelerationStructureMemoryRequirementsInfoNV { VkStructureType sType const void* pNext - VkAccelerationStructureNVX accelerationStructure + VkAccelerationStructureMemoryRequirementsTypeNV type + VkAccelerationStructureNV accelerationStructure } -@extension("VK_NVX_raytracing") // 166 -class VkPhysicalDeviceRaytracingPropertiesNVX { +@extension("VK_NV_raytracing") // 166 +class VkPhysicalDeviceRaytracingPropertiesNV { VkStructureType sType void* pNext - u32 shaderHeaderSize + u32 shaderGroupHandleSize u32 maxRecursionDepth - u32 maxGeometryCount + u32 maxShaderGroupStride + u32 shaderGroupBaseAlignment + u64 maxGeometryCount + u64 maxInstanceCount + u64 maxTriangleCount + u32 maxDescriptorSetAccelerationStructures } @extension("VK_NV_representative_fragment_test") // 167 @@ -7454,6 +7491,13 @@ class VkPhysicalDeviceShaderCorePropertiesAMD { u32 vgprAllocationGranularity } +@extension("VK_AMD_memory_overallocation_behavior") // 190 +class VkDeviceMemoryOverallocationCreateInfoAMD { + 
VkStructureType sType + const void* pNext + VkMemoryOverallocationBehaviorAMD overallocationBehavior +} + @extension("VK_EXT_vertex_attribute_divisor") // 191 class VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT { VkStructureType sType @@ -10370,25 +10414,6 @@ cmd VkBool32 vkGetPhysicalDeviceWaylandPresentationSupportKHR( return ? } -@extension("VK_KHR_mir_surface") // 8 -cmd VkResult vkCreateMirSurfaceKHR( - VkInstance instance, - const VkMirSurfaceCreateInfoKHR* pCreateInfo, - const VkAllocationCallbacks* pAllocator, - VkSurfaceKHR* pSurface) { - instanceObject := GetInstance(instance) - return ? -} - -@extension("VK_KHR_mir_surface") // 8 -cmd VkBool32 vkGetPhysicalDeviceMirPresentationSupportKHR( - VkPhysicalDevice physicalDevice, - u32 queueFamilyIndex, - platform.MirConnection* connection) { - physicalDeviceObject := GetPhysicalDevice(physicalDevice) - return ? -} - @extension("VK_KHR_android_surface") // 9 cmd VkResult vkCreateAndroidSurfaceKHR( VkInstance instance, @@ -11448,71 +11473,60 @@ cmd void vkCmdSetCoarseSampleOrderNV( const VkCoarseSampleOrderCustomNV* pCustomSampleOrders) { } -@extension("VK_NVX_raytracing") // 166 -cmd VkResult vkCreateAccelerationStructureNVX( +@extension("VK_NV_raytracing") // 166 +cmd VkResult vkCreateAccelerationStructureNV( VkDevice device, - const VkAccelerationStructureCreateInfoNVX* pCreateInfo, + const VkAccelerationStructureCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, - VkAccelerationStructureNVX* pAccelerationStructure) { + VkAccelerationStructureNV* pAccelerationStructure) { return ? } -@extension("VK_NVX_raytracing") // 166 -cmd void vkDestroyAccelerationStructureNVX( +@extension("VK_NV_raytracing") // 166 +cmd void vkDestroyAccelerationStructureNV( VkDevice device, - VkAccelerationStructureNVX accelerationStructure, + VkAccelerationStructureNV accelerationStructure, const VkAllocationCallbacks* pAllocator) { } -@extension("VK_NVX_raytracing") // 166 -cmd void vkGetAccelerationStructureMemoryRequirementsNVX( +@extension("VK_NV_raytracing") // 166 +cmd void vkGetAccelerationStructureMemoryRequirementsNV( VkDevice device, - const VkAccelerationStructureMemoryRequirementsInfoNVX* pInfo, + const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements) { } -@extension("VK_NVX_raytracing") // 166 -cmd void vkGetAccelerationStructureScratchMemoryRequirementsNVX( - VkDevice device, - const VkAccelerationStructureMemoryRequirementsInfoNVX* pInfo, - VkMemoryRequirements2KHR* pMemoryRequirements) { -} - -@extension("VK_NVX_raytracing") // 166 -cmd VkResult vkBindAccelerationStructureMemoryNVX( +@extension("VK_NV_raytracing") // 166 +cmd VkResult vkBindAccelerationStructureMemoryNV( VkDevice device, u32 bindInfoCount, - const VkBindAccelerationStructureMemoryInfoNVX* pBindInfos) { + const VkBindAccelerationStructureMemoryInfoNV* pBindInfos) { return ? 
} -@extension("VK_NVX_raytracing") // 166 -cmd void vkCmdBuildAccelerationStructureNVX( +@extension("VK_NV_raytracing") // 166 +cmd void vkCmdBuildAccelerationStructureNV( VkCommandBuffer commandBuffer, - VkAccelerationStructureTypeNVX type, - u32 instanceCount, + const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, - u32 geometryCount, - const VkGeometryNVX* pGeometries, - VkBuildAccelerationStructureFlagsNVX flags, VkBool32 update, - VkAccelerationStructureNVX dst, - VkAccelerationStructureNVX src, + VkAccelerationStructureNV dst, + VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset) { } -@extension("VK_NVX_raytracing") // 166 -cmd void vkCmdCopyAccelerationStructureNVX( +@extension("VK_NV_raytracing") // 166 +cmd void vkCmdCopyAccelerationStructureNV( VkCommandBuffer commandBuffer, - VkAccelerationStructureNVX dst, - VkAccelerationStructureNVX src, - VkCopyAccelerationStructureModeNVX mode) { + VkAccelerationStructureNV dst, + VkAccelerationStructureNV src, + VkCopyAccelerationStructureModeNV mode) { } -@extension("VK_NVX_raytracing") // 166 -cmd void vkCmdTraceRaysNVX( +@extension("VK_NV_raytracing") // 166 +cmd void vkCmdTraceRaysNV( VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, @@ -11522,23 +11536,27 @@ cmd void vkCmdTraceRaysNVX( VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, + VkBuffer callableShaderBindingTableBuffer, + VkDeviceSize callableShaderBindingOffset, + VkDeviceSize callableShaderBindingStride, u32 width, - u32 height) { + u32 height, + u32 depth) { } -@extension("VK_NVX_raytracing") // 166 -cmd VkResult vkCreateRaytracingPipelinesNVX( +@extension("VK_NV_raytracing") // 166 +cmd VkResult vkCreateRaytracingPipelinesNV( VkDevice device, VkPipelineCache pipelineCache, u32 createInfoCount, - const VkRaytracingPipelineCreateInfoNVX* pCreateInfos, + const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines) { return ? } -@extension("VK_NVX_raytracing") // 166 -cmd VkResult vkGetRaytracingShaderHandlesNVX( +@extension("VK_NV_raytracing") // 166 +cmd VkResult vkGetRaytracingShaderHandlesNV( VkDevice device, VkPipeline pipeline, u32 firstGroup, @@ -11548,26 +11566,27 @@ cmd VkResult vkGetRaytracingShaderHandlesNVX( return ? } -@extension("VK_NVX_raytracing") // 166 -cmd VkResult vkGetAccelerationStructureHandleNVX( +@extension("VK_NV_raytracing") // 166 +cmd VkResult vkGetAccelerationStructureHandleNV( VkDevice device, - VkAccelerationStructureNVX accelerationStructure, + VkAccelerationStructureNV accelerationStructure, platform.size_t dataSize, void* pData) { return ? 
} -@extension("VK_NVX_raytracing") // 166 -cmd void vkCmdWriteAccelerationStructurePropertiesNVX( +@extension("VK_NV_raytracing") // 166 +cmd void vkCmdWriteAccelerationStructurePropertiesNV( VkCommandBuffer commandBuffer, - VkAccelerationStructureNVX accelerationStructure, + u32 accelerationStructureCount, + const VkAccelerationStructureNV* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, - u32 query) { + u32 firstQuery) { } -@extension("VK_NVX_raytracing") // 166 -cmd VkResult vkCompileDeferredNVX( +@extension("VK_NV_raytracing") // 166 +cmd VkResult vkCompileDeferredNV( VkDevice device, VkPipeline pipeline, u32 shader) { diff --git a/vulkan/include/vulkan/vulkan.h b/vulkan/include/vulkan/vulkan.h index d05c8490a5..77da63783e 100644 --- a/vulkan/include/vulkan/vulkan.h +++ b/vulkan/include/vulkan/vulkan.h @@ -24,6 +24,10 @@ #include "vulkan_android.h" #endif +#ifdef VK_USE_PLATFORM_FUCHSIA +#include <zircon/types.h> +#include "vulkan_fuchsia.h" +#endif #ifdef VK_USE_PLATFORM_IOS_MVK #include "vulkan_ios.h" @@ -35,12 +39,6 @@ #endif -#ifdef VK_USE_PLATFORM_MIR_KHR -#include <mir_toolkit/client_types.h> -#include "vulkan_mir.h" -#endif - - #ifdef VK_USE_PLATFORM_VI_NN #include "vulkan_vi.h" #endif diff --git a/vulkan/include/vulkan/vulkan_core.h b/vulkan/include/vulkan/vulkan_core.h index ac9bb66d1e..4cd8ed51dc 100644 --- a/vulkan/include/vulkan/vulkan_core.h +++ b/vulkan/include/vulkan/vulkan_core.h @@ -43,13 +43,12 @@ extern "C" { #define VK_VERSION_MINOR(version) (((uint32_t)(version) >> 12) & 0x3ff) #define VK_VERSION_PATCH(version) ((uint32_t)(version) & 0xfff) // Version of this file -#define VK_HEADER_VERSION 90 +#define VK_HEADER_VERSION 91 #define VK_NULL_HANDLE 0 - #define VK_DEFINE_HANDLE(object) typedef struct object##_T* object; @@ -62,7 +61,6 @@ extern "C" { #endif - typedef uint32_t VkFlags; typedef uint32_t VkBool32; typedef uint64_t VkDeviceSize; @@ -287,7 +285,6 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR = 1000004000, VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR = 1000005000, VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR = 1000006000, - VK_STRUCTURE_TYPE_MIR_SURFACE_CREATE_INFO_KHR = 1000007000, VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR = 1000008000, VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR = 1000009000, VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT = 1000011000, @@ -419,17 +416,17 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV = 1000164001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV = 1000164002, VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV = 1000164005, - VK_STRUCTURE_TYPE_RAYTRACING_PIPELINE_CREATE_INFO_NVX = 1000165000, - VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NVX = 1000165001, - VK_STRUCTURE_TYPE_GEOMETRY_INSTANCE_NVX = 1000165002, - VK_STRUCTURE_TYPE_GEOMETRY_NVX = 1000165003, - VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NVX = 1000165004, - VK_STRUCTURE_TYPE_GEOMETRY_AABB_NVX = 1000165005, - VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NVX = 1000165006, - VK_STRUCTURE_TYPE_DESCRIPTOR_ACCELERATION_STRUCTURE_INFO_NVX = 1000165007, - VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NVX = 1000165008, - VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAYTRACING_PROPERTIES_NVX = 1000165009, - VK_STRUCTURE_TYPE_HIT_SHADER_MODULE_CREATE_INFO_NVX = 1000165010, + VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV = 1000165000, + 
VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV = 1000165001, + VK_STRUCTURE_TYPE_GEOMETRY_NV = 1000165003, + VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV = 1000165004, + VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV = 1000165005, + VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV = 1000165006, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV = 1000165007, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV = 1000165008, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV = 1000165009, + VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV = 1000165011, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV = 1000165012, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV = 1000166000, VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV = 1000166001, VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT = 1000174000, @@ -440,6 +437,7 @@ typedef enum VkStructureType { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR = 1000180000, VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT = 1000184000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD = 1000185000, + VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD = 1000189000, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT = 1000190000, VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT = 1000190001, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT = 1000190002, @@ -848,7 +846,7 @@ typedef enum VkQueryType { VK_QUERY_TYPE_PIPELINE_STATISTICS = 1, VK_QUERY_TYPE_TIMESTAMP = 2, VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT = 1000028004, - VK_QUERY_TYPE_COMPACTED_SIZE_NVX = 1000165000, + VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV = 1000165000, VK_QUERY_TYPE_BEGIN_RANGE = VK_QUERY_TYPE_OCCLUSION, VK_QUERY_TYPE_END_RANGE = VK_QUERY_TYPE_TIMESTAMP, VK_QUERY_TYPE_RANGE_SIZE = (VK_QUERY_TYPE_TIMESTAMP - VK_QUERY_TYPE_OCCLUSION + 1), @@ -1178,7 +1176,7 @@ typedef enum VkDescriptorType { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10, VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT = 1000138000, - VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NVX = 1000165000, + VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000, VK_DESCRIPTOR_TYPE_BEGIN_RANGE = VK_DESCRIPTOR_TYPE_SAMPLER, VK_DESCRIPTOR_TYPE_END_RANGE = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, VK_DESCRIPTOR_TYPE_RANGE_SIZE = (VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT - VK_DESCRIPTOR_TYPE_SAMPLER + 1), @@ -1207,7 +1205,7 @@ typedef enum VkAttachmentStoreOp { typedef enum VkPipelineBindPoint { VK_PIPELINE_BIND_POINT_GRAPHICS = 0, VK_PIPELINE_BIND_POINT_COMPUTE = 1, - VK_PIPELINE_BIND_POINT_RAYTRACING_NVX = 1000165000, + VK_PIPELINE_BIND_POINT_RAY_TRACING_NV = 1000165000, VK_PIPELINE_BIND_POINT_BEGIN_RANGE = VK_PIPELINE_BIND_POINT_GRAPHICS, VK_PIPELINE_BIND_POINT_END_RANGE = VK_PIPELINE_BIND_POINT_COMPUTE, VK_PIPELINE_BIND_POINT_RANGE_SIZE = (VK_PIPELINE_BIND_POINT_COMPUTE - VK_PIPELINE_BIND_POINT_GRAPHICS + 1), @@ -1226,6 +1224,7 @@ typedef enum VkCommandBufferLevel { typedef enum VkIndexType { VK_INDEX_TYPE_UINT16 = 0, VK_INDEX_TYPE_UINT32 = 1, + VK_INDEX_TYPE_NONE_NV = 1000165000, VK_INDEX_TYPE_BEGIN_RANGE = VK_INDEX_TYPE_UINT16, VK_INDEX_TYPE_END_RANGE = VK_INDEX_TYPE_UINT32, VK_INDEX_TYPE_RANGE_SIZE = (VK_INDEX_TYPE_UINT32 - VK_INDEX_TYPE_UINT16 + 1), @@ -1279,7 +1278,7 @@ typedef enum VkObjectType { VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX = 
1000086001, VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT = 1000128000, VK_OBJECT_TYPE_VALIDATION_CACHE_EXT = 1000160000, - VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NVX = 1000165000, + VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000, VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE, VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR = VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION, VK_OBJECT_TYPE_BEGIN_RANGE = VK_OBJECT_TYPE_UNKNOWN, @@ -1447,7 +1446,8 @@ typedef enum VkPipelineStageFlagBits { VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00040000, VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX = 0x00020000, VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV = 0x00400000, - VK_PIPELINE_STAGE_RAYTRACING_BIT_NVX = 0x00200000, + VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV = 0x00200000, + VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV = 0x02000000, VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV = 0x00080000, VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV = 0x00100000, VK_PIPELINE_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF @@ -1544,7 +1544,7 @@ typedef enum VkBufferUsageFlagBits { VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT = 0x00000800, VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT = 0x00001000, VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00000200, - VK_BUFFER_USAGE_RAYTRACING_BIT_NVX = 0x00000400, + VK_BUFFER_USAGE_RAY_TRACING_BIT_NV = 0x00000400, VK_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkBufferUsageFlagBits; typedef VkFlags VkBufferUsageFlags; @@ -1559,7 +1559,7 @@ typedef enum VkPipelineCreateFlagBits { VK_PIPELINE_CREATE_DERIVATIVE_BIT = 0x00000004, VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT = 0x00000008, VK_PIPELINE_CREATE_DISPATCH_BASE = 0x00000010, - VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NVX = 0x00000020, + VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV = 0x00000020, VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR = VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT, VK_PIPELINE_CREATE_DISPATCH_BASE_KHR = VK_PIPELINE_CREATE_DISPATCH_BASE, VK_PIPELINE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF @@ -1576,12 +1576,12 @@ typedef enum VkShaderStageFlagBits { VK_SHADER_STAGE_COMPUTE_BIT = 0x00000020, VK_SHADER_STAGE_ALL_GRAPHICS = 0x0000001F, VK_SHADER_STAGE_ALL = 0x7FFFFFFF, - VK_SHADER_STAGE_RAYGEN_BIT_NVX = 0x00000100, - VK_SHADER_STAGE_ANY_HIT_BIT_NVX = 0x00000200, - VK_SHADER_STAGE_CLOSEST_HIT_BIT_NVX = 0x00000400, - VK_SHADER_STAGE_MISS_BIT_NVX = 0x00000800, - VK_SHADER_STAGE_INTERSECTION_BIT_NVX = 0x00001000, - VK_SHADER_STAGE_CALLABLE_BIT_NVX = 0x00002000, + VK_SHADER_STAGE_RAYGEN_BIT_NV = 0x00000100, + VK_SHADER_STAGE_ANY_HIT_BIT_NV = 0x00000200, + VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV = 0x00000400, + VK_SHADER_STAGE_MISS_BIT_NV = 0x00000800, + VK_SHADER_STAGE_INTERSECTION_BIT_NV = 0x00001000, + VK_SHADER_STAGE_CALLABLE_BIT_NV = 0x00002000, VK_SHADER_STAGE_TASK_BIT_NV = 0x00000040, VK_SHADER_STAGE_MESH_BIT_NV = 0x00000080, VK_SHADER_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF @@ -1673,8 +1673,8 @@ typedef enum VkAccessFlagBits { VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 0x00040000, VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 0x00080000, VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV = 0x00800000, - VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NVX = 0x00200000, - VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NVX = 0x00400000, + VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV = 0x00200000, + VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV = 0x00400000, VK_ACCESS_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF } VkAccessFlagBits; typedef VkFlags VkAccessFlags; @@ -6182,7 +6182,7 
@@ typedef enum VkDebugReportObjectTypeEXT { VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT = 33, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT = 1000156000, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT = 1000085000, - VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NVX_EXT = 1000165000, + VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT = 1000165000, VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT, @@ -8113,81 +8113,113 @@ VKAPI_ATTR void VKAPI_CALL vkCmdSetCoarseSampleOrderNV( const VkCoarseSampleOrderCustomNV* pCustomSampleOrders); #endif -#define VK_NVX_raytracing 1 -VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureNVX) - -#define VK_NVX_RAYTRACING_SPEC_VERSION 1 -#define VK_NVX_RAYTRACING_EXTENSION_NAME "VK_NVX_raytracing" - - -typedef enum VkGeometryTypeNVX { - VK_GEOMETRY_TYPE_TRIANGLES_NVX = 0, - VK_GEOMETRY_TYPE_AABBS_NVX = 1, - VK_GEOMETRY_TYPE_BEGIN_RANGE_NVX = VK_GEOMETRY_TYPE_TRIANGLES_NVX, - VK_GEOMETRY_TYPE_END_RANGE_NVX = VK_GEOMETRY_TYPE_AABBS_NVX, - VK_GEOMETRY_TYPE_RANGE_SIZE_NVX = (VK_GEOMETRY_TYPE_AABBS_NVX - VK_GEOMETRY_TYPE_TRIANGLES_NVX + 1), - VK_GEOMETRY_TYPE_MAX_ENUM_NVX = 0x7FFFFFFF -} VkGeometryTypeNVX; - -typedef enum VkAccelerationStructureTypeNVX { - VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NVX = 0, - VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NVX = 1, - VK_ACCELERATION_STRUCTURE_TYPE_BEGIN_RANGE_NVX = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NVX, - VK_ACCELERATION_STRUCTURE_TYPE_END_RANGE_NVX = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NVX, - VK_ACCELERATION_STRUCTURE_TYPE_RANGE_SIZE_NVX = (VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NVX - VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NVX + 1), - VK_ACCELERATION_STRUCTURE_TYPE_MAX_ENUM_NVX = 0x7FFFFFFF -} VkAccelerationStructureTypeNVX; - -typedef enum VkCopyAccelerationStructureModeNVX { - VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NVX = 0, - VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NVX = 1, - VK_COPY_ACCELERATION_STRUCTURE_MODE_BEGIN_RANGE_NVX = VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NVX, - VK_COPY_ACCELERATION_STRUCTURE_MODE_END_RANGE_NVX = VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NVX, - VK_COPY_ACCELERATION_STRUCTURE_MODE_RANGE_SIZE_NVX = (VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NVX - VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NVX + 1), - VK_COPY_ACCELERATION_STRUCTURE_MODE_MAX_ENUM_NVX = 0x7FFFFFFF -} VkCopyAccelerationStructureModeNVX; - - -typedef enum VkGeometryFlagBitsNVX { - VK_GEOMETRY_OPAQUE_BIT_NVX = 0x00000001, - VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_NVX = 0x00000002, - VK_GEOMETRY_FLAG_BITS_MAX_ENUM_NVX = 0x7FFFFFFF -} VkGeometryFlagBitsNVX; -typedef VkFlags VkGeometryFlagsNVX; - -typedef enum VkGeometryInstanceFlagBitsNVX { - VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NVX = 0x00000001, - VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_FLIP_WINDING_BIT_NVX = 0x00000002, - VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_NVX = 0x00000004, - VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_NVX = 0x00000008, - VK_GEOMETRY_INSTANCE_FLAG_BITS_MAX_ENUM_NVX = 0x7FFFFFFF -} VkGeometryInstanceFlagBitsNVX; -typedef VkFlags VkGeometryInstanceFlagsNVX; - -typedef enum VkBuildAccelerationStructureFlagBitsNVX { - VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NVX = 0x00000001, - 
VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NVX = 0x00000002, - VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NVX = 0x00000004, - VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NVX = 0x00000008, - VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_NVX = 0x00000010, - VK_BUILD_ACCELERATION_STRUCTURE_FLAG_BITS_MAX_ENUM_NVX = 0x7FFFFFFF -} VkBuildAccelerationStructureFlagBitsNVX; -typedef VkFlags VkBuildAccelerationStructureFlagsNVX; - -typedef struct VkRaytracingPipelineCreateInfoNVX { - VkStructureType sType; - const void* pNext; - VkPipelineCreateFlags flags; - uint32_t stageCount; - const VkPipelineShaderStageCreateInfo* pStages; - const uint32_t* pGroupNumbers; - uint32_t maxRecursionDepth; - VkPipelineLayout layout; - VkPipeline basePipelineHandle; - int32_t basePipelineIndex; -} VkRaytracingPipelineCreateInfoNVX; - -typedef struct VkGeometryTrianglesNVX { +#define VK_NV_ray_tracing 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureNV) + +#define VK_NV_RAY_TRACING_SPEC_VERSION 2 +#define VK_NV_RAY_TRACING_EXTENSION_NAME "VK_NV_ray_tracing" +#define VK_SHADER_UNUSED_NV (~0U) + + +typedef enum VkRayTracingShaderGroupTypeNV { + VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV = 0, + VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV = 1, + VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV = 2, + VK_RAY_TRACING_SHADER_GROUP_TYPE_BEGIN_RANGE_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV, + VK_RAY_TRACING_SHADER_GROUP_TYPE_END_RANGE_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV, + VK_RAY_TRACING_SHADER_GROUP_TYPE_RANGE_SIZE_NV = (VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV - VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV + 1), + VK_RAY_TRACING_SHADER_GROUP_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkRayTracingShaderGroupTypeNV; + +typedef enum VkGeometryTypeNV { + VK_GEOMETRY_TYPE_TRIANGLES_NV = 0, + VK_GEOMETRY_TYPE_AABBS_NV = 1, + VK_GEOMETRY_TYPE_BEGIN_RANGE_NV = VK_GEOMETRY_TYPE_TRIANGLES_NV, + VK_GEOMETRY_TYPE_END_RANGE_NV = VK_GEOMETRY_TYPE_AABBS_NV, + VK_GEOMETRY_TYPE_RANGE_SIZE_NV = (VK_GEOMETRY_TYPE_AABBS_NV - VK_GEOMETRY_TYPE_TRIANGLES_NV + 1), + VK_GEOMETRY_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkGeometryTypeNV; + +typedef enum VkAccelerationStructureTypeNV { + VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV = 0, + VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV = 1, + VK_ACCELERATION_STRUCTURE_TYPE_BEGIN_RANGE_NV = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV, + VK_ACCELERATION_STRUCTURE_TYPE_END_RANGE_NV = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV, + VK_ACCELERATION_STRUCTURE_TYPE_RANGE_SIZE_NV = (VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV - VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV + 1), + VK_ACCELERATION_STRUCTURE_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkAccelerationStructureTypeNV; + +typedef enum VkCopyAccelerationStructureModeNV { + VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV = 0, + VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV = 1, + VK_COPY_ACCELERATION_STRUCTURE_MODE_BEGIN_RANGE_NV = VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV, + VK_COPY_ACCELERATION_STRUCTURE_MODE_END_RANGE_NV = VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV, + VK_COPY_ACCELERATION_STRUCTURE_MODE_RANGE_SIZE_NV = (VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV - VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV + 1), + VK_COPY_ACCELERATION_STRUCTURE_MODE_MAX_ENUM_NV = 0x7FFFFFFF +} VkCopyAccelerationStructureModeNV; + +typedef enum VkAccelerationStructureMemoryRequirementsTypeNV { + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV = 0, + 
VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV = 1, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV = 2, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BEGIN_RANGE_NV = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_END_RANGE_NV = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_RANGE_SIZE_NV = (VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV - VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV + 1), + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkAccelerationStructureMemoryRequirementsTypeNV; + + +typedef enum VkGeometryFlagBitsNV { + VK_GEOMETRY_OPAQUE_BIT_NV = 0x00000001, + VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_NV = 0x00000002, + VK_GEOMETRY_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkGeometryFlagBitsNV; +typedef VkFlags VkGeometryFlagsNV; + +typedef enum VkGeometryInstanceFlagBitsNV { + VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV = 0x00000001, + VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_NV = 0x00000002, + VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_NV = 0x00000004, + VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_NV = 0x00000008, + VK_GEOMETRY_INSTANCE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkGeometryInstanceFlagBitsNV; +typedef VkFlags VkGeometryInstanceFlagsNV; + +typedef enum VkBuildAccelerationStructureFlagBitsNV { + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV = 0x00000001, + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV = 0x00000002, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NV = 0x00000004, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NV = 0x00000008, + VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_NV = 0x00000010, + VK_BUILD_ACCELERATION_STRUCTURE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkBuildAccelerationStructureFlagBitsNV; +typedef VkFlags VkBuildAccelerationStructureFlagsNV; + +typedef struct VkRayTracingShaderGroupCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkRayTracingShaderGroupTypeNV type; + uint32_t generalShader; + uint32_t closestHitShader; + uint32_t anyHitShader; + uint32_t intersectionShader; +} VkRayTracingShaderGroupCreateInfoNV; + +typedef struct VkRayTracingPipelineCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + uint32_t stageCount; + const VkPipelineShaderStageCreateInfo* pStages; + uint32_t groupCount; + const VkRayTracingShaderGroupCreateInfoNV* pGroups; + uint32_t maxRecursionDepth; + VkPipelineLayout layout; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkRayTracingPipelineCreateInfoNV; + +typedef struct VkGeometryTrianglesNV { VkStructureType sType; const void* pNext; VkBuffer vertexData; @@ -8201,136 +8233,138 @@ typedef struct VkGeometryTrianglesNVX { VkIndexType indexType; VkBuffer transformData; VkDeviceSize transformOffset; -} VkGeometryTrianglesNVX; +} VkGeometryTrianglesNV; -typedef struct VkGeometryAABBNVX { +typedef struct VkGeometryAABBNV { VkStructureType sType; const void* pNext; VkBuffer aabbData; uint32_t numAABBs; uint32_t stride; VkDeviceSize offset; -} VkGeometryAABBNVX; - -typedef struct VkGeometryDataNVX { - VkGeometryTrianglesNVX triangles; - VkGeometryAABBNVX aabbs; -} VkGeometryDataNVX; +} VkGeometryAABBNV; -typedef struct VkGeometryNVX { - VkStructureType sType; - const void* pNext; - VkGeometryTypeNVX geometryType; - VkGeometryDataNVX 
geometry; - VkGeometryFlagsNVX flags; -} VkGeometryNVX; +typedef struct VkGeometryDataNV { + VkGeometryTrianglesNV triangles; + VkGeometryAABBNV aabbs; +} VkGeometryDataNV; -typedef struct VkAccelerationStructureCreateInfoNVX { - VkStructureType sType; - const void* pNext; - VkAccelerationStructureTypeNVX type; - VkBuildAccelerationStructureFlagsNVX flags; - VkDeviceSize compactedSize; - uint32_t instanceCount; - uint32_t geometryCount; - const VkGeometryNVX* pGeometries; -} VkAccelerationStructureCreateInfoNVX; - -typedef struct VkBindAccelerationStructureMemoryInfoNVX { - VkStructureType sType; - const void* pNext; - VkAccelerationStructureNVX accelerationStructure; - VkDeviceMemory memory; - VkDeviceSize memoryOffset; - uint32_t deviceIndexCount; - const uint32_t* pDeviceIndices; -} VkBindAccelerationStructureMemoryInfoNVX; - -typedef struct VkDescriptorAccelerationStructureInfoNVX { - VkStructureType sType; - const void* pNext; - uint32_t accelerationStructureCount; - const VkAccelerationStructureNVX* pAccelerationStructures; -} VkDescriptorAccelerationStructureInfoNVX; +typedef struct VkGeometryNV { + VkStructureType sType; + const void* pNext; + VkGeometryTypeNV geometryType; + VkGeometryDataNV geometry; + VkGeometryFlagsNV flags; +} VkGeometryNV; -typedef struct VkAccelerationStructureMemoryRequirementsInfoNVX { - VkStructureType sType; - const void* pNext; - VkAccelerationStructureNVX accelerationStructure; -} VkAccelerationStructureMemoryRequirementsInfoNVX; +typedef struct VkAccelerationStructureInfoNV { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureTypeNV type; + VkBuildAccelerationStructureFlagsNV flags; + uint32_t instanceCount; + uint32_t geometryCount; + const VkGeometryNV* pGeometries; +} VkAccelerationStructureInfoNV; + +typedef struct VkAccelerationStructureCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkDeviceSize compactedSize; + VkAccelerationStructureInfoNV info; +} VkAccelerationStructureCreateInfoNV; -typedef struct VkPhysicalDeviceRaytracingPropertiesNVX { +typedef struct VkBindAccelerationStructureMemoryInfoNV { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureNV accelerationStructure; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + uint32_t deviceIndexCount; + const uint32_t* pDeviceIndices; +} VkBindAccelerationStructureMemoryInfoNV; + +typedef struct VkWriteDescriptorSetAccelerationStructureNV { + VkStructureType sType; + const void* pNext; + uint32_t accelerationStructureCount; + const VkAccelerationStructureNV* pAccelerationStructures; +} VkWriteDescriptorSetAccelerationStructureNV; + +typedef struct VkAccelerationStructureMemoryRequirementsInfoNV { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureMemoryRequirementsTypeNV type; + VkAccelerationStructureNV accelerationStructure; +} VkAccelerationStructureMemoryRequirementsInfoNV; + +typedef struct VkPhysicalDeviceRayTracingPropertiesNV { VkStructureType sType; void* pNext; - uint32_t shaderHeaderSize; + uint32_t shaderGroupHandleSize; uint32_t maxRecursionDepth; - uint32_t maxGeometryCount; -} VkPhysicalDeviceRaytracingPropertiesNVX; - - -typedef VkResult (VKAPI_PTR *PFN_vkCreateAccelerationStructureNVX)(VkDevice device, const VkAccelerationStructureCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureNVX* pAccelerationStructure); -typedef void (VKAPI_PTR *PFN_vkDestroyAccelerationStructureNVX)(VkDevice device, VkAccelerationStructureNVX accelerationStructure, const 
VkAllocationCallbacks* pAllocator); -typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureMemoryRequirementsNVX)(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNVX* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements); -typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureScratchMemoryRequirementsNVX)(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNVX* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements); -typedef VkResult (VKAPI_PTR *PFN_vkBindAccelerationStructureMemoryNVX)(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNVX* pBindInfos); -typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructureNVX)(VkCommandBuffer commandBuffer, VkAccelerationStructureTypeNVX type, uint32_t instanceCount, VkBuffer instanceData, VkDeviceSize instanceOffset, uint32_t geometryCount, const VkGeometryNVX* pGeometries, VkBuildAccelerationStructureFlagsNVX flags, VkBool32 update, VkAccelerationStructureNVX dst, VkAccelerationStructureNVX src, VkBuffer scratch, VkDeviceSize scratchOffset); -typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureNVX)(VkCommandBuffer commandBuffer, VkAccelerationStructureNVX dst, VkAccelerationStructureNVX src, VkCopyAccelerationStructureModeNVX mode); -typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysNVX)(VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, uint32_t width, uint32_t height); -typedef VkResult (VKAPI_PTR *PFN_vkCreateRaytracingPipelinesNVX)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRaytracingPipelineCreateInfoNVX* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); -typedef VkResult (VKAPI_PTR *PFN_vkGetRaytracingShaderHandlesNVX)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); -typedef VkResult (VKAPI_PTR *PFN_vkGetAccelerationStructureHandleNVX)(VkDevice device, VkAccelerationStructureNVX accelerationStructure, size_t dataSize, void* pData); -typedef void (VKAPI_PTR *PFN_vkCmdWriteAccelerationStructurePropertiesNVX)(VkCommandBuffer commandBuffer, VkAccelerationStructureNVX accelerationStructure, VkQueryType queryType, VkQueryPool queryPool, uint32_t query); -typedef VkResult (VKAPI_PTR *PFN_vkCompileDeferredNVX)(VkDevice device, VkPipeline pipeline, uint32_t shader); + uint32_t maxShaderGroupStride; + uint32_t shaderGroupBaseAlignment; + uint64_t maxGeometryCount; + uint64_t maxInstanceCount; + uint64_t maxTriangleCount; + uint32_t maxDescriptorSetAccelerationStructures; +} VkPhysicalDeviceRayTracingPropertiesNV; + + +typedef VkResult (VKAPI_PTR *PFN_vkCreateAccelerationStructureNV)(VkDevice device, const VkAccelerationStructureCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureNV* pAccelerationStructure); +typedef void (VKAPI_PTR *PFN_vkDestroyAccelerationStructureNV)(VkDevice device, VkAccelerationStructureNV accelerationStructure, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureMemoryRequirementsNV)(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements); +typedef VkResult (VKAPI_PTR 
*PFN_vkBindAccelerationStructureMemoryNV)(VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNV* pBindInfos); +typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructureNV)(VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset); +typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureNV)(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkCopyAccelerationStructureModeNV mode); +typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysNV)(VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, VkBuffer callableShaderBindingTableBuffer, VkDeviceSize callableShaderBindingOffset, VkDeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth); +typedef VkResult (VKAPI_PTR *PFN_vkCreateRayTracingPipelinesNV)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingShaderGroupHandlesNV)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); +typedef VkResult (VKAPI_PTR *PFN_vkGetAccelerationStructureHandleNV)(VkDevice device, VkAccelerationStructureNV accelerationStructure, size_t dataSize, void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdWriteAccelerationStructuresPropertiesNV)(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureNV* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery); +typedef VkResult (VKAPI_PTR *PFN_vkCompileDeferredNV)(VkDevice device, VkPipeline pipeline, uint32_t shader); #ifndef VK_NO_PROTOTYPES -VKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureNVX( +VKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureNV( VkDevice device, - const VkAccelerationStructureCreateInfoNVX* pCreateInfo, + const VkAccelerationStructureCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, - VkAccelerationStructureNVX* pAccelerationStructure); + VkAccelerationStructureNV* pAccelerationStructure); -VKAPI_ATTR void VKAPI_CALL vkDestroyAccelerationStructureNVX( +VKAPI_ATTR void VKAPI_CALL vkDestroyAccelerationStructureNV( VkDevice device, - VkAccelerationStructureNVX accelerationStructure, + VkAccelerationStructureNV accelerationStructure, const VkAllocationCallbacks* pAllocator); -VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureMemoryRequirementsNVX( - VkDevice device, - const VkAccelerationStructureMemoryRequirementsInfoNVX* pInfo, - VkMemoryRequirements2KHR* pMemoryRequirements); - -VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureScratchMemoryRequirementsNVX( +VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureMemoryRequirementsNV( VkDevice device, - const VkAccelerationStructureMemoryRequirementsInfoNVX* pInfo, + const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements); -VKAPI_ATTR VkResult VKAPI_CALL 
vkBindAccelerationStructureMemoryNVX( +VKAPI_ATTR VkResult VKAPI_CALL vkBindAccelerationStructureMemoryNV( VkDevice device, uint32_t bindInfoCount, - const VkBindAccelerationStructureMemoryInfoNVX* pBindInfos); + const VkBindAccelerationStructureMemoryInfoNV* pBindInfos); -VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructureNVX( +VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructureNV( VkCommandBuffer commandBuffer, - VkAccelerationStructureTypeNVX type, - uint32_t instanceCount, + const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, - uint32_t geometryCount, - const VkGeometryNVX* pGeometries, - VkBuildAccelerationStructureFlagsNVX flags, VkBool32 update, - VkAccelerationStructureNVX dst, - VkAccelerationStructureNVX src, + VkAccelerationStructureNV dst, + VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset); -VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureNVX( +VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureNV( VkCommandBuffer commandBuffer, - VkAccelerationStructureNVX dst, - VkAccelerationStructureNVX src, - VkCopyAccelerationStructureModeNVX mode); + VkAccelerationStructureNV dst, + VkAccelerationStructureNV src, + VkCopyAccelerationStructureModeNV mode); -VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysNVX( +VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysNV( VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, @@ -8340,18 +8374,22 @@ VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysNVX( VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, + VkBuffer callableShaderBindingTableBuffer, + VkDeviceSize callableShaderBindingOffset, + VkDeviceSize callableShaderBindingStride, uint32_t width, - uint32_t height); + uint32_t height, + uint32_t depth); -VKAPI_ATTR VkResult VKAPI_CALL vkCreateRaytracingPipelinesNVX( +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRayTracingPipelinesNV( VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, - const VkRaytracingPipelineCreateInfoNVX* pCreateInfos, + const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); -VKAPI_ATTR VkResult VKAPI_CALL vkGetRaytracingShaderHandlesNVX( +VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingShaderGroupHandlesNV( VkDevice device, VkPipeline pipeline, uint32_t firstGroup, @@ -8359,20 +8397,21 @@ VKAPI_ATTR VkResult VKAPI_CALL vkGetRaytracingShaderHandlesNVX( size_t dataSize, void* pData); -VKAPI_ATTR VkResult VKAPI_CALL vkGetAccelerationStructureHandleNVX( +VKAPI_ATTR VkResult VKAPI_CALL vkGetAccelerationStructureHandleNV( VkDevice device, - VkAccelerationStructureNVX accelerationStructure, + VkAccelerationStructureNV accelerationStructure, size_t dataSize, void* pData); -VKAPI_ATTR void VKAPI_CALL vkCmdWriteAccelerationStructurePropertiesNVX( +VKAPI_ATTR void VKAPI_CALL vkCmdWriteAccelerationStructuresPropertiesNV( VkCommandBuffer commandBuffer, - VkAccelerationStructureNVX accelerationStructure, + uint32_t accelerationStructureCount, + const VkAccelerationStructureNV* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, - uint32_t query); + uint32_t firstQuery); -VKAPI_ATTR VkResult VKAPI_CALL vkCompileDeferredNVX( +VKAPI_ATTR VkResult VKAPI_CALL vkCompileDeferredNV( VkDevice device, VkPipeline pipeline, uint32_t shader); @@ -8534,6 +8573,29 @@ typedef struct VkPhysicalDeviceShaderCorePropertiesAMD { +#define 
VK_AMD_memory_overallocation_behavior 1 +#define VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_SPEC_VERSION 1 +#define VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_EXTENSION_NAME "VK_AMD_memory_overallocation_behavior" + + +typedef enum VkMemoryOverallocationBehaviorAMD { + VK_MEMORY_OVERALLOCATION_BEHAVIOR_DEFAULT_AMD = 0, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_ALLOWED_AMD = 1, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD = 2, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_BEGIN_RANGE_AMD = VK_MEMORY_OVERALLOCATION_BEHAVIOR_DEFAULT_AMD, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_END_RANGE_AMD = VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_RANGE_SIZE_AMD = (VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD - VK_MEMORY_OVERALLOCATION_BEHAVIOR_DEFAULT_AMD + 1), + VK_MEMORY_OVERALLOCATION_BEHAVIOR_MAX_ENUM_AMD = 0x7FFFFFFF +} VkMemoryOverallocationBehaviorAMD; + +typedef struct VkDeviceMemoryOverallocationCreateInfoAMD { + VkStructureType sType; + const void* pNext; + VkMemoryOverallocationBehaviorAMD overallocationBehavior; +} VkDeviceMemoryOverallocationCreateInfoAMD; + + + #define VK_EXT_vertex_attribute_divisor 1 #define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_SPEC_VERSION 3 #define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME "VK_EXT_vertex_attribute_divisor" |
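
Note on the VK_NVX_raytracing to VK_NV_ray_tracing rename above: besides the suffix change, the pipeline create info drops the NVX pGroupNumbers array in favour of explicit VkRayTracingShaderGroupCreateInfoNV entries and a VK_SHADER_UNUSED_NV sentinel. A minimal C sketch of pipeline creation against the new structs follows; the helper name create_rt_pipeline_nv, the two-stage layout (raygen plus closest-hit) and the caller-supplied device, layout and stage array are illustrative assumptions, not part of this change.

    /* Sketch: building a VK_NV_ray_tracing pipeline with explicit shader groups. */
    #include <vulkan/vulkan.h>

    VkResult create_rt_pipeline_nv(VkDevice device,
                                   VkPipelineLayout layout,
                                   const VkPipelineShaderStageCreateInfo stages[2], /* [0]=raygen, [1]=closest hit */
                                   VkPipeline* out_pipeline) {
        VkRayTracingShaderGroupCreateInfoNV groups[2];

        /* Group 0: the raygen shader is a "general" group. */
        groups[0].sType = VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV;
        groups[0].pNext = NULL;
        groups[0].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV;
        groups[0].generalShader = 0;                     /* index into 'stages' */
        groups[0].closestHitShader = VK_SHADER_UNUSED_NV;
        groups[0].anyHitShader = VK_SHADER_UNUSED_NV;
        groups[0].intersectionShader = VK_SHADER_UNUSED_NV;

        /* Group 1: a triangle hit group with only a closest-hit shader. */
        groups[1] = groups[0];
        groups[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV;
        groups[1].generalShader = VK_SHADER_UNUSED_NV;
        groups[1].closestHitShader = 1;

        VkRayTracingPipelineCreateInfoNV info;
        info.sType = VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV;
        info.pNext = NULL;
        info.flags = 0;
        info.stageCount = 2;
        info.pStages = stages;
        info.groupCount = 2;                             /* new vs. NVX: explicit groups */
        info.pGroups = groups;
        info.maxRecursionDepth = 1;
        info.layout = layout;
        info.basePipelineHandle = VK_NULL_HANDLE;
        info.basePipelineIndex = -1;

        return vkCreateRayTracingPipelinesNV(device, VK_NULL_HANDLE, 1, &info,
                                             NULL, out_pipeline);
    }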
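
With the separate NVX scratch-requirements entry point removed, both object and scratch sizes now go through vkGetAccelerationStructureMemoryRequirementsNV, selected by the new type field of VkAccelerationStructureMemoryRequirementsInfoNV. A short sketch of a build-scratch query, assuming the device and acceleration structure handle were created elsewhere; the helper name is hypothetical.

    /* Sketch: query build-scratch size via the 'type' selector. */
    #include <vulkan/vulkan.h>

    VkDeviceSize query_build_scratch_size(VkDevice device, VkAccelerationStructureNV as) {
        VkAccelerationStructureMemoryRequirementsInfoNV info;
        info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV;
        info.pNext = NULL;
        info.type = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV;
        info.accelerationStructure = as;

        VkMemoryRequirements2KHR reqs;
        reqs.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR;
        reqs.pNext = NULL;

        vkGetAccelerationStructureMemoryRequirementsNV(device, &info, &reqs);
        return reqs.memoryRequirements.size;
    }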
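
vkCmdTraceRaysNV also gains a callable shader binding table region and a depth dimension compared with the NVX entry point. A hedged sketch of recording a trace with the widened signature; the single shared SBT buffer, the offsets and strides, and the helper name are assumptions made for illustration, not something this header dictates.

    /* Sketch: recording a trace with the 15-argument vkCmdTraceRaysNV. */
    #include <vulkan/vulkan.h>

    void record_trace_nv(VkCommandBuffer cmd, VkBuffer sbt,
                         VkDeviceSize raygenOffset, VkDeviceSize missOffset,
                         VkDeviceSize hitOffset, VkDeviceSize callableOffset,
                         VkDeviceSize stride,
                         uint32_t width, uint32_t height) {
        vkCmdTraceRaysNV(cmd,
                         sbt, raygenOffset,
                         sbt, missOffset, stride,
                         sbt, hitOffset, stride,
                         sbt, callableOffset, stride,   /* new: callable SBT region */
                         width, height, /*depth=*/1);   /* new: 3D trace dimensions */
    }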
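
The new VK_AMD_memory_overallocation_behavior extension contributes a single struct, VkDeviceMemoryOverallocationCreateInfoAMD. Presumably it is chained into VkDeviceCreateInfo::pNext at device creation time (the chaining target is not spelled out in this header diff); a minimal sketch under that assumption, with the helper name and the "disallowed" policy chosen purely for illustration.

    /* Sketch: request strict (non-overallocating) device memory behaviour. */
    #include <vulkan/vulkan.h>

    void chain_overallocation_policy(VkDeviceCreateInfo* device_info,
                                     VkDeviceMemoryOverallocationCreateInfoAMD* overalloc) {
        overalloc->sType = VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD;
        overalloc->pNext = NULL;
        overalloc->overallocationBehavior = VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD;

        device_info->pNext = overalloc;  /* prepend to the device create chain */
    }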