/*
 * Copyright 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// module header
#include "loader.h"
// standard C headers
// NOTE(review): the original header names were lost in transit; this list is
// reconstructed from the APIs used below (PRIxPTR, malloc_usable_size,
// posix_memalign/strtol, strerror/memcpy, prctl, ...) — TODO confirm against
// the pristine file.
#include <dlfcn.h>
#include <inttypes.h>
#include <malloc.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/prctl.h>
// standard C++ headers
#include <algorithm>
#include <mutex>
#include <new>
#include <string>
#include <utility>
#include <vector>
// platform/library headers
#include <cutils/properties.h>
#include <hardware/hwvulkan.h>
#include <log/log.h>
#include <vulkan/vk_layer_interface.h>
#include <vulkan/vulkan_loader_data.h>

// #define ENABLE_ALLOC_CALLSTACKS 1
#if ENABLE_ALLOC_CALLSTACKS
#include <utils/CallStack.h>
#define ALOGD_CALLSTACK(...)                             \
    do {                                                 \
        ALOGD(__VA_ARGS__);                              \
        android::CallStack callstack;                    \
        callstack.update();                              \
        callstack.log(LOG_TAG, ANDROID_LOG_DEBUG, "  "); \
    } while (false)
#else
#define ALOGD_CALLSTACK(...) \
    do {                     \
    } while (false)
#endif

using namespace vulkan;

static const uint32_t kMaxPhysicalDevices = 4;

namespace {

// ----------------------------------------------------------------------------
// Standard-library allocator that delegates to VkAllocationCallbacks.
//
// TODO(jessehall): This class currently always uses
// VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE. The scope to use could be a template
// parameter or a constructor parameter. The former would help catch bugs
// where we use the wrong scope, e.g. adding a command-scope string to an
// instance-scope vector. But that might also be pretty annoying to deal with.
template class CallbackAllocator { public: typedef T value_type; CallbackAllocator(const VkAllocationCallbacks* alloc_input) : alloc(alloc_input) {} template CallbackAllocator(const CallbackAllocator& other) : alloc(other.alloc) {} T* allocate(std::size_t n) { void* mem = alloc->pfnAllocation(alloc->pUserData, n * sizeof(T), alignof(T), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); if (!mem) throw std::bad_alloc(); return static_cast(mem); } void deallocate(T* array, std::size_t /*n*/) noexcept { alloc->pfnFree(alloc->pUserData, array); } const VkAllocationCallbacks* alloc; }; // These are needed in order to move Strings template bool operator==(const CallbackAllocator& alloc1, const CallbackAllocator& alloc2) { return alloc1.alloc == alloc2.alloc; } template bool operator!=(const CallbackAllocator& alloc1, const CallbackAllocator& alloc2) { return !(alloc1 == alloc2); } template using Vector = std::vector>; typedef std::basic_string, CallbackAllocator> String; // ---------------------------------------------------------------------------- VKAPI_ATTR void* DefaultAllocate(void*, size_t size, size_t alignment, VkSystemAllocationScope) { void* ptr = nullptr; // Vulkan requires 'alignment' to be a power of two, but posix_memalign // additionally requires that it be at least sizeof(void*). int ret = posix_memalign(&ptr, std::max(alignment, sizeof(void*)), size); ALOGD_CALLSTACK("Allocate: size=%zu align=%zu => (%d) %p", size, alignment, ret, ptr); return ret == 0 ? ptr : nullptr; } VKAPI_ATTR void* DefaultReallocate(void*, void* ptr, size_t size, size_t alignment, VkSystemAllocationScope) { if (size == 0) { free(ptr); return nullptr; } // TODO(jessehall): Right now we never shrink allocations; if the new // request is smaller than the existing chunk, we just continue using it. // Right now the loader never reallocs, so this doesn't matter. 
If that // changes, or if this code is copied into some other project, this should // probably have a heuristic to allocate-copy-free when doing so will save // "enough" space. size_t old_size = ptr ? malloc_usable_size(ptr) : 0; if (size <= old_size) return ptr; void* new_ptr = nullptr; if (posix_memalign(&new_ptr, alignment, size) != 0) return nullptr; if (ptr) { memcpy(new_ptr, ptr, std::min(old_size, size)); free(ptr); } return new_ptr; } VKAPI_ATTR void DefaultFree(void*, void* ptr) { ALOGD_CALLSTACK("Free: %p", ptr); free(ptr); } const VkAllocationCallbacks kDefaultAllocCallbacks = { .pUserData = nullptr, .pfnAllocation = DefaultAllocate, .pfnReallocation = DefaultReallocate, .pfnFree = DefaultFree, }; // ---------------------------------------------------------------------------- // Global Data and Initialization hwvulkan_device_t* g_hwdevice = nullptr; InstanceExtensionSet g_driver_instance_extensions; void LoadVulkanHAL() { static const hwvulkan_module_t* module; int result = hw_get_module("vulkan", reinterpret_cast(&module)); if (result != 0) { ALOGE("failed to load vulkan hal: %s (%d)", strerror(-result), result); return; } result = module->common.methods->open( &module->common, HWVULKAN_DEVICE_0, reinterpret_cast(&g_hwdevice)); if (result != 0) { ALOGE("failed to open vulkan driver: %s (%d)", strerror(-result), result); module = nullptr; return; } VkResult vkresult; uint32_t count; if ((vkresult = g_hwdevice->EnumerateInstanceExtensionProperties( nullptr, &count, nullptr)) != VK_SUCCESS) { ALOGE("driver EnumerateInstanceExtensionProperties failed: %d", vkresult); g_hwdevice->common.close(&g_hwdevice->common); g_hwdevice = nullptr; module = nullptr; return; } VkExtensionProperties* extensions = static_cast( alloca(count * sizeof(VkExtensionProperties))); if ((vkresult = g_hwdevice->EnumerateInstanceExtensionProperties( nullptr, &count, extensions)) != VK_SUCCESS) { ALOGE("driver EnumerateInstanceExtensionProperties failed: %d", vkresult); 
g_hwdevice->common.close(&g_hwdevice->common); g_hwdevice = nullptr; module = nullptr; return; } ALOGV_IF(count > 0, "Driver-supported instance extensions:"); for (uint32_t i = 0; i < count; i++) { ALOGV(" %s (v%u)", extensions[i].extensionName, extensions[i].specVersion); InstanceExtension id = InstanceExtensionFromName(extensions[i].extensionName); if (id != kInstanceExtensionCount) g_driver_instance_extensions.set(id); } // Ignore driver attempts to support loader extensions g_driver_instance_extensions.reset(kKHR_surface); g_driver_instance_extensions.reset(kKHR_android_surface); } bool EnsureInitialized() { static std::once_flag once_flag; std::call_once(once_flag, []() { LoadVulkanHAL(); DiscoverLayers(); }); return g_hwdevice != nullptr; } // ----------------------------------------------------------------------------- struct Instance { Instance(const VkAllocationCallbacks* alloc_callbacks) : dispatch_ptr(&dispatch), handle(reinterpret_cast(&dispatch_ptr)), alloc(alloc_callbacks), num_physical_devices(0), active_layers(CallbackAllocator(alloc)), message(VK_NULL_HANDLE) { memset(&dispatch, 0, sizeof(dispatch)); memset(physical_devices, 0, sizeof(physical_devices)); enabled_extensions.reset(); drv.instance = VK_NULL_HANDLE; memset(&drv.dispatch, 0, sizeof(drv.dispatch)); drv.num_physical_devices = 0; } ~Instance() {} const InstanceDispatchTable* dispatch_ptr; const VkInstance handle; InstanceDispatchTable dispatch; const VkAllocationCallbacks* alloc; uint32_t num_physical_devices; VkPhysicalDevice physical_devices_top[kMaxPhysicalDevices]; VkPhysicalDevice physical_devices[kMaxPhysicalDevices]; DeviceExtensionSet physical_device_driver_extensions[kMaxPhysicalDevices]; Vector active_layers; VkDebugReportCallbackEXT message; DebugReportCallbackList debug_report_callbacks; InstanceExtensionSet enabled_extensions; struct { VkInstance instance; DriverDispatchTable dispatch; uint32_t num_physical_devices; } drv; // may eventually be an array }; struct Device { 
Device(Instance* instance_) : instance(instance_), active_layers(CallbackAllocator(instance->alloc)) { memset(&dispatch, 0, sizeof(dispatch)); enabled_extensions.reset(); } DeviceDispatchTable dispatch; Instance* instance; PFN_vkGetDeviceProcAddr get_device_proc_addr; Vector active_layers; DeviceExtensionSet enabled_extensions; }; template struct HandleTraits {}; template <> struct HandleTraits { typedef Instance LoaderObjectType; }; template <> struct HandleTraits { typedef Instance LoaderObjectType; }; template <> struct HandleTraits { typedef Device LoaderObjectType; }; template <> struct HandleTraits { typedef Device LoaderObjectType; }; template <> struct HandleTraits { typedef Device LoaderObjectType; }; template typename HandleTraits::LoaderObjectType& GetDispatchParent( THandle handle) { // TODO(jessehall): Make Instance and Device POD types (by removing the // non-default constructors), so that offsetof is actually legal to use. // The specific case we're using here is safe in gcc/clang (and probably // most other C++ compilers), but isn't guaranteed by C++. 
typedef typename HandleTraits::LoaderObjectType ObjectType; #pragma clang diagnostic push #pragma clang diagnostic ignored "-Winvalid-offsetof" const size_t kDispatchOffset = offsetof(ObjectType, dispatch); #pragma clang diagnostic pop const auto& dispatch = GetDispatchTable(handle); uintptr_t dispatch_addr = reinterpret_cast(&dispatch); uintptr_t object_addr = dispatch_addr - kDispatchOffset; return *reinterpret_cast(object_addr); } // ----------------------------------------------------------------------------- void DestroyDevice(Device* device) { const VkAllocationCallbacks* alloc = device->instance->alloc; device->~Device(); alloc->pfnFree(alloc->pUserData, device); } template LayerRef GetLayerRef(const char* name); template <> LayerRef GetLayerRef(const char* name) { return GetInstanceLayerRef(name); } template <> LayerRef GetLayerRef(const char* name) { return GetDeviceLayerRef(name); } template bool ActivateLayer(TObject* object, const char* name) { LayerRef layer(GetLayerRef(name)); if (!layer) return false; if (std::find(object->active_layers.begin(), object->active_layers.end(), layer) == object->active_layers.end()) { try { object->active_layers.push_back(std::move(layer)); } catch (std::bad_alloc&) { // TODO(jessehall): We should fail with VK_ERROR_OUT_OF_MEMORY // if we can't enable a requested layer. Callers currently ignore // ActivateLayer's return value. 
ALOGW("failed to activate layer '%s': out of memory", name); return false; } } ALOGV("activated layer '%s'", name); return true; } struct InstanceNamesPair { Instance* instance; Vector* layer_names; }; void SetLayerNamesFromProperty(const char* name, const char* value, void* data) { try { const char prefix[] = "debug.vulkan.layer."; const size_t prefixlen = sizeof(prefix) - 1; if (value[0] == '\0' || strncmp(name, prefix, prefixlen) != 0) return; const char* number_str = name + prefixlen; long layer_number = strtol(number_str, nullptr, 10); if (layer_number <= 0 || layer_number == LONG_MAX) { ALOGW("Cannot use a layer at number %ld from string %s", layer_number, number_str); return; } auto instance_names_pair = static_cast(data); Vector* layer_names = instance_names_pair->layer_names; Instance* instance = instance_names_pair->instance; size_t layer_size = static_cast(layer_number); if (layer_size > layer_names->size()) { layer_names->resize( layer_size, String(CallbackAllocator(instance->alloc))); } (*layer_names)[layer_size - 1] = value; } catch (std::bad_alloc&) { ALOGW("failed to handle property '%s'='%s': out of memory", name, value); return; } } template VkResult ActivateAllLayers(TInfo create_info, Instance* instance, TObject* object) { ALOG_ASSERT(create_info->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO || create_info->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, "Cannot activate layers for unknown object %p", object); CallbackAllocator string_allocator(instance->alloc); // Load system layers if (prctl(PR_GET_DUMPABLE, 0, 0, 0, 0)) { char layer_prop[PROPERTY_VALUE_MAX]; property_get("debug.vulkan.layers", layer_prop, ""); char* strtok_state; char* layer_name = nullptr; while ((layer_name = strtok_r(layer_name ? 
nullptr : layer_prop, ":", &strtok_state))) { ActivateLayer(object, layer_name); } Vector layer_names(CallbackAllocator(instance->alloc)); InstanceNamesPair instance_names_pair = {.instance = instance, .layer_names = &layer_names}; property_list(SetLayerNamesFromProperty, static_cast(&instance_names_pair)); for (auto layer_name_element : layer_names) { ActivateLayer(object, layer_name_element.c_str()); } } // Load app layers for (uint32_t i = 0; i < create_info->enabledLayerCount; ++i) { if (!ActivateLayer(object, create_info->ppEnabledLayerNames[i])) { ALOGE("requested %s layer '%s' not present", create_info->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO ? "instance" : "device", create_info->ppEnabledLayerNames[i]); return VK_ERROR_LAYER_NOT_PRESENT; } } return VK_SUCCESS; } template bool AddLayersToCreateInfo(TCreateInfo& local_create_info, const TObject& object, const VkAllocationCallbacks* alloc, bool& allocatedMemory) { // This should never happen and means there is a likely a bug in layer // tracking if (object->active_layers.size() < local_create_info.enabledLayerCount) { ALOGE("Total number of layers is less than those enabled by the app!"); return false; } // Check if the total number of layers enabled is greater than those // enabled by the application. If it is then we have system enabled // layers which need to be added to the list of layers passed in through // create. 
if (object->active_layers.size() > local_create_info.enabledLayerCount) { void* mem = alloc->pfnAllocation( alloc->pUserData, object->active_layers.size() * sizeof(char*), alignof(char*), VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); if (mem) { local_create_info.enabledLayerCount = 0; const char** names = static_cast(mem); for (const auto& layer : object->active_layers) { const char* name = layer.GetName(); names[local_create_info.enabledLayerCount++] = name; } local_create_info.ppEnabledLayerNames = names; } else { ALOGE("System layers cannot be enabled: memory allocation failed"); return false; } allocatedMemory = true; } else { allocatedMemory = false; } return true; } template void FreeAllocatedLayerCreateInfo(T& local_create_info, const VkAllocationCallbacks* alloc) { alloc->pfnFree(alloc->pUserData, const_cast(local_create_info.ppEnabledLayerNames)); } template bool AddExtensionToCreateInfo(TCreateInfo& local_create_info, const char* extension_name, const VkAllocationCallbacks* alloc) { uint32_t extension_count = local_create_info.enabledExtensionCount; local_create_info.enabledExtensionCount++; void* mem = alloc->pfnAllocation( alloc->pUserData, local_create_info.enabledExtensionCount * sizeof(char*), alignof(char*), VK_SYSTEM_ALLOCATION_SCOPE_COMMAND); if (mem) { const char** enabled_extensions = static_cast(mem); for (uint32_t i = 0; i < extension_count; ++i) { enabled_extensions[i] = local_create_info.ppEnabledExtensionNames[i]; } enabled_extensions[extension_count] = extension_name; local_create_info.ppEnabledExtensionNames = enabled_extensions; } else { ALOGE("%s extension cannot be enabled: memory allocation failed", extension_name); return false; } return true; } template void FreeAllocatedExtensionCreateInfo(T& local_create_info, const VkAllocationCallbacks* alloc) { alloc->pfnFree( alloc->pUserData, const_cast(local_create_info.ppEnabledExtensionNames)); } VKAPI_ATTR VkBool32 LogDebugMessageCallback(VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT 
/*objectType*/, uint64_t /*object*/, size_t /*location*/, int32_t message_code, const char* layer_prefix, const char* message, void* /*user_data*/) { if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) { ALOGE("[%s] Code %d : %s", layer_prefix, message_code, message); } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) { ALOGW("[%s] Code %d : %s", layer_prefix, message_code, message); } return false; } VkResult Noop() { return VK_SUCCESS; } /* * This function will return the pNext pointer of any * CreateInfo extensions that are not loader extensions. * This is used to skip past the loader extensions prepended * to the list during CreateInstance and CreateDevice. */ void* StripCreateExtensions(const void* pNext) { VkLayerInstanceCreateInfo* create_info = const_cast( static_cast(pNext)); while ( create_info && (create_info->sType == VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO || create_info->sType == VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO)) { create_info = const_cast( static_cast(create_info->pNext)); } return create_info; } // Clean up and deallocate an Instance; called from both the failure paths in // CreateInstance_Top as well as from DestroyInstance_Top. This function does // not call down the dispatch chain; that should be done before calling this // function, iff the lower vkCreateInstance call has been made and returned // successfully. void DestroyInstance(Instance* instance, const VkAllocationCallbacks* allocator) { if (instance->message) { PFN_vkDestroyDebugReportCallbackEXT destroy_debug_report_callback; destroy_debug_report_callback = reinterpret_cast( GetInstanceProcAddr_Top(instance->handle, "vkDestroyDebugReportCallbackEXT")); destroy_debug_report_callback(instance->handle, instance->message, allocator); } instance->~Instance(); allocator->pfnFree(allocator->pUserData, instance); } } // anonymous namespace namespace vulkan { // ----------------------------------------------------------------------------- // "Bottom" functions. 
These are called at the end of the instance dispatch // chain. VkResult CreateInstance_Bottom(const VkInstanceCreateInfo* create_info, const VkAllocationCallbacks* allocator, VkInstance* vkinstance) { VkResult result; VkLayerInstanceCreateInfo* chain_info = const_cast( static_cast(create_info->pNext)); while ( chain_info && !(chain_info->sType == VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO && chain_info->function == VK_LAYER_FUNCTION_INSTANCE)) { chain_info = const_cast( static_cast(chain_info->pNext)); } ALOG_ASSERT(chain_info != nullptr, "Missing initialization chain info!"); Instance& instance = GetDispatchParent( static_cast(chain_info->u.instanceInfo.instance_info)); // Check that all enabled extensions are supported uint32_t num_driver_extensions = 0; for (uint32_t i = 0; i < create_info->enabledExtensionCount; i++) { const char* name = create_info->ppEnabledExtensionNames[i]; InstanceExtension id = InstanceExtensionFromName(name); if (id != kInstanceExtensionCount) { if (g_driver_instance_extensions[id]) { num_driver_extensions++; instance.enabled_extensions.set(id); continue; } if (id == kKHR_surface || id == kKHR_android_surface) { instance.enabled_extensions.set(id); continue; } // The loader natively supports debug report. 
if (id == kEXT_debug_report) { continue; } } bool supported = false; for (const auto& layer : instance.active_layers) { if (layer.SupportsExtension(name)) supported = true; } if (!supported) { ALOGE( "requested instance extension '%s' not supported by " "loader, driver, or any active layers", name); DestroyInstance_Bottom(instance.handle, allocator); return VK_ERROR_EXTENSION_NOT_PRESENT; } } VkInstanceCreateInfo driver_create_info = *create_info; driver_create_info.pNext = StripCreateExtensions(create_info->pNext); driver_create_info.enabledLayerCount = 0; driver_create_info.ppEnabledLayerNames = nullptr; driver_create_info.enabledExtensionCount = 0; driver_create_info.ppEnabledExtensionNames = nullptr; if (num_driver_extensions > 0) { const char** names = static_cast( alloca(num_driver_extensions * sizeof(char*))); for (uint32_t i = 0; i < create_info->enabledExtensionCount; i++) { const char* name = create_info->ppEnabledExtensionNames[i]; InstanceExtension id = InstanceExtensionFromName(name); if (id != kInstanceExtensionCount) { if (g_driver_instance_extensions[id]) { names[driver_create_info.enabledExtensionCount++] = name; continue; } } } driver_create_info.ppEnabledExtensionNames = names; ALOG_ASSERT( driver_create_info.enabledExtensionCount == num_driver_extensions, "counted enabled driver instance extensions twice and got " "different answers!"); } result = g_hwdevice->CreateInstance(&driver_create_info, instance.alloc, &instance.drv.instance); if (result != VK_SUCCESS) { DestroyInstance_Bottom(instance.handle, allocator); return result; } hwvulkan_dispatch_t* drv_dispatch = reinterpret_cast(instance.drv.instance); if (drv_dispatch->magic != HWVULKAN_DISPATCH_MAGIC) { ALOGE("invalid VkInstance dispatch magic: 0x%" PRIxPTR, drv_dispatch->magic); DestroyInstance_Bottom(instance.handle, allocator); return VK_ERROR_INITIALIZATION_FAILED; } // Skip setting drv_dispatch->vtbl, since we never call through it; // we go through instance.drv.dispatch instead. 
if (!LoadDriverDispatchTable( instance.drv.instance, g_hwdevice->GetInstanceProcAddr, instance.enabled_extensions, instance.drv.dispatch)) { DestroyInstance_Bottom(instance.handle, allocator); return VK_ERROR_INITIALIZATION_FAILED; } uint32_t num_physical_devices = 0; result = instance.drv.dispatch.EnumeratePhysicalDevices( instance.drv.instance, &num_physical_devices, nullptr); if (result != VK_SUCCESS) { DestroyInstance_Bottom(instance.handle, allocator); return VK_ERROR_INITIALIZATION_FAILED; } num_physical_devices = std::min(num_physical_devices, kMaxPhysicalDevices); result = instance.drv.dispatch.EnumeratePhysicalDevices( instance.drv.instance, &num_physical_devices, instance.physical_devices); if (result != VK_SUCCESS) { DestroyInstance_Bottom(instance.handle, allocator); return VK_ERROR_INITIALIZATION_FAILED; } Vector extensions( Vector::allocator_type(instance.alloc)); for (uint32_t i = 0; i < num_physical_devices; i++) { hwvulkan_dispatch_t* pdev_dispatch = reinterpret_cast( instance.physical_devices[i]); if (pdev_dispatch->magic != HWVULKAN_DISPATCH_MAGIC) { ALOGE("invalid VkPhysicalDevice dispatch magic: 0x%" PRIxPTR, pdev_dispatch->magic); DestroyInstance_Bottom(instance.handle, allocator); return VK_ERROR_INITIALIZATION_FAILED; } pdev_dispatch->vtbl = instance.dispatch_ptr; uint32_t count; if ((result = instance.drv.dispatch.EnumerateDeviceExtensionProperties( instance.physical_devices[i], nullptr, &count, nullptr)) != VK_SUCCESS) { ALOGW("driver EnumerateDeviceExtensionProperties(%u) failed: %d", i, result); continue; } try { extensions.resize(count); } catch (std::bad_alloc&) { ALOGE("instance creation failed: out of memory"); DestroyInstance_Bottom(instance.handle, allocator); return VK_ERROR_OUT_OF_HOST_MEMORY; } if ((result = instance.drv.dispatch.EnumerateDeviceExtensionProperties( instance.physical_devices[i], nullptr, &count, extensions.data())) != VK_SUCCESS) { ALOGW("driver EnumerateDeviceExtensionProperties(%u) failed: %d", i, result); 
continue; } ALOGV_IF(count > 0, "driver gpu[%u] supports extensions:", i); for (const auto& extension : extensions) { ALOGV(" %s (v%u)", extension.extensionName, extension.specVersion); DeviceExtension id = DeviceExtensionFromName(extension.extensionName); if (id == kDeviceExtensionCount) { ALOGW("driver gpu[%u] extension '%s' unknown to loader", i, extension.extensionName); } else { instance.physical_device_driver_extensions[i].set(id); } } // Ignore driver attempts to support loader extensions instance.physical_device_driver_extensions[i].reset(kKHR_swapchain); } instance.drv.num_physical_devices = num_physical_devices; instance.num_physical_devices = instance.drv.num_physical_devices; *vkinstance = instance.handle; return VK_SUCCESS; } VkResult CreateAndroidSurfaceKHR_Disabled(VkInstance, const VkAndroidSurfaceCreateInfoKHR*, const VkAllocationCallbacks*, VkSurfaceKHR*) { ALOGE( "VK_KHR_android_surface not enabled. vkCreateAndroidSurfaceKHR not " "executed."); return VK_SUCCESS; } void DestroySurfaceKHR_Disabled(VkInstance, VkSurfaceKHR, const VkAllocationCallbacks*) { ALOGE("VK_KHR_surface not enabled. vkDestroySurfaceKHR not executed."); } VkResult GetPhysicalDeviceSurfaceSupportKHR_Disabled(VkPhysicalDevice, uint32_t, VkSurfaceKHR, VkBool32*) { ALOGE( "VK_KHR_surface not enabled. vkGetPhysicalDeviceSurfaceSupportKHR not " "executed."); return VK_SUCCESS; } VkResult GetPhysicalDeviceSurfaceCapabilitiesKHR_Disabled( VkPhysicalDevice, VkSurfaceKHR, VkSurfaceCapabilitiesKHR*) { ALOGE( "VK_KHR_surface not enabled. vkGetPhysicalDeviceSurfaceapabilitiesKHR " "not executed."); return VK_SUCCESS; } VkResult GetPhysicalDeviceSurfaceFormatsKHR_Disabled(VkPhysicalDevice, VkSurfaceKHR, uint32_t*, VkSurfaceFormatKHR*) { ALOGE( "VK_KHR_surface not enabled. 
vkGetPhysicalDeviceSurfaceFormatsKHR not " "executed."); return VK_SUCCESS; } VkResult GetPhysicalDeviceSurfacePresentModesKHR_Disabled(VkPhysicalDevice, VkSurfaceKHR, uint32_t*, VkPresentModeKHR*) { ALOGE( "VK_KHR_surface not enabled. vkGetPhysicalDeviceSurfacePresentModesKHR " "not executed."); return VK_SUCCESS; } PFN_vkVoidFunction GetInstanceProcAddr_Bottom(VkInstance vkinstance, const char* name) { PFN_vkVoidFunction pfn; if (vkinstance) { Instance& instance = GetDispatchParent(vkinstance); if (!instance.enabled_extensions[kKHR_android_surface]) { // KHR_android_surface is not enabled, use error stubs instead if (strcmp(name, "vkCreateAndroidSurfaceKHR") == 0) { return reinterpret_cast( CreateAndroidSurfaceKHR_Disabled); } } if (!instance.enabled_extensions[kKHR_surface]) { // KHR_surface is not enabled, use error stubs instead if (strcmp(name, "vkDestroySurfaceKHR") == 0) { return reinterpret_cast( DestroySurfaceKHR_Disabled); } if (strcmp(name, "vkGetPhysicalDeviceSurfaceSupportKHR") == 0) { return reinterpret_cast( GetPhysicalDeviceSurfaceSupportKHR_Disabled); } if (strcmp(name, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR") == 0) { return reinterpret_cast( GetPhysicalDeviceSurfaceCapabilitiesKHR_Disabled); } if (strcmp(name, "vkGetPhysicalDeviceSurfaceFormatsKHR") == 0) { return reinterpret_cast( GetPhysicalDeviceSurfaceFormatsKHR_Disabled); } if (strcmp(name, "vkGetPhysicalDeviceSurfacePresentModesKHR") == 0) { return reinterpret_cast( GetPhysicalDeviceSurfacePresentModesKHR_Disabled); } } } if ((pfn = GetLoaderBottomProcAddr(name))) return pfn; return nullptr; } VkResult EnumeratePhysicalDevices_Bottom(VkInstance vkinstance, uint32_t* pdev_count, VkPhysicalDevice* pdevs) { Instance& instance = GetDispatchParent(vkinstance); uint32_t count = instance.num_physical_devices; if (pdevs) { count = std::min(count, *pdev_count); std::copy(instance.physical_devices, instance.physical_devices + count, pdevs); } *pdev_count = count; return VK_SUCCESS; } void 
GetPhysicalDeviceProperties_Bottom( VkPhysicalDevice pdev, VkPhysicalDeviceProperties* properties) { GetDispatchParent(pdev).drv.dispatch.GetPhysicalDeviceProperties( pdev, properties); } void GetPhysicalDeviceFeatures_Bottom(VkPhysicalDevice pdev, VkPhysicalDeviceFeatures* features) { GetDispatchParent(pdev).drv.dispatch.GetPhysicalDeviceFeatures(pdev, features); } void GetPhysicalDeviceMemoryProperties_Bottom( VkPhysicalDevice pdev, VkPhysicalDeviceMemoryProperties* properties) { GetDispatchParent(pdev).drv.dispatch.GetPhysicalDeviceMemoryProperties( pdev, properties); } void GetPhysicalDeviceQueueFamilyProperties_Bottom( VkPhysicalDevice pdev, uint32_t* pCount, VkQueueFamilyProperties* properties) { GetDispatchParent(pdev).drv.dispatch.GetPhysicalDeviceQueueFamilyProperties( pdev, pCount, properties); } void GetPhysicalDeviceFormatProperties_Bottom(VkPhysicalDevice pdev, VkFormat format, VkFormatProperties* properties) { GetDispatchParent(pdev).drv.dispatch.GetPhysicalDeviceFormatProperties( pdev, format, properties); } VkResult GetPhysicalDeviceImageFormatProperties_Bottom( VkPhysicalDevice pdev, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties* properties) { return GetDispatchParent(pdev) .drv.dispatch.GetPhysicalDeviceImageFormatProperties( pdev, format, type, tiling, usage, flags, properties); } void GetPhysicalDeviceSparseImageFormatProperties_Bottom( VkPhysicalDevice pdev, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, uint32_t* properties_count, VkSparseImageFormatProperties* properties) { GetDispatchParent(pdev) .drv.dispatch.GetPhysicalDeviceSparseImageFormatProperties( pdev, format, type, samples, usage, tiling, properties_count, properties); } // This is a no-op, the Top function returns the aggregate layer property // data. This is to keep the dispatch generator happy. 
VKAPI_ATTR VkResult EnumerateDeviceExtensionProperties_Bottom( VkPhysicalDevice /*pdev*/, const char* /*layer_name*/, uint32_t* /*properties_count*/, VkExtensionProperties* /*properties*/) { return VK_SUCCESS; } // This is a no-op, the Top function returns the aggregate layer property // data. This is to keep the dispatch generator happy. VKAPI_ATTR VkResult EnumerateDeviceLayerProperties_Bottom( VkPhysicalDevice /*pdev*/, uint32_t* /*properties_count*/, VkLayerProperties* /*properties*/) { return VK_SUCCESS; } VKAPI_ATTR VkResult CreateDevice_Bottom(VkPhysicalDevice gpu, const VkDeviceCreateInfo* create_info, const VkAllocationCallbacks* allocator, VkDevice* device_out) { VkLayerDeviceCreateInfo* chain_info = const_cast( static_cast(create_info->pNext)); while (chain_info && !(chain_info->sType == VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO && chain_info->function == VK_LAYER_FUNCTION_DEVICE)) { chain_info = const_cast( static_cast(chain_info->pNext)); } ALOG_ASSERT(chain_info != nullptr, "Missing initialization chain info!"); Instance& instance = GetDispatchParent(gpu); size_t gpu_idx = 0; while (instance.physical_devices[gpu_idx] != gpu) gpu_idx++; Device* device = static_cast(chain_info->u.deviceInfo.device_info); PFN_vkGetInstanceProcAddr get_instance_proc_addr = chain_info->u.deviceInfo.pfnNextGetInstanceProcAddr; VkDeviceCreateInfo driver_create_info = *create_info; driver_create_info.pNext = StripCreateExtensions(create_info->pNext); driver_create_info.enabledLayerCount = 0; driver_create_info.ppEnabledLayerNames = nullptr; uint32_t num_driver_extensions = 0; const char** driver_extensions = static_cast( alloca(create_info->enabledExtensionCount * sizeof(const char*))); for (uint32_t i = 0; i < create_info->enabledExtensionCount; i++) { const char* name = create_info->ppEnabledExtensionNames[i]; DeviceExtension id = DeviceExtensionFromName(name); if (id != kDeviceExtensionCount) { if (instance.physical_device_driver_extensions[gpu_idx][id]) { 
driver_extensions[num_driver_extensions++] = name; device->enabled_extensions.set(id); continue; } // Add the VK_ANDROID_native_buffer extension to the list iff // the VK_KHR_swapchain extension was requested if (id == kKHR_swapchain && instance.physical_device_driver_extensions [gpu_idx][kANDROID_native_buffer]) { driver_extensions[num_driver_extensions++] = VK_ANDROID_NATIVE_BUFFER_EXTENSION_NAME; device->enabled_extensions.set(id); continue; } } bool supported = false; for (const auto& layer : device->active_layers) { if (layer.SupportsExtension(name)) supported = true; } if (!supported) { ALOGE( "requested device extension '%s' not supported by loader, " "driver, or any active layers", name); return VK_ERROR_EXTENSION_NOT_PRESENT; } } driver_create_info.enabledExtensionCount = num_driver_extensions; driver_create_info.ppEnabledExtensionNames = driver_extensions; VkDevice drv_device; VkResult result = instance.drv.dispatch.CreateDevice( gpu, &driver_create_info, allocator, &drv_device); if (result != VK_SUCCESS) { return VK_ERROR_INITIALIZATION_FAILED; } hwvulkan_dispatch_t* drv_dispatch = reinterpret_cast(drv_device); if (drv_dispatch->magic != HWVULKAN_DISPATCH_MAGIC) { ALOGE("invalid VkDevice dispatch magic: 0x%" PRIxPTR, drv_dispatch->magic); PFN_vkDestroyDevice destroy_device = reinterpret_cast( instance.drv.dispatch.GetDeviceProcAddr(drv_device, "vkDestroyDevice")); destroy_device(drv_device, allocator); return VK_ERROR_INITIALIZATION_FAILED; } // Set dispatch table for newly created Device // CreateDevice_Top will fill in the details drv_dispatch->vtbl = &device->dispatch; device->get_device_proc_addr = reinterpret_cast( instance.drv.dispatch.GetDeviceProcAddr(drv_device, "vkGetDeviceProcAddr")); *device_out = drv_device; return VK_SUCCESS; } void DestroyInstance_Bottom(VkInstance vkinstance, const VkAllocationCallbacks* allocator) { Instance& instance = GetDispatchParent(vkinstance); // These checks allow us to call DestroyInstance_Bottom from any error 
// path in CreateInstance_Bottom, before the driver instance is fully // initialized. if (instance.drv.instance != VK_NULL_HANDLE && instance.drv.dispatch.DestroyInstance) { instance.drv.dispatch.DestroyInstance(instance.drv.instance, allocator); instance.drv.instance = VK_NULL_HANDLE; } } VkResult CreateSwapchainKHR_Disabled(VkDevice, const VkSwapchainCreateInfoKHR*, const VkAllocationCallbacks*, VkSwapchainKHR*) { ALOGE("VK_KHR_swapchain not enabled. vkCreateSwapchainKHR not executed."); return VK_SUCCESS; } void DestroySwapchainKHR_Disabled(VkDevice, VkSwapchainKHR, const VkAllocationCallbacks*) { ALOGE("VK_KHR_swapchain not enabled. vkDestroySwapchainKHR not executed."); } VkResult GetSwapchainImagesKHR_Disabled(VkDevice, VkSwapchainKHR, uint32_t*, VkImage*) { ALOGE( "VK_KHR_swapchain not enabled. vkGetSwapchainImagesKHR not executed."); return VK_SUCCESS; } VkResult AcquireNextImageKHR_Disabled(VkDevice, VkSwapchainKHR, uint64_t, VkSemaphore, VkFence, uint32_t*) { ALOGE("VK_KHR_swapchain not enabled. vkAcquireNextImageKHR not executed."); return VK_SUCCESS; } VkResult QueuePresentKHR_Disabled(VkQueue, const VkPresentInfoKHR*) { ALOGE("VK_KHR_swapchain not enabled. 
vkQueuePresentKHR not executed."); return VK_SUCCESS; } PFN_vkVoidFunction GetDeviceProcAddr_Bottom(VkDevice vkdevice, const char* name) { if (strcmp(name, "vkCreateDevice") == 0) { return reinterpret_cast(CreateDevice_Bottom); } Device& device = GetDispatchParent(vkdevice); if (!device.enabled_extensions[kKHR_swapchain]) { if (strcmp(name, "vkCreateSwapchainKHR") == 0) { return reinterpret_cast( CreateSwapchainKHR_Disabled); } if (strcmp(name, "vkDestroySwapchainKHR") == 0) { return reinterpret_cast( DestroySwapchainKHR_Disabled); } if (strcmp(name, "vkGetSwapchainImagesKHR") == 0) { return reinterpret_cast( GetSwapchainImagesKHR_Disabled); } if (strcmp(name, "vkAcquireNextSwapchainImageKHR") == 0) { return reinterpret_cast( AcquireNextImageKHR_Disabled); } if (strcmp(name, "vkQueuePresentKHR") == 0) { return reinterpret_cast( QueuePresentKHR_Disabled); } } // VK_ANDROID_native_buffer should be hidden from applications and layers. // TODO(jessehall): Generate this as part of GetLoaderBottomProcAddr. PFN_vkVoidFunction pfn; if (strcmp(name, "vkGetSwapchainGrallocUsageANDROID") == 0 || strcmp(name, "vkAcquireImageANDROID") == 0 || strcmp(name, "vkQueueSignalReleaseImageANDROID") == 0) { return nullptr; } if ((pfn = GetLoaderBottomProcAddr(name))) return pfn; return GetDispatchParent(vkdevice).get_device_proc_addr(vkdevice, name); } // ----------------------------------------------------------------------------- // Loader top functions. These are called directly from the loader entry // points or from the application (via vkGetInstanceProcAddr) without going // through a dispatch table. 
VkResult EnumerateInstanceExtensionProperties_Top( const char* layer_name, uint32_t* properties_count, VkExtensionProperties* properties) { if (!EnsureInitialized()) return VK_ERROR_INITIALIZATION_FAILED; const VkExtensionProperties* extensions = nullptr; uint32_t num_extensions = 0; if (layer_name) { GetInstanceLayerExtensions(layer_name, &extensions, &num_extensions); } else { VkExtensionProperties* available = static_cast( alloca(kInstanceExtensionCount * sizeof(VkExtensionProperties))); available[num_extensions++] = VkExtensionProperties{ VK_KHR_SURFACE_EXTENSION_NAME, VK_KHR_SURFACE_SPEC_VERSION}; available[num_extensions++] = VkExtensionProperties{VK_KHR_ANDROID_SURFACE_EXTENSION_NAME, VK_KHR_ANDROID_SURFACE_SPEC_VERSION}; if (g_driver_instance_extensions[kEXT_debug_report]) { available[num_extensions++] = VkExtensionProperties{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}; } // TODO(jessehall): We need to also enumerate extensions supported by // implicitly-enabled layers. Currently we don't have that list of // layers until instance creation. extensions = available; } if (!properties || *properties_count > num_extensions) *properties_count = num_extensions; if (properties) std::copy(extensions, extensions + *properties_count, properties); return *properties_count < num_extensions ? VK_INCOMPLETE : VK_SUCCESS; } VkResult EnumerateInstanceLayerProperties_Top(uint32_t* properties_count, VkLayerProperties* properties) { if (!EnsureInitialized()) return VK_ERROR_INITIALIZATION_FAILED; uint32_t layer_count = EnumerateInstanceLayers(properties ? *properties_count : 0, properties); if (!properties || *properties_count > layer_count) *properties_count = layer_count; return *properties_count < layer_count ? 
VK_INCOMPLETE : VK_SUCCESS; } VKAPI_ATTR VkResult EnumerateDeviceExtensionProperties_Top( VkPhysicalDevice gpu, const char* layer_name, uint32_t* properties_count, VkExtensionProperties* properties) { const VkExtensionProperties* extensions = nullptr; uint32_t num_extensions = 0; ALOGV("EnumerateDeviceExtensionProperties_Top:"); if (layer_name) { ALOGV(" layer %s", layer_name); GetDeviceLayerExtensions(layer_name, &extensions, &num_extensions); } else { ALOGV(" no layer"); Instance& instance = GetDispatchParent(gpu); size_t gpu_idx = 0; while (instance.physical_devices_top[gpu_idx] != gpu) gpu_idx++; const DeviceExtensionSet driver_extensions = instance.physical_device_driver_extensions[gpu_idx]; // We only support VK_KHR_swapchain if the GPU supports // VK_ANDROID_native_buffer VkExtensionProperties* available = static_cast( alloca(kDeviceExtensionCount * sizeof(VkExtensionProperties))); if (driver_extensions[kANDROID_native_buffer]) { available[num_extensions++] = VkExtensionProperties{ VK_KHR_SWAPCHAIN_EXTENSION_NAME, VK_KHR_SWAPCHAIN_SPEC_VERSION}; } // TODO(jessehall): We need to also enumerate extensions supported by // implicitly-enabled layers. Currently we don't have that list of // layers until instance creation. extensions = available; } ALOGV(" num: %d, extensions: %p", num_extensions, extensions); if (!properties || *properties_count > num_extensions) *properties_count = num_extensions; if (properties) std::copy(extensions, extensions + *properties_count, properties); return *properties_count < num_extensions ? 
VK_INCOMPLETE : VK_SUCCESS; } VkResult CreateInstance_Top(const VkInstanceCreateInfo* create_info, const VkAllocationCallbacks* allocator, VkInstance* instance_out) { VkResult result; if (!EnsureInitialized()) return VK_ERROR_INITIALIZATION_FAILED; if (!allocator) allocator = &kDefaultAllocCallbacks; VkInstanceCreateInfo local_create_info = *create_info; create_info = &local_create_info; void* instance_mem = allocator->pfnAllocation( allocator->pUserData, sizeof(Instance), alignof(Instance), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); if (!instance_mem) return VK_ERROR_OUT_OF_HOST_MEMORY; Instance* instance = new (instance_mem) Instance(allocator); result = ActivateAllLayers(create_info, instance, instance); if (result != VK_SUCCESS) { DestroyInstance(instance, allocator); return result; } uint32_t activated_layers = 0; VkLayerInstanceCreateInfo chain_info; VkLayerInstanceLink* layer_instance_link_info = nullptr; PFN_vkGetInstanceProcAddr next_gipa = GetInstanceProcAddr_Bottom; VkInstance local_instance = nullptr; if (instance->active_layers.size() > 0) { chain_info.u.pLayerInfo = nullptr; chain_info.pNext = create_info->pNext; chain_info.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO; chain_info.function = VK_LAYER_FUNCTION_LINK; local_create_info.pNext = &chain_info; layer_instance_link_info = static_cast(alloca( sizeof(VkLayerInstanceLink) * instance->active_layers.size())); if (!layer_instance_link_info) { ALOGE("Failed to alloc Instance objects for layers"); DestroyInstance(instance, allocator); return VK_ERROR_OUT_OF_HOST_MEMORY; } /* Create instance chain of enabled layers */ for (auto rit = instance->active_layers.rbegin(); rit != instance->active_layers.rend(); ++rit) { LayerRef& layer = *rit; layer_instance_link_info[activated_layers].pNext = chain_info.u.pLayerInfo; layer_instance_link_info[activated_layers] .pfnNextGetInstanceProcAddr = next_gipa; chain_info.u.pLayerInfo = &layer_instance_link_info[activated_layers]; next_gipa = 
layer.GetGetInstanceProcAddr(); ALOGV("Insert instance layer %s (v%u)", layer.GetName(), layer.GetSpecVersion()); activated_layers++; } } PFN_vkCreateInstance create_instance = reinterpret_cast( next_gipa(VK_NULL_HANDLE, "vkCreateInstance")); if (!create_instance) { DestroyInstance(instance, allocator); return VK_ERROR_INITIALIZATION_FAILED; } VkLayerInstanceCreateInfo instance_create_info; instance_create_info.sType = VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO; instance_create_info.function = VK_LAYER_FUNCTION_INSTANCE; instance_create_info.u.instanceInfo.instance_info = instance; instance_create_info.u.instanceInfo.pfnNextGetInstanceProcAddr = next_gipa; instance_create_info.pNext = local_create_info.pNext; local_create_info.pNext = &instance_create_info; // Force enable callback extension if required bool enable_callback = false; if (prctl(PR_GET_DUMPABLE, 0, 0, 0, 0)) { enable_callback = property_get_bool("debug.vulkan.enable_callback", false); if (enable_callback) { if (!AddExtensionToCreateInfo(local_create_info, "VK_EXT_debug_report", allocator)) { DestroyInstance(instance, allocator); return VK_ERROR_INITIALIZATION_FAILED; } } } bool allocatedLayerMem; if (!AddLayersToCreateInfo(local_create_info, instance, allocator, allocatedLayerMem)) { if (enable_callback) { FreeAllocatedExtensionCreateInfo(local_create_info, allocator); } DestroyInstance(instance, allocator); return VK_ERROR_INITIALIZATION_FAILED; } result = create_instance(&local_create_info, allocator, &local_instance); if (allocatedLayerMem) { FreeAllocatedLayerCreateInfo(local_create_info, allocator); } if (enable_callback) { FreeAllocatedExtensionCreateInfo(local_create_info, allocator); } if (result != VK_SUCCESS) { DestroyInstance(instance, allocator); return result; } const InstanceDispatchTable& instance_dispatch = GetDispatchTable(local_instance); if (!LoadInstanceDispatchTable( local_instance, next_gipa, const_cast(instance_dispatch))) { ALOGV("Failed to initialize instance dispatch 
table"); PFN_vkDestroyInstance destroy_instance = reinterpret_cast( next_gipa(local_instance, "vkDestroyInstance")); if (!destroy_instance) { ALOGD("Loader unable to find DestroyInstance"); return VK_ERROR_INITIALIZATION_FAILED; } destroy_instance(local_instance, allocator); DestroyInstance(instance, allocator); return VK_ERROR_INITIALIZATION_FAILED; } // Capture the physical devices from the top of the // chain in case it has been wrapped by a layer. uint32_t num_physical_devices = 0; result = instance_dispatch.EnumeratePhysicalDevices( local_instance, &num_physical_devices, nullptr); if (result != VK_SUCCESS) { DestroyInstance(instance, allocator); return VK_ERROR_INITIALIZATION_FAILED; } num_physical_devices = std::min(num_physical_devices, kMaxPhysicalDevices); result = instance_dispatch.EnumeratePhysicalDevices( local_instance, &num_physical_devices, instance->physical_devices_top); if (result != VK_SUCCESS) { DestroyInstance(instance, allocator); return VK_ERROR_INITIALIZATION_FAILED; } *instance_out = local_instance; if (enable_callback) { const VkDebugReportCallbackCreateInfoEXT callback_create_info = { .sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT, .flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT, .pfnCallback = LogDebugMessageCallback, }; PFN_vkCreateDebugReportCallbackEXT create_debug_report_callback = reinterpret_cast( GetInstanceProcAddr_Top(instance->handle, "vkCreateDebugReportCallbackEXT")); create_debug_report_callback(instance->handle, &callback_create_info, allocator, &instance->message); } return result; } PFN_vkVoidFunction GetInstanceProcAddr_Top(VkInstance vkinstance, const char* name) { // vkGetInstanceProcAddr(NULL_HANDLE, ..) only works for global commands if (!vkinstance) return GetLoaderGlobalProcAddr(name); const InstanceDispatchTable& dispatch = GetDispatchTable(vkinstance); PFN_vkVoidFunction pfn; // Always go through the loader-top function if there is one. 
if ((pfn = GetLoaderTopProcAddr(name))) return pfn; // Otherwise, look up the handler in the instance dispatch table if ((pfn = GetDispatchProcAddr(dispatch, name))) return pfn; // Anything not handled already must be a device-dispatched function // without a loader-top. We must return a function that will dispatch based // on the dispatchable object parameter -- which is exactly what the // exported functions do. So just return them here. return GetLoaderExportProcAddr(name); } void DestroyInstance_Top(VkInstance vkinstance, const VkAllocationCallbacks* allocator) { if (!vkinstance) return; if (!allocator) allocator = &kDefaultAllocCallbacks; GetDispatchTable(vkinstance).DestroyInstance(vkinstance, allocator); DestroyInstance(&(GetDispatchParent(vkinstance)), allocator); } VKAPI_ATTR VkResult EnumerateDeviceLayerProperties_Top(VkPhysicalDevice /*pdev*/, uint32_t* properties_count, VkLayerProperties* properties) { uint32_t layer_count = EnumerateDeviceLayers(properties ? *properties_count : 0, properties); if (!properties || *properties_count > layer_count) *properties_count = layer_count; return *properties_count < layer_count ? VK_INCOMPLETE : VK_SUCCESS; } VKAPI_ATTR VkResult CreateDevice_Top(VkPhysicalDevice gpu, const VkDeviceCreateInfo* create_info, const VkAllocationCallbacks* allocator, VkDevice* device_out) { Instance& instance = GetDispatchParent(gpu); VkResult result; // FIXME(jessehall): We don't have good conventions or infrastructure yet to // do better than just using the instance allocator and scope for // everything. See b/26732122. 
if (true /*!allocator*/) allocator = instance.alloc; void* mem = allocator->pfnAllocation(allocator->pUserData, sizeof(Device), alignof(Device), VK_SYSTEM_ALLOCATION_SCOPE_DEVICE); if (!mem) return VK_ERROR_OUT_OF_HOST_MEMORY; Device* device = new (mem) Device(&instance); result = ActivateAllLayers(create_info, &instance, device); if (result != VK_SUCCESS) { DestroyDevice(device); return result; } uint32_t activated_layers = 0; VkLayerDeviceCreateInfo chain_info; VkLayerDeviceLink* layer_device_link_info = nullptr; PFN_vkGetInstanceProcAddr next_gipa = GetInstanceProcAddr_Bottom; PFN_vkGetDeviceProcAddr next_gdpa = GetDeviceProcAddr_Bottom; VkDeviceCreateInfo local_create_info = *create_info; VkDevice local_device = nullptr; if (device->active_layers.size() > 0) { chain_info.u.pLayerInfo = nullptr; chain_info.pNext = local_create_info.pNext; chain_info.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO; chain_info.function = VK_LAYER_FUNCTION_LINK; local_create_info.pNext = &chain_info; layer_device_link_info = static_cast( alloca(sizeof(VkLayerDeviceLink) * device->active_layers.size())); if (!layer_device_link_info) { ALOGE("Failed to alloc Device objects for layers"); DestroyDevice(device); return VK_ERROR_OUT_OF_HOST_MEMORY; } /* Create device chain of enabled layers */ for (auto rit = device->active_layers.rbegin(); rit != device->active_layers.rend(); ++rit) { LayerRef& layer = *rit; layer_device_link_info[activated_layers].pNext = chain_info.u.pLayerInfo; layer_device_link_info[activated_layers].pfnNextGetDeviceProcAddr = next_gdpa; layer_device_link_info[activated_layers] .pfnNextGetInstanceProcAddr = next_gipa; chain_info.u.pLayerInfo = &layer_device_link_info[activated_layers]; next_gipa = layer.GetGetInstanceProcAddr(); next_gdpa = layer.GetGetDeviceProcAddr(); ALOGV("Insert device layer %s (v%u)", layer.GetName(), layer.GetSpecVersion()); activated_layers++; } } PFN_vkCreateDevice create_device = reinterpret_cast( next_gipa(instance.handle, 
"vkCreateDevice")); if (!create_device) { ALOGE("Unable to find vkCreateDevice for driver"); DestroyDevice(device); return VK_ERROR_INITIALIZATION_FAILED; } VkLayerDeviceCreateInfo device_create_info; device_create_info.sType = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO; device_create_info.function = VK_LAYER_FUNCTION_DEVICE; device_create_info.u.deviceInfo.device_info = device; device_create_info.u.deviceInfo.pfnNextGetInstanceProcAddr = next_gipa; device_create_info.pNext = local_create_info.pNext; local_create_info.pNext = &device_create_info; bool allocatedLayerMem; if (!AddLayersToCreateInfo(local_create_info, device, allocator, allocatedLayerMem)) { DestroyDevice(device); return VK_ERROR_INITIALIZATION_FAILED; } result = create_device(gpu, &local_create_info, allocator, &local_device); if (allocatedLayerMem) { FreeAllocatedLayerCreateInfo(local_create_info, allocator); } if (result != VK_SUCCESS) { DestroyDevice(device); return result; } // Set dispatch table for newly created Device hwvulkan_dispatch_t* vulkan_dispatch = reinterpret_cast(local_device); vulkan_dispatch->vtbl = &device->dispatch; const DeviceDispatchTable& device_dispatch = GetDispatchTable(local_device); if (!LoadDeviceDispatchTable( local_device, next_gdpa, const_cast(device_dispatch))) { ALOGV("Failed to initialize device dispatch table"); PFN_vkDestroyDevice destroy_device = reinterpret_cast( next_gipa(instance.handle, "vkDestroyDevice")); ALOG_ASSERT(destroy_device != nullptr, "Loader unable to find DestroyDevice"); destroy_device(local_device, allocator); return VK_ERROR_INITIALIZATION_FAILED; } *device_out = local_device; return VK_SUCCESS; } PFN_vkVoidFunction GetDeviceProcAddr_Top(VkDevice device, const char* name) { PFN_vkVoidFunction pfn; if (!device) return nullptr; if ((pfn = GetLoaderTopProcAddr(name))) return pfn; return GetDispatchProcAddr(GetDispatchTable(device), name); } void GetDeviceQueue_Top(VkDevice vkdevice, uint32_t family, uint32_t index, VkQueue* queue_out) { const 
auto& table = GetDispatchTable(vkdevice); table.GetDeviceQueue(vkdevice, family, index, queue_out); hwvulkan_dispatch_t* queue_dispatch = reinterpret_cast(*queue_out); if (queue_dispatch->magic != HWVULKAN_DISPATCH_MAGIC && queue_dispatch->vtbl != &table) ALOGE("invalid VkQueue dispatch magic: 0x%" PRIxPTR, queue_dispatch->magic); queue_dispatch->vtbl = &table; } VkResult AllocateCommandBuffers_Top( VkDevice vkdevice, const VkCommandBufferAllocateInfo* alloc_info, VkCommandBuffer* cmdbufs) { const auto& table = GetDispatchTable(vkdevice); VkResult result = table.AllocateCommandBuffers(vkdevice, alloc_info, cmdbufs); if (result != VK_SUCCESS) return result; for (uint32_t i = 0; i < alloc_info->commandBufferCount; i++) { hwvulkan_dispatch_t* cmdbuf_dispatch = reinterpret_cast(cmdbufs[i]); ALOGE_IF(cmdbuf_dispatch->magic != HWVULKAN_DISPATCH_MAGIC, "invalid VkCommandBuffer dispatch magic: 0x%" PRIxPTR, cmdbuf_dispatch->magic); cmdbuf_dispatch->vtbl = &table; } return VK_SUCCESS; } void DestroyDevice_Top(VkDevice vkdevice, const VkAllocationCallbacks* /*allocator*/) { if (!vkdevice) return; Device& device = GetDispatchParent(vkdevice); device.dispatch.DestroyDevice(vkdevice, device.instance->alloc); DestroyDevice(&device); } // ----------------------------------------------------------------------------- const VkAllocationCallbacks* GetAllocator(VkInstance vkinstance) { return GetDispatchParent(vkinstance).alloc; } const VkAllocationCallbacks* GetAllocator(VkDevice vkdevice) { return GetDispatchParent(vkdevice).instance->alloc; } VkInstance GetDriverInstance(VkInstance instance) { return GetDispatchParent(instance).drv.instance; } const DriverDispatchTable& GetDriverDispatch(VkInstance instance) { return GetDispatchParent(instance).drv.dispatch; } const DriverDispatchTable& GetDriverDispatch(VkDevice device) { return GetDispatchParent(device).instance->drv.dispatch; } const DriverDispatchTable& GetDriverDispatch(VkQueue queue) { return 
GetDispatchParent(queue).instance->drv.dispatch; } DebugReportCallbackList& GetDebugReportCallbacks(VkInstance instance) { return GetDispatchParent(instance).debug_report_callbacks; } } // namespace vulkan