From 93ae2337732bf206e6ef4faecc6b30c3881e8359 Mon Sep 17 00:00:00 2001 From: Greg Daniel Date: Fri, 22 Jun 2018 16:50:21 -0400 Subject: Make GrVkBackendContext no longer derive from SkRefCnt. Also moves the helper Create functions to VkTestUtils since no clients are using them anymore. Bug: skia: Change-Id: I7e8e4912e7ef6fb00a7e2a00407aed5e83211799 Reviewed-on: https://skia-review.googlesource.com/135323 Reviewed-by: Jim Van Verth Reviewed-by: Brian Salomon Commit-Queue: Greg Daniel --- gn/gpu.gni | 1 - include/gpu/GrContext.h | 4 - include/gpu/vk/GrVkBackendContext.h | 55 +----- src/gpu/GrDirectContext.cpp | 38 +--- src/gpu/vk/GrVkBackendContext.cpp | 338 ----------------------------------- src/gpu/vk/GrVkGpu.cpp | 50 +++--- src/gpu/vk/GrVkGpu.h | 39 ++-- tools/gpu/vk/VkTestContext.cpp | 30 ++-- tools/gpu/vk/VkTestContext.h | 10 +- tools/gpu/vk/VkTestUtils.cpp | 320 +++++++++++++++++++++++++++++++++ tools/gpu/vk/VkTestUtils.h | 12 ++ tools/sk_app/VulkanWindowContext.cpp | 196 +++++++++++--------- tools/sk_app/VulkanWindowContext.h | 19 +- 13 files changed, 538 insertions(+), 574 deletions(-) delete mode 100644 src/gpu/vk/GrVkBackendContext.cpp diff --git a/gn/gpu.gni b/gn/gpu.gni index 7e5dd625da..aac56400d4 100644 --- a/gn/gpu.gni +++ b/gn/gpu.gni @@ -543,7 +543,6 @@ skia_vk_sources = [ "$_include/private/GrVkTypesPriv.h", "$_src/gpu/vk/GrVkAMDMemoryAllocator.cpp", "$_src/gpu/vk/GrVkAMDMemoryAllocator.h", - "$_src/gpu/vk/GrVkBackendContext.cpp", "$_src/gpu/vk/GrVkBuffer.cpp", "$_src/gpu/vk/GrVkBuffer.h", "$_src/gpu/vk/GrVkBufferView.cpp", diff --git a/include/gpu/GrContext.h b/include/gpu/GrContext.h index 517e23a627..73bf76a5e3 100644 --- a/include/gpu/GrContext.h +++ b/include/gpu/GrContext.h @@ -71,10 +71,6 @@ public: #ifdef SK_VULKAN static sk_sp MakeVulkan(const GrVkBackendContext&, const GrContextOptions&); static sk_sp MakeVulkan(const GrVkBackendContext&); - // These calls that take an sk_sp GrVkBackendContext are deprecated. Use the previous calls and - // set fOwnsInstanceAndDevice to false on the GrVkBackendContext. - static sk_sp MakeVulkan(sk_sp, const GrContextOptions&); - static sk_sp MakeVulkan(sk_sp); #endif #ifdef SK_METAL diff --git a/include/gpu/vk/GrVkBackendContext.h b/include/gpu/vk/GrVkBackendContext.h index e68e27988a..fe018e494f 100644 --- a/include/gpu/vk/GrVkBackendContext.h +++ b/include/gpu/vk/GrVkBackendContext.h @@ -34,9 +34,11 @@ enum GrVkFeatureFlags { // is that the client will set these up and pass them to the GrVkGpu constructor. The VkDevice // created must support at least one graphics queue, which is passed in as well. // The QueueFamilyIndex must match the family of the given queue. It is needed for CommandPool -// creation, and any GrBackendObjects handed to us (e.g., for wrapped textures) need to be created -// in or transitioned to that family. -struct SK_API GrVkBackendContext : public SkRefCnt { +// creation, and any GrBackendObjects handed to us (e.g., for wrapped textures) needs to be created +// in or transitioned to that family. The refs held by members of this struct must be released +// (either by deleting the struct or manually releasing the refs) before the underlying vulkan +// device and instance are destroyed. +struct SK_API GrVkBackendContext { VkInstance fInstance; VkPhysicalDevice fPhysicalDevice; VkDevice fDevice; @@ -48,50 +50,9 @@ struct SK_API GrVkBackendContext : public SkRefCnt { sk_sp fInterface; sk_sp fMemoryAllocator; - /** - * Controls whether this object destroys the instance and device upon destruction. 
The default - * is temporarily 'true' to avoid breaking existing clients but will be changed to 'false'. - */ - bool fOwnsInstanceAndDevice = true; - -#if GR_TEST_UTILS || defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) - using CanPresentFn = std::function; - - /** - * Helper function to create the Vulkan objects needed for a Vulkan-backed GrContext. - * Note that the version that uses the unified "GetProc" instead of separate "GetInstanceProc" - * and "GetDeviceProc" functions will be removed. - * - * If presentQueueIndex is non-NULL, will try to set up presentQueue as part of device - * creation using the platform-specific canPresent() function. - * - * This will set fOwnsInstanceAndDevice to 'true'. If it is subsequently set to 'false' then - * the client owns the lifetime of the created VkDevice and VkInstance. - */ - static const GrVkBackendContext* Create(uint32_t* presentQueueIndex = nullptr, - CanPresentFn = CanPresentFn(), - GrVkInterface::GetProc getProc = nullptr); - - static const GrVkBackendContext* Create(const GrVkInterface::GetInstanceProc& getInstanceProc, - const GrVkInterface::GetDeviceProc& getDeviceProc, - uint32_t* presentQueueIndex = nullptr, - CanPresentFn canPresent = CanPresentFn()) { - if (!getInstanceProc || !getDeviceProc) { - return nullptr; - } - auto getProc = [&getInstanceProc, &getDeviceProc](const char* proc_name, - VkInstance instance, VkDevice device) { - if (device != VK_NULL_HANDLE) { - return getDeviceProc(device, proc_name); - } - return getInstanceProc(instance, proc_name); - }; - return Create(presentQueueIndex, canPresent, getProc); - } -#endif // GR_TEST_UTILS || defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) - - ~GrVkBackendContext() override; + // This is deprecated and should be set to false. The client is responsible for managing the + // lifetime of the VkInstance and VkDevice objects. 
+ bool fOwnsInstanceAndDevice = false; }; #endif diff --git a/src/gpu/GrDirectContext.cpp b/src/gpu/GrDirectContext.cpp index d0406dd6f6..7a054e72a4 100644 --- a/src/gpu/GrDirectContext.cpp +++ b/src/gpu/GrDirectContext.cpp @@ -156,43 +156,7 @@ sk_sp GrContext::MakeVulkan(const GrVkBackendContext& backendContext, const GrContextOptions& options) { sk_sp context(new GrDirectContext(kVulkan_GrBackend)); - sk_sp backendContextRef(new GrVkBackendContext()); - backendContextRef->fInstance = backendContext.fInstance; - backendContextRef->fPhysicalDevice = backendContext.fPhysicalDevice; - backendContextRef->fDevice = backendContext.fDevice; - backendContextRef->fQueue = backendContext.fQueue; - backendContextRef->fGraphicsQueueIndex = backendContext.fGraphicsQueueIndex; - backendContextRef->fMinAPIVersion = backendContext.fMinAPIVersion; - backendContextRef->fExtensions = backendContext.fExtensions; - backendContextRef->fFeatures = backendContext.fFeatures; - backendContextRef->fInterface = backendContext.fInterface; - backendContextRef->fMemoryAllocator = backendContext.fMemoryAllocator; - - SkASSERT(!backendContext.fOwnsInstanceAndDevice); - backendContextRef->fOwnsInstanceAndDevice = false; - - context->fGpu = GrVkGpu::Make(std::move(backendContextRef), options, context.get()); - if (!context->fGpu) { - return nullptr; - } - - context->fCaps = context->fGpu->refCaps(); - if (!context->init(options)) { - return nullptr; - } - return context; -} - -sk_sp GrContext::MakeVulkan(sk_sp backendContext) { - GrContextOptions defaultOptions; - return MakeVulkan(std::move(backendContext), defaultOptions); -} - -sk_sp GrContext::MakeVulkan(sk_sp backendContext, - const GrContextOptions& options) { - sk_sp context(new GrDirectContext(kVulkan_GrBackend)); - - context->fGpu = GrVkGpu::Make(std::move(backendContext), options, context.get()); + context->fGpu = GrVkGpu::Make(backendContext, options, context.get()); if (!context->fGpu) { return nullptr; } diff --git a/src/gpu/vk/GrVkBackendContext.cpp b/src/gpu/vk/GrVkBackendContext.cpp deleted file mode 100644 index 3c4e8d7a60..0000000000 --- a/src/gpu/vk/GrVkBackendContext.cpp +++ /dev/null @@ -1,338 +0,0 @@ -/* - * Copyright 2015 Google Inc. - * - * Use of this source code is governed by a BSD-style license that can be - * found in the LICENSE file. 
- */ - -#include "SkAutoMalloc.h" -#include "vk/GrVkBackendContext.h" -#include "vk/GrVkExtensions.h" -#include "vk/GrVkUtil.h" - -#if GR_TEST_UTILS || defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) - -//////////////////////////////////////////////////////////////////////////////// -// Helper code to set up Vulkan context objects - -#ifdef SK_ENABLE_VK_LAYERS -const char* kDebugLayerNames[] = { - // elements of VK_LAYER_LUNARG_standard_validation - "VK_LAYER_GOOGLE_threading", - "VK_LAYER_LUNARG_parameter_validation", - "VK_LAYER_LUNARG_object_tracker", - "VK_LAYER_LUNARG_image", - "VK_LAYER_LUNARG_core_validation", - "VK_LAYER_LUNARG_swapchain", - "VK_LAYER_GOOGLE_unique_objects", - // not included in standard_validation - //"VK_LAYER_LUNARG_api_dump", - //"VK_LAYER_LUNARG_vktrace", - //"VK_LAYER_LUNARG_screenshot", -}; -#endif - -// the minimum version of Vulkan supported -#ifdef SK_BUILD_FOR_ANDROID -const uint32_t kGrVkMinimumVersion = VK_MAKE_VERSION(1, 0, 3); -#else -const uint32_t kGrVkMinimumVersion = VK_MAKE_VERSION(1, 0, 8); -#endif - -#define ACQUIRE_VK_PROC(name, instance, device) \ - PFN_vk##name grVk##name = \ - reinterpret_cast(getProc("vk" #name, instance, device)); \ - if (grVk##name == nullptr) { \ - SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \ - return nullptr; \ - } - -// Create the base Vulkan objects needed by the GrVkGpu object -const GrVkBackendContext* GrVkBackendContext::Create(uint32_t* presentQueueIndexPtr, - CanPresentFn canPresent, - GrVkInterface::GetProc getProc) { - if (!getProc) { - return nullptr; - } - SkASSERT(getProc); - - VkPhysicalDevice physDev; - VkDevice device; - VkInstance inst; - VkResult err; - - const VkApplicationInfo app_info = { - VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType - nullptr, // pNext - "vktest", // pApplicationName - 0, // applicationVersion - "vktest", // pEngineName - 0, // engineVerison - kGrVkMinimumVersion, // apiVersion - }; - - GrVkExtensions extensions(getProc); - extensions.initInstance(kGrVkMinimumVersion); - - SkTArray instanceLayerNames; - SkTArray instanceExtensionNames; - uint32_t extensionFlags = 0; -#ifdef SK_ENABLE_VK_LAYERS - for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) { - if (extensions.hasInstanceLayer(kDebugLayerNames[i])) { - instanceLayerNames.push_back(kDebugLayerNames[i]); - } - } - if (extensions.hasInstanceExtension(VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) { - instanceExtensionNames.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME); - extensionFlags |= kEXT_debug_report_GrVkExtensionFlag; - } -#endif - - if (extensions.hasInstanceExtension(VK_KHR_SURFACE_EXTENSION_NAME)) { - instanceExtensionNames.push_back(VK_KHR_SURFACE_EXTENSION_NAME); - extensionFlags |= kKHR_surface_GrVkExtensionFlag; - } - if (extensions.hasInstanceExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME)) { - instanceExtensionNames.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); - extensionFlags |= kKHR_swapchain_GrVkExtensionFlag; - } -#ifdef SK_BUILD_FOR_WIN - if (extensions.hasInstanceExtension(VK_KHR_WIN32_SURFACE_EXTENSION_NAME)) { - instanceExtensionNames.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME); - extensionFlags |= kKHR_win32_surface_GrVkExtensionFlag; - } -#elif defined(SK_BUILD_FOR_ANDROID) - if (extensions.hasInstanceExtension(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) { - instanceExtensionNames.push_back(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME); - extensionFlags |= kKHR_android_surface_GrVkExtensionFlag; - } -#elif defined(SK_BUILD_FOR_UNIX) && !defined(__Fuchsia__) - if 
(extensions.hasInstanceExtension(VK_KHR_XCB_SURFACE_EXTENSION_NAME)) { - instanceExtensionNames.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME); - extensionFlags |= kKHR_xcb_surface_GrVkExtensionFlag; - } -#endif - - const VkInstanceCreateInfo instance_create = { - VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType - nullptr, // pNext - 0, // flags - &app_info, // pApplicationInfo - (uint32_t) instanceLayerNames.count(), // enabledLayerNameCount - instanceLayerNames.begin(), // ppEnabledLayerNames - (uint32_t) instanceExtensionNames.count(), // enabledExtensionNameCount - instanceExtensionNames.begin(), // ppEnabledExtensionNames - }; - - ACQUIRE_VK_PROC(CreateInstance, VK_NULL_HANDLE, VK_NULL_HANDLE); - err = grVkCreateInstance(&instance_create, nullptr, &inst); - if (err < 0) { - SkDebugf("vkCreateInstance failed: %d\n", err); - return nullptr; - } - - ACQUIRE_VK_PROC(DestroyInstance, inst, VK_NULL_HANDLE); - ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE); - ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE); - ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE); - ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE); - ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE); - ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE); - ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE); - - uint32_t gpuCount; - err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr); - if (err) { - SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err); - grVkDestroyInstance(inst, nullptr); - return nullptr; - } - if (!gpuCount) { - SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n"); - grVkDestroyInstance(inst, nullptr); - return nullptr; - } - // Just returning the first physical device instead of getting the whole array. - // TODO: find best match for our needs - gpuCount = 1; - err = grVkEnumeratePhysicalDevices(inst, &gpuCount, &physDev); - // VK_INCOMPLETE is returned when the count we provide is less than the total device count. 
- if (err && VK_INCOMPLETE != err) { - SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err); - grVkDestroyInstance(inst, nullptr); - return nullptr; - } - - // query to get the initial queue props size - uint32_t queueCount; - grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr); - if (!queueCount) { - SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n"); - grVkDestroyInstance(inst, nullptr); - return nullptr; - } - - SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties)); - // now get the actual queue props - VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get(); - - grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps); - - // iterate to find the graphics queue - uint32_t graphicsQueueIndex = queueCount; - for (uint32_t i = 0; i < queueCount; i++) { - if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) { - graphicsQueueIndex = i; - break; - } - } - if (graphicsQueueIndex == queueCount) { - SkDebugf("Could not find any supported graphics queues.\n"); - grVkDestroyInstance(inst, nullptr); - return nullptr; - } - - // iterate to find the present queue, if needed - uint32_t presentQueueIndex = queueCount; - if (presentQueueIndexPtr && canPresent) { - for (uint32_t i = 0; i < queueCount; i++) { - if (canPresent(inst, physDev, i)) { - presentQueueIndex = i; - break; - } - } - if (presentQueueIndex == queueCount) { - SkDebugf("Could not find any supported present queues.\n"); - grVkDestroyInstance(inst, nullptr); - return nullptr; - } - *presentQueueIndexPtr = presentQueueIndex; - } else { - // Just setting this so we end up make a single queue for graphics since there was no - // request for a present queue. - presentQueueIndex = graphicsQueueIndex; - } - - extensions.initDevice(kGrVkMinimumVersion, inst, physDev); - - SkTArray deviceLayerNames; - SkTArray deviceExtensionNames; -#ifdef SK_ENABLE_VK_LAYERS - for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) { - if (extensions.hasDeviceLayer(kDebugLayerNames[i])) { - deviceLayerNames.push_back(kDebugLayerNames[i]); - } - } -#endif - if (extensions.hasDeviceExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME)) { - deviceExtensionNames.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); - extensionFlags |= kKHR_swapchain_GrVkExtensionFlag; - } - if (extensions.hasDeviceExtension("VK_NV_glsl_shader")) { - deviceExtensionNames.push_back("VK_NV_glsl_shader"); - extensionFlags |= kNV_glsl_shader_GrVkExtensionFlag; - } - - // query to get the physical device properties - VkPhysicalDeviceFeatures deviceFeatures; - grVkGetPhysicalDeviceFeatures(physDev, &deviceFeatures); - // this looks like it would slow things down, - // and we can't depend on it on all platforms - deviceFeatures.robustBufferAccess = VK_FALSE; - - uint32_t featureFlags = 0; - if (deviceFeatures.geometryShader) { - featureFlags |= kGeometryShader_GrVkFeatureFlag; - } - if (deviceFeatures.dualSrcBlend) { - featureFlags |= kDualSrcBlend_GrVkFeatureFlag; - } - if (deviceFeatures.sampleRateShading) { - featureFlags |= kSampleRateShading_GrVkFeatureFlag; - } - - float queuePriorities[1] = { 0.0 }; - // Here we assume no need for swapchain queue - // If one is needed, the client will need its own setup code - const VkDeviceQueueCreateInfo queueInfo[2] = { - { - VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType - nullptr, // pNext - 0, // VkDeviceQueueCreateFlags - graphicsQueueIndex, // queueFamilyIndex - 1, // queueCount - queuePriorities, // pQueuePriorities - }, - { - 
VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType - nullptr, // pNext - 0, // VkDeviceQueueCreateFlags - presentQueueIndex, // queueFamilyIndex - 1, // queueCount - queuePriorities, // pQueuePriorities - } - }; - uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1; - - const VkDeviceCreateInfo deviceInfo = { - VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType - nullptr, // pNext - 0, // VkDeviceCreateFlags - queueInfoCount, // queueCreateInfoCount - queueInfo, // pQueueCreateInfos - (uint32_t) deviceLayerNames.count(), // layerCount - deviceLayerNames.begin(), // ppEnabledLayerNames - (uint32_t) deviceExtensionNames.count(), // extensionCount - deviceExtensionNames.begin(), // ppEnabledExtensionNames - &deviceFeatures // ppEnabledFeatures - }; - - err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device); - if (err) { - SkDebugf("CreateDevice failed: %d\n", err); - grVkDestroyInstance(inst, nullptr); - return nullptr; - } - - auto interface = - sk_make_sp(getProc, inst, device, extensionFlags); - if (!interface->validate(extensionFlags)) { - SkDebugf("Vulkan interface validation failed\n"); - grVkDeviceWaitIdle(device); - grVkDestroyDevice(device, nullptr); - grVkDestroyInstance(inst, nullptr); - return nullptr; - } - - VkQueue queue; - grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue); - - GrVkBackendContext* ctx = new GrVkBackendContext(); - ctx->fInstance = inst; - ctx->fPhysicalDevice = physDev; - ctx->fDevice = device; - ctx->fQueue = queue; - ctx->fGraphicsQueueIndex = graphicsQueueIndex; - ctx->fMinAPIVersion = kGrVkMinimumVersion; - ctx->fExtensions = extensionFlags; - ctx->fFeatures = featureFlags; - ctx->fInterface.reset(interface.release()); - ctx->fOwnsInstanceAndDevice = true; - - return ctx; -} -#endif // GR_TEST_UTILS || defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) - -GrVkBackendContext::~GrVkBackendContext() { - fMemoryAllocator.reset(); - if (fInterface == nullptr || !fOwnsInstanceAndDevice) { - return; - } - - fInterface->fFunctions.fDeviceWaitIdle(fDevice); - fInterface->fFunctions.fDestroyDevice(fDevice, nullptr); - fDevice = VK_NULL_HANDLE; - fInterface->fFunctions.fDestroyInstance(fInstance, nullptr); - fInstance = VK_NULL_HANDLE; -} diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp index ba1f7caf2d..0b160a6ade 100644 --- a/src/gpu/vk/GrVkGpu.cpp +++ b/src/gpu/vk/GrVkGpu.cpp @@ -73,33 +73,38 @@ VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback( } #endif -sk_sp GrVkGpu::Make(sk_sp backendContext, +sk_sp GrVkGpu::Make(const GrVkBackendContext& backendContext, const GrContextOptions& options, GrContext* context) { - if (!backendContext) { + if (backendContext.fInstance == VK_NULL_HANDLE || + backendContext.fPhysicalDevice == VK_NULL_HANDLE || + backendContext.fDevice == VK_NULL_HANDLE || + backendContext.fQueue == VK_NULL_HANDLE) { return nullptr; } - - if (!backendContext->fInterface->validate(backendContext->fExtensions)) { + if (!backendContext.fInterface || + !backendContext.fInterface->validate(backendContext.fExtensions)) { return nullptr; } - return sk_sp(new GrVkGpu(context, options, std::move(backendContext))); + return sk_sp(new GrVkGpu(context, options, backendContext)); } //////////////////////////////////////////////////////////////////////////////// GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options, - sk_sp backendCtx) + const GrVkBackendContext& backendContext) : INHERITED(context) - , fBackendContext(std::move(backendCtx)) - , fMemoryAllocator(fBackendContext->fMemoryAllocator) - , 
fDevice(fBackendContext->fDevice) - , fQueue(fBackendContext->fQueue) + , fInterface(std::move(backendContext.fInterface)) + , fMemoryAllocator(backendContext.fMemoryAllocator) + , fInstance(backendContext.fInstance) + , fDevice(backendContext.fDevice) + , fQueue(backendContext.fQueue) , fResourceProvider(this) , fDisconnected(false) { + SkASSERT(!backendContext.fOwnsInstanceAndDevice); #ifdef SK_ENABLE_VK_LAYERS fCallback = VK_NULL_HANDLE; - if (fBackendContext->fExtensions & kEXT_debug_report_GrVkExtensionFlag) { + if (backendContext.fExtensions & kEXT_debug_report_GrVkExtensionFlag) { // Setup callback creation information VkDebugReportCallbackCreateInfoEXT callbackCreateInfo; callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT; @@ -114,32 +119,32 @@ GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options, // Register the callback GR_VK_CALL_ERRCHECK(this->vkInterface(), - CreateDebugReportCallbackEXT(fBackendContext->fInstance, + CreateDebugReportCallbackEXT(backendContext.fInstance, &callbackCreateInfo, nullptr, &fCallback)); } #endif if (!fMemoryAllocator) { // We were not given a memory allocator at creation - fMemoryAllocator.reset(new GrVkAMDMemoryAllocator(fBackendContext->fPhysicalDevice, - fDevice, fBackendContext->fInterface)); + fMemoryAllocator.reset(new GrVkAMDMemoryAllocator(backendContext.fPhysicalDevice, + fDevice, backendContext.fInterface)); } fCompiler = new SkSL::Compiler(); - fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), fBackendContext->fPhysicalDevice, - fBackendContext->fFeatures, fBackendContext->fExtensions)); + fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendContext.fPhysicalDevice, + backendContext.fFeatures, backendContext.fExtensions)); fCaps.reset(SkRef(fVkCaps.get())); - VK_CALL(GetPhysicalDeviceProperties(fBackendContext->fPhysicalDevice, &fPhysDevProps)); - VK_CALL(GetPhysicalDeviceMemoryProperties(fBackendContext->fPhysicalDevice, &fPhysDevMemProps)); + VK_CALL(GetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &fPhysDevProps)); + VK_CALL(GetPhysicalDeviceMemoryProperties(backendContext.fPhysicalDevice, &fPhysDevMemProps)); const VkCommandPoolCreateInfo cmdPoolInfo = { VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType nullptr, // pNext VK_COMMAND_POOL_CREATE_TRANSIENT_BIT | VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, // CmdPoolCreateFlags - fBackendContext->fGraphicsQueueIndex, // queueFamilyIndex + backendContext.fGraphicsQueueIndex, // queueFamilyIndex }; GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateCommandPool(fDevice, &cmdPoolInfo, nullptr, &fCmdPool)); @@ -201,10 +206,15 @@ void GrVkGpu::destroyResources() { #ifdef SK_ENABLE_VK_LAYERS if (fCallback) { - VK_CALL(DestroyDebugReportCallbackEXT(fBackendContext->fInstance, fCallback, nullptr)); + VK_CALL(DestroyDebugReportCallbackEXT(fInstance, fCallback, nullptr)); } #endif + fMemoryAllocator.reset(); + + fQueue = VK_NULL_HANDLE; + fDevice = VK_NULL_HANDLE; + fInstance = VK_NULL_HANDLE; } GrVkGpu::~GrVkGpu() { diff --git a/src/gpu/vk/GrVkGpu.h b/src/gpu/vk/GrVkGpu.h index 052476098d..f506d28b0e 100644 --- a/src/gpu/vk/GrVkGpu.h +++ b/src/gpu/vk/GrVkGpu.h @@ -38,13 +38,13 @@ namespace SkSL { class GrVkGpu : public GrGpu { public: - static sk_sp Make(sk_sp, const GrContextOptions&, GrContext*); + static sk_sp Make(const GrVkBackendContext&, const GrContextOptions&, GrContext*); ~GrVkGpu() override; void disconnect(DisconnectType) override; - const GrVkInterface* vkInterface() const { return 
fBackendContext->fInterface.get(); } + const GrVkInterface* vkInterface() const { return fInterface.get(); } const GrVkCaps& vkCaps() const { return *fVkCaps; } GrVkMemoryAllocator* memoryAllocator() const { return fMemoryAllocator.get(); } @@ -144,7 +144,7 @@ public: bool updateBuffer(GrVkBuffer* buffer, const void* src, VkDeviceSize offset, VkDeviceSize size); private: - GrVkGpu(GrContext*, const GrContextOptions&, sk_sp backendContext); + GrVkGpu(GrContext*, const GrContextOptions&, const GrVkBackendContext& backendContext); void onResetContext(uint32_t resetBits) override {} @@ -222,28 +222,27 @@ private: GrVkImageInfo* info); #endif - sk_sp fBackendContext; - sk_sp fMemoryAllocator; - sk_sp fVkCaps; + sk_sp fInterface; + sk_sp fMemoryAllocator; + sk_sp fVkCaps; - // These Vulkan objects are provided by the client, and also stored in fBackendContext. - // They're copied here for convenient access. - VkDevice fDevice; - VkQueue fQueue; // Must be Graphics queue + VkInstance fInstance; + VkDevice fDevice; + VkQueue fQueue; // Must be Graphics queue // Created by GrVkGpu - GrVkResourceProvider fResourceProvider; - VkCommandPool fCmdPool; + GrVkResourceProvider fResourceProvider; + VkCommandPool fCmdPool; - GrVkPrimaryCommandBuffer* fCurrentCmdBuffer; + GrVkPrimaryCommandBuffer* fCurrentCmdBuffer; - SkSTArray<1, GrVkSemaphore::Resource*> fSemaphoresToWaitOn; - SkSTArray<1, GrVkSemaphore::Resource*> fSemaphoresToSignal; + SkSTArray<1, GrVkSemaphore::Resource*> fSemaphoresToWaitOn; + SkSTArray<1, GrVkSemaphore::Resource*> fSemaphoresToSignal; - VkPhysicalDeviceProperties fPhysDevProps; - VkPhysicalDeviceMemoryProperties fPhysDevMemProps; + VkPhysicalDeviceProperties fPhysDevProps; + VkPhysicalDeviceMemoryProperties fPhysDevMemProps; - GrVkCopyManager fCopyManager; + GrVkCopyManager fCopyManager; #ifdef SK_ENABLE_VK_LAYERS // For reporting validation layer errors @@ -252,11 +251,11 @@ private: // compiler used for compiling sksl into spirv. We only want to create the compiler once since // there is significant overhead to the first compile of any compiler. - SkSL::Compiler* fCompiler; + SkSL::Compiler* fCompiler; // We need a bool to track whether or not we've already disconnected all the gpu resources from // vulkan context. - bool fDisconnected; + bool fDisconnected; typedef GrGpu INHERITED; }; diff --git a/tools/gpu/vk/VkTestContext.cpp b/tools/gpu/vk/VkTestContext.cpp index 25069fe521..592fb0f7b0 100644 --- a/tools/gpu/vk/VkTestContext.cpp +++ b/tools/gpu/vk/VkTestContext.cpp @@ -110,21 +110,24 @@ GR_STATIC_ASSERT(sizeof(VkFence) <= sizeof(sk_gpu_test::PlatformFence)); class VkTestContextImpl : public sk_gpu_test::VkTestContext { public: static VkTestContext* Create(VkTestContext* sharedContext) { - sk_sp backendContext; + GrVkBackendContext backendContext; + bool ownsContext = true; if (sharedContext) { backendContext = sharedContext->getVkBackendContext(); + // We always delete the parent context last so make sure the child does not think they + // own the vulkan context. 
+ ownsContext = false; } else { PFN_vkGetInstanceProcAddr instProc; PFN_vkGetDeviceProcAddr devProc; if (!sk_gpu_test::LoadVkLibraryAndGetProcAddrFuncs(&instProc, &devProc)) { return nullptr; } - backendContext.reset(GrVkBackendContext::Create(instProc, devProc)); - } - if (!backendContext) { - return nullptr; + if (!sk_gpu_test::CreateVkBackendContext(instProc, devProc, &backendContext)) { + return nullptr; + } } - return new VkTestContextImpl(std::move(backendContext)); + return new VkTestContextImpl(backendContext, ownsContext); } ~VkTestContextImpl() override { this->teardown(); } @@ -143,14 +146,19 @@ public: protected: void teardown() override { INHERITED::teardown(); - fVk.reset(nullptr); + fVk.fMemoryAllocator.reset(); + if (fOwnsContext) { + GR_VK_CALL(this->vk(), DeviceWaitIdle(fVk.fDevice)); + GR_VK_CALL(this->vk(), DestroyDevice(fVk.fDevice, nullptr)); + GR_VK_CALL(this->vk(), DestroyInstance(fVk.fInstance, nullptr)); + } } private: - VkTestContextImpl(sk_sp backendContext) - : VkTestContext(std::move(backendContext)) { - fFenceSync.reset(new VkFenceSync(fVk->fInterface, fVk->fDevice, fVk->fQueue, - fVk->fGraphicsQueueIndex)); + VkTestContextImpl(const GrVkBackendContext& backendContext, bool ownsContext) + : VkTestContext(backendContext, ownsContext) { + fFenceSync.reset(new VkFenceSync(fVk.fInterface, fVk.fDevice, fVk.fQueue, + fVk.fGraphicsQueueIndex)); } void onPlatformMakeCurrent() const override {} diff --git a/tools/gpu/vk/VkTestContext.h b/tools/gpu/vk/VkTestContext.h index 9fd4170d9d..0e62cc0b8c 100644 --- a/tools/gpu/vk/VkTestContext.h +++ b/tools/gpu/vk/VkTestContext.h @@ -19,16 +19,18 @@ class VkTestContext : public TestContext { public: virtual GrBackend backend() override { return kVulkan_GrBackend; } - sk_sp getVkBackendContext() { + const GrVkBackendContext& getVkBackendContext() { return fVk; } - const GrVkInterface* vk() const { return fVk->fInterface.get(); } + const GrVkInterface* vk() const { return fVk.fInterface.get(); } protected: - VkTestContext(sk_sp vk) : fVk(std::move(vk)) {} + VkTestContext(const GrVkBackendContext& vk, bool ownsContext) + : fVk(vk), fOwnsContext(ownsContext) {} - sk_sp fVk; + GrVkBackendContext fVk; + bool fOwnsContext; private: typedef TestContext INHERITED; diff --git a/tools/gpu/vk/VkTestUtils.cpp b/tools/gpu/vk/VkTestUtils.cpp index c28a42b2c9..927f4fbe82 100644 --- a/tools/gpu/vk/VkTestUtils.cpp +++ b/tools/gpu/vk/VkTestUtils.cpp @@ -9,6 +9,9 @@ #ifdef SK_VULKAN +#include "SkAutoMalloc.h" +#include "vk/GrVkBackendContext.h" +#include "vk/GrVkExtensions.h" #include "../ports/SkOSLibrary.h" namespace sk_gpu_test { @@ -46,6 +49,323 @@ bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr* instProc, return true; #endif } + +//////////////////////////////////////////////////////////////////////////////// +// Helper code to set up Vulkan context objects + +#ifdef SK_ENABLE_VK_LAYERS +const char* kDebugLayerNames[] = { + // elements of VK_LAYER_LUNARG_standard_validation + "VK_LAYER_GOOGLE_threading", + "VK_LAYER_LUNARG_parameter_validation", + "VK_LAYER_LUNARG_object_tracker", + "VK_LAYER_LUNARG_image", + "VK_LAYER_LUNARG_core_validation", + "VK_LAYER_LUNARG_swapchain", + "VK_LAYER_GOOGLE_unique_objects", + // not included in standard_validation + //"VK_LAYER_LUNARG_api_dump", + //"VK_LAYER_LUNARG_vktrace", + //"VK_LAYER_LUNARG_screenshot", +}; +#endif + +// the minimum version of Vulkan supported +#ifdef SK_BUILD_FOR_ANDROID +const uint32_t kGrVkMinimumVersion = VK_MAKE_VERSION(1, 0, 3); +#else +const uint32_t 
kGrVkMinimumVersion = VK_MAKE_VERSION(1, 0, 8); +#endif + +#define ACQUIRE_VK_PROC(name, instance, device) \ + PFN_vk##name grVk##name = \ + reinterpret_cast(getProc("vk" #name, instance, device)); \ + if (grVk##name == nullptr) { \ + SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \ + return false; \ + } + +bool CreateVkBackendContext(const GrVkInterface::GetInstanceProc& getInstanceProc, + const GrVkInterface::GetDeviceProc& getDeviceProc, + GrVkBackendContext* ctx, + uint32_t* presentQueueIndexPtr, + CanPresentFn canPresent) { + auto getProc = [&getInstanceProc, &getDeviceProc](const char* proc_name, + VkInstance instance, VkDevice device) { + if (device != VK_NULL_HANDLE) { + return getDeviceProc(device, proc_name); + } + return getInstanceProc(instance, proc_name); + }; + + VkPhysicalDevice physDev; + VkDevice device; + VkInstance inst; + VkResult err; + + const VkApplicationInfo app_info = { + VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType + nullptr, // pNext + "vktest", // pApplicationName + 0, // applicationVersion + "vktest", // pEngineName + 0, // engineVerison + kGrVkMinimumVersion, // apiVersion + }; + + GrVkExtensions extensions(getProc); + extensions.initInstance(kGrVkMinimumVersion); + + SkTArray instanceLayerNames; + SkTArray instanceExtensionNames; + uint32_t extensionFlags = 0; +#ifdef SK_ENABLE_VK_LAYERS + for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) { + if (extensions.hasInstanceLayer(kDebugLayerNames[i])) { + instanceLayerNames.push_back(kDebugLayerNames[i]); + } + } + if (extensions.hasInstanceExtension(VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) { + instanceExtensionNames.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME); + extensionFlags |= kEXT_debug_report_GrVkExtensionFlag; + } +#endif + + if (extensions.hasInstanceExtension(VK_KHR_SURFACE_EXTENSION_NAME)) { + instanceExtensionNames.push_back(VK_KHR_SURFACE_EXTENSION_NAME); + extensionFlags |= kKHR_surface_GrVkExtensionFlag; + } + if (extensions.hasInstanceExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME)) { + instanceExtensionNames.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); + extensionFlags |= kKHR_swapchain_GrVkExtensionFlag; + } +#ifdef SK_BUILD_FOR_WIN + if (extensions.hasInstanceExtension(VK_KHR_WIN32_SURFACE_EXTENSION_NAME)) { + instanceExtensionNames.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME); + extensionFlags |= kKHR_win32_surface_GrVkExtensionFlag; + } +#elif defined(SK_BUILD_FOR_ANDROID) + if (extensions.hasInstanceExtension(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) { + instanceExtensionNames.push_back(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME); + extensionFlags |= kKHR_android_surface_GrVkExtensionFlag; + } +#elif defined(SK_BUILD_FOR_UNIX) && !defined(__Fuchsia__) + if (extensions.hasInstanceExtension(VK_KHR_XCB_SURFACE_EXTENSION_NAME)) { + instanceExtensionNames.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME); + extensionFlags |= kKHR_xcb_surface_GrVkExtensionFlag; + } +#endif + + const VkInstanceCreateInfo instance_create = { + VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType + nullptr, // pNext + 0, // flags + &app_info, // pApplicationInfo + (uint32_t) instanceLayerNames.count(), // enabledLayerNameCount + instanceLayerNames.begin(), // ppEnabledLayerNames + (uint32_t) instanceExtensionNames.count(), // enabledExtensionNameCount + instanceExtensionNames.begin(), // ppEnabledExtensionNames + }; + + ACQUIRE_VK_PROC(CreateInstance, VK_NULL_HANDLE, VK_NULL_HANDLE); + err = grVkCreateInstance(&instance_create, nullptr, &inst); + if (err < 0) { + SkDebugf("vkCreateInstance failed: %d\n", 
err); + return false; + } + + ACQUIRE_VK_PROC(DestroyInstance, inst, VK_NULL_HANDLE); + ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE); + ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE); + ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE); + ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE); + ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE); + ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE); + ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE); + + uint32_t gpuCount; + err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr); + if (err) { + SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err); + grVkDestroyInstance(inst, nullptr); + return false; + } + if (!gpuCount) { + SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n"); + grVkDestroyInstance(inst, nullptr); + return false; + } + // Just returning the first physical device instead of getting the whole array. + // TODO: find best match for our needs + gpuCount = 1; + err = grVkEnumeratePhysicalDevices(inst, &gpuCount, &physDev); + // VK_INCOMPLETE is returned when the count we provide is less than the total device count. + if (err && VK_INCOMPLETE != err) { + SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err); + grVkDestroyInstance(inst, nullptr); + return false; + } + + // query to get the initial queue props size + uint32_t queueCount; + grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr); + if (!queueCount) { + SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n"); + grVkDestroyInstance(inst, nullptr); + return false; + } + + SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties)); + // now get the actual queue props + VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get(); + + grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps); + + // iterate to find the graphics queue + uint32_t graphicsQueueIndex = queueCount; + for (uint32_t i = 0; i < queueCount; i++) { + if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) { + graphicsQueueIndex = i; + break; + } + } + if (graphicsQueueIndex == queueCount) { + SkDebugf("Could not find any supported graphics queues.\n"); + grVkDestroyInstance(inst, nullptr); + return false; + } + + // iterate to find the present queue, if needed + uint32_t presentQueueIndex = queueCount; + if (presentQueueIndexPtr && canPresent) { + for (uint32_t i = 0; i < queueCount; i++) { + if (canPresent(inst, physDev, i)) { + presentQueueIndex = i; + break; + } + } + if (presentQueueIndex == queueCount) { + SkDebugf("Could not find any supported present queues.\n"); + grVkDestroyInstance(inst, nullptr); + return false; + } + *presentQueueIndexPtr = presentQueueIndex; + } else { + // Just setting this so we end up make a single queue for graphics since there was no + // request for a present queue. 
+ presentQueueIndex = graphicsQueueIndex; + } + + extensions.initDevice(kGrVkMinimumVersion, inst, physDev); + + SkTArray deviceLayerNames; + SkTArray deviceExtensionNames; +#ifdef SK_ENABLE_VK_LAYERS + for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) { + if (extensions.hasDeviceLayer(kDebugLayerNames[i])) { + deviceLayerNames.push_back(kDebugLayerNames[i]); + } + } +#endif + if (extensions.hasDeviceExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME)) { + deviceExtensionNames.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); + extensionFlags |= kKHR_swapchain_GrVkExtensionFlag; + } + if (extensions.hasDeviceExtension("VK_NV_glsl_shader")) { + deviceExtensionNames.push_back("VK_NV_glsl_shader"); + extensionFlags |= kNV_glsl_shader_GrVkExtensionFlag; + } + + // query to get the physical device properties + VkPhysicalDeviceFeatures deviceFeatures; + grVkGetPhysicalDeviceFeatures(physDev, &deviceFeatures); + // this looks like it would slow things down, + // and we can't depend on it on all platforms + deviceFeatures.robustBufferAccess = VK_FALSE; + + uint32_t featureFlags = 0; + if (deviceFeatures.geometryShader) { + featureFlags |= kGeometryShader_GrVkFeatureFlag; + } + if (deviceFeatures.dualSrcBlend) { + featureFlags |= kDualSrcBlend_GrVkFeatureFlag; + } + if (deviceFeatures.sampleRateShading) { + featureFlags |= kSampleRateShading_GrVkFeatureFlag; + } + + float queuePriorities[1] = { 0.0 }; + // Here we assume no need for swapchain queue + // If one is needed, the client will need its own setup code + const VkDeviceQueueCreateInfo queueInfo[2] = { + { + VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType + nullptr, // pNext + 0, // VkDeviceQueueCreateFlags + graphicsQueueIndex, // queueFamilyIndex + 1, // queueCount + queuePriorities, // pQueuePriorities + }, + { + VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType + nullptr, // pNext + 0, // VkDeviceQueueCreateFlags + presentQueueIndex, // queueFamilyIndex + 1, // queueCount + queuePriorities, // pQueuePriorities + } + }; + uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 
2 : 1; + + const VkDeviceCreateInfo deviceInfo = { + VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType + nullptr, // pNext + 0, // VkDeviceCreateFlags + queueInfoCount, // queueCreateInfoCount + queueInfo, // pQueueCreateInfos + (uint32_t) deviceLayerNames.count(), // layerCount + deviceLayerNames.begin(), // ppEnabledLayerNames + (uint32_t) deviceExtensionNames.count(), // extensionCount + deviceExtensionNames.begin(), // ppEnabledExtensionNames + &deviceFeatures // ppEnabledFeatures + }; + + err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device); + if (err) { + SkDebugf("CreateDevice failed: %d\n", err); + grVkDestroyInstance(inst, nullptr); + return false; + } + + auto interface = + sk_make_sp(getProc, inst, device, extensionFlags); + if (!interface->validate(extensionFlags)) { + SkDebugf("Vulkan interface validation failed\n"); + grVkDeviceWaitIdle(device); + grVkDestroyDevice(device, nullptr); + grVkDestroyInstance(inst, nullptr); + return false; + } + + VkQueue queue; + grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue); + + ctx->fInstance = inst; + ctx->fPhysicalDevice = physDev; + ctx->fDevice = device; + ctx->fQueue = queue; + ctx->fGraphicsQueueIndex = graphicsQueueIndex; + ctx->fMinAPIVersion = kGrVkMinimumVersion; + ctx->fExtensions = extensionFlags; + ctx->fFeatures = featureFlags; + ctx->fInterface.reset(interface.release()); + ctx->fOwnsInstanceAndDevice = false; + + return true; + + +} + } #endif diff --git a/tools/gpu/vk/VkTestUtils.h b/tools/gpu/vk/VkTestUtils.h index 9f34ef09c5..30471a78c0 100644 --- a/tools/gpu/vk/VkTestUtils.h +++ b/tools/gpu/vk/VkTestUtils.h @@ -13,9 +13,21 @@ #ifdef SK_VULKAN #include "vk/GrVkDefines.h" +#include "vk/GrVkInterface.h" + +struct GrVkBackendContext; namespace sk_gpu_test { bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr*, PFN_vkGetDeviceProcAddr*); + + using CanPresentFn = std::function; + + bool CreateVkBackendContext(const GrVkInterface::GetInstanceProc& getInstanceProc, + const GrVkInterface::GetDeviceProc& getDeviceProc, + GrVkBackendContext* ctx, + uint32_t* presentQueueIndexPtr = nullptr, + CanPresentFn canPresent = CanPresentFn()); } #endif diff --git a/tools/sk_app/VulkanWindowContext.cpp b/tools/sk_app/VulkanWindowContext.cpp index ad0e15ca7b..411f114e63 100644 --- a/tools/sk_app/VulkanWindowContext.cpp +++ b/tools/sk_app/VulkanWindowContext.cpp @@ -13,7 +13,6 @@ #include "VulkanWindowContext.h" #include "vk/GrVkImage.h" -#include "vk/GrVkInterface.h" #include "vk/GrVkUtil.h" #include "vk/GrVkTypes.h" @@ -22,8 +21,8 @@ #undef CreateSemaphore #endif -#define GET_PROC(F) f ## F = (PFN_vk ## F) fGetInstanceProcAddr(instance, "vk" #F) -#define GET_DEV_PROC(F) f ## F = (PFN_vk ## F) fGetDeviceProcAddr(device, "vk" #F) +#define GET_PROC(F) f ## F = (PFN_vk ## F) fGetInstanceProcAddr(fInstance, "vk" #F) +#define GET_DEV_PROC(F) f ## F = (PFN_vk ## F) fGetDeviceProcAddr(fDevice, "vk" #F) namespace sk_app { @@ -49,22 +48,34 @@ VulkanWindowContext::VulkanWindowContext(const DisplayParams& params, void VulkanWindowContext::initializeContext() { // any config code here (particularly for msaa)? 
- fBackendContext.reset(GrVkBackendContext::Create(fGetInstanceProcAddr, fGetDeviceProcAddr, - &fPresentQueueIndex, fCanPresentFn)); - if (!(fBackendContext->fExtensions & kKHR_surface_GrVkExtensionFlag) || - !(fBackendContext->fExtensions & kKHR_swapchain_GrVkExtensionFlag)) { - fBackendContext.reset(nullptr); + GrVkBackendContext backendContext; + if (!sk_gpu_test::CreateVkBackendContext(fGetInstanceProcAddr, fGetDeviceProcAddr, + &backendContext, &fPresentQueueIndex, fCanPresentFn)) { return; } - VkInstance instance = fBackendContext->fInstance; - VkDevice device = fBackendContext->fDevice; + if (!(backendContext.fExtensions & kKHR_surface_GrVkExtensionFlag) || + !(backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag)) { + return; + } + + fInstance = backendContext.fInstance; + fPhysicalDevice = backendContext.fPhysicalDevice; + fDevice = backendContext.fDevice; + fGraphicsQueueIndex = backendContext.fGraphicsQueueIndex; + fGraphicsQueue = backendContext.fQueue; + fInterface = backendContext.fInterface; + + GET_PROC(DestroyInstance); GET_PROC(DestroySurfaceKHR); GET_PROC(GetPhysicalDeviceSurfaceSupportKHR); GET_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR); GET_PROC(GetPhysicalDeviceSurfaceFormatsKHR); GET_PROC(GetPhysicalDeviceSurfacePresentModesKHR); + GET_DEV_PROC(DeviceWaitIdle); + GET_DEV_PROC(QueueWaitIdle); + GET_DEV_PROC(DestroyDevice); GET_DEV_PROC(CreateSwapchainKHR); GET_DEV_PROC(DestroySwapchainKHR); GET_DEV_PROC(GetSwapchainImagesKHR); @@ -72,18 +83,17 @@ void VulkanWindowContext::initializeContext() { GET_DEV_PROC(QueuePresentKHR); GET_DEV_PROC(GetDeviceQueue); - fContext = GrContext::MakeVulkan(fBackendContext, fDisplayParams.fGrContextOptions); + fContext = GrContext::MakeVulkan(backendContext, fDisplayParams.fGrContextOptions); - fSurface = fCreateVkSurfaceFn(instance); + fSurface = fCreateVkSurfaceFn(fInstance); if (VK_NULL_HANDLE == fSurface) { - fBackendContext.reset(nullptr); + this->destroyContext(); return; } VkBool32 supported; - VkResult res = fGetPhysicalDeviceSurfaceSupportKHR(fBackendContext->fPhysicalDevice, - fPresentQueueIndex, fSurface, - &supported); + VkResult res = fGetPhysicalDeviceSurfaceSupportKHR(fPhysicalDevice, fPresentQueueIndex, + fSurface, &supported); if (VK_SUCCESS != res) { this->destroyContext(); return; @@ -95,45 +105,44 @@ void VulkanWindowContext::initializeContext() { } // create presentQueue - fGetDeviceQueue(fBackendContext->fDevice, fPresentQueueIndex, 0, &fPresentQueue); + fGetDeviceQueue(fDevice, fPresentQueueIndex, 0, &fPresentQueue); } bool VulkanWindowContext::createSwapchain(int width, int height, const DisplayParams& params) { // check for capabilities VkSurfaceCapabilitiesKHR caps; - VkResult res = fGetPhysicalDeviceSurfaceCapabilitiesKHR(fBackendContext->fPhysicalDevice, - fSurface, &caps); + VkResult res = fGetPhysicalDeviceSurfaceCapabilitiesKHR(fPhysicalDevice, fSurface, &caps); if (VK_SUCCESS != res) { return false; } uint32_t surfaceFormatCount; - res = fGetPhysicalDeviceSurfaceFormatsKHR(fBackendContext->fPhysicalDevice, fSurface, - &surfaceFormatCount, nullptr); + res = fGetPhysicalDeviceSurfaceFormatsKHR(fPhysicalDevice, fSurface, &surfaceFormatCount, + nullptr); if (VK_SUCCESS != res) { return false; } SkAutoMalloc surfaceFormatAlloc(surfaceFormatCount * sizeof(VkSurfaceFormatKHR)); VkSurfaceFormatKHR* surfaceFormats = (VkSurfaceFormatKHR*)surfaceFormatAlloc.get(); - res = fGetPhysicalDeviceSurfaceFormatsKHR(fBackendContext->fPhysicalDevice, fSurface, - &surfaceFormatCount, surfaceFormats); + res = 
fGetPhysicalDeviceSurfaceFormatsKHR(fPhysicalDevice, fSurface, &surfaceFormatCount, + surfaceFormats); if (VK_SUCCESS != res) { return false; } uint32_t presentModeCount; - res = fGetPhysicalDeviceSurfacePresentModesKHR(fBackendContext->fPhysicalDevice, fSurface, - &presentModeCount, nullptr); + res = fGetPhysicalDeviceSurfacePresentModesKHR(fPhysicalDevice, fSurface, &presentModeCount, + nullptr); if (VK_SUCCESS != res) { return false; } SkAutoMalloc presentModeAlloc(presentModeCount * sizeof(VkPresentModeKHR)); VkPresentModeKHR* presentModes = (VkPresentModeKHR*)presentModeAlloc.get(); - res = fGetPhysicalDeviceSurfacePresentModesKHR(fBackendContext->fPhysicalDevice, fSurface, - &presentModeCount, presentModes); + res = fGetPhysicalDeviceSurfacePresentModesKHR(fPhysicalDevice, fSurface, &presentModeCount, + presentModes); if (VK_SUCCESS != res) { return false; } @@ -234,8 +243,8 @@ bool VulkanWindowContext::createSwapchain(int width, int height, swapchainCreateInfo.imageArrayLayers = 1; swapchainCreateInfo.imageUsage = usageFlags; - uint32_t queueFamilies[] = { fBackendContext->fGraphicsQueueIndex, fPresentQueueIndex }; - if (fBackendContext->fGraphicsQueueIndex != fPresentQueueIndex) { + uint32_t queueFamilies[] = { fGraphicsQueueIndex, fPresentQueueIndex }; + if (fGraphicsQueueIndex != fPresentQueueIndex) { swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT; swapchainCreateInfo.queueFamilyIndexCount = 2; swapchainCreateInfo.pQueueFamilyIndices = queueFamilies; @@ -251,18 +260,18 @@ bool VulkanWindowContext::createSwapchain(int width, int height, swapchainCreateInfo.clipped = true; swapchainCreateInfo.oldSwapchain = fSwapchain; - res = fCreateSwapchainKHR(fBackendContext->fDevice, &swapchainCreateInfo, nullptr, &fSwapchain); + res = fCreateSwapchainKHR(fDevice, &swapchainCreateInfo, nullptr, &fSwapchain); if (VK_SUCCESS != res) { return false; } // destroy the old swapchain if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) { - GR_VK_CALL(fBackendContext->fInterface, DeviceWaitIdle(fBackendContext->fDevice)); + fDeviceWaitIdle(fDevice); this->destroyBuffers(); - fDestroySwapchainKHR(fBackendContext->fDevice, swapchainCreateInfo.oldSwapchain, nullptr); + fDestroySwapchainKHR(fDevice, swapchainCreateInfo.oldSwapchain, nullptr); } this->createBuffers(swapchainCreateInfo.imageFormat, colorType); @@ -271,10 +280,10 @@ bool VulkanWindowContext::createSwapchain(int width, int height, } void VulkanWindowContext::createBuffers(VkFormat format, SkColorType colorType) { - fGetSwapchainImagesKHR(fBackendContext->fDevice, fSwapchain, &fImageCount, nullptr); + fGetSwapchainImagesKHR(fDevice, fSwapchain, &fImageCount, nullptr); SkASSERT(fImageCount); fImages = new VkImage[fImageCount]; - fGetSwapchainImagesKHR(fBackendContext->fDevice, fSwapchain, &fImageCount, fImages); + fGetSwapchainImagesKHR(fDevice, fSwapchain, &fImageCount, fImages); // set up initial image layouts and create surfaces fImageLayouts = new VkImageLayout[fImageCount]; @@ -306,10 +315,10 @@ void VulkanWindowContext::createBuffers(VkFormat format, SkColorType colorType) memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo)); commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; // this needs to be on the render queue - commandPoolInfo.queueFamilyIndex = fBackendContext->fGraphicsQueueIndex; + commandPoolInfo.queueFamilyIndex = fGraphicsQueueIndex; commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; - GR_VK_CALL_ERRCHECK(fBackendContext->fInterface, - 
CreateCommandPool(fBackendContext->fDevice, &commandPoolInfo, + GR_VK_CALL_ERRCHECK(fInterface, + CreateCommandPool(fDevice, &commandPoolInfo, nullptr, &fCommandPool)); } @@ -337,20 +346,20 @@ void VulkanWindowContext::createBuffers(VkFormat format, SkColorType colorType) fBackbuffers = new BackbufferInfo[fImageCount + 1]; for (uint32_t i = 0; i < fImageCount + 1; ++i) { fBackbuffers[i].fImageIndex = -1; - GR_VK_CALL_ERRCHECK(fBackendContext->fInterface, - CreateSemaphore(fBackendContext->fDevice, &semaphoreInfo, + GR_VK_CALL_ERRCHECK(fInterface, + CreateSemaphore(fDevice, &semaphoreInfo, nullptr, &fBackbuffers[i].fAcquireSemaphore)); - GR_VK_CALL_ERRCHECK(fBackendContext->fInterface, - CreateSemaphore(fBackendContext->fDevice, &semaphoreInfo, + GR_VK_CALL_ERRCHECK(fInterface, + CreateSemaphore(fDevice, &semaphoreInfo, nullptr, &fBackbuffers[i].fRenderSemaphore)); - GR_VK_CALL_ERRCHECK(fBackendContext->fInterface, - AllocateCommandBuffers(fBackendContext->fDevice, &commandBuffersInfo, + GR_VK_CALL_ERRCHECK(fInterface, + AllocateCommandBuffers(fDevice, &commandBuffersInfo, fBackbuffers[i].fTransitionCmdBuffers)); - GR_VK_CALL_ERRCHECK(fBackendContext->fInterface, - CreateFence(fBackendContext->fDevice, &fenceInfo, nullptr, + GR_VK_CALL_ERRCHECK(fInterface, + CreateFence(fDevice, &fenceInfo, nullptr, &fBackbuffers[i].fUsageFences[0])); - GR_VK_CALL_ERRCHECK(fBackendContext->fInterface, - CreateFence(fBackendContext->fDevice, &fenceInfo, nullptr, + GR_VK_CALL_ERRCHECK(fInterface, + CreateFence(fDevice, &fenceInfo, nullptr, &fBackbuffers[i].fUsageFences[1])); } fCurrentBackbufferIndex = fImageCount; @@ -360,26 +369,26 @@ void VulkanWindowContext::destroyBuffers() { if (fBackbuffers) { for (uint32_t i = 0; i < fImageCount + 1; ++i) { - GR_VK_CALL_ERRCHECK(fBackendContext->fInterface, - WaitForFences(fBackendContext->fDevice, 2, + GR_VK_CALL_ERRCHECK(fInterface, + WaitForFences(fDevice, 2, fBackbuffers[i].fUsageFences, true, UINT64_MAX)); fBackbuffers[i].fImageIndex = -1; - GR_VK_CALL(fBackendContext->fInterface, - DestroySemaphore(fBackendContext->fDevice, + GR_VK_CALL(fInterface, + DestroySemaphore(fDevice, fBackbuffers[i].fAcquireSemaphore, nullptr)); - GR_VK_CALL(fBackendContext->fInterface, - DestroySemaphore(fBackendContext->fDevice, + GR_VK_CALL(fInterface, + DestroySemaphore(fDevice, fBackbuffers[i].fRenderSemaphore, nullptr)); - GR_VK_CALL(fBackendContext->fInterface, - FreeCommandBuffers(fBackendContext->fDevice, fCommandPool, 2, + GR_VK_CALL(fInterface, + FreeCommandBuffers(fDevice, fCommandPool, 2, fBackbuffers[i].fTransitionCmdBuffers)); - GR_VK_CALL(fBackendContext->fInterface, - DestroyFence(fBackendContext->fDevice, fBackbuffers[i].fUsageFences[0], 0)); - GR_VK_CALL(fBackendContext->fInterface, - DestroyFence(fBackendContext->fDevice, fBackbuffers[i].fUsageFences[1], 0)); + GR_VK_CALL(fInterface, + DestroyFence(fDevice, fBackbuffers[i].fUsageFences[0], 0)); + GR_VK_CALL(fInterface, + DestroyFence(fDevice, fBackbuffers[i].fUsageFences[1], 0)); } } @@ -400,34 +409,43 @@ VulkanWindowContext::~VulkanWindowContext() { } void VulkanWindowContext::destroyContext() { - if (!fBackendContext.get()) { + if (!this->isValid()) { return; } - GR_VK_CALL(fBackendContext->fInterface, QueueWaitIdle(fPresentQueue)); - GR_VK_CALL(fBackendContext->fInterface, DeviceWaitIdle(fBackendContext->fDevice)); + fQueueWaitIdle(fPresentQueue); + fDeviceWaitIdle(fDevice); this->destroyBuffers(); if (VK_NULL_HANDLE != fCommandPool) { - GR_VK_CALL(fBackendContext->fInterface, 
DestroyCommandPool(fBackendContext->fDevice, - fCommandPool, nullptr)); + GR_VK_CALL(fInterface, DestroyCommandPool(fDevice, fCommandPool, nullptr)); fCommandPool = VK_NULL_HANDLE; } if (VK_NULL_HANDLE != fSwapchain) { - fDestroySwapchainKHR(fBackendContext->fDevice, fSwapchain, nullptr); + fDestroySwapchainKHR(fDevice, fSwapchain, nullptr); fSwapchain = VK_NULL_HANDLE; } if (VK_NULL_HANDLE != fSurface) { - fDestroySurfaceKHR(fBackendContext->fInstance, fSurface, nullptr); + fDestroySurfaceKHR(fInstance, fSurface, nullptr); fSurface = VK_NULL_HANDLE; } fContext.reset(); + fInterface.reset(); + + if (VK_NULL_HANDLE != fDevice) { + fDestroyDevice(fDevice, nullptr); + fDevice = VK_NULL_HANDLE; + } + fPhysicalDevice = VK_NULL_HANDLE; - fBackendContext.reset(nullptr); + if (VK_NULL_HANDLE != fInstance) { + fDestroyInstance(fInstance, nullptr); + fInstance = VK_NULL_HANDLE; + } } VulkanWindowContext::BackbufferInfo* VulkanWindowContext::getAvailableBackbuffer() { @@ -439,8 +457,8 @@ VulkanWindowContext::BackbufferInfo* VulkanWindowContext::getAvailableBackbuffer } BackbufferInfo* backbuffer = fBackbuffers + fCurrentBackbufferIndex; - GR_VK_CALL_ERRCHECK(fBackendContext->fInterface, - WaitForFences(fBackendContext->fDevice, 2, backbuffer->fUsageFences, + GR_VK_CALL_ERRCHECK(fInterface, + WaitForFences(fDevice, 2, backbuffer->fUsageFences, true, UINT64_MAX)); return backbuffer; } @@ -450,12 +468,12 @@ sk_sp VulkanWindowContext::getBackbufferSurface() { SkASSERT(backbuffer); // reset the fence - GR_VK_CALL_ERRCHECK(fBackendContext->fInterface, - ResetFences(fBackendContext->fDevice, 2, backbuffer->fUsageFences)); + GR_VK_CALL_ERRCHECK(fInterface, + ResetFences(fDevice, 2, backbuffer->fUsageFences)); // semaphores should be in unsignaled state // acquire the image - VkResult res = fAcquireNextImageKHR(fBackendContext->fDevice, fSwapchain, UINT64_MAX, + VkResult res = fAcquireNextImageKHR(fDevice, fSwapchain, UINT64_MAX, backbuffer->fAcquireSemaphore, VK_NULL_HANDLE, &backbuffer->fImageIndex); if (VK_ERROR_SURFACE_LOST_KHR == res) { @@ -469,11 +487,11 @@ sk_sp VulkanWindowContext::getBackbufferSurface() { return nullptr; } backbuffer = this->getAvailableBackbuffer(); - GR_VK_CALL_ERRCHECK(fBackendContext->fInterface, - ResetFences(fBackendContext->fDevice, 2, backbuffer->fUsageFences)); + GR_VK_CALL_ERRCHECK(fInterface, + ResetFences(fDevice, 2, backbuffer->fUsageFences)); // acquire the image - res = fAcquireNextImageKHR(fBackendContext->fDevice, fSwapchain, UINT64_MAX, + res = fAcquireNextImageKHR(fDevice, fSwapchain, UINT64_MAX, backbuffer->fAcquireSemaphore, VK_NULL_HANDLE, &backbuffer->fImageIndex); @@ -501,27 +519,27 @@ sk_sp VulkanWindowContext::getBackbufferSurface() { layout, // oldLayout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout fPresentQueueIndex, // srcQueueFamilyIndex - fBackendContext->fGraphicsQueueIndex, // dstQueueFamilyIndex + fGraphicsQueueIndex, // dstQueueFamilyIndex fImages[backbuffer->fImageIndex], // image { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 } // subresourceRange }; - GR_VK_CALL_ERRCHECK(fBackendContext->fInterface, + GR_VK_CALL_ERRCHECK(fInterface, ResetCommandBuffer(backbuffer->fTransitionCmdBuffers[0], 0)); VkCommandBufferBeginInfo info; memset(&info, 0, sizeof(VkCommandBufferBeginInfo)); info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; info.flags = 0; - GR_VK_CALL_ERRCHECK(fBackendContext->fInterface, + GR_VK_CALL_ERRCHECK(fInterface, BeginCommandBuffer(backbuffer->fTransitionCmdBuffers[0], &info)); - GR_VK_CALL(fBackendContext->fInterface, + 
GR_VK_CALL(fInterface, CmdPipelineBarrier(backbuffer->fTransitionCmdBuffers[0], srcStageMask, dstStageMask, 0, 0, nullptr, 0, nullptr, 1, &imageMemoryBarrier)); - GR_VK_CALL_ERRCHECK(fBackendContext->fInterface, + GR_VK_CALL_ERRCHECK(fInterface, EndCommandBuffer(backbuffer->fTransitionCmdBuffers[0])); VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; @@ -536,8 +554,8 @@ sk_sp VulkanWindowContext::getBackbufferSurface() { submitInfo.pCommandBuffers = &backbuffer->fTransitionCmdBuffers[0]; submitInfo.signalSemaphoreCount = 0; - GR_VK_CALL_ERRCHECK(fBackendContext->fInterface, - QueueSubmit(fBackendContext->fQueue, 1, &submitInfo, + GR_VK_CALL_ERRCHECK(fInterface, + QueueSubmit(fGraphicsQueue, 1, &submitInfo, backbuffer->fUsageFences[0])); SkSurface* surface = fSurfaces[backbuffer->fImageIndex].get(); @@ -574,26 +592,26 @@ void VulkanWindowContext::swapBuffers() { dstAccessMask, // inputMask layout, // oldLayout VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, // newLayout - fBackendContext->fGraphicsQueueIndex, // srcQueueFamilyIndex + fGraphicsQueueIndex, // srcQueueFamilyIndex fPresentQueueIndex, // dstQueueFamilyIndex fImages[backbuffer->fImageIndex], // image { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 } // subresourceRange }; - GR_VK_CALL_ERRCHECK(fBackendContext->fInterface, + GR_VK_CALL_ERRCHECK(fInterface, ResetCommandBuffer(backbuffer->fTransitionCmdBuffers[1], 0)); VkCommandBufferBeginInfo info; memset(&info, 0, sizeof(VkCommandBufferBeginInfo)); info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; info.flags = 0; - GR_VK_CALL_ERRCHECK(fBackendContext->fInterface, + GR_VK_CALL_ERRCHECK(fInterface, BeginCommandBuffer(backbuffer->fTransitionCmdBuffers[1], &info)); - GR_VK_CALL(fBackendContext->fInterface, + GR_VK_CALL(fInterface, CmdPipelineBarrier(backbuffer->fTransitionCmdBuffers[1], srcStageMask, dstStageMask, 0, 0, nullptr, 0, nullptr, 1, &imageMemoryBarrier)); - GR_VK_CALL_ERRCHECK(fBackendContext->fInterface, + GR_VK_CALL_ERRCHECK(fInterface, EndCommandBuffer(backbuffer->fTransitionCmdBuffers[1])); fImageLayouts[backbuffer->fImageIndex] = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR; @@ -609,8 +627,8 @@ void VulkanWindowContext::swapBuffers() { submitInfo.signalSemaphoreCount = 1; submitInfo.pSignalSemaphores = &backbuffer->fRenderSemaphore; - GR_VK_CALL_ERRCHECK(fBackendContext->fInterface, - QueueSubmit(fBackendContext->fQueue, 1, &submitInfo, + GR_VK_CALL_ERRCHECK(fInterface, + QueueSubmit(fGraphicsQueue, 1, &submitInfo, backbuffer->fUsageFences[1])); // Submit present operation to present queue diff --git a/tools/sk_app/VulkanWindowContext.h b/tools/sk_app/VulkanWindowContext.h index 1fca9dc83e..79298f6b38 100644 --- a/tools/sk_app/VulkanWindowContext.h +++ b/tools/sk_app/VulkanWindowContext.h @@ -13,6 +13,8 @@ #ifdef SK_VULKAN #include "vk/GrVkBackendContext.h" +#include "vk/GrVkInterface.h" +#include "vk/VkTestUtils.h" #include "WindowContext.h" class GrRenderTarget; @@ -26,7 +28,7 @@ public: sk_sp getBackbufferSurface() override; void swapBuffers() override; - bool isValid() override { return SkToBool(fBackendContext.get()); } + bool isValid() override { return fDevice != VK_NULL_HANDLE; } void resize(int w, int h) override { this->createSwapchain(w, h, fDisplayParams); @@ -41,7 +43,7 @@ public: /** Platform specific function that creates a VkSurfaceKHR for a window */ using CreateVkSurfaceFn = std::function; /** Platform specific function that determines whether presentation will succeed. 
*/ - using CanPresentFn = GrVkBackendContext::CanPresentFn; + using CanPresentFn = sk_gpu_test::CanPresentFn; VulkanWindowContext(const DisplayParams&, CreateVkSurfaceFn, CanPresentFn, PFN_vkGetInstanceProcAddr, PFN_vkGetDeviceProcAddr); @@ -63,7 +65,9 @@ private: void createBuffers(VkFormat format, SkColorType colorType); void destroyBuffers(); - sk_sp fBackendContext; + VkInstance fInstance = VK_NULL_HANDLE; + VkPhysicalDevice fPhysicalDevice = VK_NULL_HANDLE; + VkDevice fDevice = VK_NULL_HANDLE; // simple wrapper class that exists only to initialize a pointer to NULL template class VkPtr { @@ -95,10 +99,19 @@ private: VkPtr fGetSwapchainImagesKHR; VkPtr fAcquireNextImageKHR; VkPtr fQueuePresentKHR; + + VkPtr fDestroyInstance; + VkPtr fDeviceWaitIdle; + VkPtr fQueueWaitIdle; + VkPtr fDestroyDevice; VkPtr fGetDeviceQueue; + sk_sp fInterface; + VkSurfaceKHR fSurface; VkSwapchainKHR fSwapchain; + uint32_t fGraphicsQueueIndex; + VkQueue fGraphicsQueue; uint32_t fPresentQueueIndex; VkQueue fPresentQueue; -- cgit v1.2.3
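
With GrVkBackendContext reduced to a plain struct, client code now fills it in on the stack and passes it to GrContext::MakeVulkan() by const reference, keeping ownership of the VkInstance and VkDevice. The following is a rough sketch of that flow, not code from this patch: the helper name makeSkiaVulkanContext and all of the Vulkan handles, flags, and the GrVkInterface passed in are assumed to come from the client's own Vulkan setup code.

    #include "GrContext.h"
    #include "vk/GrVkBackendContext.h"
    #include "vk/GrVkInterface.h"

    // Hypothetical client-side helper; every argument is assumed to have been
    // created by the client's own Vulkan initialization.
    sk_sp<GrContext> makeSkiaVulkanContext(VkInstance instance,
                                           VkPhysicalDevice physicalDevice,
                                           VkDevice device,
                                           VkQueue graphicsQueue,
                                           uint32_t graphicsQueueIndex,
                                           uint32_t extensionFlags,
                                           uint32_t featureFlags,
                                           sk_sp<const GrVkInterface> vkInterface,
                                           const GrContextOptions& options) {
        GrVkBackendContext backendContext;             // plain value struct, no SkRefCnt
        backendContext.fInstance = instance;
        backendContext.fPhysicalDevice = physicalDevice;
        backendContext.fDevice = device;
        backendContext.fQueue = graphicsQueue;         // must be a graphics queue
        backendContext.fGraphicsQueueIndex = graphicsQueueIndex;
        backendContext.fMinAPIVersion = VK_MAKE_VERSION(1, 0, 8);
        backendContext.fExtensions = extensionFlags;
        backendContext.fFeatures = featureFlags;
        backendContext.fInterface = std::move(vkInterface);
        backendContext.fOwnsInstanceAndDevice = false; // deprecated; Skia never destroys these
        return GrContext::MakeVulkan(backendContext, options);
    }

On shutdown the client releases the GrContext and any sk_sp refs still held in the struct first, and only then calls vkDeviceWaitIdle()/vkDestroyDevice()/vkDestroyInstance(), matching the lifetime comment added to GrVkBackendContext.h and the teardown order used by VkTestContextImpl::teardown() in this patch.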
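
For Skia's own tools and tests, the removed Create() helpers are replaced by sk_gpu_test::CreateVkBackendContext() in VkTestUtils. A minimal sketch of the new call sequence, assuming only the headers touched in this patch (the wrapper name makeTestVulkanContext is illustrative, not part of the change):

    #include "GrContext.h"
    #include "vk/GrVkBackendContext.h"
    #include "vk/VkTestUtils.h"

    sk_sp<GrContext> makeTestVulkanContext(GrVkBackendContext* outBackendContext,
                                           const GrContextOptions& options) {
        PFN_vkGetInstanceProcAddr instProc;
        PFN_vkGetDeviceProcAddr devProc;
        if (!sk_gpu_test::LoadVkLibraryAndGetProcAddrFuncs(&instProc, &devProc)) {
            return nullptr;
        }
        if (!sk_gpu_test::CreateVkBackendContext(instProc, devProc, outBackendContext)) {
            return nullptr;
        }
        // CreateVkBackendContext sets fOwnsInstanceAndDevice to false: the caller keeps
        // *outBackendContext around and destroys its VkDevice and VkInstance itself once
        // every Skia object (GrContext, fInterface, fMemoryAllocator) has been released,
        // as VkTestContextImpl::teardown() and VulkanWindowContext::destroyContext() do.
        return GrContext::MakeVulkan(*outBackendContext, options);
    }

The optional presentQueueIndexPtr and canPresent arguments to CreateVkBackendContext() preserve the present-queue selection that VulkanWindowContext relies on; callers that only render offscreen can omit them, as above.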