author    Greg Daniel <egdaniel@google.com>              2018-06-27 15:26:54 +0000
committer Skia Commit-Bot <skia-commit-bot@chromium.org> 2018-06-27 15:53:55 +0000
commit    059a9ab4bcd07a4bfdbfef333c27ef3d277e0e46
tree      6316b27ce95e899bd2d6a22b6e04b2039a67853e
parent    224c700a1fb0b7f6abd85a9729d29cbbdf5872dd
Revert "Make GrVkBackendContext no longer derive from SkRefCnt."
This reverts commit 93ae2337732bf206e6ef4faecc6b30c3881e8359.

Reason for revert: <INSERT REASONING HERE>

Original change's description:
> Make GrVkBackendContext no longer derive from SkRefCnt.
>
> Also moves the helper Create functions to VkTestUtils since no clients
> are using them anymore.
>
> Bug: skia:
> Change-Id: I7e8e4912e7ef6fb00a7e2a00407aed5e83211799
> Reviewed-on: https://skia-review.googlesource.com/135323
> Reviewed-by: Jim Van Verth <jvanverth@google.com>
> Reviewed-by: Brian Salomon <bsalomon@google.com>
> Commit-Queue: Greg Daniel <egdaniel@google.com>

TBR=egdaniel@google.com,jvanverth@google.com,bsalomon@google.com

# Not skipping CQ checks because original CL landed > 1 day ago.

Bug: skia:
Change-Id: If7201917631dc22753ea3fa6e9d2984463e38e4c
Reviewed-on: https://skia-review.googlesource.com/137903
Reviewed-by: Greg Daniel <egdaniel@google.com>
Commit-Queue: Greg Daniel <egdaniel@google.com>
-rw-r--r--  gn/gpu.gni                            |    1
-rw-r--r--  include/gpu/GrContext.h               |    4
-rw-r--r--  include/gpu/vk/GrVkBackendContext.h   |   55
-rw-r--r--  src/gpu/GrDirectContext.cpp           |   38
-rw-r--r--  src/gpu/vk/GrVkBackendContext.cpp     |  338
-rw-r--r--  src/gpu/vk/GrVkGpu.cpp                |   50
-rw-r--r--  src/gpu/vk/GrVkGpu.h                  |   39
-rw-r--r--  tools/gpu/vk/VkTestContext.cpp        |   30
-rw-r--r--  tools/gpu/vk/VkTestContext.h          |   10
-rw-r--r--  tools/gpu/vk/VkTestUtils.cpp          |  320
-rw-r--r--  tools/gpu/vk/VkTestUtils.h            |   12
-rw-r--r--  tools/sk_app/VulkanWindowContext.cpp  |  196
-rw-r--r--  tools/sk_app/VulkanWindowContext.h    |   19
13 files changed, 574 insertions(+), 538 deletions(-)
diff --git a/gn/gpu.gni b/gn/gpu.gni
index aac56400d4..7e5dd625da 100644
--- a/gn/gpu.gni
+++ b/gn/gpu.gni
@@ -543,6 +543,7 @@ skia_vk_sources = [
"$_include/private/GrVkTypesPriv.h",
"$_src/gpu/vk/GrVkAMDMemoryAllocator.cpp",
"$_src/gpu/vk/GrVkAMDMemoryAllocator.h",
+ "$_src/gpu/vk/GrVkBackendContext.cpp",
"$_src/gpu/vk/GrVkBuffer.cpp",
"$_src/gpu/vk/GrVkBuffer.h",
"$_src/gpu/vk/GrVkBufferView.cpp",
diff --git a/include/gpu/GrContext.h b/include/gpu/GrContext.h
index c1804e00ff..565a3d504f 100644
--- a/include/gpu/GrContext.h
+++ b/include/gpu/GrContext.h
@@ -71,6 +71,10 @@ public:
#ifdef SK_VULKAN
static sk_sp<GrContext> MakeVulkan(const GrVkBackendContext&, const GrContextOptions&);
static sk_sp<GrContext> MakeVulkan(const GrVkBackendContext&);
+ // These calls that take an sk_sp GrVkBackendContext are deprecated. Use the previous calls and
+ // set fOwnsInstanceAndDevice to false on the GrVkBackendContext.
+ static sk_sp<GrContext> MakeVulkan(sk_sp<const GrVkBackendContext>, const GrContextOptions&);
+ static sk_sp<GrContext> MakeVulkan(sk_sp<const GrVkBackendContext>);
#endif
#ifdef SK_METAL
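
As the comment above notes, the overloads taking a plain const GrVkBackendContext& are the preferred path; the sk_sp overloads remain only for compatibility. A minimal sketch of the preferred call, assuming the client has already created its own Vulkan objects (the local names instance, physDev, device, queue, graphicsQueueIndex and interface are illustrative); a stack instance is fine here because, as the GrDirectContext.cpp hunk below shows, this overload copies the fields into its own ref-counted copy:

    // Sketch only: every handle below is assumed to have been created by the client.
    GrVkBackendContext backendContext;
    backendContext.fInstance = instance;
    backendContext.fPhysicalDevice = physDev;
    backendContext.fDevice = device;
    backendContext.fQueue = queue;                           // a graphics queue
    backendContext.fGraphicsQueueIndex = graphicsQueueIndex;
    backendContext.fMinAPIVersion = VK_MAKE_VERSION(1, 0, 8);
    backendContext.fExtensions = 0;
    backendContext.fFeatures = 0;
    backendContext.fInterface = interface;                   // sk_sp<const GrVkInterface>
    backendContext.fOwnsInstanceAndDevice = false;           // the client keeps ownership

    sk_sp<GrContext> context = GrContext::MakeVulkan(backendContext, GrContextOptions());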
diff --git a/include/gpu/vk/GrVkBackendContext.h b/include/gpu/vk/GrVkBackendContext.h
index fe018e494f..e68e27988a 100644
--- a/include/gpu/vk/GrVkBackendContext.h
+++ b/include/gpu/vk/GrVkBackendContext.h
@@ -34,11 +34,9 @@ enum GrVkFeatureFlags {
// is that the client will set these up and pass them to the GrVkGpu constructor. The VkDevice
// created must support at least one graphics queue, which is passed in as well.
// The QueueFamilyIndex must match the family of the given queue. It is needed for CommandPool
-// creation, and any GrBackendObjects handed to us (e.g., for wrapped textures) needs to be created
-// in or transitioned to that family. The refs held by members of this struct must be released
-// (either by deleting the struct or manually releasing the refs) before the underlying vulkan
-// device and instance are destroyed.
-struct SK_API GrVkBackendContext {
+// creation, and any GrBackendObjects handed to us (e.g., for wrapped textures) need to be created
+// in or transitioned to that family.
+struct SK_API GrVkBackendContext : public SkRefCnt {
VkInstance fInstance;
VkPhysicalDevice fPhysicalDevice;
VkDevice fDevice;
@@ -50,9 +48,50 @@ struct SK_API GrVkBackendContext {
sk_sp<const GrVkInterface> fInterface;
sk_sp<GrVkMemoryAllocator> fMemoryAllocator;
- // This is deprecated and should be set to false. The client is responsible for managing the
- // lifetime of the VkInstance and VkDevice objects.
- bool fOwnsInstanceAndDevice = false;
+ /**
+ * Controls whether this object destroys the instance and device upon destruction. The default
+ * is temporarily 'true' to avoid breaking existing clients but will be changed to 'false'.
+ */
+ bool fOwnsInstanceAndDevice = true;
+
+#if GR_TEST_UTILS || defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
+ using CanPresentFn = std::function<bool(VkInstance, VkPhysicalDevice,
+ uint32_t queueFamilyIndex)>;
+
+ /**
+ * Helper function to create the Vulkan objects needed for a Vulkan-backed GrContext.
+ * Note that the version that uses the unified "GetProc" instead of separate "GetInstanceProc"
+ * and "GetDeviceProc" functions will be removed.
+ *
+ * If presentQueueIndex is non-NULL, will try to set up presentQueue as part of device
+ * creation using the platform-specific canPresent() function.
+ *
+ * This will set fOwnsInstanceAndDevice to 'true'. If it is subsequently set to 'false' then
+ * the client owns the lifetime of the created VkDevice and VkInstance.
+ */
+ static const GrVkBackendContext* Create(uint32_t* presentQueueIndex = nullptr,
+ CanPresentFn = CanPresentFn(),
+ GrVkInterface::GetProc getProc = nullptr);
+
+ static const GrVkBackendContext* Create(const GrVkInterface::GetInstanceProc& getInstanceProc,
+ const GrVkInterface::GetDeviceProc& getDeviceProc,
+ uint32_t* presentQueueIndex = nullptr,
+ CanPresentFn canPresent = CanPresentFn()) {
+ if (!getInstanceProc || !getDeviceProc) {
+ return nullptr;
+ }
+ auto getProc = [&getInstanceProc, &getDeviceProc](const char* proc_name,
+ VkInstance instance, VkDevice device) {
+ if (device != VK_NULL_HANDLE) {
+ return getDeviceProc(device, proc_name);
+ }
+ return getInstanceProc(instance, proc_name);
+ };
+ return Create(presentQueueIndex, canPresent, getProc);
+ }
+#endif // GR_TEST_UTILS || defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
+
+ ~GrVkBackendContext() override;
};
#endif
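
For test code and the Android framework, the Create() helpers re-added above build the instance, device, queue and interface themselves and return a context whose fOwnsInstanceAndDevice is true, so the Vulkan objects are torn down when the last reference goes away. A rough usage sketch under those build flags; it assumes the proc loaders come from sk_gpu_test::LoadVkLibraryAndGetProcAddrFuncs (kept in VkTestUtils by this revert), and the wrapper function name is illustrative:

    // Sketch only; requires GR_TEST_UTILS or SK_BUILD_FOR_ANDROID_FRAMEWORK.
    sk_sp<GrContext> MakeVulkanContextForTesting() {
        PFN_vkGetInstanceProcAddr instProc;
        PFN_vkGetDeviceProcAddr devProc;
        if (!sk_gpu_test::LoadVkLibraryAndGetProcAddrFuncs(&instProc, &devProc)) {
            return nullptr;  // no Vulkan loader available
        }
        // No present queue requested here; pass a presentQueueIndex pointer and a
        // CanPresentFn when presentation support is needed (as VulkanWindowContext does).
        sk_sp<const GrVkBackendContext> backendContext(
                GrVkBackendContext::Create(instProc, devProc));
        if (!backendContext) {
            return nullptr;  // instance or device creation failed
        }
        return GrContext::MakeVulkan(backendContext, GrContextOptions());
    }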
diff --git a/src/gpu/GrDirectContext.cpp b/src/gpu/GrDirectContext.cpp
index 7a054e72a4..d0406dd6f6 100644
--- a/src/gpu/GrDirectContext.cpp
+++ b/src/gpu/GrDirectContext.cpp
@@ -156,7 +156,43 @@ sk_sp<GrContext> GrContext::MakeVulkan(const GrVkBackendContext& backendContext,
const GrContextOptions& options) {
sk_sp<GrContext> context(new GrDirectContext(kVulkan_GrBackend));
- context->fGpu = GrVkGpu::Make(backendContext, options, context.get());
+ sk_sp<GrVkBackendContext> backendContextRef(new GrVkBackendContext());
+ backendContextRef->fInstance = backendContext.fInstance;
+ backendContextRef->fPhysicalDevice = backendContext.fPhysicalDevice;
+ backendContextRef->fDevice = backendContext.fDevice;
+ backendContextRef->fQueue = backendContext.fQueue;
+ backendContextRef->fGraphicsQueueIndex = backendContext.fGraphicsQueueIndex;
+ backendContextRef->fMinAPIVersion = backendContext.fMinAPIVersion;
+ backendContextRef->fExtensions = backendContext.fExtensions;
+ backendContextRef->fFeatures = backendContext.fFeatures;
+ backendContextRef->fInterface = backendContext.fInterface;
+ backendContextRef->fMemoryAllocator = backendContext.fMemoryAllocator;
+
+ SkASSERT(!backendContext.fOwnsInstanceAndDevice);
+ backendContextRef->fOwnsInstanceAndDevice = false;
+
+ context->fGpu = GrVkGpu::Make(std::move(backendContextRef), options, context.get());
+ if (!context->fGpu) {
+ return nullptr;
+ }
+
+ context->fCaps = context->fGpu->refCaps();
+ if (!context->init(options)) {
+ return nullptr;
+ }
+ return context;
+}
+
+sk_sp<GrContext> GrContext::MakeVulkan(sk_sp<const GrVkBackendContext> backendContext) {
+ GrContextOptions defaultOptions;
+ return MakeVulkan(std::move(backendContext), defaultOptions);
+}
+
+sk_sp<GrContext> GrContext::MakeVulkan(sk_sp<const GrVkBackendContext> backendContext,
+ const GrContextOptions& options) {
+ sk_sp<GrContext> context(new GrDirectContext(kVulkan_GrBackend));
+
+ context->fGpu = GrVkGpu::Make(std::move(backendContext), options, context.get());
if (!context->fGpu) {
return nullptr;
}
diff --git a/src/gpu/vk/GrVkBackendContext.cpp b/src/gpu/vk/GrVkBackendContext.cpp
new file mode 100644
index 0000000000..3c4e8d7a60
--- /dev/null
+++ b/src/gpu/vk/GrVkBackendContext.cpp
@@ -0,0 +1,338 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkAutoMalloc.h"
+#include "vk/GrVkBackendContext.h"
+#include "vk/GrVkExtensions.h"
+#include "vk/GrVkUtil.h"
+
+#if GR_TEST_UTILS || defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
+
+////////////////////////////////////////////////////////////////////////////////
+// Helper code to set up Vulkan context objects
+
+#ifdef SK_ENABLE_VK_LAYERS
+const char* kDebugLayerNames[] = {
+ // elements of VK_LAYER_LUNARG_standard_validation
+ "VK_LAYER_GOOGLE_threading",
+ "VK_LAYER_LUNARG_parameter_validation",
+ "VK_LAYER_LUNARG_object_tracker",
+ "VK_LAYER_LUNARG_image",
+ "VK_LAYER_LUNARG_core_validation",
+ "VK_LAYER_LUNARG_swapchain",
+ "VK_LAYER_GOOGLE_unique_objects",
+ // not included in standard_validation
+ //"VK_LAYER_LUNARG_api_dump",
+ //"VK_LAYER_LUNARG_vktrace",
+ //"VK_LAYER_LUNARG_screenshot",
+};
+#endif
+
+// the minimum version of Vulkan supported
+#ifdef SK_BUILD_FOR_ANDROID
+const uint32_t kGrVkMinimumVersion = VK_MAKE_VERSION(1, 0, 3);
+#else
+const uint32_t kGrVkMinimumVersion = VK_MAKE_VERSION(1, 0, 8);
+#endif
+
+#define ACQUIRE_VK_PROC(name, instance, device) \
+ PFN_vk##name grVk##name = \
+ reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
+ if (grVk##name == nullptr) { \
+ SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
+ return nullptr; \
+ }
+
+// Create the base Vulkan objects needed by the GrVkGpu object
+const GrVkBackendContext* GrVkBackendContext::Create(uint32_t* presentQueueIndexPtr,
+ CanPresentFn canPresent,
+ GrVkInterface::GetProc getProc) {
+ if (!getProc) {
+ return nullptr;
+ }
+ SkASSERT(getProc);
+
+ VkPhysicalDevice physDev;
+ VkDevice device;
+ VkInstance inst;
+ VkResult err;
+
+ const VkApplicationInfo app_info = {
+ VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
+ nullptr, // pNext
+ "vktest", // pApplicationName
+ 0, // applicationVersion
+ "vktest", // pEngineName
+ 0, // engineVerison
+ kGrVkMinimumVersion, // apiVersion
+ };
+
+ GrVkExtensions extensions(getProc);
+ extensions.initInstance(kGrVkMinimumVersion);
+
+ SkTArray<const char*> instanceLayerNames;
+ SkTArray<const char*> instanceExtensionNames;
+ uint32_t extensionFlags = 0;
+#ifdef SK_ENABLE_VK_LAYERS
+ for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
+ if (extensions.hasInstanceLayer(kDebugLayerNames[i])) {
+ instanceLayerNames.push_back(kDebugLayerNames[i]);
+ }
+ }
+ if (extensions.hasInstanceExtension(VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
+ instanceExtensionNames.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME);
+ extensionFlags |= kEXT_debug_report_GrVkExtensionFlag;
+ }
+#endif
+
+ if (extensions.hasInstanceExtension(VK_KHR_SURFACE_EXTENSION_NAME)) {
+ instanceExtensionNames.push_back(VK_KHR_SURFACE_EXTENSION_NAME);
+ extensionFlags |= kKHR_surface_GrVkExtensionFlag;
+ }
+ if (extensions.hasInstanceExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
+ instanceExtensionNames.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
+ extensionFlags |= kKHR_swapchain_GrVkExtensionFlag;
+ }
+#ifdef SK_BUILD_FOR_WIN
+ if (extensions.hasInstanceExtension(VK_KHR_WIN32_SURFACE_EXTENSION_NAME)) {
+ instanceExtensionNames.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
+ extensionFlags |= kKHR_win32_surface_GrVkExtensionFlag;
+ }
+#elif defined(SK_BUILD_FOR_ANDROID)
+ if (extensions.hasInstanceExtension(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
+ instanceExtensionNames.push_back(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME);
+ extensionFlags |= kKHR_android_surface_GrVkExtensionFlag;
+ }
+#elif defined(SK_BUILD_FOR_UNIX) && !defined(__Fuchsia__)
+ if (extensions.hasInstanceExtension(VK_KHR_XCB_SURFACE_EXTENSION_NAME)) {
+ instanceExtensionNames.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME);
+ extensionFlags |= kKHR_xcb_surface_GrVkExtensionFlag;
+ }
+#endif
+
+ const VkInstanceCreateInfo instance_create = {
+ VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // flags
+ &app_info, // pApplicationInfo
+ (uint32_t) instanceLayerNames.count(), // enabledLayerNameCount
+ instanceLayerNames.begin(), // ppEnabledLayerNames
+ (uint32_t) instanceExtensionNames.count(), // enabledExtensionNameCount
+ instanceExtensionNames.begin(), // ppEnabledExtensionNames
+ };
+
+ ACQUIRE_VK_PROC(CreateInstance, VK_NULL_HANDLE, VK_NULL_HANDLE);
+ err = grVkCreateInstance(&instance_create, nullptr, &inst);
+ if (err < 0) {
+ SkDebugf("vkCreateInstance failed: %d\n", err);
+ return nullptr;
+ }
+
+ ACQUIRE_VK_PROC(DestroyInstance, inst, VK_NULL_HANDLE);
+ ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
+ ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
+ ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
+ ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
+ ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
+ ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
+ ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);
+
+ uint32_t gpuCount;
+ err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
+ if (err) {
+ SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
+ grVkDestroyInstance(inst, nullptr);
+ return nullptr;
+ }
+ if (!gpuCount) {
+ SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
+ grVkDestroyInstance(inst, nullptr);
+ return nullptr;
+ }
+ // Just returning the first physical device instead of getting the whole array.
+ // TODO: find best match for our needs
+ gpuCount = 1;
+ err = grVkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
+ // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
+ if (err && VK_INCOMPLETE != err) {
+ SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
+ grVkDestroyInstance(inst, nullptr);
+ return nullptr;
+ }
+
+ // query to get the initial queue props size
+ uint32_t queueCount;
+ grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
+ if (!queueCount) {
+ SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
+ grVkDestroyInstance(inst, nullptr);
+ return nullptr;
+ }
+
+ SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
+ // now get the actual queue props
+ VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();
+
+ grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);
+
+ // iterate to find the graphics queue
+ uint32_t graphicsQueueIndex = queueCount;
+ for (uint32_t i = 0; i < queueCount; i++) {
+ if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
+ graphicsQueueIndex = i;
+ break;
+ }
+ }
+ if (graphicsQueueIndex == queueCount) {
+ SkDebugf("Could not find any supported graphics queues.\n");
+ grVkDestroyInstance(inst, nullptr);
+ return nullptr;
+ }
+
+ // iterate to find the present queue, if needed
+ uint32_t presentQueueIndex = queueCount;
+ if (presentQueueIndexPtr && canPresent) {
+ for (uint32_t i = 0; i < queueCount; i++) {
+ if (canPresent(inst, physDev, i)) {
+ presentQueueIndex = i;
+ break;
+ }
+ }
+ if (presentQueueIndex == queueCount) {
+ SkDebugf("Could not find any supported present queues.\n");
+ grVkDestroyInstance(inst, nullptr);
+ return nullptr;
+ }
+ *presentQueueIndexPtr = presentQueueIndex;
+ } else {
+ // Just setting this so we end up make a single queue for graphics since there was no
+ // request for a present queue.
+ presentQueueIndex = graphicsQueueIndex;
+ }
+
+ extensions.initDevice(kGrVkMinimumVersion, inst, physDev);
+
+ SkTArray<const char*> deviceLayerNames;
+ SkTArray<const char*> deviceExtensionNames;
+#ifdef SK_ENABLE_VK_LAYERS
+ for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
+ if (extensions.hasDeviceLayer(kDebugLayerNames[i])) {
+ deviceLayerNames.push_back(kDebugLayerNames[i]);
+ }
+ }
+#endif
+ if (extensions.hasDeviceExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
+ deviceExtensionNames.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
+ extensionFlags |= kKHR_swapchain_GrVkExtensionFlag;
+ }
+ if (extensions.hasDeviceExtension("VK_NV_glsl_shader")) {
+ deviceExtensionNames.push_back("VK_NV_glsl_shader");
+ extensionFlags |= kNV_glsl_shader_GrVkExtensionFlag;
+ }
+
+ // query to get the physical device properties
+ VkPhysicalDeviceFeatures deviceFeatures;
+ grVkGetPhysicalDeviceFeatures(physDev, &deviceFeatures);
+ // this looks like it would slow things down,
+ // and we can't depend on it on all platforms
+ deviceFeatures.robustBufferAccess = VK_FALSE;
+
+ uint32_t featureFlags = 0;
+ if (deviceFeatures.geometryShader) {
+ featureFlags |= kGeometryShader_GrVkFeatureFlag;
+ }
+ if (deviceFeatures.dualSrcBlend) {
+ featureFlags |= kDualSrcBlend_GrVkFeatureFlag;
+ }
+ if (deviceFeatures.sampleRateShading) {
+ featureFlags |= kSampleRateShading_GrVkFeatureFlag;
+ }
+
+ float queuePriorities[1] = { 0.0 };
+ // Here we assume no need for swapchain queue
+ // If one is needed, the client will need its own setup code
+ const VkDeviceQueueCreateInfo queueInfo[2] = {
+ {
+ VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // VkDeviceQueueCreateFlags
+ graphicsQueueIndex, // queueFamilyIndex
+ 1, // queueCount
+ queuePriorities, // pQueuePriorities
+ },
+ {
+ VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // VkDeviceQueueCreateFlags
+ presentQueueIndex, // queueFamilyIndex
+ 1, // queueCount
+ queuePriorities, // pQueuePriorities
+ }
+ };
+ uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;
+
+ const VkDeviceCreateInfo deviceInfo = {
+ VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // VkDeviceCreateFlags
+ queueInfoCount, // queueCreateInfoCount
+ queueInfo, // pQueueCreateInfos
+ (uint32_t) deviceLayerNames.count(), // layerCount
+ deviceLayerNames.begin(), // ppEnabledLayerNames
+ (uint32_t) deviceExtensionNames.count(), // extensionCount
+ deviceExtensionNames.begin(), // ppEnabledExtensionNames
+ &deviceFeatures // ppEnabledFeatures
+ };
+
+ err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
+ if (err) {
+ SkDebugf("CreateDevice failed: %d\n", err);
+ grVkDestroyInstance(inst, nullptr);
+ return nullptr;
+ }
+
+ auto interface =
+ sk_make_sp<GrVkInterface>(getProc, inst, device, extensionFlags);
+ if (!interface->validate(extensionFlags)) {
+ SkDebugf("Vulkan interface validation failed\n");
+ grVkDeviceWaitIdle(device);
+ grVkDestroyDevice(device, nullptr);
+ grVkDestroyInstance(inst, nullptr);
+ return nullptr;
+ }
+
+ VkQueue queue;
+ grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
+
+ GrVkBackendContext* ctx = new GrVkBackendContext();
+ ctx->fInstance = inst;
+ ctx->fPhysicalDevice = physDev;
+ ctx->fDevice = device;
+ ctx->fQueue = queue;
+ ctx->fGraphicsQueueIndex = graphicsQueueIndex;
+ ctx->fMinAPIVersion = kGrVkMinimumVersion;
+ ctx->fExtensions = extensionFlags;
+ ctx->fFeatures = featureFlags;
+ ctx->fInterface.reset(interface.release());
+ ctx->fOwnsInstanceAndDevice = true;
+
+ return ctx;
+}
+#endif // GR_TEST_UTILS || defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
+
+GrVkBackendContext::~GrVkBackendContext() {
+ fMemoryAllocator.reset();
+ if (fInterface == nullptr || !fOwnsInstanceAndDevice) {
+ return;
+ }
+
+ fInterface->fFunctions.fDeviceWaitIdle(fDevice);
+ fInterface->fFunctions.fDestroyDevice(fDevice, nullptr);
+ fDevice = VK_NULL_HANDLE;
+ fInterface->fFunctions.fDestroyInstance(fInstance, nullptr);
+ fInstance = VK_NULL_HANDLE;
+}
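
The destructor at the end of this new file is what makes the Create() path self-contained: when fOwnsInstanceAndDevice is true it waits for the device to idle and then destroys the VkDevice and VkInstance; when it is false (or no interface was ever set) it only drops the memory allocator. A small ordering sketch, assuming backendContext came from Create() as in the sketch above:

    // Sketch: backendContext owns its Vulkan objects (fOwnsInstanceAndDevice == true).
    {
        sk_sp<GrContext> context = GrContext::MakeVulkan(backendContext, GrContextOptions());
        // ... record and submit GPU work ...
        context->flush();
    }   // the GrContext, and its reference on backendContext, goes away first
    backendContext.reset();  // last ref: DeviceWaitIdle, DestroyDevice, DestroyInstance run here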
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index 0b160a6ade..ba1f7caf2d 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -73,38 +73,33 @@ VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
}
#endif
-sk_sp<GrGpu> GrVkGpu::Make(const GrVkBackendContext& backendContext,
+sk_sp<GrGpu> GrVkGpu::Make(sk_sp<const GrVkBackendContext> backendContext,
const GrContextOptions& options, GrContext* context) {
- if (backendContext.fInstance == VK_NULL_HANDLE ||
- backendContext.fPhysicalDevice == VK_NULL_HANDLE ||
- backendContext.fDevice == VK_NULL_HANDLE ||
- backendContext.fQueue == VK_NULL_HANDLE) {
+ if (!backendContext) {
return nullptr;
}
- if (!backendContext.fInterface ||
- !backendContext.fInterface->validate(backendContext.fExtensions)) {
+
+ if (!backendContext->fInterface->validate(backendContext->fExtensions)) {
return nullptr;
}
- return sk_sp<GrGpu>(new GrVkGpu(context, options, backendContext));
+ return sk_sp<GrGpu>(new GrVkGpu(context, options, std::move(backendContext)));
}
////////////////////////////////////////////////////////////////////////////////
GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
- const GrVkBackendContext& backendContext)
+ sk_sp<const GrVkBackendContext> backendCtx)
: INHERITED(context)
- , fInterface(std::move(backendContext.fInterface))
- , fMemoryAllocator(backendContext.fMemoryAllocator)
- , fInstance(backendContext.fInstance)
- , fDevice(backendContext.fDevice)
- , fQueue(backendContext.fQueue)
+ , fBackendContext(std::move(backendCtx))
+ , fMemoryAllocator(fBackendContext->fMemoryAllocator)
+ , fDevice(fBackendContext->fDevice)
+ , fQueue(fBackendContext->fQueue)
, fResourceProvider(this)
, fDisconnected(false) {
- SkASSERT(!backendContext.fOwnsInstanceAndDevice);
#ifdef SK_ENABLE_VK_LAYERS
fCallback = VK_NULL_HANDLE;
- if (backendContext.fExtensions & kEXT_debug_report_GrVkExtensionFlag) {
+ if (fBackendContext->fExtensions & kEXT_debug_report_GrVkExtensionFlag) {
// Setup callback creation information
VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
@@ -119,32 +114,32 @@ GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
// Register the callback
GR_VK_CALL_ERRCHECK(this->vkInterface(),
- CreateDebugReportCallbackEXT(backendContext.fInstance,
+ CreateDebugReportCallbackEXT(fBackendContext->fInstance,
&callbackCreateInfo, nullptr, &fCallback));
}
#endif
if (!fMemoryAllocator) {
// We were not given a memory allocator at creation
- fMemoryAllocator.reset(new GrVkAMDMemoryAllocator(backendContext.fPhysicalDevice,
- fDevice, backendContext.fInterface));
+ fMemoryAllocator.reset(new GrVkAMDMemoryAllocator(fBackendContext->fPhysicalDevice,
+ fDevice, fBackendContext->fInterface));
}
fCompiler = new SkSL::Compiler();
- fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendContext.fPhysicalDevice,
- backendContext.fFeatures, backendContext.fExtensions));
+ fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), fBackendContext->fPhysicalDevice,
+ fBackendContext->fFeatures, fBackendContext->fExtensions));
fCaps.reset(SkRef(fVkCaps.get()));
- VK_CALL(GetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &fPhysDevProps));
- VK_CALL(GetPhysicalDeviceMemoryProperties(backendContext.fPhysicalDevice, &fPhysDevMemProps));
+ VK_CALL(GetPhysicalDeviceProperties(fBackendContext->fPhysicalDevice, &fPhysDevProps));
+ VK_CALL(GetPhysicalDeviceMemoryProperties(fBackendContext->fPhysicalDevice, &fPhysDevMemProps));
const VkCommandPoolCreateInfo cmdPoolInfo = {
VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
nullptr, // pNext
VK_COMMAND_POOL_CREATE_TRANSIENT_BIT |
VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, // CmdPoolCreateFlags
- backendContext.fGraphicsQueueIndex, // queueFamilyIndex
+ fBackendContext->fGraphicsQueueIndex, // queueFamilyIndex
};
GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateCommandPool(fDevice, &cmdPoolInfo, nullptr,
&fCmdPool));
@@ -206,15 +201,10 @@ void GrVkGpu::destroyResources() {
#ifdef SK_ENABLE_VK_LAYERS
if (fCallback) {
- VK_CALL(DestroyDebugReportCallbackEXT(fInstance, fCallback, nullptr));
+ VK_CALL(DestroyDebugReportCallbackEXT(fBackendContext->fInstance, fCallback, nullptr));
}
#endif
- fMemoryAllocator.reset();
-
- fQueue = VK_NULL_HANDLE;
- fDevice = VK_NULL_HANDLE;
- fInstance = VK_NULL_HANDLE;
}
GrVkGpu::~GrVkGpu() {
diff --git a/src/gpu/vk/GrVkGpu.h b/src/gpu/vk/GrVkGpu.h
index f506d28b0e..052476098d 100644
--- a/src/gpu/vk/GrVkGpu.h
+++ b/src/gpu/vk/GrVkGpu.h
@@ -38,13 +38,13 @@ namespace SkSL {
class GrVkGpu : public GrGpu {
public:
- static sk_sp<GrGpu> Make(const GrVkBackendContext&, const GrContextOptions&, GrContext*);
+ static sk_sp<GrGpu> Make(sk_sp<const GrVkBackendContext>, const GrContextOptions&, GrContext*);
~GrVkGpu() override;
void disconnect(DisconnectType) override;
- const GrVkInterface* vkInterface() const { return fInterface.get(); }
+ const GrVkInterface* vkInterface() const { return fBackendContext->fInterface.get(); }
const GrVkCaps& vkCaps() const { return *fVkCaps; }
GrVkMemoryAllocator* memoryAllocator() const { return fMemoryAllocator.get(); }
@@ -144,7 +144,7 @@ public:
bool updateBuffer(GrVkBuffer* buffer, const void* src, VkDeviceSize offset, VkDeviceSize size);
private:
- GrVkGpu(GrContext*, const GrContextOptions&, const GrVkBackendContext& backendContext);
+ GrVkGpu(GrContext*, const GrContextOptions&, sk_sp<const GrVkBackendContext> backendContext);
void onResetContext(uint32_t resetBits) override {}
@@ -222,27 +222,28 @@ private:
GrVkImageInfo* info);
#endif
- sk_sp<const GrVkInterface> fInterface;
- sk_sp<GrVkMemoryAllocator> fMemoryAllocator;
- sk_sp<GrVkCaps> fVkCaps;
+ sk_sp<const GrVkBackendContext> fBackendContext;
+ sk_sp<GrVkMemoryAllocator> fMemoryAllocator;
+ sk_sp<GrVkCaps> fVkCaps;
- VkInstance fInstance;
- VkDevice fDevice;
- VkQueue fQueue; // Must be Graphics queue
+ // These Vulkan objects are provided by the client, and also stored in fBackendContext.
+ // They're copied here for convenient access.
+ VkDevice fDevice;
+ VkQueue fQueue; // Must be Graphics queue
// Created by GrVkGpu
- GrVkResourceProvider fResourceProvider;
- VkCommandPool fCmdPool;
+ GrVkResourceProvider fResourceProvider;
+ VkCommandPool fCmdPool;
- GrVkPrimaryCommandBuffer* fCurrentCmdBuffer;
+ GrVkPrimaryCommandBuffer* fCurrentCmdBuffer;
- SkSTArray<1, GrVkSemaphore::Resource*> fSemaphoresToWaitOn;
- SkSTArray<1, GrVkSemaphore::Resource*> fSemaphoresToSignal;
+ SkSTArray<1, GrVkSemaphore::Resource*> fSemaphoresToWaitOn;
+ SkSTArray<1, GrVkSemaphore::Resource*> fSemaphoresToSignal;
- VkPhysicalDeviceProperties fPhysDevProps;
- VkPhysicalDeviceMemoryProperties fPhysDevMemProps;
+ VkPhysicalDeviceProperties fPhysDevProps;
+ VkPhysicalDeviceMemoryProperties fPhysDevMemProps;
- GrVkCopyManager fCopyManager;
+ GrVkCopyManager fCopyManager;
#ifdef SK_ENABLE_VK_LAYERS
// For reporting validation layer errors
@@ -251,11 +252,11 @@ private:
// compiler used for compiling sksl into spirv. We only want to create the compiler once since
// there is significant overhead to the first compile of any compiler.
- SkSL::Compiler* fCompiler;
+ SkSL::Compiler* fCompiler;
// We need a bool to track whether or not we've already disconnected all the gpu resources from
// vulkan context.
- bool fDisconnected;
+ bool fDisconnected;
typedef GrGpu INHERITED;
};
diff --git a/tools/gpu/vk/VkTestContext.cpp b/tools/gpu/vk/VkTestContext.cpp
index 592fb0f7b0..25069fe521 100644
--- a/tools/gpu/vk/VkTestContext.cpp
+++ b/tools/gpu/vk/VkTestContext.cpp
@@ -110,24 +110,21 @@ GR_STATIC_ASSERT(sizeof(VkFence) <= sizeof(sk_gpu_test::PlatformFence));
class VkTestContextImpl : public sk_gpu_test::VkTestContext {
public:
static VkTestContext* Create(VkTestContext* sharedContext) {
- GrVkBackendContext backendContext;
- bool ownsContext = true;
+ sk_sp<const GrVkBackendContext> backendContext;
if (sharedContext) {
backendContext = sharedContext->getVkBackendContext();
- // We always delete the parent context last so make sure the child does not think they
- // own the vulkan context.
- ownsContext = false;
} else {
PFN_vkGetInstanceProcAddr instProc;
PFN_vkGetDeviceProcAddr devProc;
if (!sk_gpu_test::LoadVkLibraryAndGetProcAddrFuncs(&instProc, &devProc)) {
return nullptr;
}
- if (!sk_gpu_test::CreateVkBackendContext(instProc, devProc, &backendContext)) {
- return nullptr;
- }
+ backendContext.reset(GrVkBackendContext::Create(instProc, devProc));
}
- return new VkTestContextImpl(backendContext, ownsContext);
+ if (!backendContext) {
+ return nullptr;
+ }
+ return new VkTestContextImpl(std::move(backendContext));
}
~VkTestContextImpl() override { this->teardown(); }
@@ -146,19 +143,14 @@ public:
protected:
void teardown() override {
INHERITED::teardown();
- fVk.fMemoryAllocator.reset();
- if (fOwnsContext) {
- GR_VK_CALL(this->vk(), DeviceWaitIdle(fVk.fDevice));
- GR_VK_CALL(this->vk(), DestroyDevice(fVk.fDevice, nullptr));
- GR_VK_CALL(this->vk(), DestroyInstance(fVk.fInstance, nullptr));
- }
+ fVk.reset(nullptr);
}
private:
- VkTestContextImpl(const GrVkBackendContext& backendContext, bool ownsContext)
- : VkTestContext(backendContext, ownsContext) {
- fFenceSync.reset(new VkFenceSync(fVk.fInterface, fVk.fDevice, fVk.fQueue,
- fVk.fGraphicsQueueIndex));
+ VkTestContextImpl(sk_sp<const GrVkBackendContext> backendContext)
+ : VkTestContext(std::move(backendContext)) {
+ fFenceSync.reset(new VkFenceSync(fVk->fInterface, fVk->fDevice, fVk->fQueue,
+ fVk->fGraphicsQueueIndex));
}
void onPlatformMakeCurrent() const override {}
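
Because the backend context is now shared by reference count, the fOwnsContext flag and the manual DestroyDevice/DestroyInstance calls in teardown() are gone, and destruction order between a parent and a child test context no longer matters. A rough sketch of that sharing, using the file-local factory shown above (illustrative only):

    // Sketch: both test contexts hold sk_sp refs on the same GrVkBackendContext.
    void ShareVkTestContexts() {
        sk_gpu_test::VkTestContext* parent = VkTestContextImpl::Create(nullptr);
        if (!parent) {
            return;  // Vulkan unavailable
        }
        sk_gpu_test::VkTestContext* child = VkTestContextImpl::Create(parent);
        delete child;   // either order works now...
        delete parent;  // ...the VkDevice/VkInstance go away with the last backend-context ref
    }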
diff --git a/tools/gpu/vk/VkTestContext.h b/tools/gpu/vk/VkTestContext.h
index 0e62cc0b8c..9fd4170d9d 100644
--- a/tools/gpu/vk/VkTestContext.h
+++ b/tools/gpu/vk/VkTestContext.h
@@ -19,18 +19,16 @@ class VkTestContext : public TestContext {
public:
virtual GrBackend backend() override { return kVulkan_GrBackend; }
- const GrVkBackendContext& getVkBackendContext() {
+ sk_sp<const GrVkBackendContext> getVkBackendContext() {
return fVk;
}
- const GrVkInterface* vk() const { return fVk.fInterface.get(); }
+ const GrVkInterface* vk() const { return fVk->fInterface.get(); }
protected:
- VkTestContext(const GrVkBackendContext& vk, bool ownsContext)
- : fVk(vk), fOwnsContext(ownsContext) {}
+ VkTestContext(sk_sp<const GrVkBackendContext> vk) : fVk(std::move(vk)) {}
- GrVkBackendContext fVk;
- bool fOwnsContext;
+ sk_sp<const GrVkBackendContext> fVk;
private:
typedef TestContext INHERITED;
diff --git a/tools/gpu/vk/VkTestUtils.cpp b/tools/gpu/vk/VkTestUtils.cpp
index 927f4fbe82..c28a42b2c9 100644
--- a/tools/gpu/vk/VkTestUtils.cpp
+++ b/tools/gpu/vk/VkTestUtils.cpp
@@ -9,9 +9,6 @@
#ifdef SK_VULKAN
-#include "SkAutoMalloc.h"
-#include "vk/GrVkBackendContext.h"
-#include "vk/GrVkExtensions.h"
#include "../ports/SkOSLibrary.h"
namespace sk_gpu_test {
@@ -49,323 +46,6 @@ bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr* instProc,
return true;
#endif
}
-
-////////////////////////////////////////////////////////////////////////////////
-// Helper code to set up Vulkan context objects
-
-#ifdef SK_ENABLE_VK_LAYERS
-const char* kDebugLayerNames[] = {
- // elements of VK_LAYER_LUNARG_standard_validation
- "VK_LAYER_GOOGLE_threading",
- "VK_LAYER_LUNARG_parameter_validation",
- "VK_LAYER_LUNARG_object_tracker",
- "VK_LAYER_LUNARG_image",
- "VK_LAYER_LUNARG_core_validation",
- "VK_LAYER_LUNARG_swapchain",
- "VK_LAYER_GOOGLE_unique_objects",
- // not included in standard_validation
- //"VK_LAYER_LUNARG_api_dump",
- //"VK_LAYER_LUNARG_vktrace",
- //"VK_LAYER_LUNARG_screenshot",
-};
-#endif
-
-// the minimum version of Vulkan supported
-#ifdef SK_BUILD_FOR_ANDROID
-const uint32_t kGrVkMinimumVersion = VK_MAKE_VERSION(1, 0, 3);
-#else
-const uint32_t kGrVkMinimumVersion = VK_MAKE_VERSION(1, 0, 8);
-#endif
-
-#define ACQUIRE_VK_PROC(name, instance, device) \
- PFN_vk##name grVk##name = \
- reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
- if (grVk##name == nullptr) { \
- SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
- return false; \
- }
-
-bool CreateVkBackendContext(const GrVkInterface::GetInstanceProc& getInstanceProc,
- const GrVkInterface::GetDeviceProc& getDeviceProc,
- GrVkBackendContext* ctx,
- uint32_t* presentQueueIndexPtr,
- CanPresentFn canPresent) {
- auto getProc = [&getInstanceProc, &getDeviceProc](const char* proc_name,
- VkInstance instance, VkDevice device) {
- if (device != VK_NULL_HANDLE) {
- return getDeviceProc(device, proc_name);
- }
- return getInstanceProc(instance, proc_name);
- };
-
- VkPhysicalDevice physDev;
- VkDevice device;
- VkInstance inst;
- VkResult err;
-
- const VkApplicationInfo app_info = {
- VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
- nullptr, // pNext
- "vktest", // pApplicationName
- 0, // applicationVersion
- "vktest", // pEngineName
- 0, // engineVerison
- kGrVkMinimumVersion, // apiVersion
- };
-
- GrVkExtensions extensions(getProc);
- extensions.initInstance(kGrVkMinimumVersion);
-
- SkTArray<const char*> instanceLayerNames;
- SkTArray<const char*> instanceExtensionNames;
- uint32_t extensionFlags = 0;
-#ifdef SK_ENABLE_VK_LAYERS
- for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
- if (extensions.hasInstanceLayer(kDebugLayerNames[i])) {
- instanceLayerNames.push_back(kDebugLayerNames[i]);
- }
- }
- if (extensions.hasInstanceExtension(VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
- instanceExtensionNames.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME);
- extensionFlags |= kEXT_debug_report_GrVkExtensionFlag;
- }
-#endif
-
- if (extensions.hasInstanceExtension(VK_KHR_SURFACE_EXTENSION_NAME)) {
- instanceExtensionNames.push_back(VK_KHR_SURFACE_EXTENSION_NAME);
- extensionFlags |= kKHR_surface_GrVkExtensionFlag;
- }
- if (extensions.hasInstanceExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
- instanceExtensionNames.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
- extensionFlags |= kKHR_swapchain_GrVkExtensionFlag;
- }
-#ifdef SK_BUILD_FOR_WIN
- if (extensions.hasInstanceExtension(VK_KHR_WIN32_SURFACE_EXTENSION_NAME)) {
- instanceExtensionNames.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
- extensionFlags |= kKHR_win32_surface_GrVkExtensionFlag;
- }
-#elif defined(SK_BUILD_FOR_ANDROID)
- if (extensions.hasInstanceExtension(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
- instanceExtensionNames.push_back(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME);
- extensionFlags |= kKHR_android_surface_GrVkExtensionFlag;
- }
-#elif defined(SK_BUILD_FOR_UNIX) && !defined(__Fuchsia__)
- if (extensions.hasInstanceExtension(VK_KHR_XCB_SURFACE_EXTENSION_NAME)) {
- instanceExtensionNames.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME);
- extensionFlags |= kKHR_xcb_surface_GrVkExtensionFlag;
- }
-#endif
-
- const VkInstanceCreateInfo instance_create = {
- VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
- nullptr, // pNext
- 0, // flags
- &app_info, // pApplicationInfo
- (uint32_t) instanceLayerNames.count(), // enabledLayerNameCount
- instanceLayerNames.begin(), // ppEnabledLayerNames
- (uint32_t) instanceExtensionNames.count(), // enabledExtensionNameCount
- instanceExtensionNames.begin(), // ppEnabledExtensionNames
- };
-
- ACQUIRE_VK_PROC(CreateInstance, VK_NULL_HANDLE, VK_NULL_HANDLE);
- err = grVkCreateInstance(&instance_create, nullptr, &inst);
- if (err < 0) {
- SkDebugf("vkCreateInstance failed: %d\n", err);
- return false;
- }
-
- ACQUIRE_VK_PROC(DestroyInstance, inst, VK_NULL_HANDLE);
- ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
- ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
- ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
- ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
- ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
- ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
- ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);
-
- uint32_t gpuCount;
- err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
- if (err) {
- SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
- grVkDestroyInstance(inst, nullptr);
- return false;
- }
- if (!gpuCount) {
- SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
- grVkDestroyInstance(inst, nullptr);
- return false;
- }
- // Just returning the first physical device instead of getting the whole array.
- // TODO: find best match for our needs
- gpuCount = 1;
- err = grVkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
- // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
- if (err && VK_INCOMPLETE != err) {
- SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
- grVkDestroyInstance(inst, nullptr);
- return false;
- }
-
- // query to get the initial queue props size
- uint32_t queueCount;
- grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
- if (!queueCount) {
- SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
- grVkDestroyInstance(inst, nullptr);
- return false;
- }
-
- SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
- // now get the actual queue props
- VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();
-
- grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);
-
- // iterate to find the graphics queue
- uint32_t graphicsQueueIndex = queueCount;
- for (uint32_t i = 0; i < queueCount; i++) {
- if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
- graphicsQueueIndex = i;
- break;
- }
- }
- if (graphicsQueueIndex == queueCount) {
- SkDebugf("Could not find any supported graphics queues.\n");
- grVkDestroyInstance(inst, nullptr);
- return false;
- }
-
- // iterate to find the present queue, if needed
- uint32_t presentQueueIndex = queueCount;
- if (presentQueueIndexPtr && canPresent) {
- for (uint32_t i = 0; i < queueCount; i++) {
- if (canPresent(inst, physDev, i)) {
- presentQueueIndex = i;
- break;
- }
- }
- if (presentQueueIndex == queueCount) {
- SkDebugf("Could not find any supported present queues.\n");
- grVkDestroyInstance(inst, nullptr);
- return false;
- }
- *presentQueueIndexPtr = presentQueueIndex;
- } else {
- // Just setting this so we end up make a single queue for graphics since there was no
- // request for a present queue.
- presentQueueIndex = graphicsQueueIndex;
- }
-
- extensions.initDevice(kGrVkMinimumVersion, inst, physDev);
-
- SkTArray<const char*> deviceLayerNames;
- SkTArray<const char*> deviceExtensionNames;
-#ifdef SK_ENABLE_VK_LAYERS
- for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
- if (extensions.hasDeviceLayer(kDebugLayerNames[i])) {
- deviceLayerNames.push_back(kDebugLayerNames[i]);
- }
- }
-#endif
- if (extensions.hasDeviceExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
- deviceExtensionNames.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
- extensionFlags |= kKHR_swapchain_GrVkExtensionFlag;
- }
- if (extensions.hasDeviceExtension("VK_NV_glsl_shader")) {
- deviceExtensionNames.push_back("VK_NV_glsl_shader");
- extensionFlags |= kNV_glsl_shader_GrVkExtensionFlag;
- }
-
- // query to get the physical device properties
- VkPhysicalDeviceFeatures deviceFeatures;
- grVkGetPhysicalDeviceFeatures(physDev, &deviceFeatures);
- // this looks like it would slow things down,
- // and we can't depend on it on all platforms
- deviceFeatures.robustBufferAccess = VK_FALSE;
-
- uint32_t featureFlags = 0;
- if (deviceFeatures.geometryShader) {
- featureFlags |= kGeometryShader_GrVkFeatureFlag;
- }
- if (deviceFeatures.dualSrcBlend) {
- featureFlags |= kDualSrcBlend_GrVkFeatureFlag;
- }
- if (deviceFeatures.sampleRateShading) {
- featureFlags |= kSampleRateShading_GrVkFeatureFlag;
- }
-
- float queuePriorities[1] = { 0.0 };
- // Here we assume no need for swapchain queue
- // If one is needed, the client will need its own setup code
- const VkDeviceQueueCreateInfo queueInfo[2] = {
- {
- VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
- nullptr, // pNext
- 0, // VkDeviceQueueCreateFlags
- graphicsQueueIndex, // queueFamilyIndex
- 1, // queueCount
- queuePriorities, // pQueuePriorities
- },
- {
- VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
- nullptr, // pNext
- 0, // VkDeviceQueueCreateFlags
- presentQueueIndex, // queueFamilyIndex
- 1, // queueCount
- queuePriorities, // pQueuePriorities
- }
- };
- uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;
-
- const VkDeviceCreateInfo deviceInfo = {
- VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
- nullptr, // pNext
- 0, // VkDeviceCreateFlags
- queueInfoCount, // queueCreateInfoCount
- queueInfo, // pQueueCreateInfos
- (uint32_t) deviceLayerNames.count(), // layerCount
- deviceLayerNames.begin(), // ppEnabledLayerNames
- (uint32_t) deviceExtensionNames.count(), // extensionCount
- deviceExtensionNames.begin(), // ppEnabledExtensionNames
- &deviceFeatures // ppEnabledFeatures
- };
-
- err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
- if (err) {
- SkDebugf("CreateDevice failed: %d\n", err);
- grVkDestroyInstance(inst, nullptr);
- return false;
- }
-
- auto interface =
- sk_make_sp<GrVkInterface>(getProc, inst, device, extensionFlags);
- if (!interface->validate(extensionFlags)) {
- SkDebugf("Vulkan interface validation failed\n");
- grVkDeviceWaitIdle(device);
- grVkDestroyDevice(device, nullptr);
- grVkDestroyInstance(inst, nullptr);
- return false;
- }
-
- VkQueue queue;
- grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
-
- ctx->fInstance = inst;
- ctx->fPhysicalDevice = physDev;
- ctx->fDevice = device;
- ctx->fQueue = queue;
- ctx->fGraphicsQueueIndex = graphicsQueueIndex;
- ctx->fMinAPIVersion = kGrVkMinimumVersion;
- ctx->fExtensions = extensionFlags;
- ctx->fFeatures = featureFlags;
- ctx->fInterface.reset(interface.release());
- ctx->fOwnsInstanceAndDevice = false;
-
- return true;
-
-
-}
-
}
#endif
diff --git a/tools/gpu/vk/VkTestUtils.h b/tools/gpu/vk/VkTestUtils.h
index 30471a78c0..9f34ef09c5 100644
--- a/tools/gpu/vk/VkTestUtils.h
+++ b/tools/gpu/vk/VkTestUtils.h
@@ -13,21 +13,9 @@
#ifdef SK_VULKAN
#include "vk/GrVkDefines.h"
-#include "vk/GrVkInterface.h"
-
-struct GrVkBackendContext;
namespace sk_gpu_test {
bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr*, PFN_vkGetDeviceProcAddr*);
-
- using CanPresentFn = std::function<bool(VkInstance, VkPhysicalDevice,
- uint32_t queueFamilyIndex)>;
-
- bool CreateVkBackendContext(const GrVkInterface::GetInstanceProc& getInstanceProc,
- const GrVkInterface::GetDeviceProc& getDeviceProc,
- GrVkBackendContext* ctx,
- uint32_t* presentQueueIndexPtr = nullptr,
- CanPresentFn canPresent = CanPresentFn());
}
#endif
diff --git a/tools/sk_app/VulkanWindowContext.cpp b/tools/sk_app/VulkanWindowContext.cpp
index 411f114e63..ad0e15ca7b 100644
--- a/tools/sk_app/VulkanWindowContext.cpp
+++ b/tools/sk_app/VulkanWindowContext.cpp
@@ -13,6 +13,7 @@
#include "VulkanWindowContext.h"
#include "vk/GrVkImage.h"
+#include "vk/GrVkInterface.h"
#include "vk/GrVkUtil.h"
#include "vk/GrVkTypes.h"
@@ -21,8 +22,8 @@
#undef CreateSemaphore
#endif
-#define GET_PROC(F) f ## F = (PFN_vk ## F) fGetInstanceProcAddr(fInstance, "vk" #F)
-#define GET_DEV_PROC(F) f ## F = (PFN_vk ## F) fGetDeviceProcAddr(fDevice, "vk" #F)
+#define GET_PROC(F) f ## F = (PFN_vk ## F) fGetInstanceProcAddr(instance, "vk" #F)
+#define GET_DEV_PROC(F) f ## F = (PFN_vk ## F) fGetDeviceProcAddr(device, "vk" #F)
namespace sk_app {
@@ -48,34 +49,22 @@ VulkanWindowContext::VulkanWindowContext(const DisplayParams& params,
void VulkanWindowContext::initializeContext() {
// any config code here (particularly for msaa)?
+ fBackendContext.reset(GrVkBackendContext::Create(fGetInstanceProcAddr, fGetDeviceProcAddr,
+ &fPresentQueueIndex, fCanPresentFn));
- GrVkBackendContext backendContext;
- if (!sk_gpu_test::CreateVkBackendContext(fGetInstanceProcAddr, fGetDeviceProcAddr,
- &backendContext, &fPresentQueueIndex, fCanPresentFn)) {
+ if (!(fBackendContext->fExtensions & kKHR_surface_GrVkExtensionFlag) ||
+ !(fBackendContext->fExtensions & kKHR_swapchain_GrVkExtensionFlag)) {
+ fBackendContext.reset(nullptr);
return;
}
- if (!(backendContext.fExtensions & kKHR_surface_GrVkExtensionFlag) ||
- !(backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag)) {
- return;
- }
-
- fInstance = backendContext.fInstance;
- fPhysicalDevice = backendContext.fPhysicalDevice;
- fDevice = backendContext.fDevice;
- fGraphicsQueueIndex = backendContext.fGraphicsQueueIndex;
- fGraphicsQueue = backendContext.fQueue;
- fInterface = backendContext.fInterface;
-
- GET_PROC(DestroyInstance);
+ VkInstance instance = fBackendContext->fInstance;
+ VkDevice device = fBackendContext->fDevice;
GET_PROC(DestroySurfaceKHR);
GET_PROC(GetPhysicalDeviceSurfaceSupportKHR);
GET_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
GET_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
GET_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
- GET_DEV_PROC(DeviceWaitIdle);
- GET_DEV_PROC(QueueWaitIdle);
- GET_DEV_PROC(DestroyDevice);
GET_DEV_PROC(CreateSwapchainKHR);
GET_DEV_PROC(DestroySwapchainKHR);
GET_DEV_PROC(GetSwapchainImagesKHR);
@@ -83,17 +72,18 @@ void VulkanWindowContext::initializeContext() {
GET_DEV_PROC(QueuePresentKHR);
GET_DEV_PROC(GetDeviceQueue);
- fContext = GrContext::MakeVulkan(backendContext, fDisplayParams.fGrContextOptions);
+ fContext = GrContext::MakeVulkan(fBackendContext, fDisplayParams.fGrContextOptions);
- fSurface = fCreateVkSurfaceFn(fInstance);
+ fSurface = fCreateVkSurfaceFn(instance);
if (VK_NULL_HANDLE == fSurface) {
- this->destroyContext();
+ fBackendContext.reset(nullptr);
return;
}
VkBool32 supported;
- VkResult res = fGetPhysicalDeviceSurfaceSupportKHR(fPhysicalDevice, fPresentQueueIndex,
- fSurface, &supported);
+ VkResult res = fGetPhysicalDeviceSurfaceSupportKHR(fBackendContext->fPhysicalDevice,
+ fPresentQueueIndex, fSurface,
+ &supported);
if (VK_SUCCESS != res) {
this->destroyContext();
return;
@@ -105,44 +95,45 @@ void VulkanWindowContext::initializeContext() {
}
// create presentQueue
- fGetDeviceQueue(fDevice, fPresentQueueIndex, 0, &fPresentQueue);
+ fGetDeviceQueue(fBackendContext->fDevice, fPresentQueueIndex, 0, &fPresentQueue);
}
bool VulkanWindowContext::createSwapchain(int width, int height,
const DisplayParams& params) {
// check for capabilities
VkSurfaceCapabilitiesKHR caps;
- VkResult res = fGetPhysicalDeviceSurfaceCapabilitiesKHR(fPhysicalDevice, fSurface, &caps);
+ VkResult res = fGetPhysicalDeviceSurfaceCapabilitiesKHR(fBackendContext->fPhysicalDevice,
+ fSurface, &caps);
if (VK_SUCCESS != res) {
return false;
}
uint32_t surfaceFormatCount;
- res = fGetPhysicalDeviceSurfaceFormatsKHR(fPhysicalDevice, fSurface, &surfaceFormatCount,
- nullptr);
+ res = fGetPhysicalDeviceSurfaceFormatsKHR(fBackendContext->fPhysicalDevice, fSurface,
+ &surfaceFormatCount, nullptr);
if (VK_SUCCESS != res) {
return false;
}
SkAutoMalloc surfaceFormatAlloc(surfaceFormatCount * sizeof(VkSurfaceFormatKHR));
VkSurfaceFormatKHR* surfaceFormats = (VkSurfaceFormatKHR*)surfaceFormatAlloc.get();
- res = fGetPhysicalDeviceSurfaceFormatsKHR(fPhysicalDevice, fSurface, &surfaceFormatCount,
- surfaceFormats);
+ res = fGetPhysicalDeviceSurfaceFormatsKHR(fBackendContext->fPhysicalDevice, fSurface,
+ &surfaceFormatCount, surfaceFormats);
if (VK_SUCCESS != res) {
return false;
}
uint32_t presentModeCount;
- res = fGetPhysicalDeviceSurfacePresentModesKHR(fPhysicalDevice, fSurface, &presentModeCount,
- nullptr);
+ res = fGetPhysicalDeviceSurfacePresentModesKHR(fBackendContext->fPhysicalDevice, fSurface,
+ &presentModeCount, nullptr);
if (VK_SUCCESS != res) {
return false;
}
SkAutoMalloc presentModeAlloc(presentModeCount * sizeof(VkPresentModeKHR));
VkPresentModeKHR* presentModes = (VkPresentModeKHR*)presentModeAlloc.get();
- res = fGetPhysicalDeviceSurfacePresentModesKHR(fPhysicalDevice, fSurface, &presentModeCount,
- presentModes);
+ res = fGetPhysicalDeviceSurfacePresentModesKHR(fBackendContext->fPhysicalDevice, fSurface,
+ &presentModeCount, presentModes);
if (VK_SUCCESS != res) {
return false;
}
@@ -243,8 +234,8 @@ bool VulkanWindowContext::createSwapchain(int width, int height,
swapchainCreateInfo.imageArrayLayers = 1;
swapchainCreateInfo.imageUsage = usageFlags;
- uint32_t queueFamilies[] = { fGraphicsQueueIndex, fPresentQueueIndex };
- if (fGraphicsQueueIndex != fPresentQueueIndex) {
+ uint32_t queueFamilies[] = { fBackendContext->fGraphicsQueueIndex, fPresentQueueIndex };
+ if (fBackendContext->fGraphicsQueueIndex != fPresentQueueIndex) {
swapchainCreateInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
swapchainCreateInfo.queueFamilyIndexCount = 2;
swapchainCreateInfo.pQueueFamilyIndices = queueFamilies;
@@ -260,18 +251,18 @@ bool VulkanWindowContext::createSwapchain(int width, int height,
swapchainCreateInfo.clipped = true;
swapchainCreateInfo.oldSwapchain = fSwapchain;
- res = fCreateSwapchainKHR(fDevice, &swapchainCreateInfo, nullptr, &fSwapchain);
+ res = fCreateSwapchainKHR(fBackendContext->fDevice, &swapchainCreateInfo, nullptr, &fSwapchain);
if (VK_SUCCESS != res) {
return false;
}
// destroy the old swapchain
if (swapchainCreateInfo.oldSwapchain != VK_NULL_HANDLE) {
- fDeviceWaitIdle(fDevice);
+ GR_VK_CALL(fBackendContext->fInterface, DeviceWaitIdle(fBackendContext->fDevice));
this->destroyBuffers();
- fDestroySwapchainKHR(fDevice, swapchainCreateInfo.oldSwapchain, nullptr);
+ fDestroySwapchainKHR(fBackendContext->fDevice, swapchainCreateInfo.oldSwapchain, nullptr);
}
this->createBuffers(swapchainCreateInfo.imageFormat, colorType);
@@ -280,10 +271,10 @@ bool VulkanWindowContext::createSwapchain(int width, int height,
}
void VulkanWindowContext::createBuffers(VkFormat format, SkColorType colorType) {
- fGetSwapchainImagesKHR(fDevice, fSwapchain, &fImageCount, nullptr);
+ fGetSwapchainImagesKHR(fBackendContext->fDevice, fSwapchain, &fImageCount, nullptr);
SkASSERT(fImageCount);
fImages = new VkImage[fImageCount];
- fGetSwapchainImagesKHR(fDevice, fSwapchain, &fImageCount, fImages);
+ fGetSwapchainImagesKHR(fBackendContext->fDevice, fSwapchain, &fImageCount, fImages);
// set up initial image layouts and create surfaces
fImageLayouts = new VkImageLayout[fImageCount];
@@ -315,10 +306,10 @@ void VulkanWindowContext::createBuffers(VkFormat format, SkColorType colorType)
memset(&commandPoolInfo, 0, sizeof(VkCommandPoolCreateInfo));
commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
// this needs to be on the render queue
- commandPoolInfo.queueFamilyIndex = fGraphicsQueueIndex;
+ commandPoolInfo.queueFamilyIndex = fBackendContext->fGraphicsQueueIndex;
commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
- GR_VK_CALL_ERRCHECK(fInterface,
- CreateCommandPool(fDevice, &commandPoolInfo,
+ GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
+ CreateCommandPool(fBackendContext->fDevice, &commandPoolInfo,
nullptr, &fCommandPool));
}
@@ -346,20 +337,20 @@ void VulkanWindowContext::createBuffers(VkFormat format, SkColorType colorType)
fBackbuffers = new BackbufferInfo[fImageCount + 1];
for (uint32_t i = 0; i < fImageCount + 1; ++i) {
fBackbuffers[i].fImageIndex = -1;
- GR_VK_CALL_ERRCHECK(fInterface,
- CreateSemaphore(fDevice, &semaphoreInfo,
+ GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
+ CreateSemaphore(fBackendContext->fDevice, &semaphoreInfo,
nullptr, &fBackbuffers[i].fAcquireSemaphore));
- GR_VK_CALL_ERRCHECK(fInterface,
- CreateSemaphore(fDevice, &semaphoreInfo,
+ GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
+ CreateSemaphore(fBackendContext->fDevice, &semaphoreInfo,
nullptr, &fBackbuffers[i].fRenderSemaphore));
- GR_VK_CALL_ERRCHECK(fInterface,
- AllocateCommandBuffers(fDevice, &commandBuffersInfo,
+ GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
+ AllocateCommandBuffers(fBackendContext->fDevice, &commandBuffersInfo,
fBackbuffers[i].fTransitionCmdBuffers));
- GR_VK_CALL_ERRCHECK(fInterface,
- CreateFence(fDevice, &fenceInfo, nullptr,
+ GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
+ CreateFence(fBackendContext->fDevice, &fenceInfo, nullptr,
&fBackbuffers[i].fUsageFences[0]));
- GR_VK_CALL_ERRCHECK(fInterface,
- CreateFence(fDevice, &fenceInfo, nullptr,
+ GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
+ CreateFence(fBackendContext->fDevice, &fenceInfo, nullptr,
&fBackbuffers[i].fUsageFences[1]));
}
fCurrentBackbufferIndex = fImageCount;
@@ -369,26 +360,26 @@ void VulkanWindowContext::destroyBuffers() {
if (fBackbuffers) {
for (uint32_t i = 0; i < fImageCount + 1; ++i) {
- GR_VK_CALL_ERRCHECK(fInterface,
- WaitForFences(fDevice, 2,
+ GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
+ WaitForFences(fBackendContext->fDevice, 2,
fBackbuffers[i].fUsageFences,
true, UINT64_MAX));
fBackbuffers[i].fImageIndex = -1;
- GR_VK_CALL(fInterface,
- DestroySemaphore(fDevice,
+ GR_VK_CALL(fBackendContext->fInterface,
+ DestroySemaphore(fBackendContext->fDevice,
fBackbuffers[i].fAcquireSemaphore,
nullptr));
- GR_VK_CALL(fInterface,
- DestroySemaphore(fDevice,
+ GR_VK_CALL(fBackendContext->fInterface,
+ DestroySemaphore(fBackendContext->fDevice,
fBackbuffers[i].fRenderSemaphore,
nullptr));
- GR_VK_CALL(fInterface,
- FreeCommandBuffers(fDevice, fCommandPool, 2,
+ GR_VK_CALL(fBackendContext->fInterface,
+ FreeCommandBuffers(fBackendContext->fDevice, fCommandPool, 2,
fBackbuffers[i].fTransitionCmdBuffers));
- GR_VK_CALL(fInterface,
- DestroyFence(fDevice, fBackbuffers[i].fUsageFences[0], 0));
- GR_VK_CALL(fInterface,
- DestroyFence(fDevice, fBackbuffers[i].fUsageFences[1], 0));
+ GR_VK_CALL(fBackendContext->fInterface,
+ DestroyFence(fBackendContext->fDevice, fBackbuffers[i].fUsageFences[0], 0));
+ GR_VK_CALL(fBackendContext->fInterface,
+ DestroyFence(fBackendContext->fDevice, fBackbuffers[i].fUsageFences[1], 0));
}
}
@@ -409,43 +400,34 @@ VulkanWindowContext::~VulkanWindowContext() {
}
void VulkanWindowContext::destroyContext() {
- if (!this->isValid()) {
+ if (!fBackendContext.get()) {
return;
}
- fQueueWaitIdle(fPresentQueue);
- fDeviceWaitIdle(fDevice);
+ GR_VK_CALL(fBackendContext->fInterface, QueueWaitIdle(fPresentQueue));
+ GR_VK_CALL(fBackendContext->fInterface, DeviceWaitIdle(fBackendContext->fDevice));
this->destroyBuffers();
if (VK_NULL_HANDLE != fCommandPool) {
- GR_VK_CALL(fInterface, DestroyCommandPool(fDevice, fCommandPool, nullptr));
+ GR_VK_CALL(fBackendContext->fInterface, DestroyCommandPool(fBackendContext->fDevice,
+ fCommandPool, nullptr));
fCommandPool = VK_NULL_HANDLE;
}
if (VK_NULL_HANDLE != fSwapchain) {
- fDestroySwapchainKHR(fDevice, fSwapchain, nullptr);
+ fDestroySwapchainKHR(fBackendContext->fDevice, fSwapchain, nullptr);
fSwapchain = VK_NULL_HANDLE;
}
if (VK_NULL_HANDLE != fSurface) {
- fDestroySurfaceKHR(fInstance, fSurface, nullptr);
+ fDestroySurfaceKHR(fBackendContext->fInstance, fSurface, nullptr);
fSurface = VK_NULL_HANDLE;
}
fContext.reset();
- fInterface.reset();
-
- if (VK_NULL_HANDLE != fDevice) {
- fDestroyDevice(fDevice, nullptr);
- fDevice = VK_NULL_HANDLE;
- }
- fPhysicalDevice = VK_NULL_HANDLE;
- if (VK_NULL_HANDLE != fInstance) {
- fDestroyInstance(fInstance, nullptr);
- fInstance = VK_NULL_HANDLE;
- }
+ fBackendContext.reset(nullptr);
}
VulkanWindowContext::BackbufferInfo* VulkanWindowContext::getAvailableBackbuffer() {
@@ -457,8 +439,8 @@ VulkanWindowContext::BackbufferInfo* VulkanWindowContext::getAvailableBackbuffer
}
BackbufferInfo* backbuffer = fBackbuffers + fCurrentBackbufferIndex;
- GR_VK_CALL_ERRCHECK(fInterface,
- WaitForFences(fDevice, 2, backbuffer->fUsageFences,
+ GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
+ WaitForFences(fBackendContext->fDevice, 2, backbuffer->fUsageFences,
true, UINT64_MAX));
return backbuffer;
}
@@ -468,12 +450,12 @@ sk_sp<SkSurface> VulkanWindowContext::getBackbufferSurface() {
SkASSERT(backbuffer);
// reset the fence
- GR_VK_CALL_ERRCHECK(fInterface,
- ResetFences(fDevice, 2, backbuffer->fUsageFences));
+ GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
+ ResetFences(fBackendContext->fDevice, 2, backbuffer->fUsageFences));
// semaphores should be in unsignaled state
// acquire the image
- VkResult res = fAcquireNextImageKHR(fDevice, fSwapchain, UINT64_MAX,
+ VkResult res = fAcquireNextImageKHR(fBackendContext->fDevice, fSwapchain, UINT64_MAX,
backbuffer->fAcquireSemaphore, VK_NULL_HANDLE,
&backbuffer->fImageIndex);
if (VK_ERROR_SURFACE_LOST_KHR == res) {
@@ -487,11 +469,11 @@ sk_sp<SkSurface> VulkanWindowContext::getBackbufferSurface() {
return nullptr;
}
backbuffer = this->getAvailableBackbuffer();
- GR_VK_CALL_ERRCHECK(fInterface,
- ResetFences(fDevice, 2, backbuffer->fUsageFences));
+ GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
+ ResetFences(fBackendContext->fDevice, 2, backbuffer->fUsageFences));
// acquire the image
- res = fAcquireNextImageKHR(fDevice, fSwapchain, UINT64_MAX,
+ res = fAcquireNextImageKHR(fBackendContext->fDevice, fSwapchain, UINT64_MAX,
backbuffer->fAcquireSemaphore, VK_NULL_HANDLE,
&backbuffer->fImageIndex);
@@ -519,27 +501,27 @@ sk_sp<SkSurface> VulkanWindowContext::getBackbufferSurface() {
layout, // oldLayout
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
fPresentQueueIndex, // srcQueueFamilyIndex
- fGraphicsQueueIndex, // dstQueueFamilyIndex
+ fBackendContext->fGraphicsQueueIndex, // dstQueueFamilyIndex
fImages[backbuffer->fImageIndex], // image
{ VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 } // subresourceRange
};
- GR_VK_CALL_ERRCHECK(fInterface,
+ GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
ResetCommandBuffer(backbuffer->fTransitionCmdBuffers[0], 0));
VkCommandBufferBeginInfo info;
memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
info.flags = 0;
- GR_VK_CALL_ERRCHECK(fInterface,
+ GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
BeginCommandBuffer(backbuffer->fTransitionCmdBuffers[0], &info));
- GR_VK_CALL(fInterface,
+ GR_VK_CALL(fBackendContext->fInterface,
CmdPipelineBarrier(backbuffer->fTransitionCmdBuffers[0],
srcStageMask, dstStageMask, 0,
0, nullptr,
0, nullptr,
1, &imageMemoryBarrier));
- GR_VK_CALL_ERRCHECK(fInterface,
+ GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
EndCommandBuffer(backbuffer->fTransitionCmdBuffers[0]));
VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
@@ -554,8 +536,8 @@ sk_sp<SkSurface> VulkanWindowContext::getBackbufferSurface() {
submitInfo.pCommandBuffers = &backbuffer->fTransitionCmdBuffers[0];
submitInfo.signalSemaphoreCount = 0;
- GR_VK_CALL_ERRCHECK(fInterface,
- QueueSubmit(fGraphicsQueue, 1, &submitInfo,
+ GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
+ QueueSubmit(fBackendContext->fQueue, 1, &submitInfo,
backbuffer->fUsageFences[0]));
SkSurface* surface = fSurfaces[backbuffer->fImageIndex].get();
@@ -592,26 +574,26 @@ void VulkanWindowContext::swapBuffers() {
dstAccessMask, // inputMask
layout, // oldLayout
VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, // newLayout
- fGraphicsQueueIndex, // srcQueueFamilyIndex
+ fBackendContext->fGraphicsQueueIndex, // srcQueueFamilyIndex
fPresentQueueIndex, // dstQueueFamilyIndex
fImages[backbuffer->fImageIndex], // image
{ VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 } // subresourceRange
};
- GR_VK_CALL_ERRCHECK(fInterface,
+ GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
ResetCommandBuffer(backbuffer->fTransitionCmdBuffers[1], 0));
VkCommandBufferBeginInfo info;
memset(&info, 0, sizeof(VkCommandBufferBeginInfo));
info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
info.flags = 0;
- GR_VK_CALL_ERRCHECK(fInterface,
+ GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
BeginCommandBuffer(backbuffer->fTransitionCmdBuffers[1], &info));
- GR_VK_CALL(fInterface,
+ GR_VK_CALL(fBackendContext->fInterface,
CmdPipelineBarrier(backbuffer->fTransitionCmdBuffers[1],
srcStageMask, dstStageMask, 0,
0, nullptr,
0, nullptr,
1, &imageMemoryBarrier));
- GR_VK_CALL_ERRCHECK(fInterface,
+ GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
EndCommandBuffer(backbuffer->fTransitionCmdBuffers[1]));
fImageLayouts[backbuffer->fImageIndex] = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
@@ -627,8 +609,8 @@ void VulkanWindowContext::swapBuffers() {
submitInfo.signalSemaphoreCount = 1;
submitInfo.pSignalSemaphores = &backbuffer->fRenderSemaphore;
- GR_VK_CALL_ERRCHECK(fInterface,
- QueueSubmit(fGraphicsQueue, 1, &submitInfo,
+ GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
+ QueueSubmit(fBackendContext->fQueue, 1, &submitInfo,
backbuffer->fUsageFences[1]));
// Submit present operation to present queue
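
initializeContext() now hands fGetInstanceProcAddr/fGetDeviceProcAddr and the platform's fCanPresentFn straight to GrVkBackendContext::Create() and reads every Vulkan handle out of fBackendContext afterwards. A hedged sketch of the two callbacks a client passes to the constructor; the real implementations live in the platform-specific sk_app window code and call the window system's WSI entry points, so the bodies below are placeholders:

    // Illustrative placeholders only; a real client calls e.g. vkCreateWin32SurfaceKHR and
    // vkGetPhysicalDeviceWin32PresentationSupportKHR (or the Android/XCB equivalents).
    sk_app::VulkanWindowContext::CreateVkSurfaceFn createSurface = [](VkInstance instance) {
        VkSurfaceKHR surface = VK_NULL_HANDLE;
        // ... platform-specific surface creation using 'instance' goes here ...
        return surface;
    };
    sk_app::VulkanWindowContext::CanPresentFn canPresent =
            [](VkInstance, VkPhysicalDevice, uint32_t /*queueFamilyIndex*/) {
        // ... platform-specific presentation-support query goes here ...
        return true;
    };
    // These are handed to VulkanWindowContext(params, createSurface, canPresent,
    // instProc, devProc); the window context then owns surface/swapchain teardown while
    // fBackendContext owns the device and instance.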
diff --git a/tools/sk_app/VulkanWindowContext.h b/tools/sk_app/VulkanWindowContext.h
index 79298f6b38..1fca9dc83e 100644
--- a/tools/sk_app/VulkanWindowContext.h
+++ b/tools/sk_app/VulkanWindowContext.h
@@ -13,8 +13,6 @@
#ifdef SK_VULKAN
#include "vk/GrVkBackendContext.h"
-#include "vk/GrVkInterface.h"
-#include "vk/VkTestUtils.h"
#include "WindowContext.h"
class GrRenderTarget;
@@ -28,7 +26,7 @@ public:
sk_sp<SkSurface> getBackbufferSurface() override;
void swapBuffers() override;
- bool isValid() override { return fDevice != VK_NULL_HANDLE; }
+ bool isValid() override { return SkToBool(fBackendContext.get()); }
void resize(int w, int h) override {
this->createSwapchain(w, h, fDisplayParams);
@@ -43,7 +41,7 @@ public:
/** Platform specific function that creates a VkSurfaceKHR for a window */
using CreateVkSurfaceFn = std::function<VkSurfaceKHR(VkInstance)>;
/** Platform specific function that determines whether presentation will succeed. */
- using CanPresentFn = sk_gpu_test::CanPresentFn;
+ using CanPresentFn = GrVkBackendContext::CanPresentFn;
VulkanWindowContext(const DisplayParams&, CreateVkSurfaceFn, CanPresentFn,
PFN_vkGetInstanceProcAddr, PFN_vkGetDeviceProcAddr);
@@ -65,9 +63,7 @@ private:
void createBuffers(VkFormat format, SkColorType colorType);
void destroyBuffers();
- VkInstance fInstance = VK_NULL_HANDLE;
- VkPhysicalDevice fPhysicalDevice = VK_NULL_HANDLE;
- VkDevice fDevice = VK_NULL_HANDLE;
+ sk_sp<const GrVkBackendContext> fBackendContext;
// simple wrapper class that exists only to initialize a pointer to NULL
template <typename FNPTR_TYPE> class VkPtr {
@@ -99,19 +95,10 @@ private:
VkPtr<PFN_vkGetSwapchainImagesKHR> fGetSwapchainImagesKHR;
VkPtr<PFN_vkAcquireNextImageKHR> fAcquireNextImageKHR;
VkPtr<PFN_vkQueuePresentKHR> fQueuePresentKHR;
-
- VkPtr<PFN_vkDestroyInstance> fDestroyInstance;
- VkPtr<PFN_vkDeviceWaitIdle> fDeviceWaitIdle;
- VkPtr<PFN_vkQueueWaitIdle> fQueueWaitIdle;
- VkPtr<PFN_vkDestroyDevice> fDestroyDevice;
VkPtr<PFN_vkGetDeviceQueue> fGetDeviceQueue;
- sk_sp<const GrVkInterface> fInterface;
-
VkSurfaceKHR fSurface;
VkSwapchainKHR fSwapchain;
- uint32_t fGraphicsQueueIndex;
- VkQueue fGraphicsQueue;
uint32_t fPresentQueueIndex;
VkQueue fPresentQueue;