diff options
author | Greg Daniel <egdaniel@google.com> | 2018-06-27 15:26:54 +0000 |
---|---|---|
committer | Skia Commit-Bot <skia-commit-bot@chromium.org> | 2018-06-27 15:53:55 +0000 |
commit | 059a9ab4bcd07a4bfdbfef333c27ef3d277e0e46 (patch) | |
tree | 6316b27ce95e899bd2d6a22b6e04b2039a67853e /src/gpu/vk | |
parent | 224c700a1fb0b7f6abd85a9729d29cbbdf5872dd (diff) |
Revert "Make GrVkBackendContext no longer derive from SkRefCnt."
This reverts commit 93ae2337732bf206e6ef4faecc6b30c3881e8359.
Reason for revert: (no reason was recorded — the revert template placeholder was left unfilled in the original commit message)
Original change's description:
> Make GrVkBackendContext no longer derive from SkRefCnt.
>
> Also moves the helper Create functions to VkTestUtils since no clients
> are using them anymore.
>
> Bug: skia:
> Change-Id: I7e8e4912e7ef6fb00a7e2a00407aed5e83211799
> Reviewed-on: https://skia-review.googlesource.com/135323
> Reviewed-by: Jim Van Verth <jvanverth@google.com>
> Reviewed-by: Brian Salomon <bsalomon@google.com>
> Commit-Queue: Greg Daniel <egdaniel@google.com>
TBR=egdaniel@google.com,jvanverth@google.com,bsalomon@google.com
# Not skipping CQ checks because original CL landed > 1 day ago.
Bug: skia:
Change-Id: If7201917631dc22753ea3fa6e9d2984463e38e4c
Reviewed-on: https://skia-review.googlesource.com/137903
Reviewed-by: Greg Daniel <egdaniel@google.com>
Commit-Queue: Greg Daniel <egdaniel@google.com>
Diffstat (limited to 'src/gpu/vk')
-rw-r--r-- | src/gpu/vk/GrVkBackendContext.cpp | 338
-rw-r--r-- | src/gpu/vk/GrVkGpu.cpp | 50
-rw-r--r-- | src/gpu/vk/GrVkGpu.h | 39
3 files changed, 378 insertions, 49 deletions
diff --git a/src/gpu/vk/GrVkBackendContext.cpp b/src/gpu/vk/GrVkBackendContext.cpp new file mode 100644 index 0000000000..3c4e8d7a60 --- /dev/null +++ b/src/gpu/vk/GrVkBackendContext.cpp @@ -0,0 +1,338 @@ +/* + * Copyright 2015 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#include "SkAutoMalloc.h" +#include "vk/GrVkBackendContext.h" +#include "vk/GrVkExtensions.h" +#include "vk/GrVkUtil.h" + +#if GR_TEST_UTILS || defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) + +//////////////////////////////////////////////////////////////////////////////// +// Helper code to set up Vulkan context objects + +#ifdef SK_ENABLE_VK_LAYERS +const char* kDebugLayerNames[] = { + // elements of VK_LAYER_LUNARG_standard_validation + "VK_LAYER_GOOGLE_threading", + "VK_LAYER_LUNARG_parameter_validation", + "VK_LAYER_LUNARG_object_tracker", + "VK_LAYER_LUNARG_image", + "VK_LAYER_LUNARG_core_validation", + "VK_LAYER_LUNARG_swapchain", + "VK_LAYER_GOOGLE_unique_objects", + // not included in standard_validation + //"VK_LAYER_LUNARG_api_dump", + //"VK_LAYER_LUNARG_vktrace", + //"VK_LAYER_LUNARG_screenshot", +}; +#endif + +// the minimum version of Vulkan supported +#ifdef SK_BUILD_FOR_ANDROID +const uint32_t kGrVkMinimumVersion = VK_MAKE_VERSION(1, 0, 3); +#else +const uint32_t kGrVkMinimumVersion = VK_MAKE_VERSION(1, 0, 8); +#endif + +#define ACQUIRE_VK_PROC(name, instance, device) \ + PFN_vk##name grVk##name = \ + reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \ + if (grVk##name == nullptr) { \ + SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \ + return nullptr; \ + } + +// Create the base Vulkan objects needed by the GrVkGpu object +const GrVkBackendContext* GrVkBackendContext::Create(uint32_t* presentQueueIndexPtr, + CanPresentFn canPresent, + GrVkInterface::GetProc getProc) { + if (!getProc) { + return nullptr; + } + SkASSERT(getProc); + + VkPhysicalDevice physDev; + 
VkDevice device; + VkInstance inst; + VkResult err; + + const VkApplicationInfo app_info = { + VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType + nullptr, // pNext + "vktest", // pApplicationName + 0, // applicationVersion + "vktest", // pEngineName + 0, // engineVerison + kGrVkMinimumVersion, // apiVersion + }; + + GrVkExtensions extensions(getProc); + extensions.initInstance(kGrVkMinimumVersion); + + SkTArray<const char*> instanceLayerNames; + SkTArray<const char*> instanceExtensionNames; + uint32_t extensionFlags = 0; +#ifdef SK_ENABLE_VK_LAYERS + for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) { + if (extensions.hasInstanceLayer(kDebugLayerNames[i])) { + instanceLayerNames.push_back(kDebugLayerNames[i]); + } + } + if (extensions.hasInstanceExtension(VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) { + instanceExtensionNames.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME); + extensionFlags |= kEXT_debug_report_GrVkExtensionFlag; + } +#endif + + if (extensions.hasInstanceExtension(VK_KHR_SURFACE_EXTENSION_NAME)) { + instanceExtensionNames.push_back(VK_KHR_SURFACE_EXTENSION_NAME); + extensionFlags |= kKHR_surface_GrVkExtensionFlag; + } + if (extensions.hasInstanceExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME)) { + instanceExtensionNames.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); + extensionFlags |= kKHR_swapchain_GrVkExtensionFlag; + } +#ifdef SK_BUILD_FOR_WIN + if (extensions.hasInstanceExtension(VK_KHR_WIN32_SURFACE_EXTENSION_NAME)) { + instanceExtensionNames.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME); + extensionFlags |= kKHR_win32_surface_GrVkExtensionFlag; + } +#elif defined(SK_BUILD_FOR_ANDROID) + if (extensions.hasInstanceExtension(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) { + instanceExtensionNames.push_back(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME); + extensionFlags |= kKHR_android_surface_GrVkExtensionFlag; + } +#elif defined(SK_BUILD_FOR_UNIX) && !defined(__Fuchsia__) + if (extensions.hasInstanceExtension(VK_KHR_XCB_SURFACE_EXTENSION_NAME)) { + 
instanceExtensionNames.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME); + extensionFlags |= kKHR_xcb_surface_GrVkExtensionFlag; + } +#endif + + const VkInstanceCreateInfo instance_create = { + VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType + nullptr, // pNext + 0, // flags + &app_info, // pApplicationInfo + (uint32_t) instanceLayerNames.count(), // enabledLayerNameCount + instanceLayerNames.begin(), // ppEnabledLayerNames + (uint32_t) instanceExtensionNames.count(), // enabledExtensionNameCount + instanceExtensionNames.begin(), // ppEnabledExtensionNames + }; + + ACQUIRE_VK_PROC(CreateInstance, VK_NULL_HANDLE, VK_NULL_HANDLE); + err = grVkCreateInstance(&instance_create, nullptr, &inst); + if (err < 0) { + SkDebugf("vkCreateInstance failed: %d\n", err); + return nullptr; + } + + ACQUIRE_VK_PROC(DestroyInstance, inst, VK_NULL_HANDLE); + ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE); + ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE); + ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE); + ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE); + ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE); + ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE); + ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE); + + uint32_t gpuCount; + err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr); + if (err) { + SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err); + grVkDestroyInstance(inst, nullptr); + return nullptr; + } + if (!gpuCount) { + SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n"); + grVkDestroyInstance(inst, nullptr); + return nullptr; + } + // Just returning the first physical device instead of getting the whole array. + // TODO: find best match for our needs + gpuCount = 1; + err = grVkEnumeratePhysicalDevices(inst, &gpuCount, &physDev); + // VK_INCOMPLETE is returned when the count we provide is less than the total device count. 
+ if (err && VK_INCOMPLETE != err) { + SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err); + grVkDestroyInstance(inst, nullptr); + return nullptr; + } + + // query to get the initial queue props size + uint32_t queueCount; + grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr); + if (!queueCount) { + SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n"); + grVkDestroyInstance(inst, nullptr); + return nullptr; + } + + SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties)); + // now get the actual queue props + VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get(); + + grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps); + + // iterate to find the graphics queue + uint32_t graphicsQueueIndex = queueCount; + for (uint32_t i = 0; i < queueCount; i++) { + if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) { + graphicsQueueIndex = i; + break; + } + } + if (graphicsQueueIndex == queueCount) { + SkDebugf("Could not find any supported graphics queues.\n"); + grVkDestroyInstance(inst, nullptr); + return nullptr; + } + + // iterate to find the present queue, if needed + uint32_t presentQueueIndex = queueCount; + if (presentQueueIndexPtr && canPresent) { + for (uint32_t i = 0; i < queueCount; i++) { + if (canPresent(inst, physDev, i)) { + presentQueueIndex = i; + break; + } + } + if (presentQueueIndex == queueCount) { + SkDebugf("Could not find any supported present queues.\n"); + grVkDestroyInstance(inst, nullptr); + return nullptr; + } + *presentQueueIndexPtr = presentQueueIndex; + } else { + // Just setting this so we end up make a single queue for graphics since there was no + // request for a present queue. 
+ presentQueueIndex = graphicsQueueIndex; + } + + extensions.initDevice(kGrVkMinimumVersion, inst, physDev); + + SkTArray<const char*> deviceLayerNames; + SkTArray<const char*> deviceExtensionNames; +#ifdef SK_ENABLE_VK_LAYERS + for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) { + if (extensions.hasDeviceLayer(kDebugLayerNames[i])) { + deviceLayerNames.push_back(kDebugLayerNames[i]); + } + } +#endif + if (extensions.hasDeviceExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME)) { + deviceExtensionNames.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); + extensionFlags |= kKHR_swapchain_GrVkExtensionFlag; + } + if (extensions.hasDeviceExtension("VK_NV_glsl_shader")) { + deviceExtensionNames.push_back("VK_NV_glsl_shader"); + extensionFlags |= kNV_glsl_shader_GrVkExtensionFlag; + } + + // query to get the physical device properties + VkPhysicalDeviceFeatures deviceFeatures; + grVkGetPhysicalDeviceFeatures(physDev, &deviceFeatures); + // this looks like it would slow things down, + // and we can't depend on it on all platforms + deviceFeatures.robustBufferAccess = VK_FALSE; + + uint32_t featureFlags = 0; + if (deviceFeatures.geometryShader) { + featureFlags |= kGeometryShader_GrVkFeatureFlag; + } + if (deviceFeatures.dualSrcBlend) { + featureFlags |= kDualSrcBlend_GrVkFeatureFlag; + } + if (deviceFeatures.sampleRateShading) { + featureFlags |= kSampleRateShading_GrVkFeatureFlag; + } + + float queuePriorities[1] = { 0.0 }; + // Here we assume no need for swapchain queue + // If one is needed, the client will need its own setup code + const VkDeviceQueueCreateInfo queueInfo[2] = { + { + VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType + nullptr, // pNext + 0, // VkDeviceQueueCreateFlags + graphicsQueueIndex, // queueFamilyIndex + 1, // queueCount + queuePriorities, // pQueuePriorities + }, + { + VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType + nullptr, // pNext + 0, // VkDeviceQueueCreateFlags + presentQueueIndex, // queueFamilyIndex + 1, // queueCount + 
queuePriorities, // pQueuePriorities + } + }; + uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1; + + const VkDeviceCreateInfo deviceInfo = { + VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType + nullptr, // pNext + 0, // VkDeviceCreateFlags + queueInfoCount, // queueCreateInfoCount + queueInfo, // pQueueCreateInfos + (uint32_t) deviceLayerNames.count(), // layerCount + deviceLayerNames.begin(), // ppEnabledLayerNames + (uint32_t) deviceExtensionNames.count(), // extensionCount + deviceExtensionNames.begin(), // ppEnabledExtensionNames + &deviceFeatures // ppEnabledFeatures + }; + + err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device); + if (err) { + SkDebugf("CreateDevice failed: %d\n", err); + grVkDestroyInstance(inst, nullptr); + return nullptr; + } + + auto interface = + sk_make_sp<GrVkInterface>(getProc, inst, device, extensionFlags); + if (!interface->validate(extensionFlags)) { + SkDebugf("Vulkan interface validation failed\n"); + grVkDeviceWaitIdle(device); + grVkDestroyDevice(device, nullptr); + grVkDestroyInstance(inst, nullptr); + return nullptr; + } + + VkQueue queue; + grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue); + + GrVkBackendContext* ctx = new GrVkBackendContext(); + ctx->fInstance = inst; + ctx->fPhysicalDevice = physDev; + ctx->fDevice = device; + ctx->fQueue = queue; + ctx->fGraphicsQueueIndex = graphicsQueueIndex; + ctx->fMinAPIVersion = kGrVkMinimumVersion; + ctx->fExtensions = extensionFlags; + ctx->fFeatures = featureFlags; + ctx->fInterface.reset(interface.release()); + ctx->fOwnsInstanceAndDevice = true; + + return ctx; +} +#endif // GR_TEST_UTILS || defined(SK_BUILD_FOR_ANDROID_FRAMEWORK) + +GrVkBackendContext::~GrVkBackendContext() { + fMemoryAllocator.reset(); + if (fInterface == nullptr || !fOwnsInstanceAndDevice) { + return; + } + + fInterface->fFunctions.fDeviceWaitIdle(fDevice); + fInterface->fFunctions.fDestroyDevice(fDevice, nullptr); + fDevice = VK_NULL_HANDLE; + 
fInterface->fFunctions.fDestroyInstance(fInstance, nullptr); + fInstance = VK_NULL_HANDLE; +} diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp index 0b160a6ade..ba1f7caf2d 100644 --- a/src/gpu/vk/GrVkGpu.cpp +++ b/src/gpu/vk/GrVkGpu.cpp @@ -73,38 +73,33 @@ VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback( } #endif -sk_sp<GrGpu> GrVkGpu::Make(const GrVkBackendContext& backendContext, +sk_sp<GrGpu> GrVkGpu::Make(sk_sp<const GrVkBackendContext> backendContext, const GrContextOptions& options, GrContext* context) { - if (backendContext.fInstance == VK_NULL_HANDLE || - backendContext.fPhysicalDevice == VK_NULL_HANDLE || - backendContext.fDevice == VK_NULL_HANDLE || - backendContext.fQueue == VK_NULL_HANDLE) { + if (!backendContext) { return nullptr; } - if (!backendContext.fInterface || - !backendContext.fInterface->validate(backendContext.fExtensions)) { + + if (!backendContext->fInterface->validate(backendContext->fExtensions)) { return nullptr; } - return sk_sp<GrGpu>(new GrVkGpu(context, options, backendContext)); + return sk_sp<GrGpu>(new GrVkGpu(context, options, std::move(backendContext))); } //////////////////////////////////////////////////////////////////////////////// GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options, - const GrVkBackendContext& backendContext) + sk_sp<const GrVkBackendContext> backendCtx) : INHERITED(context) - , fInterface(std::move(backendContext.fInterface)) - , fMemoryAllocator(backendContext.fMemoryAllocator) - , fInstance(backendContext.fInstance) - , fDevice(backendContext.fDevice) - , fQueue(backendContext.fQueue) + , fBackendContext(std::move(backendCtx)) + , fMemoryAllocator(fBackendContext->fMemoryAllocator) + , fDevice(fBackendContext->fDevice) + , fQueue(fBackendContext->fQueue) , fResourceProvider(this) , fDisconnected(false) { - SkASSERT(!backendContext.fOwnsInstanceAndDevice); #ifdef SK_ENABLE_VK_LAYERS fCallback = VK_NULL_HANDLE; - if (backendContext.fExtensions & 
kEXT_debug_report_GrVkExtensionFlag) { + if (fBackendContext->fExtensions & kEXT_debug_report_GrVkExtensionFlag) { // Setup callback creation information VkDebugReportCallbackCreateInfoEXT callbackCreateInfo; callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT; @@ -119,32 +114,32 @@ GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options, // Register the callback GR_VK_CALL_ERRCHECK(this->vkInterface(), - CreateDebugReportCallbackEXT(backendContext.fInstance, + CreateDebugReportCallbackEXT(fBackendContext->fInstance, &callbackCreateInfo, nullptr, &fCallback)); } #endif if (!fMemoryAllocator) { // We were not given a memory allocator at creation - fMemoryAllocator.reset(new GrVkAMDMemoryAllocator(backendContext.fPhysicalDevice, - fDevice, backendContext.fInterface)); + fMemoryAllocator.reset(new GrVkAMDMemoryAllocator(fBackendContext->fPhysicalDevice, + fDevice, fBackendContext->fInterface)); } fCompiler = new SkSL::Compiler(); - fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendContext.fPhysicalDevice, - backendContext.fFeatures, backendContext.fExtensions)); + fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), fBackendContext->fPhysicalDevice, + fBackendContext->fFeatures, fBackendContext->fExtensions)); fCaps.reset(SkRef(fVkCaps.get())); - VK_CALL(GetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &fPhysDevProps)); - VK_CALL(GetPhysicalDeviceMemoryProperties(backendContext.fPhysicalDevice, &fPhysDevMemProps)); + VK_CALL(GetPhysicalDeviceProperties(fBackendContext->fPhysicalDevice, &fPhysDevProps)); + VK_CALL(GetPhysicalDeviceMemoryProperties(fBackendContext->fPhysicalDevice, &fPhysDevMemProps)); const VkCommandPoolCreateInfo cmdPoolInfo = { VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType nullptr, // pNext VK_COMMAND_POOL_CREATE_TRANSIENT_BIT | VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, // CmdPoolCreateFlags - backendContext.fGraphicsQueueIndex, // queueFamilyIndex + 
fBackendContext->fGraphicsQueueIndex, // queueFamilyIndex }; GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateCommandPool(fDevice, &cmdPoolInfo, nullptr, &fCmdPool)); @@ -206,15 +201,10 @@ void GrVkGpu::destroyResources() { #ifdef SK_ENABLE_VK_LAYERS if (fCallback) { - VK_CALL(DestroyDebugReportCallbackEXT(fInstance, fCallback, nullptr)); + VK_CALL(DestroyDebugReportCallbackEXT(fBackendContext->fInstance, fCallback, nullptr)); } #endif - fMemoryAllocator.reset(); - - fQueue = VK_NULL_HANDLE; - fDevice = VK_NULL_HANDLE; - fInstance = VK_NULL_HANDLE; } GrVkGpu::~GrVkGpu() { diff --git a/src/gpu/vk/GrVkGpu.h b/src/gpu/vk/GrVkGpu.h index f506d28b0e..052476098d 100644 --- a/src/gpu/vk/GrVkGpu.h +++ b/src/gpu/vk/GrVkGpu.h @@ -38,13 +38,13 @@ namespace SkSL { class GrVkGpu : public GrGpu { public: - static sk_sp<GrGpu> Make(const GrVkBackendContext&, const GrContextOptions&, GrContext*); + static sk_sp<GrGpu> Make(sk_sp<const GrVkBackendContext>, const GrContextOptions&, GrContext*); ~GrVkGpu() override; void disconnect(DisconnectType) override; - const GrVkInterface* vkInterface() const { return fInterface.get(); } + const GrVkInterface* vkInterface() const { return fBackendContext->fInterface.get(); } const GrVkCaps& vkCaps() const { return *fVkCaps; } GrVkMemoryAllocator* memoryAllocator() const { return fMemoryAllocator.get(); } @@ -144,7 +144,7 @@ public: bool updateBuffer(GrVkBuffer* buffer, const void* src, VkDeviceSize offset, VkDeviceSize size); private: - GrVkGpu(GrContext*, const GrContextOptions&, const GrVkBackendContext& backendContext); + GrVkGpu(GrContext*, const GrContextOptions&, sk_sp<const GrVkBackendContext> backendContext); void onResetContext(uint32_t resetBits) override {} @@ -222,27 +222,28 @@ private: GrVkImageInfo* info); #endif - sk_sp<const GrVkInterface> fInterface; - sk_sp<GrVkMemoryAllocator> fMemoryAllocator; - sk_sp<GrVkCaps> fVkCaps; + sk_sp<const GrVkBackendContext> fBackendContext; + sk_sp<GrVkMemoryAllocator> fMemoryAllocator; + 
sk_sp<GrVkCaps> fVkCaps; - VkInstance fInstance; - VkDevice fDevice; - VkQueue fQueue; // Must be Graphics queue + // These Vulkan objects are provided by the client, and also stored in fBackendContext. + // They're copied here for convenient access. + VkDevice fDevice; + VkQueue fQueue; // Must be Graphics queue // Created by GrVkGpu - GrVkResourceProvider fResourceProvider; - VkCommandPool fCmdPool; + GrVkResourceProvider fResourceProvider; + VkCommandPool fCmdPool; - GrVkPrimaryCommandBuffer* fCurrentCmdBuffer; + GrVkPrimaryCommandBuffer* fCurrentCmdBuffer; - SkSTArray<1, GrVkSemaphore::Resource*> fSemaphoresToWaitOn; - SkSTArray<1, GrVkSemaphore::Resource*> fSemaphoresToSignal; + SkSTArray<1, GrVkSemaphore::Resource*> fSemaphoresToWaitOn; + SkSTArray<1, GrVkSemaphore::Resource*> fSemaphoresToSignal; - VkPhysicalDeviceProperties fPhysDevProps; - VkPhysicalDeviceMemoryProperties fPhysDevMemProps; + VkPhysicalDeviceProperties fPhysDevProps; + VkPhysicalDeviceMemoryProperties fPhysDevMemProps; - GrVkCopyManager fCopyManager; + GrVkCopyManager fCopyManager; #ifdef SK_ENABLE_VK_LAYERS // For reporting validation layer errors @@ -251,11 +252,11 @@ private: // compiler used for compiling sksl into spirv. We only want to create the compiler once since // there is significant overhead to the first compile of any compiler. - SkSL::Compiler* fCompiler; + SkSL::Compiler* fCompiler; // We need a bool to track whether or not we've already disconnected all the gpu resources from // vulkan context. - bool fDisconnected; + bool fDisconnected; typedef GrGpu INHERITED; }; |