author    Greg Daniel <egdaniel@google.com>    2018-06-28 16:47:48 +0000
committer Skia Commit-Bot <skia-commit-bot@chromium.org>    2018-06-28 17:20:27 +0000
commit    0db3a8846a4d05d55c36b7fa95fa6fbc1a6639cd (patch)
tree      1bf5b267f22fcc844b9a6e246921e2e217195e44 /tools/gpu
parent    8849c95710227b6442a3d4ce5c48f6f4e4127e93 (diff)
Reland "Make GrVkBackendContext no longer derive from SkRefCnt."
This reverts commit 059a9ab4bcd07a4bfdbfef333c27ef3d277e0e46.

Reason for revert: Fix landed in Fuchsia

Original change's description:
> Revert "Make GrVkBackendContext no longer derive from SkRefCnt."
>
> This reverts commit 93ae2337732bf206e6ef4faecc6b30c3881e8359.
>
> Reason for revert: <INSERT REASONING HERE>
>
> Original change's description:
> > Make GrVkBackendContext no longer derive from SkRefCnt.
> >
> > Also moves the helper Create functions to VkTestUtils since no clients
> > are using them anymore.
> >
> > Bug: skia:
> > Change-Id: I7e8e4912e7ef6fb00a7e2a00407aed5e83211799
> > Reviewed-on: https://skia-review.googlesource.com/135323
> > Reviewed-by: Jim Van Verth <jvanverth@google.com>
> > Reviewed-by: Brian Salomon <bsalomon@google.com>
> > Commit-Queue: Greg Daniel <egdaniel@google.com>
>
> TBR=egdaniel@google.com,jvanverth@google.com,bsalomon@google.com
>
> # Not skipping CQ checks because original CL landed > 1 day ago.
>
> Bug: skia:
> Change-Id: If7201917631dc22753ea3fa6e9d2984463e38e4c
> Reviewed-on: https://skia-review.googlesource.com/137903
> Reviewed-by: Greg Daniel <egdaniel@google.com>
> Commit-Queue: Greg Daniel <egdaniel@google.com>

TBR=egdaniel@google.com,jvanverth@google.com,bsalomon@google.com

# Not skipping CQ checks because original CL landed > 1 day ago.

Bug: skia:
Change-Id: Ia4b7c0bb2c7b5dba809d85c69f0b41b473140526
Reviewed-on: https://skia-review.googlesource.com/138181
Reviewed-by: Greg Daniel <egdaniel@google.com>
Commit-Queue: Greg Daniel <egdaniel@google.com>
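In practice the change means test code now fills a GrVkBackendContext in place instead of ref-counting one. A minimal caller sketch, assuming only the helpers touched in the diff below; MakeTestBackendContext itself is a hypothetical wrapper and not part of this CL:

// Sketch only; include paths are relative to tools/gpu/vk.
#include "vk/GrVkBackendContext.h"
#include "VkTestUtils.h"

bool MakeTestBackendContext(GrVkBackendContext* ctx) {
    PFN_vkGetInstanceProcAddr instProc;
    PFN_vkGetDeviceProcAddr devProc;
    // Resolve the two root proc-address entry points from the Vulkan library.
    if (!sk_gpu_test::LoadVkLibraryAndGetProcAddrFuncs(&instProc, &devProc)) {
        return false;
    }
    // The backend context is now a plain struct filled in place; the caller owns
    // the resulting VkInstance/VkDevice and tears them down itself (see teardown()
    // in VkTestContext.cpp below).
    return sk_gpu_test::CreateVkBackendContext(instProc, devProc, ctx);
}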
Diffstat (limited to 'tools/gpu')
-rw-r--r--    tools/gpu/vk/VkTestContext.cpp    30
-rw-r--r--    tools/gpu/vk/VkTestContext.h      10
-rw-r--r--    tools/gpu/vk/VkTestUtils.cpp      320
-rw-r--r--    tools/gpu/vk/VkTestUtils.h        12
4 files changed, 357 insertions, 15 deletions
diff --git a/tools/gpu/vk/VkTestContext.cpp b/tools/gpu/vk/VkTestContext.cpp
index 25069fe521..592fb0f7b0 100644
--- a/tools/gpu/vk/VkTestContext.cpp
+++ b/tools/gpu/vk/VkTestContext.cpp
@@ -110,21 +110,24 @@ GR_STATIC_ASSERT(sizeof(VkFence) <= sizeof(sk_gpu_test::PlatformFence));
class VkTestContextImpl : public sk_gpu_test::VkTestContext {
public:
static VkTestContext* Create(VkTestContext* sharedContext) {
- sk_sp<const GrVkBackendContext> backendContext;
+ GrVkBackendContext backendContext;
+ bool ownsContext = true;
if (sharedContext) {
backendContext = sharedContext->getVkBackendContext();
+ // We always delete the parent context last, so make sure the child does not think it
+ // owns the vulkan context.
+ ownsContext = false;
} else {
PFN_vkGetInstanceProcAddr instProc;
PFN_vkGetDeviceProcAddr devProc;
if (!sk_gpu_test::LoadVkLibraryAndGetProcAddrFuncs(&instProc, &devProc)) {
return nullptr;
}
- backendContext.reset(GrVkBackendContext::Create(instProc, devProc));
- }
- if (!backendContext) {
- return nullptr;
+ if (!sk_gpu_test::CreateVkBackendContext(instProc, devProc, &backendContext)) {
+ return nullptr;
+ }
}
- return new VkTestContextImpl(std::move(backendContext));
+ return new VkTestContextImpl(backendContext, ownsContext);
}
~VkTestContextImpl() override { this->teardown(); }
@@ -143,14 +146,19 @@ public:
protected:
void teardown() override {
INHERITED::teardown();
- fVk.reset(nullptr);
+ fVk.fMemoryAllocator.reset();
+ if (fOwnsContext) {
+ GR_VK_CALL(this->vk(), DeviceWaitIdle(fVk.fDevice));
+ GR_VK_CALL(this->vk(), DestroyDevice(fVk.fDevice, nullptr));
+ GR_VK_CALL(this->vk(), DestroyInstance(fVk.fInstance, nullptr));
+ }
}
private:
- VkTestContextImpl(sk_sp<const GrVkBackendContext> backendContext)
- : VkTestContext(std::move(backendContext)) {
- fFenceSync.reset(new VkFenceSync(fVk->fInterface, fVk->fDevice, fVk->fQueue,
- fVk->fGraphicsQueueIndex));
+ VkTestContextImpl(const GrVkBackendContext& backendContext, bool ownsContext)
+ : VkTestContext(backendContext, ownsContext) {
+ fFenceSync.reset(new VkFenceSync(fVk.fInterface, fVk.fDevice, fVk.fQueue,
+ fVk.fGraphicsQueueIndex));
}
void onPlatformMakeCurrent() const override {}
diff --git a/tools/gpu/vk/VkTestContext.h b/tools/gpu/vk/VkTestContext.h
index 9fd4170d9d..0e62cc0b8c 100644
--- a/tools/gpu/vk/VkTestContext.h
+++ b/tools/gpu/vk/VkTestContext.h
@@ -19,16 +19,18 @@ class VkTestContext : public TestContext {
public:
virtual GrBackend backend() override { return kVulkan_GrBackend; }
- sk_sp<const GrVkBackendContext> getVkBackendContext() {
+ const GrVkBackendContext& getVkBackendContext() {
return fVk;
}
- const GrVkInterface* vk() const { return fVk->fInterface.get(); }
+ const GrVkInterface* vk() const { return fVk.fInterface.get(); }
protected:
- VkTestContext(sk_sp<const GrVkBackendContext> vk) : fVk(std::move(vk)) {}
+ VkTestContext(const GrVkBackendContext& vk, bool ownsContext)
+ : fVk(vk), fOwnsContext(ownsContext) {}
- sk_sp<const GrVkBackendContext> fVk;
+ GrVkBackendContext fVk;
+ bool fOwnsContext;
private:
typedef TestContext INHERITED;
diff --git a/tools/gpu/vk/VkTestUtils.cpp b/tools/gpu/vk/VkTestUtils.cpp
index c28a42b2c9..927f4fbe82 100644
--- a/tools/gpu/vk/VkTestUtils.cpp
+++ b/tools/gpu/vk/VkTestUtils.cpp
@@ -9,6 +9,9 @@
#ifdef SK_VULKAN
+#include "SkAutoMalloc.h"
+#include "vk/GrVkBackendContext.h"
+#include "vk/GrVkExtensions.h"
#include "../ports/SkOSLibrary.h"
namespace sk_gpu_test {
@@ -46,6 +49,323 @@ bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr* instProc,
return true;
#endif
}
+
+////////////////////////////////////////////////////////////////////////////////
+// Helper code to set up Vulkan context objects
+
+#ifdef SK_ENABLE_VK_LAYERS
+const char* kDebugLayerNames[] = {
+ // elements of VK_LAYER_LUNARG_standard_validation
+ "VK_LAYER_GOOGLE_threading",
+ "VK_LAYER_LUNARG_parameter_validation",
+ "VK_LAYER_LUNARG_object_tracker",
+ "VK_LAYER_LUNARG_image",
+ "VK_LAYER_LUNARG_core_validation",
+ "VK_LAYER_LUNARG_swapchain",
+ "VK_LAYER_GOOGLE_unique_objects",
+ // not included in standard_validation
+ //"VK_LAYER_LUNARG_api_dump",
+ //"VK_LAYER_LUNARG_vktrace",
+ //"VK_LAYER_LUNARG_screenshot",
+};
+#endif
+
+// the minimum version of Vulkan supported
+#ifdef SK_BUILD_FOR_ANDROID
+const uint32_t kGrVkMinimumVersion = VK_MAKE_VERSION(1, 0, 3);
+#else
+const uint32_t kGrVkMinimumVersion = VK_MAKE_VERSION(1, 0, 8);
+#endif
+
+#define ACQUIRE_VK_PROC(name, instance, device) \
+ PFN_vk##name grVk##name = \
+ reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
+ if (grVk##name == nullptr) { \
+ SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
+ return false; \
+ }
+
+bool CreateVkBackendContext(const GrVkInterface::GetInstanceProc& getInstanceProc,
+ const GrVkInterface::GetDeviceProc& getDeviceProc,
+ GrVkBackendContext* ctx,
+ uint32_t* presentQueueIndexPtr,
+ CanPresentFn canPresent) {
+ auto getProc = [&getInstanceProc, &getDeviceProc](const char* proc_name,
+ VkInstance instance, VkDevice device) {
+ if (device != VK_NULL_HANDLE) {
+ return getDeviceProc(device, proc_name);
+ }
+ return getInstanceProc(instance, proc_name);
+ };
+
+ VkPhysicalDevice physDev;
+ VkDevice device;
+ VkInstance inst;
+ VkResult err;
+
+ const VkApplicationInfo app_info = {
+ VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
+ nullptr, // pNext
+ "vktest", // pApplicationName
+ 0, // applicationVersion
+ "vktest", // pEngineName
+ 0, // engineVersion
+ kGrVkMinimumVersion, // apiVersion
+ };
+
+ GrVkExtensions extensions(getProc);
+ extensions.initInstance(kGrVkMinimumVersion);
+
+ SkTArray<const char*> instanceLayerNames;
+ SkTArray<const char*> instanceExtensionNames;
+ uint32_t extensionFlags = 0;
+#ifdef SK_ENABLE_VK_LAYERS
+ for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
+ if (extensions.hasInstanceLayer(kDebugLayerNames[i])) {
+ instanceLayerNames.push_back(kDebugLayerNames[i]);
+ }
+ }
+ if (extensions.hasInstanceExtension(VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
+ instanceExtensionNames.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME);
+ extensionFlags |= kEXT_debug_report_GrVkExtensionFlag;
+ }
+#endif
+
+ if (extensions.hasInstanceExtension(VK_KHR_SURFACE_EXTENSION_NAME)) {
+ instanceExtensionNames.push_back(VK_KHR_SURFACE_EXTENSION_NAME);
+ extensionFlags |= kKHR_surface_GrVkExtensionFlag;
+ }
+ if (extensions.hasInstanceExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
+ instanceExtensionNames.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
+ extensionFlags |= kKHR_swapchain_GrVkExtensionFlag;
+ }
+#ifdef SK_BUILD_FOR_WIN
+ if (extensions.hasInstanceExtension(VK_KHR_WIN32_SURFACE_EXTENSION_NAME)) {
+ instanceExtensionNames.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
+ extensionFlags |= kKHR_win32_surface_GrVkExtensionFlag;
+ }
+#elif defined(SK_BUILD_FOR_ANDROID)
+ if (extensions.hasInstanceExtension(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
+ instanceExtensionNames.push_back(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME);
+ extensionFlags |= kKHR_android_surface_GrVkExtensionFlag;
+ }
+#elif defined(SK_BUILD_FOR_UNIX) && !defined(__Fuchsia__)
+ if (extensions.hasInstanceExtension(VK_KHR_XCB_SURFACE_EXTENSION_NAME)) {
+ instanceExtensionNames.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME);
+ extensionFlags |= kKHR_xcb_surface_GrVkExtensionFlag;
+ }
+#endif
+
+ const VkInstanceCreateInfo instance_create = {
+ VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // flags
+ &app_info, // pApplicationInfo
+ (uint32_t) instanceLayerNames.count(), // enabledLayerNameCount
+ instanceLayerNames.begin(), // ppEnabledLayerNames
+ (uint32_t) instanceExtensionNames.count(), // enabledExtensionNameCount
+ instanceExtensionNames.begin(), // ppEnabledExtensionNames
+ };
+
+ ACQUIRE_VK_PROC(CreateInstance, VK_NULL_HANDLE, VK_NULL_HANDLE);
+ err = grVkCreateInstance(&instance_create, nullptr, &inst);
+ if (err < 0) {
+ SkDebugf("vkCreateInstance failed: %d\n", err);
+ return false;
+ }
+
+ ACQUIRE_VK_PROC(DestroyInstance, inst, VK_NULL_HANDLE);
+ ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
+ ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
+ ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
+ ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
+ ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
+ ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
+ ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);
+
+ uint32_t gpuCount;
+ err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
+ if (err) {
+ SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
+ grVkDestroyInstance(inst, nullptr);
+ return false;
+ }
+ if (!gpuCount) {
+ SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
+ grVkDestroyInstance(inst, nullptr);
+ return false;
+ }
+ // Just returning the first physical device instead of getting the whole array.
+ // TODO: find best match for our needs
+ gpuCount = 1;
+ err = grVkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
+ // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
+ if (err && VK_INCOMPLETE != err) {
+ SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
+ grVkDestroyInstance(inst, nullptr);
+ return false;
+ }
+
+ // query to get the initial queue props size
+ uint32_t queueCount;
+ grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
+ if (!queueCount) {
+ SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
+ grVkDestroyInstance(inst, nullptr);
+ return false;
+ }
+
+ SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
+ // now get the actual queue props
+ VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();
+
+ grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);
+
+ // iterate to find the graphics queue
+ uint32_t graphicsQueueIndex = queueCount;
+ for (uint32_t i = 0; i < queueCount; i++) {
+ if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
+ graphicsQueueIndex = i;
+ break;
+ }
+ }
+ if (graphicsQueueIndex == queueCount) {
+ SkDebugf("Could not find any supported graphics queues.\n");
+ grVkDestroyInstance(inst, nullptr);
+ return false;
+ }
+
+ // iterate to find the present queue, if needed
+ uint32_t presentQueueIndex = queueCount;
+ if (presentQueueIndexPtr && canPresent) {
+ for (uint32_t i = 0; i < queueCount; i++) {
+ if (canPresent(inst, physDev, i)) {
+ presentQueueIndex = i;
+ break;
+ }
+ }
+ if (presentQueueIndex == queueCount) {
+ SkDebugf("Could not find any supported present queues.\n");
+ grVkDestroyInstance(inst, nullptr);
+ return false;
+ }
+ *presentQueueIndexPtr = presentQueueIndex;
+ } else {
+ // Just setting this so we end up making a single queue for graphics since there was no
+ // request for a present queue.
+ presentQueueIndex = graphicsQueueIndex;
+ }
+
+ extensions.initDevice(kGrVkMinimumVersion, inst, physDev);
+
+ SkTArray<const char*> deviceLayerNames;
+ SkTArray<const char*> deviceExtensionNames;
+#ifdef SK_ENABLE_VK_LAYERS
+ for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
+ if (extensions.hasDeviceLayer(kDebugLayerNames[i])) {
+ deviceLayerNames.push_back(kDebugLayerNames[i]);
+ }
+ }
+#endif
+ if (extensions.hasDeviceExtension(VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
+ deviceExtensionNames.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
+ extensionFlags |= kKHR_swapchain_GrVkExtensionFlag;
+ }
+ if (extensions.hasDeviceExtension("VK_NV_glsl_shader")) {
+ deviceExtensionNames.push_back("VK_NV_glsl_shader");
+ extensionFlags |= kNV_glsl_shader_GrVkExtensionFlag;
+ }
+
+ // query to get the physical device properties
+ VkPhysicalDeviceFeatures deviceFeatures;
+ grVkGetPhysicalDeviceFeatures(physDev, &deviceFeatures);
+ // this looks like it would slow things down,
+ // and we can't depend on it on all platforms
+ deviceFeatures.robustBufferAccess = VK_FALSE;
+
+ uint32_t featureFlags = 0;
+ if (deviceFeatures.geometryShader) {
+ featureFlags |= kGeometryShader_GrVkFeatureFlag;
+ }
+ if (deviceFeatures.dualSrcBlend) {
+ featureFlags |= kDualSrcBlend_GrVkFeatureFlag;
+ }
+ if (deviceFeatures.sampleRateShading) {
+ featureFlags |= kSampleRateShading_GrVkFeatureFlag;
+ }
+
+ float queuePriorities[1] = { 0.0 };
+ // Here we assume no need for swapchain queue
+ // If one is needed, the client will need its own setup code
+ const VkDeviceQueueCreateInfo queueInfo[2] = {
+ {
+ VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // VkDeviceQueueCreateFlags
+ graphicsQueueIndex, // queueFamilyIndex
+ 1, // queueCount
+ queuePriorities, // pQueuePriorities
+ },
+ {
+ VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // VkDeviceQueueCreateFlags
+ presentQueueIndex, // queueFamilyIndex
+ 1, // queueCount
+ queuePriorities, // pQueuePriorities
+ }
+ };
+ uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;
+
+ const VkDeviceCreateInfo deviceInfo = {
+ VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // VkDeviceCreateFlags
+ queueInfoCount, // queueCreateInfoCount
+ queueInfo, // pQueueCreateInfos
+ (uint32_t) deviceLayerNames.count(), // layerCount
+ deviceLayerNames.begin(), // ppEnabledLayerNames
+ (uint32_t) deviceExtensionNames.count(), // extensionCount
+ deviceExtensionNames.begin(), // ppEnabledExtensionNames
+ &deviceFeatures // ppEnabledFeatures
+ };
+
+ err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
+ if (err) {
+ SkDebugf("CreateDevice failed: %d\n", err);
+ grVkDestroyInstance(inst, nullptr);
+ return false;
+ }
+
+ auto interface =
+ sk_make_sp<GrVkInterface>(getProc, inst, device, extensionFlags);
+ if (!interface->validate(extensionFlags)) {
+ SkDebugf("Vulkan interface validation failed\n");
+ grVkDeviceWaitIdle(device);
+ grVkDestroyDevice(device, nullptr);
+ grVkDestroyInstance(inst, nullptr);
+ return false;
+ }
+
+ VkQueue queue;
+ grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
+
+ ctx->fInstance = inst;
+ ctx->fPhysicalDevice = physDev;
+ ctx->fDevice = device;
+ ctx->fQueue = queue;
+ ctx->fGraphicsQueueIndex = graphicsQueueIndex;
+ ctx->fMinAPIVersion = kGrVkMinimumVersion;
+ ctx->fExtensions = extensionFlags;
+ ctx->fFeatures = featureFlags;
+ ctx->fInterface.reset(interface.release());
+ ctx->fOwnsInstanceAndDevice = false;
+
+ return true;
+}
+
}
#endif
diff --git a/tools/gpu/vk/VkTestUtils.h b/tools/gpu/vk/VkTestUtils.h
index 9f34ef09c5..30471a78c0 100644
--- a/tools/gpu/vk/VkTestUtils.h
+++ b/tools/gpu/vk/VkTestUtils.h
@@ -13,9 +13,21 @@
#ifdef SK_VULKAN
#include "vk/GrVkDefines.h"
+#include "vk/GrVkInterface.h"
+
+struct GrVkBackendContext;
namespace sk_gpu_test {
bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr*, PFN_vkGetDeviceProcAddr*);
+
+ using CanPresentFn = std::function<bool(VkInstance, VkPhysicalDevice,
+ uint32_t queueFamilyIndex)>;
+
+ bool CreateVkBackendContext(const GrVkInterface::GetInstanceProc& getInstanceProc,
+ const GrVkInterface::GetDeviceProc& getDeviceProc,
+ GrVkBackendContext* ctx,
+ uint32_t* presentQueueIndexPtr = nullptr,
+ CanPresentFn canPresent = CanPresentFn());
}
#endif
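A hedged sketch of using the optional present-queue parameters declared above; the VkSurfaceKHR and the loaded vkGetPhysicalDeviceSurfaceSupportKHR pointer are assumed to come from the embedder's window-system code and are not part of this CL:

// Hypothetical present-capable setup built on the API above.
bool MakePresentableBackendContext(PFN_vkGetInstanceProcAddr instProc,
                                   PFN_vkGetDeviceProcAddr devProc,
                                   VkSurfaceKHR surface,
                                   PFN_vkGetPhysicalDeviceSurfaceSupportKHR getSurfaceSupport,
                                   GrVkBackendContext* ctx,
                                   uint32_t* presentQueueIndex) {
    // CreateVkBackendContext asks this callback, per queue family, whether that
    // family can present to the target surface.
    auto canPresent = [surface, getSurfaceSupport](VkInstance, VkPhysicalDevice physDev,
                                                   uint32_t queueFamilyIndex) {
        VkBool32 supported = VK_FALSE;
        getSurfaceSupport(physDev, queueFamilyIndex, surface, &supported);
        return supported == VK_TRUE;
    };
    return sk_gpu_test::CreateVkBackendContext(instProc, devProc, ctx,
                                               presentQueueIndex, canPresent);
}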