author    Greg Daniel <egdaniel@google.com>    2016-02-22 09:56:40 -0500
committer Greg Daniel <egdaniel@google.com>    2016-02-22 09:56:40 -0500
commit    164a9f061c5186ae931cc23a3c73f32472e80ff5 (patch)
tree      79929f22803c622a2e0c8dbc333717c275ca0405 /src
parent    129ed1cd6d792f3f6cf563aefa9756fc6308289d (diff)
Add vulkan files into skia repo. This is an incomplete backend with only partial functionality at this time.
Diffstat (limited to 'src')
-rw-r--r--  src/gpu/vk/GrVkBuffer.cpp  177
-rw-r--r--  src/gpu/vk/GrVkBuffer.h  92
-rw-r--r--  src/gpu/vk/GrVkCaps.cpp  244
-rw-r--r--  src/gpu/vk/GrVkCaps.h  108
-rw-r--r--  src/gpu/vk/GrVkCommandBuffer.cpp  371
-rw-r--r--  src/gpu/vk/GrVkCommandBuffer.h  206
-rw-r--r--  src/gpu/vk/GrVkDescriptorPool.cpp  79
-rw-r--r--  src/gpu/vk/GrVkDescriptorPool.h  56
-rw-r--r--  src/gpu/vk/GrVkFramebuffer.cpp  63
-rw-r--r--  src/gpu/vk/GrVkFramebuffer.h  45
-rw-r--r--  src/gpu/vk/GrVkGpu.cpp  1272
-rw-r--r--  src/gpu/vk/GrVkGpu.h  220
-rw-r--r--  src/gpu/vk/GrVkImage.cpp  118
-rw-r--r--  src/gpu/vk/GrVkImage.h  108
-rw-r--r--  src/gpu/vk/GrVkImageView.cpp  49
-rw-r--r--  src/gpu/vk/GrVkImageView.h  41
-rw-r--r--  src/gpu/vk/GrVkIndexBuffer.cpp  76
-rw-r--r--  src/gpu/vk/GrVkIndexBuffer.h  39
-rw-r--r--  src/gpu/vk/GrVkInterface.cpp  340
-rw-r--r--  src/gpu/vk/GrVkMemory.cpp  157
-rw-r--r--  src/gpu/vk/GrVkMemory.h  35
-rw-r--r--  src/gpu/vk/GrVkPipeline.cpp  507
-rw-r--r--  src/gpu/vk/GrVkPipeline.h  49
-rw-r--r--  src/gpu/vk/GrVkProgram.cpp  367
-rw-r--r--  src/gpu/vk/GrVkProgram.h  161
-rw-r--r--  src/gpu/vk/GrVkProgramBuilder.cpp  323
-rw-r--r--  src/gpu/vk/GrVkProgramBuilder.h  74
-rw-r--r--  src/gpu/vk/GrVkProgramDataManager.cpp  315
-rw-r--r--  src/gpu/vk/GrVkProgramDataManager.h  70
-rw-r--r--  src/gpu/vk/GrVkProgramDesc.cpp  155
-rw-r--r--  src/gpu/vk/GrVkProgramDesc.h  69
-rw-r--r--  src/gpu/vk/GrVkRenderPass.cpp  220
-rw-r--r--  src/gpu/vk/GrVkRenderPass.h  90
-rw-r--r--  src/gpu/vk/GrVkRenderTarget.cpp  391
-rw-r--r--  src/gpu/vk/GrVkRenderTarget.h  137
-rw-r--r--  src/gpu/vk/GrVkResource.h  170
-rw-r--r--  src/gpu/vk/GrVkResourceProvider.cpp  118
-rw-r--r--  src/gpu/vk/GrVkResourceProvider.h  77
-rw-r--r--  src/gpu/vk/GrVkSampler.cpp  74
-rw-r--r--  src/gpu/vk/GrVkSampler.h  35
-rw-r--r--  src/gpu/vk/GrVkStencilAttachment.cpp  105
-rw-r--r--  src/gpu/vk/GrVkStencilAttachment.h  62
-rw-r--r--  src/gpu/vk/GrVkTexture.cpp  124
-rw-r--r--  src/gpu/vk/GrVkTexture.h  61
-rw-r--r--  src/gpu/vk/GrVkTextureRenderTarget.cpp  150
-rw-r--r--  src/gpu/vk/GrVkTextureRenderTarget.h  89
-rw-r--r--  src/gpu/vk/GrVkTransferBuffer.cpp  58
-rw-r--r--  src/gpu/vk/GrVkTransferBuffer.h  54
-rw-r--r--  src/gpu/vk/GrVkUniformBuffer.cpp  31
-rw-r--r--  src/gpu/vk/GrVkUniformBuffer.h  45
-rw-r--r--  src/gpu/vk/GrVkUniformHandler.cpp  149
-rw-r--r--  src/gpu/vk/GrVkUniformHandler.h  85
-rw-r--r--  src/gpu/vk/GrVkUtil.cpp  94
-rw-r--r--  src/gpu/vk/GrVkUtil.h  36
-rw-r--r--  src/gpu/vk/GrVkVaryingHandler.cpp  26
-rw-r--r--  src/gpu/vk/GrVkVaryingHandler.h  27
-rw-r--r--  src/gpu/vk/GrVkVertexBuffer.cpp  75
-rw-r--r--  src/gpu/vk/GrVkVertexBuffer.h  38
58 files changed, 8607 insertions, 0 deletions
diff --git a/src/gpu/vk/GrVkBuffer.cpp b/src/gpu/vk/GrVkBuffer.cpp
new file mode 100644
index 0000000000..049fe414f3
--- /dev/null
+++ b/src/gpu/vk/GrVkBuffer.cpp
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkBuffer.h"
+#include "GrVkGpu.h"
+#include "GrVkMemory.h"
+#include "GrVkUtil.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+#ifdef SK_DEBUG
+#define VALIDATE() this->validate()
+#else
+#define VALIDATE() do {} while(false)
+#endif
+
+const GrVkBuffer::Resource* GrVkBuffer::Create(const GrVkGpu* gpu, const Desc& desc) {
+ VkBuffer buffer;
+ VkDeviceMemory alloc;
+
+ // create the buffer object
+ VkBufferCreateInfo bufInfo;
+ memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
+ bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ bufInfo.flags = 0;
+ bufInfo.size = desc.fSizeInBytes;
+ switch (desc.fType) {
+ case kVertex_Type:
+ bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
+ break;
+ case kIndex_Type:
+ bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
+ break;
+ case kUniform_Type:
+ bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+ break;
+ case kCopyRead_Type:
+ bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+ break;
+ case kCopyWrite_Type:
+ bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+ break;
+    }
+ bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ bufInfo.queueFamilyIndexCount = 0;
+ bufInfo.pQueueFamilyIndices = nullptr;
+
+ VkResult err;
+ err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
+ if (err) {
+ return nullptr;
+ }
+
+ VkMemoryPropertyFlags requiredMemProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+
+ if (!GrVkMemory::AllocAndBindBufferMemory(gpu,
+ buffer,
+ requiredMemProps,
+ &alloc)) {
+ VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
+ return nullptr;
+ }
+
+ const GrVkBuffer::Resource* resource = new GrVkBuffer::Resource(buffer, alloc);
+ if (!resource) {
+ VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
+ VK_CALL(gpu, FreeMemory(gpu->device(), alloc, nullptr));
+ return nullptr;
+ }
+
+ return resource;
+}
+
+void GrVkBuffer::addMemoryBarrier(const GrVkGpu* gpu,
+ VkAccessFlags srcAccessMask,
+                                  VkAccessFlags dstAccessMask,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion) const {
+ VkBufferMemoryBarrier bufferMemoryBarrier = {
+ VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // sType
+ NULL, // pNext
+ srcAccessMask, // srcAccessMask
+        dstAccessMask,                           // dstAccessMask
+ VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
+ VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
+ this->buffer(), // buffer
+ 0, // offset
+ fDesc.fSizeInBytes, // size
+ };
+
+ // TODO: restrict to area of buffer we're interested in
+ gpu->addBufferMemoryBarrier(srcStageMask, dstStageMask, byRegion, &bufferMemoryBarrier);
+}
+
+void GrVkBuffer::Resource::freeGPUData(const GrVkGpu* gpu) const {
+ SkASSERT(fBuffer);
+ SkASSERT(fAlloc);
+ VK_CALL(gpu, DestroyBuffer(gpu->device(), fBuffer, nullptr));
+ VK_CALL(gpu, FreeMemory(gpu->device(), fAlloc, nullptr));
+}
+
+void GrVkBuffer::vkRelease(const GrVkGpu* gpu) {
+ VALIDATE();
+ fResource->unref(gpu);
+ fResource = nullptr;
+ fMapPtr = nullptr;
+ VALIDATE();
+}
+
+void GrVkBuffer::vkAbandon() {
+ fResource->unrefAndAbandon();
+ fMapPtr = nullptr;
+ VALIDATE();
+}
+
+void* GrVkBuffer::vkMap(const GrVkGpu* gpu) {
+ VALIDATE();
+ SkASSERT(!this->vkIsMapped());
+
+ VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc(), 0, VK_WHOLE_SIZE, 0, &fMapPtr));
+ if (err) {
+ fMapPtr = nullptr;
+ }
+
+ VALIDATE();
+ return fMapPtr;
+}
+
+void GrVkBuffer::vkUnmap(const GrVkGpu* gpu) {
+ VALIDATE();
+ SkASSERT(this->vkIsMapped());
+
+ VK_CALL(gpu, UnmapMemory(gpu->device(), alloc()));
+
+ fMapPtr = nullptr;
+}
+
+bool GrVkBuffer::vkIsMapped() const {
+ VALIDATE();
+ return SkToBool(fMapPtr);
+}
+
+bool GrVkBuffer::vkUpdateData(const GrVkGpu* gpu, const void* src, size_t srcSizeInBytes) {
+ SkASSERT(!this->vkIsMapped());
+ VALIDATE();
+ if (srcSizeInBytes > fDesc.fSizeInBytes) {
+ return false;
+ }
+
+ void* mapPtr;
+ VkResult err = VK_CALL(gpu, MapMemory(gpu->device(), alloc(), 0, srcSizeInBytes, 0, &mapPtr));
+
+ if (VK_SUCCESS != err) {
+ return false;
+ }
+
+ memcpy(mapPtr, src, srcSizeInBytes);
+
+ VK_CALL(gpu, UnmapMemory(gpu->device(), alloc()));
+
+ return true;
+}
+
+void GrVkBuffer::validate() const {
+ SkASSERT(!fResource || kVertex_Type == fDesc.fType || kIndex_Type == fDesc.fType
+ || kCopyRead_Type == fDesc.fType || kCopyWrite_Type == fDesc.fType
+ || kUniform_Type == fDesc.fType);
+}
+
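
For reference, the map/copy/unmap sequence that vkUpdateData() performs above looks like this when written directly against the raw Vulkan API. This is a minimal standalone sketch, not Skia code; it assumes the memory was allocated host-visible and host-coherent, as GrVkBuffer::Create requests, so no explicit flush is needed.

#include <vulkan/vulkan.h>
#include <cstring>

// Writes 'size' bytes from 'src' into host-visible, host-coherent buffer
// memory: the same pattern GrVkBuffer::vkUpdateData uses through the Skia
// interface table.
bool upload_to_host_visible_memory(VkDevice device, VkDeviceMemory memory,
                                   const void* src, VkDeviceSize size) {
    void* mapped = nullptr;
    // Map only the range we intend to write.
    if (vkMapMemory(device, memory, /*offset=*/0, size, /*flags=*/0, &mapped) != VK_SUCCESS) {
        return false;
    }
    memcpy(mapped, src, static_cast<size_t>(size));
    // No vkFlushMappedMemoryRanges needed: the allocation is assumed to have
    // VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, as in GrVkBuffer::Create.
    vkUnmapMemory(device, memory);
    return true;
}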
diff --git a/src/gpu/vk/GrVkBuffer.h b/src/gpu/vk/GrVkBuffer.h
new file mode 100644
index 0000000000..7dac3a1149
--- /dev/null
+++ b/src/gpu/vk/GrVkBuffer.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkBuffer_DEFINED
+#define GrVkBuffer_DEFINED
+
+#include "vk/GrVkInterface.h"
+#include "GrVkResource.h"
+
+class GrVkGpu;
+
+/**
+ * This class serves as the base of GrVk*Buffer classes. It was written to avoid code
+ * duplication in those classes.
+ */
+class GrVkBuffer : public SkNoncopyable {
+public:
+ ~GrVkBuffer() {
+ // either release or abandon should have been called by the owner of this object.
+ SkASSERT(!fResource);
+ }
+
+ VkBuffer buffer() const { return fResource->fBuffer; }
+ VkDeviceMemory alloc() const { return fResource->fAlloc; }
+ const GrVkResource* resource() const { return fResource; }
+ size_t size() const { return fDesc.fSizeInBytes; }
+
+ void addMemoryBarrier(const GrVkGpu* gpu,
+ VkAccessFlags srcAccessMask,
+ VkAccessFlags dstAccessMask,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion) const;
+
+ enum Type {
+ kVertex_Type,
+ kIndex_Type,
+ kUniform_Type,
+ kCopyRead_Type,
+ kCopyWrite_Type,
+ };
+
+protected:
+ struct Desc {
+ size_t fSizeInBytes;
+ Type fType; // vertex buffer, index buffer, etc.
+ bool fDynamic;
+ };
+
+ class Resource : public GrVkResource {
+ public:
+ Resource(VkBuffer buf, VkDeviceMemory alloc) : INHERITED(), fBuffer(buf), fAlloc(alloc) {}
+
+ VkBuffer fBuffer;
+ VkDeviceMemory fAlloc;
+ private:
+ void freeGPUData(const GrVkGpu* gpu) const;
+
+ typedef GrVkResource INHERITED;
+ };
+
+ // convenience routine for raw buffer creation
+ static const Resource* Create(const GrVkGpu* gpu,
+ const Desc& descriptor);
+
+ GrVkBuffer(const Desc& desc, const GrVkBuffer::Resource* resource)
+ : fDesc(desc), fResource(resource), fMapPtr(nullptr) {
+ }
+
+ void* vkMap(const GrVkGpu* gpu);
+ void vkUnmap(const GrVkGpu* gpu);
+ bool vkUpdateData(const GrVkGpu* gpu, const void* src, size_t srcSizeInBytes);
+
+ void vkAbandon();
+ void vkRelease(const GrVkGpu* gpu);
+
+private:
+ void validate() const;
+ bool vkIsMapped() const;
+
+ Desc fDesc;
+ const Resource* fResource;
+ void* fMapPtr;
+
+    typedef SkNoncopyable INHERITED;
+};
+
+#endif
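
The protected Desc/Resource/Create pieces above are designed to be driven by the concrete GrVk*Buffer subclasses added later in this change. The following is a simplified, hypothetical subclass sketch of that pattern; GrVkExampleBuffer is an invented name, not part of this CL.

#include "GrVkBuffer.h"

// Hypothetical sketch of how a GrVk*Buffer subclass builds on GrVkBuffer;
// not the actual GrVkVertexBuffer implementation.
class GrVkExampleBuffer : public GrVkBuffer {
public:
    static GrVkExampleBuffer* Create(const GrVkGpu* gpu, size_t size) {
        Desc desc;
        desc.fSizeInBytes = size;
        desc.fType = kVertex_Type;
        desc.fDynamic = true;
        const Resource* resource = GrVkBuffer::Create(gpu, desc);
        if (!resource) {
            return nullptr;
        }
        return new GrVkExampleBuffer(desc, resource);
    }

private:
    GrVkExampleBuffer(const Desc& desc, const Resource* resource)
        : INHERITED(desc, resource) {}

    // The owner must call vkRelease() or vkAbandon() before destruction,
    // per the SkASSERT in ~GrVkBuffer().
    typedef GrVkBuffer INHERITED;
};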
diff --git a/src/gpu/vk/GrVkCaps.cpp b/src/gpu/vk/GrVkCaps.cpp
new file mode 100644
index 0000000000..a9ad05c899
--- /dev/null
+++ b/src/gpu/vk/GrVkCaps.cpp
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkCaps.h"
+
+#include "GrVkUtil.h"
+#include "glsl/GrGLSLCaps.h"
+#include "vk/GrVkInterface.h"
+
+GrVkCaps::GrVkCaps(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
+ VkPhysicalDevice physDev) : INHERITED(contextOptions) {
+ /**************************************************************************
+ * GrDrawTargetCaps fields
+ **************************************************************************/
+ fMipMapSupport = false; //TODO: figure this out
+ fNPOTTextureTileSupport = false; //TODO: figure this out
+ fTwoSidedStencilSupport = false; //TODO: figure this out
+ fStencilWrapOpsSupport = false; //TODO: figure this out
+ fDiscardRenderTargetSupport = false; //TODO: figure this out
+ fReuseScratchTextures = true; //TODO: figure this out
+ fGpuTracingSupport = false; //TODO: figure this out
+ fCompressedTexSubImageSupport = false; //TODO: figure this out
+ fOversizedStencilSupport = false; //TODO: figure this out
+
+ fUseDrawInsteadOfClear = false; //TODO: figure this out
+
+ fMapBufferFlags = kNone_MapFlags; //TODO: figure this out
+ fGeometryBufferMapThreshold = SK_MaxS32; //TODO: figure this out
+
+ fMaxRenderTargetSize = 4096; // minimum required by spec
+ fMaxTextureSize = 4096; // minimum required by spec
+ fMaxColorSampleCount = 4; // minimum required by spec
+ fMaxStencilSampleCount = 4; // minimum required by spec
+
+ fShaderCaps.reset(new GrGLSLCaps(contextOptions));
+
+ /**************************************************************************
+ * GrVkCaps fields
+ **************************************************************************/
+ fMaxSampledTextures = 16; // Spec requires a minimum of 16 sampled textures per stage
+
+ this->init(contextOptions, vkInterface, physDev);
+}
+
+void GrVkCaps::init(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
+ VkPhysicalDevice physDev) {
+
+ this->initGLSLCaps(vkInterface, physDev);
+ this->initConfigTexturableTable(vkInterface, physDev);
+ this->initConfigRenderableTable(vkInterface, physDev);
+ this->initStencilFormats(vkInterface, physDev);
+
+ VkPhysicalDeviceProperties properties;
+ GR_VK_CALL(vkInterface, GetPhysicalDeviceProperties(physDev, &properties));
+
+    // We could actually query and get a max size for each config; however, maxImageDimension2D
+    // gives the minimum max size across all configs. So for simplicity we will use that for now.
+ fMaxRenderTargetSize = properties.limits.maxImageDimension2D;
+ fMaxTextureSize = properties.limits.maxImageDimension2D;
+
+ this->initSampleCount(properties);
+
+ fMaxSampledTextures = SkTMin(properties.limits.maxPerStageDescriptorSampledImages,
+ properties.limits.maxPerStageDescriptorSamplers);
+
+ this->applyOptionsOverrides(contextOptions);
+ // need to friend GrVkCaps in GrGLSLCaps.h
+ // GrGLSLCaps* glslCaps = static_cast<GrGLSLCaps*>(fShaderCaps.get());
+ // glslCaps->applyOptionsOverrides(contextOptions);
+}
+
+static int get_max_sample_count(VkSampleCountFlags flags) {
+ SkASSERT(flags & VK_SAMPLE_COUNT_1_BIT);
+ if (!(flags & VK_SAMPLE_COUNT_2_BIT)) {
+ return 0;
+ }
+ if (!(flags & VK_SAMPLE_COUNT_4_BIT)) {
+ return 2;
+ }
+ if (!(flags & VK_SAMPLE_COUNT_8_BIT)) {
+ return 4;
+ }
+ if (!(flags & VK_SAMPLE_COUNT_16_BIT)) {
+ return 8;
+ }
+ if (!(flags & VK_SAMPLE_COUNT_32_BIT)) {
+ return 16;
+ }
+ if (!(flags & VK_SAMPLE_COUNT_64_BIT)) {
+ return 32;
+ }
+ return 64;
+}
+
+void GrVkCaps::initSampleCount(const VkPhysicalDeviceProperties& properties) {
+ VkSampleCountFlags colorSamples = properties.limits.framebufferColorSampleCounts;
+ VkSampleCountFlags stencilSamples = properties.limits.framebufferStencilSampleCounts;
+
+ fMaxColorSampleCount = get_max_sample_count(colorSamples);
+ fMaxStencilSampleCount = get_max_sample_count(stencilSamples);
+}
+
+void GrVkCaps::initGLSLCaps(const GrVkInterface* interface, VkPhysicalDevice physDev) {
+ GrGLSLCaps* glslCaps = static_cast<GrGLSLCaps*>(fShaderCaps.get());
+ // TODO: actually figure out a correct version here
+ glslCaps->fVersionDeclString = "#version 140\n";
+
+ // fConfigOutputSwizzle will default to RGBA so we only need to set it for alpha only config.
+ for (int i = 0; i < kGrPixelConfigCnt; ++i) {
+ GrPixelConfig config = static_cast<GrPixelConfig>(i);
+ if (GrPixelConfigIsAlphaOnly(config)) {
+ glslCaps->fConfigTextureSwizzle[i] = GrSwizzle::RRRR();
+ glslCaps->fConfigOutputSwizzle[i] = GrSwizzle::AAAA();
+ } else {
+ glslCaps->fConfigTextureSwizzle[i] = GrSwizzle::RGBA();
+ }
+ }
+}
+
+static void format_supported_for_feature(const GrVkInterface* interface,
+ VkPhysicalDevice physDev,
+ VkFormat format,
+ VkFormatFeatureFlagBits featureBit,
+ bool* linearSupport,
+ bool* optimalSupport) {
+ VkFormatProperties props;
+ memset(&props, 0, sizeof(VkFormatProperties));
+ GR_VK_CALL(interface, GetPhysicalDeviceFormatProperties(physDev, format, &props));
+ *linearSupport = SkToBool(props.linearTilingFeatures & featureBit);
+ *optimalSupport = SkToBool(props.optimalTilingFeatures & featureBit);
+}
+
+static void config_supported_for_feature(const GrVkInterface* interface,
+ VkPhysicalDevice physDev,
+ GrPixelConfig config,
+ VkFormatFeatureFlagBits featureBit,
+ bool* linearSupport,
+ bool* optimalSupport) {
+ VkFormat format;
+ if (!GrPixelConfigToVkFormat(config, &format)) {
+ *linearSupport = false;
+ *optimalSupport = false;
+ return;
+ }
+ format_supported_for_feature(interface, physDev, format, featureBit,
+ linearSupport, optimalSupport);
+}
+
+// Currently we just assume that if something can be rendered to without MSAA it also works with MSAA.
+#define SET_CONFIG_IS_RENDERABLE(config) \
+ config_supported_for_feature(interface, \
+ physDev, \
+ config, \
+ VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT, \
+ &fConfigLinearRenderSupport[config][kNo_MSAA], \
+ &fConfigRenderSupport[config][kNo_MSAA] ); \
+ fConfigRenderSupport[config][kYes_MSAA] = fConfigRenderSupport[config][kNo_MSAA]; \
+ fConfigLinearRenderSupport[config][kYes_MSAA] = fConfigLinearRenderSupport[config][kNo_MSAA];
+
+
+void GrVkCaps::initConfigRenderableTable(const GrVkInterface* interface, VkPhysicalDevice physDev) {
+ enum {
+ kNo_MSAA = 0,
+ kYes_MSAA = 1,
+ };
+
+ // Base render support
+ SET_CONFIG_IS_RENDERABLE(kAlpha_8_GrPixelConfig);
+ SET_CONFIG_IS_RENDERABLE(kRGB_565_GrPixelConfig);
+ SET_CONFIG_IS_RENDERABLE(kRGBA_4444_GrPixelConfig);
+ SET_CONFIG_IS_RENDERABLE(kRGBA_8888_GrPixelConfig);
+ SET_CONFIG_IS_RENDERABLE(kBGRA_8888_GrPixelConfig);
+
+ SET_CONFIG_IS_RENDERABLE(kSRGBA_8888_GrPixelConfig);
+
+ // Float render support
+ SET_CONFIG_IS_RENDERABLE(kRGBA_float_GrPixelConfig);
+ SET_CONFIG_IS_RENDERABLE(kRGBA_half_GrPixelConfig);
+ SET_CONFIG_IS_RENDERABLE(kAlpha_half_GrPixelConfig);
+}
+
+#define SET_CONFIG_IS_TEXTURABLE(config) \
+ config_supported_for_feature(interface, \
+ physDev, \
+ config, \
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT, \
+ &fConfigLinearTextureSupport[config], \
+ &fConfigTextureSupport[config]);
+
+void GrVkCaps::initConfigTexturableTable(const GrVkInterface* interface, VkPhysicalDevice physDev) {
+ // Base texture support
+ SET_CONFIG_IS_TEXTURABLE(kAlpha_8_GrPixelConfig);
+ SET_CONFIG_IS_TEXTURABLE(kRGB_565_GrPixelConfig);
+ SET_CONFIG_IS_TEXTURABLE(kRGBA_4444_GrPixelConfig);
+ SET_CONFIG_IS_TEXTURABLE(kRGBA_8888_GrPixelConfig);
+ SET_CONFIG_IS_TEXTURABLE(kBGRA_8888_GrPixelConfig);
+
+ SET_CONFIG_IS_TEXTURABLE(kIndex_8_GrPixelConfig);
+ SET_CONFIG_IS_TEXTURABLE(kSRGBA_8888_GrPixelConfig);
+
+ // Compressed texture support
+ SET_CONFIG_IS_TEXTURABLE(kETC1_GrPixelConfig);
+ SET_CONFIG_IS_TEXTURABLE(kLATC_GrPixelConfig);
+ SET_CONFIG_IS_TEXTURABLE(kR11_EAC_GrPixelConfig);
+ SET_CONFIG_IS_TEXTURABLE(kASTC_12x12_GrPixelConfig);
+
+ // Float texture support
+ SET_CONFIG_IS_TEXTURABLE(kRGBA_float_GrPixelConfig);
+ SET_CONFIG_IS_TEXTURABLE(kRGBA_half_GrPixelConfig);
+ SET_CONFIG_IS_TEXTURABLE(kAlpha_half_GrPixelConfig);
+}
+
+#define SET_CONFIG_CAN_STENCIL(config) \
+ bool SK_MACRO_APPEND_LINE(linearSupported); \
+ bool SK_MACRO_APPEND_LINE(optimalSupported); \
+ format_supported_for_feature(interface, \
+ physDev, \
+ config.fInternalFormat, \
+ VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT, \
+ &SK_MACRO_APPEND_LINE(linearSupported), \
+ &SK_MACRO_APPEND_LINE(optimalSupported)); \
+ if (SK_MACRO_APPEND_LINE(linearSupported)) fLinearStencilFormats.push_back(config); \
+ if (SK_MACRO_APPEND_LINE(optimalSupported)) fStencilFormats.push_back(config);
+
+void GrVkCaps::initStencilFormats(const GrVkInterface* interface, VkPhysicalDevice physDev) {
+ // Build up list of legal stencil formats (though perhaps not supported on
+ // the particular gpu/driver) from most preferred to least.
+
+ static const StencilFormat
+ // internal Format stencil bits total bits packed?
+ gS8 = { VK_FORMAT_S8_UINT, 8, 8, false },
+ gD24S8 = { VK_FORMAT_D24_UNORM_S8_UINT, 8, 32, true };
+
+ // I'm simply assuming that these two will be supported since they are used in example code.
+    // TODO: Actually figure this out
+ SET_CONFIG_CAN_STENCIL(gS8);
+ SET_CONFIG_CAN_STENCIL(gD24S8);
+}
+
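
For comparison, the same sample-count walk can be written directly against the Vulkan API without the GrVkInterface table. A minimal sketch; like get_max_sample_count above, it assumes the supported sample-count bits form a contiguous run starting at VK_SAMPLE_COUNT_1_BIT, which the spec does not strictly guarantee.

#include <vulkan/vulkan.h>

// Returns the highest color sample count the device supports, walking the
// flag ladder the same way get_max_sample_count does above.
int max_color_samples(VkPhysicalDevice physDev) {
    VkPhysicalDeviceProperties props;
    vkGetPhysicalDeviceProperties(physDev, &props);
    VkSampleCountFlags flags = props.limits.framebufferColorSampleCounts;
    if (!(flags & VK_SAMPLE_COUNT_2_BIT))  return 0;
    if (!(flags & VK_SAMPLE_COUNT_4_BIT))  return 2;
    if (!(flags & VK_SAMPLE_COUNT_8_BIT))  return 4;
    if (!(flags & VK_SAMPLE_COUNT_16_BIT)) return 8;
    if (!(flags & VK_SAMPLE_COUNT_32_BIT)) return 16;
    if (!(flags & VK_SAMPLE_COUNT_64_BIT)) return 32;
    return 64;
}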
diff --git a/src/gpu/vk/GrVkCaps.h b/src/gpu/vk/GrVkCaps.h
new file mode 100644
index 0000000000..3135c9e46e
--- /dev/null
+++ b/src/gpu/vk/GrVkCaps.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkCaps_DEFINED
+#define GrVkCaps_DEFINED
+
+#include "GrCaps.h"
+#include "GrVkStencilAttachment.h"
+#include "vulkan/vulkan.h"
+
+struct GrVkInterface;
+class GrGLSLCaps;
+
+/**
+ * Stores some capabilities of a Vk backend.
+ */
+class GrVkCaps : public GrCaps {
+public:
+ typedef GrVkStencilAttachment::Format StencilFormat;
+
+ /**
+ * Creates a GrVkCaps that is set such that nothing is supported. The init function should
+ * be called to fill out the caps.
+ */
+ GrVkCaps(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
+ VkPhysicalDevice device);
+
+ bool isConfigTexturable(GrPixelConfig config) const override {
+ SkASSERT(kGrPixelConfigCnt > config);
+ return fConfigTextureSupport[config];
+ }
+
+ bool isConfigRenderable(GrPixelConfig config, bool withMSAA) const override {
+ SkASSERT(kGrPixelConfigCnt > config);
+ return fConfigRenderSupport[config][withMSAA];
+ }
+
+ bool isConfigRenderableLinearly(GrPixelConfig config, bool withMSAA) const {
+ SkASSERT(kGrPixelConfigCnt > config);
+ return fConfigLinearRenderSupport[config][withMSAA];
+ }
+
+    bool isConfigTexturableLinearly(GrPixelConfig config) const {
+ SkASSERT(kGrPixelConfigCnt > config);
+ return fConfigLinearTextureSupport[config];
+ }
+
+ /**
+ * Gets an array of legal stencil formats. These formats are not guaranteed to be supported by
+     * the driver but are legal VkFormat values.
+ */
+ const SkTArray<StencilFormat, true>& stencilFormats() const {
+ return fStencilFormats;
+ }
+
+ /**
+ * Gets an array of legal stencil formats. These formats are not guaranteed to be supported by
+     * the driver but are legal VkFormat values.
+ */
+ const SkTArray<StencilFormat, true>& linearStencilFormats() const {
+ return fLinearStencilFormats;
+ }
+
+ /**
+ * Returns the max number of sampled textures we can use in a program. This number is the max of
+ * max samplers and max sampled images. This number is technically the max sampled textures we
+ * can have per stage, but we'll use it for the whole program since for now we only do texture
+ * lookups in the fragment shader.
+ */
+ int maxSampledTextures() const {
+ return fMaxSampledTextures;
+ }
+
+ GrGLSLCaps* glslCaps() const { return reinterpret_cast<GrGLSLCaps*>(fShaderCaps.get()); }
+
+private:
+ void init(const GrContextOptions& contextOptions, const GrVkInterface* vkInterface,
+ VkPhysicalDevice device);
+ void initSampleCount(const VkPhysicalDeviceProperties& properties);
+ void initGLSLCaps(const GrVkInterface* interface, VkPhysicalDevice physDev);
+ void initConfigRenderableTable(const GrVkInterface* interface, VkPhysicalDevice physDev);
+ void initConfigTexturableTable(const GrVkInterface* interface, VkPhysicalDevice physDev);
+ void initStencilFormats(const GrVkInterface* interface, VkPhysicalDevice physDev);
+
+ bool fConfigTextureSupport[kGrPixelConfigCnt];
+ // For Vulkan we track whether a config is supported linearly (without need for swizzling)
+ bool fConfigLinearTextureSupport[kGrPixelConfigCnt];
+
+ // The first entry for each config is without msaa and the second is with.
+ bool fConfigRenderSupport[kGrPixelConfigCnt][2];
+ // The first entry for each config is without msaa and the second is with.
+ bool fConfigLinearRenderSupport[kGrPixelConfigCnt][2];
+
+ SkTArray<StencilFormat, true> fLinearStencilFormats;
+ SkTArray<StencilFormat, true> fStencilFormats;
+
+ int fMaxSampledTextures;
+
+ typedef GrCaps INHERITED;
+};
+
+#endif
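
The texturable/renderable tables declared above are ultimately filled from vkGetPhysicalDeviceFormatProperties. A standalone sketch of that query, mirroring the format_supported_for_feature helper in GrVkCaps.cpp:

#include <vulkan/vulkan.h>

// Reports whether 'format' supports 'feature' under linear and optimal
// tiling; for single-bit features this behaves exactly like the helper
// in GrVkCaps.cpp.
void query_format_feature(VkPhysicalDevice physDev, VkFormat format,
                          VkFormatFeatureFlags feature,
                          bool* linearSupport, bool* optimalSupport) {
    VkFormatProperties props = {};
    vkGetPhysicalDeviceFormatProperties(physDev, format, &props);
    *linearSupport  = (props.linearTilingFeatures  & feature) == feature;
    *optimalSupport = (props.optimalTilingFeatures & feature) == feature;
}

For example, checking VK_FORMAT_B8G8R8A8_UNORM against VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT corresponds to the kBGRA_8888_GrPixelConfig row of the render-support table.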
diff --git a/src/gpu/vk/GrVkCommandBuffer.cpp b/src/gpu/vk/GrVkCommandBuffer.cpp
new file mode 100644
index 0000000000..e43c50fc4a
--- /dev/null
+++ b/src/gpu/vk/GrVkCommandBuffer.cpp
@@ -0,0 +1,371 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkCommandBuffer.h"
+
+#include "GrVkFramebuffer.h"
+#include "GrVkImageView.h"
+#include "GrVkRenderPass.h"
+#include "GrVkRenderTarget.h"
+#include "GrVkProgram.h"
+#include "GrVkTransferBuffer.h"
+#include "GrVkUtil.h"
+
+GrVkCommandBuffer* GrVkCommandBuffer::Create(const GrVkGpu* gpu, VkCommandPool cmdPool) {
+ const VkCommandBufferAllocateInfo cmdInfo = {
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
+ NULL, // pNext
+ cmdPool, // commandPool
+ VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level
+ 1 // bufferCount
+ };
+
+ VkCommandBuffer cmdBuffer;
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateCommandBuffers(gpu->device(),
+ &cmdInfo,
+ &cmdBuffer));
+ if (err) {
+ return nullptr;
+ }
+ return new GrVkCommandBuffer(cmdBuffer);
+}
+
+GrVkCommandBuffer::~GrVkCommandBuffer() {
+ // Should have ended any render pass we're in the middle of
+ SkASSERT(!fActiveRenderPass);
+}
+
+void GrVkCommandBuffer::invalidateState() {
+    fBoundVertexBuffer = VK_NULL_HANDLE;
+    fBoundVertexBufferIsValid = false;
+    fBoundIndexBuffer = VK_NULL_HANDLE;
+    fBoundIndexBufferIsValid = false;
+}
+
+void GrVkCommandBuffer::freeGPUData(const GrVkGpu* gpu) const {
+ SkASSERT(!fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ for (int i = 0; i < fTrackedResources.count(); ++i) {
+ fTrackedResources[i]->unref(gpu);
+ }
+
+ // Destroy the fence, if any
+ if (VK_NULL_HANDLE != fSubmitFence) {
+ GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
+ }
+
+ GR_VK_CALL(gpu->vkInterface(), FreeCommandBuffers(gpu->device(), gpu->cmdPool(),
+ 1, &fCmdBuffer));
+}
+
+void GrVkCommandBuffer::abandonSubResources() const {
+ for (int i = 0; i < fTrackedResources.count(); ++i) {
+ fTrackedResources[i]->unrefAndAbandon();
+ }
+}
+
+void GrVkCommandBuffer::begin(const GrVkGpu* gpu) {
+ SkASSERT(!fIsActive);
+ VkCommandBufferBeginInfo cmdBufferBeginInfo;
+ memset(&cmdBufferBeginInfo, 0, sizeof(VkCommandBufferBeginInfo));
+ cmdBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cmdBufferBeginInfo.pNext = nullptr;
+ cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ cmdBufferBeginInfo.pInheritanceInfo = nullptr;
+
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), BeginCommandBuffer(fCmdBuffer,
+ &cmdBufferBeginInfo));
+ fIsActive = true;
+}
+
+void GrVkCommandBuffer::end(const GrVkGpu* gpu) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), EndCommandBuffer(fCmdBuffer));
+ this->invalidateState();
+ fIsActive = false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrVkCommandBuffer::beginRenderPass(const GrVkGpu* gpu,
+ const GrVkRenderPass* renderPass,
+ const GrVkRenderTarget& target) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ VkRenderPassBeginInfo beginInfo;
+ VkSubpassContents contents;
+ renderPass->getBeginInfo(target, &beginInfo, &contents);
+ GR_VK_CALL(gpu->vkInterface(), CmdBeginRenderPass(fCmdBuffer, &beginInfo, contents));
+ fActiveRenderPass = renderPass;
+ this->addResource(renderPass);
+ target.addResources(*this);
+}
+
+void GrVkCommandBuffer::endRenderPass(const GrVkGpu* gpu) {
+ SkASSERT(fIsActive);
+ SkASSERT(fActiveRenderPass);
+ GR_VK_CALL(gpu->vkInterface(), CmdEndRenderPass(fCmdBuffer));
+ fActiveRenderPass = nullptr;
+}
+
+void GrVkCommandBuffer::submitToQueue(const GrVkGpu* gpu, VkQueue queue, GrVkGpu::SyncQueue sync) {
+ SkASSERT(!fIsActive);
+
+ VkResult err;
+ VkFenceCreateInfo fenceInfo;
+ memset(&fenceInfo, 0, sizeof(VkFenceCreateInfo));
+ fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+ err = GR_VK_CALL(gpu->vkInterface(), CreateFence(gpu->device(), &fenceInfo, nullptr,
+ &fSubmitFence));
+ SkASSERT(!err);
+
+ VkSubmitInfo submitInfo;
+ memset(&submitInfo, 0, sizeof(VkSubmitInfo));
+ submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ submitInfo.pNext = nullptr;
+ submitInfo.waitSemaphoreCount = 0;
+ submitInfo.pWaitSemaphores = nullptr;
+ submitInfo.commandBufferCount = 1;
+ submitInfo.pCommandBuffers = &fCmdBuffer;
+ submitInfo.signalSemaphoreCount = 0;
+ submitInfo.pSignalSemaphores = nullptr;
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), QueueSubmit(queue, 1, &submitInfo, fSubmitFence));
+
+ if (GrVkGpu::kForce_SyncQueue == sync) {
+ err = GR_VK_CALL(gpu->vkInterface(),
+ WaitForFences(gpu->device(), 1, &fSubmitFence, true, UINT64_MAX));
+ if (VK_TIMEOUT == err) {
+ SkDebugf("Fence failed to signal: %d\n", err);
+ SkFAIL("failing");
+ }
+ SkASSERT(!err);
+
+ // Destroy the fence
+ GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
+ fSubmitFence = VK_NULL_HANDLE;
+ }
+}
+
+bool GrVkCommandBuffer::finished(const GrVkGpu* gpu) const {
+ if (VK_NULL_HANDLE == fSubmitFence) {
+ return true;
+ }
+
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), GetFenceStatus(gpu->device(), fSubmitFence));
+ switch (err) {
+ case VK_SUCCESS:
+ return true;
+
+ case VK_NOT_READY:
+ return false;
+
+ default:
+ SkDebugf("Error getting fence status: %d\n", err);
+ SkFAIL("failing");
+ break;
+ }
+
+ return false;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// CommandBuffer commands
+////////////////////////////////////////////////////////////////////////////////
+
+void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ BarrierType barrierType,
+ void* barrier) const {
+ SkASSERT(fIsActive);
+ VkDependencyFlags dependencyFlags = byRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;
+
+ switch (barrierType) {
+ case kMemory_BarrierType: {
+ const VkMemoryBarrier* barrierPtr = reinterpret_cast<VkMemoryBarrier*>(barrier);
+ GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
+ dstStageMask, dependencyFlags,
+ 1, barrierPtr,
+ 0, nullptr,
+ 0, nullptr));
+ break;
+ }
+
+ case kBufferMemory_BarrierType: {
+ const VkBufferMemoryBarrier* barrierPtr =
+ reinterpret_cast<VkBufferMemoryBarrier*>(barrier);
+ GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
+ dstStageMask, dependencyFlags,
+ 0, nullptr,
+ 1, barrierPtr,
+ 0, nullptr));
+ break;
+ }
+
+ case kImageMemory_BarrierType: {
+ const VkImageMemoryBarrier* barrierPtr =
+ reinterpret_cast<VkImageMemoryBarrier*>(barrier);
+ GR_VK_CALL(gpu->vkInterface(), CmdPipelineBarrier(fCmdBuffer, srcStageMask,
+ dstStageMask, dependencyFlags,
+ 0, nullptr,
+ 0, nullptr,
+ 1, barrierPtr));
+ break;
+ }
+ }
+}
+
+void GrVkCommandBuffer::copyImage(const GrVkGpu* gpu,
+ GrVkImage* srcImage,
+ VkImageLayout srcLayout,
+ GrVkImage* dstImage,
+ VkImageLayout dstLayout,
+ uint32_t copyRegionCount,
+ const VkImageCopy* copyRegions) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ this->addResource(srcImage->resource());
+ this->addResource(dstImage->resource());
+ GR_VK_CALL(gpu->vkInterface(), CmdCopyImage(fCmdBuffer,
+ srcImage->textureImage(),
+ srcLayout,
+ dstImage->textureImage(),
+ dstLayout,
+ copyRegionCount,
+ copyRegions));
+}
+
+void GrVkCommandBuffer::copyImageToBuffer(const GrVkGpu* gpu,
+ GrVkImage* srcImage,
+ VkImageLayout srcLayout,
+ GrVkTransferBuffer* dstBuffer,
+ uint32_t copyRegionCount,
+ const VkBufferImageCopy* copyRegions) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ this->addResource(srcImage->resource());
+ this->addResource(dstBuffer->resource());
+ GR_VK_CALL(gpu->vkInterface(), CmdCopyImageToBuffer(fCmdBuffer,
+ srcImage->textureImage(),
+ srcLayout,
+ dstBuffer->buffer(),
+ copyRegionCount,
+ copyRegions));
+}
+
+void GrVkCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
+ GrVkTransferBuffer* srcBuffer,
+ GrVkImage* dstImage,
+ VkImageLayout dstLayout,
+ uint32_t copyRegionCount,
+ const VkBufferImageCopy* copyRegions) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ this->addResource(srcBuffer->resource());
+ this->addResource(dstImage->resource());
+ GR_VK_CALL(gpu->vkInterface(), CmdCopyBufferToImage(fCmdBuffer,
+ srcBuffer->buffer(),
+ dstImage->textureImage(),
+ dstLayout,
+ copyRegionCount,
+ copyRegions));
+}
+
+void GrVkCommandBuffer::clearColorImage(const GrVkGpu* gpu,
+ GrVkImage* image,
+ const VkClearColorValue* color,
+ uint32_t subRangeCount,
+ const VkImageSubresourceRange* subRanges) {
+ SkASSERT(fIsActive);
+ SkASSERT(!fActiveRenderPass);
+ this->addResource(image->resource());
+ GR_VK_CALL(gpu->vkInterface(), CmdClearColorImage(fCmdBuffer,
+ image->textureImage(),
+ image->currentLayout(),
+ color,
+ subRangeCount,
+ subRanges));
+}
+
+void GrVkCommandBuffer::clearAttachments(const GrVkGpu* gpu,
+ int numAttachments,
+ const VkClearAttachment* attachments,
+ int numRects,
+ const VkClearRect* clearRects) const {
+ SkASSERT(fIsActive);
+ SkASSERT(fActiveRenderPass);
+ SkASSERT(numAttachments > 0);
+ SkASSERT(numRects > 0);
+#ifdef SK_DEBUG
+ for (int i = 0; i < numAttachments; ++i) {
+ if (attachments[i].aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
+ uint32_t testIndex;
+ SkAssertResult(fActiveRenderPass->colorAttachmentIndex(&testIndex));
+ SkASSERT(testIndex == attachments[i].colorAttachment);
+ }
+ }
+#endif
+ GR_VK_CALL(gpu->vkInterface(), CmdClearAttachments(fCmdBuffer,
+ numAttachments,
+ attachments,
+ numRects,
+ clearRects));
+}
+
+void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
+ GrVkProgram* program,
+ VkPipelineLayout layout,
+ uint32_t firstSet,
+ uint32_t setCount,
+ const VkDescriptorSet* descriptorSets,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets) {
+ SkASSERT(fIsActive);
+ GR_VK_CALL(gpu->vkInterface(), CmdBindDescriptorSets(fCmdBuffer,
+ VK_PIPELINE_BIND_POINT_GRAPHICS,
+ layout,
+ firstSet,
+ setCount,
+ descriptorSets,
+ dynamicOffsetCount,
+ dynamicOffsets));
+ program->addUniformResources(*this);
+}
+
+void GrVkCommandBuffer::drawIndexed(const GrVkGpu* gpu,
+ uint32_t indexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t vertexOffset,
+ uint32_t firstInstance) const {
+ SkASSERT(fIsActive);
+ SkASSERT(fActiveRenderPass);
+ GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexed(fCmdBuffer,
+ indexCount,
+ instanceCount,
+ firstIndex,
+ vertexOffset,
+ firstInstance));
+}
+
+void GrVkCommandBuffer::draw(const GrVkGpu* gpu,
+ uint32_t vertexCount,
+ uint32_t instanceCount,
+ uint32_t firstVertex,
+ uint32_t firstInstance) const {
+ SkASSERT(fIsActive);
+ SkASSERT(fActiveRenderPass);
+ GR_VK_CALL(gpu->vkInterface(), CmdDraw(fCmdBuffer,
+ vertexCount,
+ instanceCount,
+ firstVertex,
+ firstInstance));
+}
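
The submit-and-wait path in submitToQueue reduces to a small amount of raw Vulkan. A minimal standalone sketch of the kForce_SyncQueue case, with error handling collapsed to a bool return:

#include <vulkan/vulkan.h>
#include <cstdint>

// Submits one command buffer and blocks until the GPU finishes it.
bool submit_and_wait(VkDevice device, VkQueue queue, VkCommandBuffer cmdBuffer) {
    VkFenceCreateInfo fenceInfo = {};
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    VkFence fence;
    if (vkCreateFence(device, &fenceInfo, nullptr, &fence) != VK_SUCCESS) {
        return false;
    }

    VkSubmitInfo submitInfo = {};
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &cmdBuffer;
    if (vkQueueSubmit(queue, 1, &submitInfo, fence) != VK_SUCCESS) {
        vkDestroyFence(device, fence, nullptr);
        return false;
    }

    // waitAll = VK_TRUE, effectively no timeout.
    VkResult err = vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
    vkDestroyFence(device, fence, nullptr);
    return VK_SUCCESS == err;
}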
diff --git a/src/gpu/vk/GrVkCommandBuffer.h b/src/gpu/vk/GrVkCommandBuffer.h
new file mode 100644
index 0000000000..33867abc5a
--- /dev/null
+++ b/src/gpu/vk/GrVkCommandBuffer.h
@@ -0,0 +1,206 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkCommandBuffer_DEFINED
+#define GrVkCommandBuffer_DEFINED
+
+#include "GrVkGpu.h"
+#include "GrVkIndexBuffer.h"
+#include "GrVkPipeline.h"
+#include "GrVkResource.h"
+#include "GrVkUtil.h"
+#include "GrVkVertexBuffer.h"
+#include "vulkan/vulkan.h"
+
+class GrVkImage;
+class GrVkProgram;
+class GrVkRenderPass;
+class GrVkRenderTarget;
+class GrVkTransferBuffer;
+
+class GrVkCommandBuffer : public GrVkResource {
+public:
+ static GrVkCommandBuffer* Create(const GrVkGpu* gpu, VkCommandPool cmdPool);
+ ~GrVkCommandBuffer() override;
+
+ void begin(const GrVkGpu* gpu);
+ void end(const GrVkGpu* gpu);
+
+ void invalidateState();
+
+ // Begins render pass on this command buffer. The framebuffer from GrVkRenderTarget will be used
+ // in the render pass.
+ void beginRenderPass(const GrVkGpu* gpu,
+ const GrVkRenderPass* renderPass,
+ const GrVkRenderTarget& target);
+ void endRenderPass(const GrVkGpu* gpu);
+
+ void submitToQueue(const GrVkGpu* gpu, VkQueue queue, GrVkGpu::SyncQueue sync);
+ bool finished(const GrVkGpu* gpu) const;
+
+ ////////////////////////////////////////////////////////////////////////////
+ // CommandBuffer State/Object bindings
+ ////////////////////////////////////////////////////////////////////////////
+#if 0
+ void bindPipeline(const GrVkGpu* gpu) const;
+ void bindDynamicState(const GrVkGpu* gpu) const;
+ void bindDescriptorSet(const GrVkGpu* gpu) const;
+#endif
+
+ ////////////////////////////////////////////////////////////////////////////
+ // CommandBuffer commands
+ ////////////////////////////////////////////////////////////////////////////
+ enum BarrierType {
+ kMemory_BarrierType,
+ kBufferMemory_BarrierType,
+ kImageMemory_BarrierType
+ };
+
+ void pipelineBarrier(const GrVkGpu* gpu,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ BarrierType barrierType,
+ void* barrier) const;
+
+ void bindVertexBuffer(GrVkGpu* gpu, GrVkVertexBuffer* vbuffer) {
+ VkBuffer vkBuffer = vbuffer->buffer();
+ if (!fBoundVertexBufferIsValid || vkBuffer != fBoundVertexBuffer) {
+ VkDeviceSize offset = 0;
+ GR_VK_CALL(gpu->vkInterface(), CmdBindVertexBuffers(fCmdBuffer,
+ 0,
+ 1,
+ &vkBuffer,
+ &offset));
+ fBoundVertexBufferIsValid = true;
+ fBoundVertexBuffer = vkBuffer;
+ addResource(vbuffer->resource());
+ }
+ }
+
+ void bindIndexBuffer(GrVkGpu* gpu, GrVkIndexBuffer* ibuffer) {
+ VkBuffer vkBuffer = ibuffer->buffer();
+ if (!fBoundIndexBufferIsValid || vkBuffer != fBoundIndexBuffer) {
+ GR_VK_CALL(gpu->vkInterface(), CmdBindIndexBuffer(fCmdBuffer,
+ vkBuffer,
+ 0,
+ VK_INDEX_TYPE_UINT16));
+ fBoundIndexBufferIsValid = true;
+ fBoundIndexBuffer = vkBuffer;
+ addResource(ibuffer->resource());
+ }
+ }
+
+ void bindPipeline(const GrVkGpu* gpu, const GrVkPipeline* pipeline) {
+ GR_VK_CALL(gpu->vkInterface(), CmdBindPipeline(fCmdBuffer,
+ VK_PIPELINE_BIND_POINT_GRAPHICS,
+ pipeline->pipeline()));
+ addResource(pipeline);
+ }
+
+ void bindDescriptorSets(const GrVkGpu* gpu,
+ GrVkProgram*,
+ VkPipelineLayout layout,
+ uint32_t firstSet,
+ uint32_t setCount,
+ const VkDescriptorSet* descriptorSets,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* dynamicOffsets);
+
+ // Commands that only work outside of a render pass
+ void clearColorImage(const GrVkGpu* gpu,
+ GrVkImage* image,
+ const VkClearColorValue* color,
+ uint32_t subRangeCount,
+ const VkImageSubresourceRange* subRanges);
+
+ void copyImage(const GrVkGpu* gpu,
+ GrVkImage* srcImage,
+ VkImageLayout srcLayout,
+ GrVkImage* dstImage,
+ VkImageLayout dstLayout,
+ uint32_t copyRegionCount,
+ const VkImageCopy* copyRegions);
+
+ void copyImageToBuffer(const GrVkGpu* gpu,
+ GrVkImage* srcImage,
+ VkImageLayout srcLayout,
+ GrVkTransferBuffer* dstBuffer,
+ uint32_t copyRegionCount,
+ const VkBufferImageCopy* copyRegions);
+
+ void copyBufferToImage(const GrVkGpu* gpu,
+ GrVkTransferBuffer* srcBuffer,
+ GrVkImage* dstImage,
+ VkImageLayout dstLayout,
+ uint32_t copyRegionCount,
+ const VkBufferImageCopy* copyRegions);
+
+ // Commands that only work inside of a render pass
+ void clearAttachments(const GrVkGpu* gpu,
+ int numAttachments,
+ const VkClearAttachment* attachments,
+ int numRects,
+ const VkClearRect* clearRects) const;
+
+ void drawIndexed(const GrVkGpu* gpu,
+ uint32_t indexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t vertexOffset,
+ uint32_t firstInstance) const;
+
+ void draw(const GrVkGpu* gpu,
+ uint32_t vertexCount,
+ uint32_t instanceCount,
+ uint32_t firstVertex,
+ uint32_t firstInstance) const;
+
+ // Add ref-counted resource that will be tracked and released when this
+ // command buffer finishes execution
+ void addResource(const GrVkResource* resource) {
+ resource->ref();
+ fTrackedResources.push_back(resource);
+ }
+
+private:
+ static const int kInitialTrackedResourcesCount = 32;
+
+ explicit GrVkCommandBuffer(VkCommandBuffer cmdBuffer)
+ : fTrackedResources(kInitialTrackedResourcesCount)
+ , fCmdBuffer(cmdBuffer)
+ , fSubmitFence(VK_NULL_HANDLE)
+ , fBoundVertexBufferIsValid(false)
+ , fBoundIndexBufferIsValid(false)
+ , fIsActive(false)
+ , fActiveRenderPass(nullptr) {
+ this->invalidateState();
+ }
+
+ void freeGPUData(const GrVkGpu* gpu) const override;
+ void abandonSubResources() const override;
+
+ SkTArray<const GrVkResource*, true> fTrackedResources;
+
+ VkCommandBuffer fCmdBuffer;
+ VkFence fSubmitFence;
+
+ VkBuffer fBoundVertexBuffer;
+ bool fBoundVertexBufferIsValid;
+
+ VkBuffer fBoundIndexBuffer;
+ bool fBoundIndexBufferIsValid;
+
+ // Tracks whether we are in the middle of a command buffer begin/end calls and thus can add new
+    // commands to the buffer.
+ bool fIsActive;
+
+ // Stores a pointer to the current active render pass (i.e. begin has been called but not end).
+ // A nullptr means there is no active render pass. The GrVKCommandBuffer does not own the render
+ // pass.
+ const GrVkRenderPass* fActiveRenderPass;
+};
+
+#endif
+
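
pipelineBarrier() above forwards one of three barrier structs to vkCmdPipelineBarrier. As a concrete instance, here is a standalone sketch of the kBufferMemory_BarrierType case for a buffer just written by a transfer and about to be read as vertex data; the access and stage masks are illustrative choices, not values taken from this CL.

#include <vulkan/vulkan.h>

// Makes writes from a transfer visible to a subsequent vertex fetch.
void barrier_transfer_to_vertex_read(VkCommandBuffer cmd, VkBuffer buffer,
                                     VkDeviceSize size) {
    VkBufferMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
    barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    barrier.dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.buffer = buffer;
    barrier.offset = 0;
    barrier.size = size;

    vkCmdPipelineBarrier(cmd,
                         VK_PIPELINE_STAGE_TRANSFER_BIT,      // srcStageMask
                         VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,  // dstStageMask
                         0,                                   // dependencyFlags
                         0, nullptr,                          // memory barriers
                         1, &barrier,                         // buffer memory barriers
                         0, nullptr);                         // image memory barriers
}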
diff --git a/src/gpu/vk/GrVkDescriptorPool.cpp b/src/gpu/vk/GrVkDescriptorPool.cpp
new file mode 100644
index 0000000000..d4dced6c32
--- /dev/null
+++ b/src/gpu/vk/GrVkDescriptorPool.cpp
@@ -0,0 +1,79 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkDescriptorPool.h"
+
+#include "GrVkGpu.h"
+#include "SkTemplates.h"
+
+
+GrVkDescriptorPool::GrVkDescriptorPool(const GrVkGpu* gpu, const DescriptorTypeCounts& typeCounts)
+ : INHERITED()
+ , fTypeCounts(typeCounts) {
+ uint32_t numPools = fTypeCounts.numPoolSizes();
+ SkAutoTDeleteArray<VkDescriptorPoolSize> poolSizes(new VkDescriptorPoolSize[numPools]);
+ int currentPool = 0;
+    // Note: VK_DESCRIPTOR_TYPE_END_RANGE names the last valid type, so the bound is inclusive.
+    for (int i = VK_DESCRIPTOR_TYPE_BEGIN_RANGE; i <= VK_DESCRIPTOR_TYPE_END_RANGE; ++i) {
+ if (fTypeCounts.fDescriptorTypeCount[i]) {
+ VkDescriptorPoolSize& poolSize = poolSizes.get()[currentPool++];
+ poolSize.type = (VkDescriptorType)i;
+ poolSize.descriptorCount = fTypeCounts.fDescriptorTypeCount[i];
+ }
+ }
+ SkASSERT(currentPool == numPools);
+
+ VkDescriptorPoolCreateInfo createInfo;
+ memset(&createInfo, 0, sizeof(VkDescriptorPoolCreateInfo));
+ createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.maxSets = 2; // Currently we allow one set for samplers and one set for uniforms
+ createInfo.poolSizeCount = numPools;
+ createInfo.pPoolSizes = poolSizes.get();
+
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateDescriptorPool(gpu->device(),
+ &createInfo,
+ nullptr,
+ &fDescPool));
+}
+
+bool GrVkDescriptorPool::isCompatible(const DescriptorTypeCounts& typeCounts) const {
+ return fTypeCounts.isSuperSet(typeCounts);
+}
+
+void GrVkDescriptorPool::reset(const GrVkGpu* gpu) {
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), ResetDescriptorPool(gpu->device(), fDescPool, 0));
+}
+
+void GrVkDescriptorPool::freeGPUData(const GrVkGpu* gpu) const {
+ // Destroying the VkDescriptorPool will automatically free and delete any VkDescriptorSets
+ // allocated from the pool.
+ GR_VK_CALL(gpu->vkInterface(), DestroyDescriptorPool(gpu->device(), fDescPool, nullptr));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+uint32_t GrVkDescriptorPool::DescriptorTypeCounts::numPoolSizes() const {
+ uint32_t count = 0;
+    for (int i = VK_DESCRIPTOR_TYPE_BEGIN_RANGE; i <= VK_DESCRIPTOR_TYPE_END_RANGE; ++i) {
+ count += fDescriptorTypeCount[i] ? 1 : 0;
+ }
+ return count;
+}
+
+bool GrVkDescriptorPool::DescriptorTypeCounts::isSuperSet(const DescriptorTypeCounts& that) const {
+    for (int i = VK_DESCRIPTOR_TYPE_BEGIN_RANGE; i <= VK_DESCRIPTOR_TYPE_END_RANGE; ++i) {
+ if (that.fDescriptorTypeCount[i] > fDescriptorTypeCount[i]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void GrVkDescriptorPool::DescriptorTypeCounts::setTypeCount(VkDescriptorType type, uint8_t count) {
+ fDescriptorTypeCount[type] = count;
+}
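
A hypothetical call site for the class above: size a pool via DescriptorTypeCounts, then test whether a later, smaller request could reuse it. example_pool_usage is an invented helper and 'gpu' is assumed valid; only the setTypeCount/isCompatible/unref calls come from this CL.

#include "GrVkDescriptorPool.h"

void example_pool_usage(const GrVkGpu* gpu) {
    GrVkDescriptorPool::DescriptorTypeCounts counts;
    counts.setTypeCount(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1);
    counts.setTypeCount(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 2);

    GrVkDescriptorPool* pool = new GrVkDescriptorPool(gpu, counts);

    // A smaller request is a subset of what the pool holds, so a fully
    // reset pool could be reused for it.
    GrVkDescriptorPool::DescriptorTypeCounts smaller;
    smaller.setTypeCount(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1);
    SkASSERT(pool->isCompatible(smaller));

    pool->unref(gpu);  // GrVkResources are ref-counted, never deleted directly.
}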
diff --git a/src/gpu/vk/GrVkDescriptorPool.h b/src/gpu/vk/GrVkDescriptorPool.h
new file mode 100644
index 0000000000..ebbf230708
--- /dev/null
+++ b/src/gpu/vk/GrVkDescriptorPool.h
@@ -0,0 +1,56 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkDescriptorPool_DEFINED
+#define GrVkDescriptorPool_DEFINED
+
+#include "GrVkResource.h"
+
+#include "vulkan/vulkan.h"
+
+class GrVkGpu;
+
+class GrVkDescriptorPool : public GrVkResource {
+public:
+ class DescriptorTypeCounts {
+ public:
+ DescriptorTypeCounts() {
+ memset(fDescriptorTypeCount, 0, sizeof(fDescriptorTypeCount));
+ }
+
+ void setTypeCount(VkDescriptorType type, uint8_t count);
+ uint32_t numPoolSizes() const;
+
+        // Determines whether, for each i, that.fDescriptorTypeCount[i] <= fDescriptorTypeCount[i].
+ bool isSuperSet(const DescriptorTypeCounts& that) const;
+ private:
+ uint8_t fDescriptorTypeCount[VK_DESCRIPTOR_TYPE_RANGE_SIZE];
+
+ friend class GrVkDescriptorPool;
+ };
+
+ explicit GrVkDescriptorPool(const GrVkGpu* gpu, const DescriptorTypeCounts& typeCounts);
+
+ VkDescriptorPool descPool() const { return fDescPool; }
+
+ void reset(const GrVkGpu* gpu);
+
+    // Returns whether this descriptor pool, assuming it gets fully reset and is not in use by
+    // another draw, could support the requested typeCounts.
+ bool isCompatible(const DescriptorTypeCounts& typeCounts) const;
+
+private:
+ void freeGPUData(const GrVkGpu* gpu) const override;
+
+ DescriptorTypeCounts fTypeCounts;
+ VkDescriptorPool fDescPool;
+
+ typedef GrVkResource INHERITED;
+};
+
+
+#endif
\ No newline at end of file
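
Once a pool exists, descriptor sets are carved out of it with vkAllocateDescriptorSets; destroying or resetting the pool reclaims them in bulk, which is why freeGPUData only destroys the pool. A minimal raw-Vulkan sketch of a single-set allocation, where 'layout' is assumed compatible with the pool's sizes:

#include <vulkan/vulkan.h>

VkDescriptorSet allocate_one_set(VkDevice device, VkDescriptorPool pool,
                                 VkDescriptorSetLayout layout) {
    VkDescriptorSetAllocateInfo allocInfo = {};
    allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    allocInfo.descriptorPool = pool;
    allocInfo.descriptorSetCount = 1;
    allocInfo.pSetLayouts = &layout;

    VkDescriptorSet set = VK_NULL_HANDLE;
    if (vkAllocateDescriptorSets(device, &allocInfo, &set) != VK_SUCCESS) {
        return VK_NULL_HANDLE;
    }
    return set;  // Freed implicitly when the pool is reset or destroyed.
}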
diff --git a/src/gpu/vk/GrVkFramebuffer.cpp b/src/gpu/vk/GrVkFramebuffer.cpp
new file mode 100644
index 0000000000..d79895ce8e
--- /dev/null
+++ b/src/gpu/vk/GrVkFramebuffer.cpp
@@ -0,0 +1,63 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkFramebuffer.h"
+
+#include "GrVkGpu.h"
+#include "GrVkImageView.h"
+#include "GrVkRenderPass.h"
+
+GrVkFramebuffer* GrVkFramebuffer::Create(GrVkGpu* gpu,
+ int width, int height,
+ const GrVkRenderPass* renderPass,
+ const GrVkImageView* colorAttachment,
+ const GrVkImageView* resolveAttachment,
+ const GrVkImageView* stencilAttachment) {
+ // At the very least we need a renderPass and a colorAttachment
+ SkASSERT(renderPass);
+ SkASSERT(colorAttachment);
+
+ VkImageView attachments[3];
+ attachments[0] = colorAttachment->imageView();
+ int numAttachments = 1;
+ if (resolveAttachment) {
+ attachments[numAttachments++] = resolveAttachment->imageView();
+ }
+ if (stencilAttachment) {
+ attachments[numAttachments++] = stencilAttachment->imageView();
+ }
+
+ VkFramebufferCreateInfo createInfo;
+ memset(&createInfo, 0, sizeof(VkFramebufferCreateInfo));
+ createInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.renderPass = renderPass->vkRenderPass();
+ createInfo.attachmentCount = numAttachments;
+ createInfo.pAttachments = attachments;
+ createInfo.width = width;
+ createInfo.height = height;
+ createInfo.layers = 1;
+
+ VkFramebuffer framebuffer;
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateFramebuffer(gpu->device(),
+ &createInfo,
+ nullptr,
+ &framebuffer));
+ if (err) {
+ return nullptr;
+ }
+
+ return new GrVkFramebuffer(framebuffer);
+}
+
+void GrVkFramebuffer::freeGPUData(const GrVkGpu* gpu) const {
+ SkASSERT(fFramebuffer);
+ GR_VK_CALL(gpu->vkInterface(), DestroyFramebuffer(gpu->device(), fFramebuffer, nullptr));
+}
+
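
A framebuffer like the one Create builds above is consumed by vkCmdBeginRenderPass; in this backend that happens in GrVkCommandBuffer::beginRenderPass via GrVkRenderPass::getBeginInfo. A standalone sketch with a single color clear value; the clear color and inline-subpass choice are illustrative, not taken from this CL.

#include <vulkan/vulkan.h>

void begin_pass(VkCommandBuffer cmd, VkRenderPass renderPass,
                VkFramebuffer framebuffer, uint32_t width, uint32_t height) {
    VkClearValue clearColor = {};  // transparent black

    VkRenderPassBeginInfo beginInfo = {};
    beginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    beginInfo.renderPass = renderPass;
    beginInfo.framebuffer = framebuffer;
    beginInfo.renderArea.extent = { width, height };  // offset stays (0, 0)
    beginInfo.clearValueCount = 1;
    beginInfo.pClearValues = &clearColor;

    vkCmdBeginRenderPass(cmd, &beginInfo, VK_SUBPASS_CONTENTS_INLINE);
}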
diff --git a/src/gpu/vk/GrVkFramebuffer.h b/src/gpu/vk/GrVkFramebuffer.h
new file mode 100644
index 0000000000..7e67d3f0d0
--- /dev/null
+++ b/src/gpu/vk/GrVkFramebuffer.h
@@ -0,0 +1,45 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkFramebuffer_DEFINED
+#define GrVkFramebuffer_DEFINED
+
+#include "GrTypes.h"
+
+#include "GrVkResource.h"
+
+#include "vulkan/vulkan.h"
+
+class GrVkGpu;
+class GrVkImageView;
+class GrVkRenderPass;
+
+class GrVkFramebuffer : public GrVkResource {
+public:
+ static GrVkFramebuffer* Create(GrVkGpu* gpu,
+ int width, int height,
+ const GrVkRenderPass* renderPass,
+ const GrVkImageView* colorAttachment,
+ const GrVkImageView* resolveAttachment,
+ const GrVkImageView* stencilAttachment);
+
+ VkFramebuffer framebuffer() const { return fFramebuffer; }
+
+private:
+ GrVkFramebuffer(VkFramebuffer framebuffer) : INHERITED(), fFramebuffer(framebuffer) {}
+
+ GrVkFramebuffer(const GrVkFramebuffer&);
+ GrVkFramebuffer& operator=(const GrVkFramebuffer&);
+
+ void freeGPUData(const GrVkGpu* gpu) const override;
+
+ VkFramebuffer fFramebuffer;
+
+ typedef GrVkResource INHERITED;
+};
+
+#endif
\ No newline at end of file
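
And a hypothetical call site for the header above, creating a color-only framebuffer; make_color_only_framebuffer is an invented wrapper, and the gpu/renderPass/colorView arguments are assumed valid.

#include "GrVkFramebuffer.h"

GrVkFramebuffer* make_color_only_framebuffer(GrVkGpu* gpu,
                                             const GrVkRenderPass* renderPass,
                                             const GrVkImageView* colorView,
                                             int width, int height) {
    // Null resolve/stencil attachments are simply skipped by Create().
    // Returns nullptr on failure; the caller unrefs the GrVkResource when done.
    return GrVkFramebuffer::Create(gpu, width, height, renderPass, colorView,
                                   nullptr,   // no resolve attachment
                                   nullptr);  // no stencil attachment
}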
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
new file mode 100644
index 0000000000..ef2d0be615
--- /dev/null
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -0,0 +1,1272 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkGpu.h"
+
+#include "GrContextOptions.h"
+#include "GrGeometryProcessor.h"
+#include "GrGpuResourceCacheAccess.h"
+#include "GrPipeline.h"
+#include "GrRenderTargetPriv.h"
+#include "GrSurfacePriv.h"
+#include "GrTexturePriv.h"
+#include "GrVertices.h"
+
+#include "GrVkCommandBuffer.h"
+#include "GrVkImage.h"
+#include "GrVkIndexBuffer.h"
+#include "GrVkMemory.h"
+#include "GrVkPipeline.h"
+#include "GrVkProgram.h"
+#include "GrVkProgramBuilder.h"
+#include "GrVkProgramDesc.h"
+#include "GrVkRenderPass.h"
+#include "GrVkResourceProvider.h"
+#include "GrVkTexture.h"
+#include "GrVkTextureRenderTarget.h"
+#include "GrVkTransferBuffer.h"
+#include "GrVkVertexBuffer.h"
+
+#include "SkConfig8888.h"
+
+#include "vk/GrVkInterface.h"
+
+#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
+#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
+#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)
+
+////////////////////////////////////////////////////////////////////////////////
+// Stuff used to set up a GrVkGpu secretly for now.
+
+// For now the VkGpuCreate is using the same signature as GL. This is mostly for ease of
+// hiding this code from official skia. In the end VkGpuCreate will not take a GrBackendContext
+// and most likely would take an optional device and queues to use.
+GrGpu* vk_gpu_create(GrBackendContext backendContext, const GrContextOptions& options,
+ GrContext* context) {
+    // Below is Vulkan setup code that normally would be done by a client, but we do it here for
+    // now for testing purposes.
+ VkPhysicalDevice physDev;
+ VkDevice device;
+ VkInstance inst;
+ VkResult err;
+
+ const VkApplicationInfo app_info = {
+ VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
+ nullptr, // pNext
+ "vktest", // pApplicationName
+ 0, // applicationVersion
+ "vktest", // pEngineName
+        0,                                 // engineVersion
+ VK_API_VERSION, // apiVersion
+ };
+ const VkInstanceCreateInfo instance_create = {
+ VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // flags
+ &app_info, // pApplicationInfo
+ 0, // enabledLayerNameCount
+ nullptr, // ppEnabledLayerNames
+ 0, // enabledExtensionNameCount
+ nullptr, // ppEnabledExtensionNames
+ };
+ err = vkCreateInstance(&instance_create, nullptr, &inst);
+ if (err < 0) {
+        SkDebugf("vkCreateInstance failed: %d\n", err);
+ SkFAIL("failing");
+ }
+
+ uint32_t gpuCount;
+ err = vkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
+ if (err) {
+ SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
+ SkFAIL("failing");
+ }
+ SkASSERT(gpuCount > 0);
+ // Just returning the first physical device instead of getting the whole array.
+ gpuCount = 1;
+ err = vkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
+ if (err) {
+ SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
+ SkFAIL("failing");
+ }
+
+ // query to get the initial queue props size
+ uint32_t queueCount;
+ vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
+ SkASSERT(queueCount >= 1);
+
+ SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
+ // now get the actual queue props
+ VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();
+
+ vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);
+
+ // iterate to find the graphics queue
+ uint32_t graphicsQueueIndex = -1;
+ for (uint32_t i = 0; i < queueCount; i++) {
+ if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
+ graphicsQueueIndex = i;
+ break;
+ }
+ }
+ SkASSERT(graphicsQueueIndex < queueCount);
+
+ float queuePriorities[1] = { 0.0 };
+ const VkDeviceQueueCreateInfo queueInfo = {
+ VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // VkDeviceQueueCreateFlags
+ 0, // queueFamilyIndex
+ 1, // queueCount
+ queuePriorities, // pQueuePriorities
+ };
+ const VkDeviceCreateInfo deviceInfo = {
+ VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // VkDeviceCreateFlags
+ 1, // queueCreateInfoCount
+ &queueInfo, // pQueueCreateInfos
+ 0, // layerCount
+ nullptr, // ppEnabledLayerNames
+ 0, // extensionCount
+ nullptr, // ppEnabledExtensionNames
+ nullptr // ppEnabledFeatures
+ };
+
+ err = vkCreateDevice(physDev, &deviceInfo, nullptr, &device);
+ if (err) {
+ SkDebugf("CreateDevice failed: %d\n", err);
+ SkFAIL("failing");
+ }
+
+ VkQueue queue;
+ vkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
+
+ const VkCommandPoolCreateInfo cmdPoolInfo = {
+ VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
+ nullptr, // pNext
+ 0, // CmdPoolCreateFlags
+ graphicsQueueIndex, // queueFamilyIndex
+ };
+
+ VkCommandPool cmdPool;
+ err = vkCreateCommandPool(device, &cmdPoolInfo, nullptr, &cmdPool);
+ if (err) {
+ SkDebugf("CreateCommandPool failed: %d\n", err);
+ SkFAIL("failing");
+ }
+
+ return new GrVkGpu(context, options, physDev, device, queue, cmdPool, inst);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options,
+ VkPhysicalDevice physDev, VkDevice device, VkQueue queue, VkCommandPool cmdPool,
+ VkInstance inst)
+ : INHERITED(context)
+ , fDevice(device)
+ , fQueue(queue)
+ , fCmdPool(cmdPool)
+ , fResourceProvider(this)
+ , fVkInstance(inst) {
+ fInterface.reset(GrVkCreateInterface(fVkInstance));
+ fCompiler = shaderc_compiler_initialize();
+
+ fVkCaps.reset(new GrVkCaps(options, fInterface, physDev));
+ fCaps.reset(SkRef(fVkCaps.get()));
+
+ fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
+ SkASSERT(fCurrentCmdBuffer);
+ fCurrentCmdBuffer->begin(this);
+ VK_CALL(GetPhysicalDeviceMemoryProperties(physDev, &fPhysDevMemProps));
+}
+
+GrVkGpu::~GrVkGpu() {
+ shaderc_compiler_release(fCompiler);
+ fCurrentCmdBuffer->end(this);
+ fCurrentCmdBuffer->unref(this);
+
+ // wait for all commands to finish
+ VK_CALL(QueueWaitIdle(fQueue));
+
+ // must call this just before we destroy the VkDevice
+ fResourceProvider.destroyResources();
+
+ VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr));
+ VK_CALL(DestroyDevice(fDevice, nullptr));
+ VK_CALL(DestroyInstance(fVkInstance, nullptr));
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
+ SkASSERT(fCurrentCmdBuffer);
+ fCurrentCmdBuffer->end(this);
+
+ fCurrentCmdBuffer->submitToQueue(this, fQueue, sync);
+ fResourceProvider.checkCommandBuffers();
+
+ // Release old command buffer and create a new one
+ fCurrentCmdBuffer->unref(this);
+ fCurrentCmdBuffer = fResourceProvider.createCommandBuffer();
+ SkASSERT(fCurrentCmdBuffer);
+
+ fCurrentCmdBuffer->begin(this);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+GrVertexBuffer* GrVkGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
+ return GrVkVertexBuffer::Create(this, size, dynamic);
+}
+
+GrIndexBuffer* GrVkGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
+ return GrVkIndexBuffer::Create(this, size, dynamic);
+}
+
+GrTransferBuffer* GrVkGpu::onCreateTransferBuffer(size_t size, TransferType type) {
+    GrVkBuffer::Type bufferType = kCpuToGpu_TransferType == type ? GrVkBuffer::kCopyRead_Type
+                                                                 : GrVkBuffer::kCopyWrite_Type;
+ return GrVkTransferBuffer::Create(this, size, bufferType);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+bool GrVkGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
+ GrPixelConfig srcConfig, DrawPreference* drawPreference,
+ WritePixelTempDrawInfo* tempDrawInfo) {
+ if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
+ return false;
+ }
+
+ // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
+ if (kNoDraw_DrawPreference != *drawPreference) {
+ return false;
+ }
+
+ if (dstSurface->config() != srcConfig) {
+ // TODO: This should fall back to drawing or copying to change config of dstSurface to
+ // match that of srcConfig.
+ return false;
+ }
+
+ return true;
+}
+
+bool GrVkGpu::onWritePixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig config, const void* buffer,
+ size_t rowBytes) {
+ GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
+ if (!vkTex) {
+ return false;
+ }
+
+ // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
+ if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
+ return false;
+ }
+
+ bool success = false;
+ if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
+ // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
+ SkASSERT(config == vkTex->desc().fConfig);
+ // TODO: add compressed texture support
+ // delete the following two lines and uncomment the two after that when ready
+ vkTex->unref();
+ return false;
+ //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
+ // height);
+ } else {
+ bool linearTiling = vkTex->isLinearTiled();
+ if (linearTiling && VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
+ // Need to change the layout to general in order to perform a host write
+ VkImageLayout layout = vkTex->currentLayout();
+ VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
+ VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_HOST_BIT;
+ VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
+ VkAccessFlags dstAccessMask = VK_ACCESS_HOST_WRITE_BIT;
+ vkTex->setImageLayout(this,
+ VK_IMAGE_LAYOUT_GENERAL,
+ srcAccessMask,
+ dstAccessMask,
+ srcStageMask,
+ dstStageMask,
+ false);
+ }
+ success = this->uploadTexData(vkTex, left, top, width, height, config,
+ buffer, rowBytes);
+ }
+
+ if (success) {
+ vkTex->texturePriv().dirtyMipMaps(true);
+ return true;
+ }
+
+ return false;
+}
+
+bool GrVkGpu::uploadTexData(GrVkTexture* tex,
+ int left, int top, int width, int height,
+ GrPixelConfig dataConfig,
+ const void* data,
+ size_t rowBytes) {
+ SkASSERT(data);
+
+ // If we're uploading compressed data then we should be using uploadCompressedTexData
+ SkASSERT(!GrPixelConfigIsCompressed(dataConfig));
+
+ bool linearTiling = tex->isLinearTiled();
+
+ size_t bpp = GrBytesPerPixel(dataConfig);
+
+ const GrSurfaceDesc& desc = tex->desc();
+
+ if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
+ &width, &height, &data, &rowBytes)) {
+ return false;
+ }
+ size_t trimRowBytes = width * bpp;
+
+ if (linearTiling) {
+ SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
+ VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
+ const VkImageSubresource subres = {
+ VK_IMAGE_ASPECT_COLOR_BIT,
+ 0, // mipLevel
+ 0, // arraySlice
+ };
+ VkSubresourceLayout layout;
+ VkResult err;
+
+ const GrVkInterface* interface = this->vkInterface();
+
+ GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
+ tex->textureImage(),
+ &subres,
+ &layout));
+
+ int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height
+ : top;
+ VkDeviceSize offset = texTop*layout.rowPitch + left*bpp;
+ VkDeviceSize size = height*layout.rowPitch;
+ void* mapPtr;
+ err = GR_VK_CALL(interface, MapMemory(fDevice, tex->textureMemory(), offset, size, 0,
+ &mapPtr));
+ if (err) {
+ return false;
+ }
+
+ if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
+ // copy into buffer by rows
+ const char* srcRow = reinterpret_cast<const char*>(data);
+ char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*layout.rowPitch;
+ for (int y = 0; y < height; y++) {
+ memcpy(dstRow, srcRow, trimRowBytes);
+ srcRow += rowBytes;
+ dstRow -= layout.rowPitch;
+ }
+ } else {
+ // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy
+ if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) {
+ memcpy(mapPtr, data, trimRowBytes * height);
+ } else {
+ SkRectMemcpy(mapPtr, layout.rowPitch, data, rowBytes, trimRowBytes, height);
+ }
+ }
+
+ GR_VK_CALL(interface, UnmapMemory(fDevice, tex->textureMemory()));
+ } else {
+ GrVkTransferBuffer* transferBuffer =
+ GrVkTransferBuffer::Create(this, trimRowBytes * height, GrVkBuffer::kCopyRead_Type);
+
+ void* mapPtr = transferBuffer->map();
+
+ if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
+ // copy into buffer by rows
+ const char* srcRow = reinterpret_cast<const char*>(data);
+ char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*trimRowBytes;
+ for (int y = 0; y < height; y++) {
+ memcpy(dstRow, srcRow, trimRowBytes);
+ srcRow += rowBytes;
+ dstRow -= trimRowBytes;
+ }
+ } else {
+ // If there is no padding on the src data rows, we can do a single memcpy
+ if (trimRowBytes == rowBytes) {
+ memcpy(mapPtr, data, trimRowBytes * height);
+ } else {
+ SkRectMemcpy(mapPtr, trimRowBytes, data, rowBytes, trimRowBytes, height);
+ }
+ }
+
+ transferBuffer->unmap();
+
+ // make sure the unmap has finished
+ transferBuffer->addMemoryBarrier(this,
+ VK_ACCESS_HOST_WRITE_BIT,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ VK_PIPELINE_STAGE_HOST_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ false);
+
+ // Set up copy region
+ bool flipY = kBottomLeft_GrSurfaceOrigin == tex->origin();
+ VkOffset3D offset = {
+ left,
+ flipY ? tex->height() - top - height : top,
+ 0
+ };
+
+ VkBufferImageCopy region;
+ memset(&region, 0, sizeof(VkBufferImageCopy));
+ region.bufferOffset = 0;
+ region.bufferRowLength = width;
+ region.bufferImageHeight = height;
+ region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ region.imageOffset = offset;
+ region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };
+
+ // Change layout of our target so it can be copied to
+ VkImageLayout layout = tex->currentLayout();
+ VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
+ VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+ VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
+ VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+ tex->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ srcAccessMask,
+ dstAccessMask,
+ srcStageMask,
+ dstStageMask,
+ false);
+
+ // Copy the buffer to the image
+ fCurrentCmdBuffer->copyBufferToImage(this,
+ transferBuffer,
+ tex,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ 1,
+ &region);
+
+ // Submit the current command buffer to the Queue
+ this->submitCommandBuffer(kSkip_SyncQueue);
+
+ transferBuffer->unref();
+ }
+
+ return true;
+}
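+
+// To summarize the two upload paths above: a linearly tiled texture is written directly
+// through a host mapping of its VkDeviceMemory, while an optimally tiled texture is staged
+// through a kCopyRead_Type transfer buffer and copied with copyBufferToImage.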
+
+////////////////////////////////////////////////////////////////////////////////
+GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle,
+ const void* srcData, size_t rowBytes) {
+ bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);
+
+ VkFormat pixelFormat;
+ if (!GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat)) {
+ return nullptr;
+ }
+
+ if (!fVkCaps->isConfigTexturable(desc.fConfig)) {
+ return nullptr;
+ }
+
+ bool linearTiling = false;
+ if (SkToBool(desc.fFlags & kZeroCopy_GrSurfaceFlag)) {
+ if (fVkCaps->isConfigTexurableLinearly(desc.fConfig) &&
+ (!renderTarget || fVkCaps->isConfigRenderableLinearly(desc.fConfig, false))) {
+ linearTiling = true;
+ } else {
+ return nullptr;
+ }
+ }
+
+ VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
+ if (renderTarget) {
+ usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ }
+
+ // For now we will set the VK_IMAGE_USAGE_TRANSFER_DST_BIT and
+ // VK_IMAGE_USAGE_TRANSFER_SRC_BIT on every texture since we do not know whether or not we
+ // will be using this texture in some copy or not. Also this assumes, as is the current case,
+ // that all render targets in vulkan are also textures. If we change this practice of setting
+ // both bits, we must make sure to set the destination bit if we are uploading srcData to the
+ // texture.
+ usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+
+ VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+ // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
+ // requested, this ImageDesc describes the resolved texture. Therefore we always set the sample
+ // count to 1.
+ GrVkImage::ImageDesc imageDesc;
+ imageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ imageDesc.fFormat = pixelFormat;
+ imageDesc.fWidth = desc.fWidth;
+ imageDesc.fHeight = desc.fHeight;
+ imageDesc.fLevels = 1;
+ imageDesc.fSamples = 1;
+ imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
+ imageDesc.fUsageFlags = usageFlags;
+ imageDesc.fMemProps = memProps;
+
+ GrVkTexture* tex;
+ if (renderTarget) {
+ tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, desc, lifeCycle,
+ imageDesc);
+ } else {
+ tex = GrVkTexture::CreateNewTexture(this, desc, lifeCycle, imageDesc);
+ }
+
+ if (!tex) {
+ return nullptr;
+ }
+
+ if (srcData) {
+ if (!this->uploadTexData(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig, srcData,
+ rowBytes)) {
+ tex->unref();
+ return nullptr;
+ }
+ }
+
+ return tex;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin) {
+ // By default, all textures in Vk use TopLeft
+ if (kDefault_GrSurfaceOrigin == origin) {
+ return kTopLeft_GrSurfaceOrigin;
+ } else {
+ return origin;
+ }
+}
+
+GrTexture* GrVkGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
+ GrWrapOwnership ownership) {
+ VkFormat format;
+ if (!GrPixelConfigToVkFormat(desc.fConfig, &format)) {
+ return nullptr;
+ }
+
+ if (0 == desc.fTextureHandle) {
+ return nullptr;
+ }
+
+ int maxSize = this->caps()->maxTextureSize();
+ if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
+ return nullptr;
+ }
+
+ // TODO: determine what format Chrome will actually send us and turn it into a Resource
+ GrVkImage::Resource* imageRsrc = reinterpret_cast<GrVkImage::Resource*>(desc.fTextureHandle);
+
+ GrGpuResource::LifeCycle lifeCycle;
+ switch (ownership) {
+ case kAdopt_GrWrapOwnership:
+ lifeCycle = GrGpuResource::kAdopted_LifeCycle;
+ break;
+ case kBorrow_GrWrapOwnership:
+ lifeCycle = GrGpuResource::kBorrowed_LifeCycle;
+ break;
+ }
+
+ GrSurfaceDesc surfDesc;
+ // next line relies on GrBackendTextureDesc's flags matching GrTexture's
+ surfDesc.fFlags = (GrSurfaceFlags)desc.fFlags;
+ surfDesc.fWidth = desc.fWidth;
+ surfDesc.fHeight = desc.fHeight;
+ surfDesc.fConfig = desc.fConfig;
+ surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
+ bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
+ // In GL, Chrome assumes all textures are BottomLeft
+ // In VK, we don't have this restriction
+ surfDesc.fOrigin = resolve_origin(desc.fOrigin);
+
+ GrVkTexture* texture = nullptr;
+ if (renderTarget) {
+ texture = GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(this, surfDesc,
+ lifeCycle, format,
+ imageRsrc);
+ } else {
+ texture = GrVkTexture::CreateWrappedTexture(this, surfDesc, lifeCycle, format, imageRsrc);
+ }
+ if (!texture) {
+ return nullptr;
+ }
+
+ return texture;
+}
+
+GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc,
+ GrWrapOwnership ownership) {
+
+ // TODO: determine what format Chrome will actually send us and turn it into a Resource
+ GrVkImage::Resource* imageRsrc =
+ reinterpret_cast<GrVkImage::Resource*>(wrapDesc.fRenderTargetHandle);
+
+ GrGpuResource::LifeCycle lifeCycle;
+ switch (ownership) {
+ case kAdopt_GrWrapOwnership:
+ lifeCycle = GrGpuResource::kAdopted_LifeCycle;
+ break;
+ case kBorrow_GrWrapOwnership:
+ lifeCycle = GrGpuResource::kBorrowed_LifeCycle;
+ break;
+ }
+
+ GrSurfaceDesc desc;
+ desc.fConfig = wrapDesc.fConfig;
+ desc.fFlags = kCheckAllocation_GrSurfaceFlag;
+ desc.fWidth = wrapDesc.fWidth;
+ desc.fHeight = wrapDesc.fHeight;
+ desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());
+
+ desc.fOrigin = resolve_origin(wrapDesc.fOrigin);
+
+ GrVkRenderTarget* tgt = GrVkRenderTarget::CreateWrappedRenderTarget(this, desc,
+ lifeCycle, imageRsrc);
+ if (tgt && wrapDesc.fStencilBits) {
+ if (!this->createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) {
+ tgt->unref();
+ return nullptr;
+ }
+ }
+ return tgt;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc,
+ const GrNonInstancedVertices& vertices) {
+ GrVkVertexBuffer* vbuf;
+ vbuf = (GrVkVertexBuffer*)vertices.vertexBuffer();
+ SkASSERT(vbuf);
+ SkASSERT(!vbuf->isMapped());
+
+ vbuf->addMemoryBarrier(this,
+ VK_ACCESS_HOST_WRITE_BIT,
+ VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
+ VK_PIPELINE_STAGE_HOST_BIT,
+ VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
+ false);
+
+ fCurrentCmdBuffer->bindVertexBuffer(this, vbuf);
+
+ if (vertices.isIndexed()) {
+ GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)vertices.indexBuffer();
+ SkASSERT(ibuf);
+ SkASSERT(!ibuf->isMapped());
+
+ ibuf->addMemoryBarrier(this,
+ VK_ACCESS_HOST_WRITE_BIT,
+ VK_ACCESS_INDEX_READ_BIT,
+ VK_PIPELINE_STAGE_HOST_BIT,
+ VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
+ false);
+
+ fCurrentCmdBuffer->bindIndexBuffer(this, ibuf);
+ }
+}
+
+void GrVkGpu::buildProgramDesc(GrProgramDesc* desc,
+ const GrPrimitiveProcessor& primProc,
+ const GrPipeline& pipeline) const {
+ if (!GrVkProgramDescBuilder::Build(desc, primProc, pipeline, *this->vkCaps().glslCaps())) {
+ SkDEBUGFAIL("Failed to generate GL program descriptor");
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
+ int width,
+ int height) {
+ SkASSERT(rt->asTexture());
+ SkASSERT(width >= rt->width());
+ SkASSERT(height >= rt->height());
+
+ int samples = rt->numStencilSamples();
+
+ SkASSERT(this->vkCaps().stencilFormats().count());
+ const GrVkCaps::StencilFormat& sFmt = this->vkCaps().stencilFormats()[0];
+
+ GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this,
+ GrGpuResource::kCached_LifeCycle,
+ width,
+ height,
+ samples,
+ sFmt));
+ fStats.incStencilAttachmentCreates();
+ return stencil;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h,
+ GrPixelConfig config) {
+
+ VkFormat pixelFormat;
+ if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
+ return 0;
+ }
+
+ bool linearTiling = false;
+ if (!fVkCaps->isConfigTexturable(config)) {
+ return 0;
+ }
+
+ if (fVkCaps->isConfigTexurableLinearly(config)) {
+ linearTiling = true;
+ }
+
+ // Currently this is not supported since it requires a copy which has not yet been implemented.
+ if (srcData && !linearTiling) {
+ return 0;
+ }
+
+ VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
+ usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
+ usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+
+ VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT :
+ VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+ // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
+ // requested, this ImageDesc describes the resolved texture. Therefore we always set the sample
+ // count to 1.
+ GrVkImage::ImageDesc imageDesc;
+ imageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ imageDesc.fFormat = pixelFormat;
+ imageDesc.fWidth = w;
+ imageDesc.fHeight = h;
+ imageDesc.fLevels = 1;
+ imageDesc.fSamples = 1;
+ imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
+ imageDesc.fUsageFlags = usageFlags;
+ imageDesc.fMemProps = memProps;
+
+ const GrVkImage::Resource* imageRsrc = GrVkImage::CreateResource(this, imageDesc);
+ if (!imageRsrc) {
+ return 0;
+ }
+
+ if (srcData) {
+ if (linearTiling) {
+ const VkImageSubresource subres = {
+ VK_IMAGE_ASPECT_COLOR_BIT,
+ 0, // mipLevel
+ 0, // arraySlice
+ };
+ VkSubresourceLayout layout;
+ VkResult err;
+
+ const GrVkInterface* interface = this->vkInterface();
+
+ GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
+ imageRsrc->fImage,
+ &subres,
+ &layout));
+
+ void* mapPtr;
+ err = GR_VK_CALL(interface, MapMemory(fDevice,
+ imageRsrc->fAlloc,
+ 0,
+ layout.rowPitch * h,
+ 0,
+ &mapPtr));
+ if (err) {
+ imageRsrc->unref(this);
+ return 0;
+ }
+
+ size_t bpp = GrBytesPerPixel(config);
+ size_t rowCopyBytes = bpp * w;
+ // If there is no padding on dst (layout.rowPitch) we can do a single memcopy.
+ // This assumes the srcData comes in with no padding.
+ if (rowCopyBytes == layout.rowPitch) {
+ memcpy(mapPtr, srcData, rowCopyBytes * h);
+ } else {
+ SkRectMemcpy(mapPtr, layout.rowPitch, srcData, rowCopyBytes, rowCopyBytes, h);
+ }
+ GR_VK_CALL(interface, UnmapMemory(fDevice, imageRsrc->fAlloc));
+ } else {
+ // TODO: Add support for copying to optimal tiling
+ SkASSERT(false);
+ }
+ }
+
+ return (GrBackendObject)imageRsrc;
+}
+
+bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
+ GrVkImage::Resource* backend = reinterpret_cast<GrVkImage::Resource*>(id);
+
+ if (backend && backend->fImage && backend->fAlloc) {
+ VkMemoryRequirements req;
+ memset(&req, 0, sizeof(req));
+ GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice,
+ backend->fImage,
+ &req));
+ // TODO: find a better check
+ // This will probably fail with a different driver
+ return (req.size > 0) && (req.size <= 8192 * 8192);
+ }
+
+ return false;
+}
+
+void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) {
+ GrVkImage::Resource* backend = reinterpret_cast<GrVkImage::Resource*>(id);
+
+ if (backend) {
+ if (!abandon) {
+ backend->unref(this);
+ } else {
+ backend->unrefAndAbandon();
+ }
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ VkMemoryBarrier* barrier) const {
+ SkASSERT(fCurrentCmdBuffer);
+ fCurrentCmdBuffer->pipelineBarrier(this,
+ srcStageMask,
+ dstStageMask,
+ byRegion,
+ GrVkCommandBuffer::kMemory_BarrierType,
+ barrier);
+}
+
+void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ VkBufferMemoryBarrier* barrier) const {
+ SkASSERT(fCurrentCmdBuffer);
+ fCurrentCmdBuffer->pipelineBarrier(this,
+ srcStageMask,
+ dstStageMask,
+ byRegion,
+ GrVkCommandBuffer::kBufferMemory_BarrierType,
+ barrier);
+}
+
+void GrVkGpu::addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ VkImageMemoryBarrier* barrier) const {
+ SkASSERT(fCurrentCmdBuffer);
+ fCurrentCmdBuffer->pipelineBarrier(this,
+ srcStageMask,
+ dstStageMask,
+ byRegion,
+ GrVkCommandBuffer::kImageMemory_BarrierType,
+ barrier);
+}
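+
+// A sketch of how a caller might fill the struct passed to addMemoryBarrier above
+// (field order per VkMemoryBarrier; the access masks here are illustrative):
+//   VkMemoryBarrier barrier = {
+//       VK_STRUCTURE_TYPE_MEMORY_BARRIER, // sType
+//       nullptr, // pNext
+//       VK_ACCESS_TRANSFER_WRITE_BIT, // srcAccessMask
+//       VK_ACCESS_HOST_READ_BIT // dstAccessMask
+//   };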
+
+void GrVkGpu::finishDrawTarget() {
+ // Submit the current command buffer to the Queue
+ this->submitCommandBuffer(kSkip_SyncQueue);
+}
+
+void GrVkGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) {
+ // parent class should never let us get here with no RT
+ SkASSERT(target);
+
+ VkClearColorValue vkColor;
+ GrColorToRGBAFloat(color, vkColor.float32);
+
+ GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
+ VkImageLayout origDstLayout = vkRT->currentLayout();
+
+ if (rect.width() != target->width() || rect.height() != target->height()) {
+ VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
+ VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ VkPipelineStageFlags srcStageMask =
+ GrVkMemory::LayoutToPipelineStageFlags(vkRT->currentLayout());
+ VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ vkRT->setImageLayout(this,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ srcAccessMask,
+ dstAccessMask,
+ srcStageMask,
+ dstStageMask,
+ false);
+
+ VkClearRect clearRect;
+ clearRect.rect.offset = { rect.fLeft, rect.fTop };
+ clearRect.rect.extent = { (uint32_t)rect.width(), (uint32_t)rect.height() };
+ clearRect.baseArrayLayer = 0;
+ clearRect.layerCount = 1;
+
+ const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
+ SkASSERT(renderPass);
+ fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);
+
+ uint32_t colorIndex;
+ SkAssertResult(renderPass->colorAttachmentIndex(&colorIndex));
+
+ VkClearAttachment attachment;
+ attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ attachment.colorAttachment = colorIndex;
+ attachment.clearValue.color = vkColor;
+
+ fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect);
+ fCurrentCmdBuffer->endRenderPass(this);
+ return;
+ }
+
+ VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
+ VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+
+ VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
+ VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+
+ vkRT->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ srcAccessMask,
+ dstAccessMask,
+ srcStageMask,
+ dstStageMask,
+ false);
+
+ VkImageSubresourceRange subRange;
+ memset(&subRange, 0, sizeof(VkImageSubresourceRange));
+ subRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ subRange.baseMipLevel = 0;
+ subRange.levelCount = 1;
+ subRange.baseArrayLayer = 0;
+ subRange.layerCount = 1;
+
+ // In the future we may not actually be doing this type of clear at all. If we are inside a
+ // render pass or doing a non-full clear then we will use CmdClearColorAttachment. The more
+ // common use case will be clearing an attachment at the start of a render pass, in which case
+ // we will use the clear load ops.
+ fCurrentCmdBuffer->clearColorImage(this,
+ vkRT,
+ &vkColor,
+ 1, &subRange);
+}
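+
+// To summarize onClear: a partial clear is recorded inside a render pass via
+// clearAttachments, while a full-surface clear transitions the target to
+// TRANSFER_DST_OPTIMAL and uses clearColorImage.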
+
+inline bool can_copy_image(const GrSurface* dst,
+ const GrSurface* src,
+ const GrVkGpu* gpu) {
+ if (src->asTexture() &&
+ dst->asTexture() &&
+ src->origin() == dst->origin() &&
+ src->config() == dst->config()) {
+ return true;
+ }
+
+ // How does msaa play into this? If a VkTexture is multisampled, are we copying the multisampled
+ // or the resolved image here?
+
+ return false;
+}
+
+void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ SkASSERT(can_copy_image(dst, src, this));
+
+ // Insert memory barriers to switch src and dst to transfer_source and transfer_dst layouts
+ GrVkTexture* dstTex = static_cast<GrVkTexture*>(dst->asTexture());
+ GrVkTexture* srcTex = static_cast<GrVkTexture*>(src->asTexture());
+
+ VkImageLayout origDstLayout = dstTex->currentLayout();
+ VkImageLayout origSrcLayout = srcTex->currentLayout();
+
+ VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
+ VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+
+ // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if
+ // the cache is flushed since it is only being written to.
+ VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
+ VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+
+ dstTex->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ srcAccessMask,
+ dstAccessMask,
+ srcStageMask,
+ dstStageMask,
+ false);
+
+ srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout);
+ dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+
+ srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout);
+ dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
+
+ srcTex->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ srcAccessMask,
+ dstAccessMask,
+ srcStageMask,
+ dstStageMask,
+ false);
+
+ // Flip rect if necessary
+ SkIRect srcVkRect = srcRect;
+ int32_t dstY = dstPoint.fY;
+
+ if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
+ SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin());
+ srcVkRect.fTop = src->height() - srcRect.fBottom;
+ srcVkRect.fBottom = src->height() - srcRect.fTop;
+ dstY = dst->height() - dstPoint.fY - srcVkRect.height();
+ }
+
+ VkImageCopy copyRegion;
+ memset(&copyRegion, 0, sizeof(VkImageCopy));
+ copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 };
+ copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ copyRegion.dstOffset = { dstPoint.fX, dstY, 0 };
+ copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };
+
+ fCurrentCmdBuffer->copyImage(this,
+ srcTex,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ dstTex,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ 1,
+ &copyRegion);
+}
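+
+// The copyImage wrapper above ultimately records the core Vulkan command, roughly
+// (where cmdBuf is the underlying VkCommandBuffer):
+//   vkCmdCopyImage(cmdBuf,
+//                  srcTex->textureImage(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+//                  dstTex->textureImage(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+//                  1, &copyRegion);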
+
+inline bool can_copy_as_draw(const GrSurface* dst,
+ const GrSurface* src,
+ const GrVkGpu* gpu) {
+ return false;
+}
+
+void GrVkGpu::copySurfaceAsDraw(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ SkASSERT(false);
+}
+
+bool GrVkGpu::onCopySurface(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) {
+ if (can_copy_image(dst, src, this)) {
+ this->copySurfaceAsCopyImage(dst, src, srcRect, dstPoint);
+ return true;
+ }
+
+ if (can_copy_as_draw(dst, src, this)) {
+ this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
+ return true;
+ }
+
+ return false;
+}
+
+bool GrVkGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
+ GrPixelConfig readConfig, DrawPreference* drawPreference,
+ ReadPixelTempDrawInfo* tempDrawInfo) {
+ // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail
+ if (kNoDraw_DrawPreference != *drawPreference) {
+ return false;
+ }
+
+ if (srcSurface->config() != readConfig) {
+ // TODO: This should fall back to drawing or copying to change config of srcSurface to match
+ // that of readConfig.
+ return false;
+ }
+
+ return true;
+}
+
+bool GrVkGpu::onReadPixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig config,
+ void* buffer,
+ size_t rowBytes) {
+ VkFormat pixelFormat;
+ if (!GrPixelConfigToVkFormat(config, &pixelFormat)) {
+ return false;
+ }
+
+ GrVkTexture* tgt = static_cast<GrVkTexture*>(surface->asTexture());
+ if (!tgt) {
+ return false;
+ }
+
+ // Change layout of our target so it can be used as copy
+ VkImageLayout layout = tgt->currentLayout();
+ VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
+ VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+ VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
+ VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
+ tgt->setImageLayout(this,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ srcAccessMask,
+ dstAccessMask,
+ srcStageMask,
+ dstStageMask,
+ false);
+
+ GrVkTransferBuffer* transferBuffer =
+ reinterpret_cast<GrVkTransferBuffer*>(this->createTransferBuffer(rowBytes * height,
+ kGpuToCpu_TransferType));
+
+ bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
+ VkOffset3D offset = {
+ left,
+ flipY ? surface->height() - top - height : top,
+ 0
+ };
+
+ // Copy the image to a buffer so we can map it to cpu memory
+ VkBufferImageCopy region;
+ memset(&region, 0, sizeof(VkBufferImageCopy));
+ region.bufferOffset = 0;
+ region.bufferRowLength = 0; // Forces RowLength to be imageExtent.width
+ region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
+ region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
+ region.imageOffset = offset;
+ region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };
+
+ fCurrentCmdBuffer->copyImageToBuffer(this,
+ tgt,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ transferBuffer,
+ 1,
+ &region);
+
+ // make sure the copy to buffer has finished
+ transferBuffer->addMemoryBarrier(this,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_ACCESS_HOST_READ_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ VK_PIPELINE_STAGE_HOST_BIT,
+ false);
+
+ // We need to submit the current command buffer to the Queue and make sure it finishes before
+ // we can copy the data out of the buffer.
+ this->submitCommandBuffer(kForce_SyncQueue);
+
+ void* mappedMemory = transferBuffer->map();
+
+ memcpy(buffer, mappedMemory, rowBytes*height);
+
+ transferBuffer->unmap();
+ transferBuffer->unref();
+
+ if (flipY) {
+ SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
+ size_t tightRowBytes = GrBytesPerPixel(config) * width;
+ scratch.reset(tightRowBytes);
+ void* tmpRow = scratch.get();
+ // flip y in-place by rows
+ const int halfY = height >> 1;
+ char* top = reinterpret_cast<char*>(buffer);
+ char* bottom = top + (height - 1) * rowBytes;
+ for (int y = 0; y < halfY; y++) {
+ memcpy(tmpRow, top, tightRowBytes);
+ memcpy(top, bottom, tightRowBytes);
+ memcpy(bottom, tmpRow, tightRowBytes);
+ top += rowBytes;
+ bottom -= rowBytes;
+ }
+ }
+
+ return true;
+}
+
+void GrVkGpu::onDraw(const DrawArgs& args, const GrNonInstancedVertices& vertices) {
+ GrRenderTarget* rt = args.fPipeline->getRenderTarget();
+ GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
+ const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
+ SkASSERT(renderPass);
+
+ GrVkProgram* program = GrVkProgramBuilder::CreateProgram(this, args,
+ vertices.primitiveType(),
+ *renderPass);
+
+ if (!program) {
+ return;
+ }
+
+ program->setData(this, *args.fPrimitiveProcessor, *args.fPipeline);
+
+ fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);
+
+ program->bind(this, fCurrentCmdBuffer);
+
+ this->bindGeometry(*args.fPrimitiveProcessor, vertices);
+
+ // Change layout of our render target so it can be used as the color attachment
+ VkImageLayout layout = vkRT->currentLayout();
+ // Our color attachment is purely a destination and won't be read, so we don't need to flush
+ // or invalidate any caches
+ VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
+ VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
+ VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ vkRT->setImageLayout(this,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ srcAccessMask,
+ dstAccessMask,
+ srcStageMask,
+ dstStageMask,
+ false);
+
+ if (vertices.isIndexed()) {
+ fCurrentCmdBuffer->drawIndexed(this,
+ vertices.indexCount(),
+ 1,
+ vertices.startIndex(),
+ vertices.startVertex(),
+ 0);
+ } else {
+ fCurrentCmdBuffer->draw(this, vertices.vertexCount(), 1, vertices.startVertex(), 0);
+ }
+
+ fCurrentCmdBuffer->endRenderPass(this);
+
+ // Technically we don't have to call this here (since there is a safety check in
+ // program->setData), but this will allow for quicker freeing of resources if the program sits
+ // in a cache for a while.
+ program->freeTempResources(this);
+ // This free will go away once we set up a program cache, and then the cache will be
+ // responsible for calling freeGPUResources.
+ program->freeGPUResources(this);
+ program->unref();
+
+#if SWAP_PER_DRAW
+ glFlush();
+#if defined(SK_BUILD_FOR_MAC)
+ aglSwapBuffers(aglGetCurrentContext());
+ int set_a_break_pt_here = 9;
+ aglSwapBuffers(aglGetCurrentContext());
+#elif defined(SK_BUILD_FOR_WIN32)
+ SwapBuf();
+ int set_a_break_pt_here = 9;
+ SwapBuf();
+#endif
+#endif
+}
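+
+// The draw calls above map to the core Vulkan commands, roughly:
+//   vkCmdDrawIndexed(cmdBuf, indexCount, 1, firstIndex, vertexOffset, 0);
+//   vkCmdDraw(cmdBuf, vertexCount, 1, firstVertex, 0);
+// with instanceCount fixed at 1 since instanced drawing is not used here.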
+
diff --git a/src/gpu/vk/GrVkGpu.h b/src/gpu/vk/GrVkGpu.h
new file mode 100644
index 0000000000..5bffdfdf93
--- /dev/null
+++ b/src/gpu/vk/GrVkGpu.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkGpu_DEFINED
+#define GrVkGpu_DEFINED
+
+#include "GrGpu.h"
+#include "GrGpuFactory.h"
+#include "GrVkCaps.h"
+#include "GrVkIndexBuffer.h"
+#include "GrVkProgram.h"
+#include "GrVkResourceProvider.h"
+#include "GrVkVertexBuffer.h"
+#include "GrVkUtil.h"
+
+#include "shaderc/shaderc.h"
+#include "vulkan/vulkan.h"
+
+class GrPipeline;
+class GrNonInstancedVertices;
+
+class GrVkBufferImpl;
+class GrVkCommandBuffer;
+class GrVkPipeline;
+class GrVkRenderPass;
+class GrVkTexture;
+struct GrVkInterface;
+
+class GrVkGpu : public GrGpu {
+public:
+ // Currently passing in the inst so that we can properly delete it when we are done.
+ // Normally this would be done by the client.
+ GrVkGpu(GrContext* context, const GrContextOptions& options,
+ VkPhysicalDevice physDev, VkDevice device, VkQueue queue, VkCommandPool cmdPool,
+ VkInstance inst);
+ ~GrVkGpu() override;
+
+ const GrVkInterface* vkInterface() const { return fInterface.get(); }
+ const GrVkCaps& vkCaps() const { return *fVkCaps; }
+
+ VkDevice device() const { return fDevice; }
+ VkQueue queue() const { return fQueue; }
+ VkCommandPool cmdPool() const { return fCmdPool; }
+ VkPhysicalDeviceMemoryProperties physicalDeviceMemoryProperties() const {
+ return fPhysDevMemProps;
+ }
+
+ GrVkResourceProvider& resourceProvider() { return fResourceProvider; }
+
+ enum SyncQueue {
+ kForce_SyncQueue,
+ kSkip_SyncQueue
+ };
+
+ bool onGetReadPixelsInfo(GrSurface* srcSurface, int readWidth, int readHeight, size_t rowBytes,
+ GrPixelConfig readConfig, DrawPreference*,
+ ReadPixelTempDrawInfo*) override;
+
+ bool onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
+ GrPixelConfig srcConfig, DrawPreference*,
+ WritePixelTempDrawInfo*) override;
+
+ void buildProgramDesc(GrProgramDesc*, const GrPrimitiveProcessor&,
+ const GrPipeline&) const override;
+
+ void discard(GrRenderTarget*) override {
+ SkDebugf("discard not yet implemented for Vulkan\n");
+ }
+
+ bool onCopySurface(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint) override;
+
+ bool initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) const override {
+ SkDebugf("initCopySurfaceDstDesc not yet implemented for Vulkan\n");
+ return false;
+ }
+
+ void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}
+
+ GrBackendObject createTestingOnlyBackendTexture(void* pixels, int w, int h,
+ GrPixelConfig config) override;
+ bool isTestingOnlyBackendTexture(GrBackendObject id) const override;
+ void deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandonTexture) override;
+
+ GrStencilAttachment* createStencilAttachmentForRenderTarget(const GrRenderTarget*,
+ int width,
+ int height) override;
+
+ void clearStencil(GrRenderTarget* target) override {
+ SkDebugf("clearStencil not yet implemented for Vulkan\n");
+ }
+
+ void drawDebugWireRect(GrRenderTarget*, const SkIRect&, GrColor) override {
+ SkDebugf("drawDebugWireRect not yet implemented for Vulkan\n");
+ }
+
+ void addMemoryBarrier(VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ VkMemoryBarrier* barrier) const;
+ void addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ VkBufferMemoryBarrier* barrier) const;
+ void addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion,
+ VkImageMemoryBarrier* barrier) const;
+
+ shaderc_compiler_t shadercCompiler() const {
+ return fCompiler;
+ }
+
+ void finishDrawTarget() override;
+
+private:
+ void onResetContext(uint32_t resetBits) override {
+ SkDebugf("onResetContext not yet implemented for Vulkan\n");
+ }
+
+ GrTexture* onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle,
+ const void* srcData, size_t rowBytes) override;
+
+ GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle,
+ const void* srcData) override {
+ SkDebugf("onCreateCompressedTexture not yet implemented for Vulkan\n");
+ return nullptr;
+ }
+
+ GrTexture* onWrapBackendTexture(const GrBackendTextureDesc&, GrWrapOwnership) override;
+
+ GrRenderTarget* onWrapBackendRenderTarget(const GrBackendRenderTargetDesc&,
+ GrWrapOwnership) override;
+
+ GrVertexBuffer* onCreateVertexBuffer(size_t size, bool dynamic) override;
+ GrIndexBuffer* onCreateIndexBuffer(size_t size, bool dynamic) override;
+ GrTransferBuffer* onCreateTransferBuffer(size_t size, TransferType type) override;
+
+ void onClear(GrRenderTarget*, const SkIRect& rect, GrColor color) override;
+
+ void onClearStencilClip(GrRenderTarget*, const SkIRect& rect, bool insideClip) override {
+ SkDebugf("onClearStencilClip not yet implemented for Vulkan\n");
+ }
+
+ void onDraw(const DrawArgs&, const GrNonInstancedVertices&) override;
+
+ bool onReadPixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig,
+ void* buffer,
+ size_t rowBytes) override;
+
+ bool onWritePixels(GrSurface* surface,
+ int left, int top, int width, int height,
+ GrPixelConfig config, const void* buffer, size_t rowBytes) override;
+
+ bool onTransferPixels(GrSurface*,
+ int left, int top, int width, int height,
+ GrPixelConfig config, GrTransferBuffer* buffer,
+ size_t offset, size_t rowBytes) override {
+ SkDebugf("onTransferPixels not yet implemented for Vulkan\n");
+ return false;
+ }
+
+ void onResolveRenderTarget(GrRenderTarget* target) override {
+ SkDebugf("onResolveRenderTarget not yet implemented for Vulkan\n");
+ }
+
+ // Bind vertex and index buffers
+ void bindGeometry(const GrPrimitiveProcessor&, const GrNonInstancedVertices&);
+
+ // Ends and submits the current command buffer to the queue and then creates a new command
+ // buffer and begins it. If sync is set to kForce_SyncQueue, the function will wait for all
+ // work in the queue to finish before returning.
+ void submitCommandBuffer(SyncQueue sync);
+
+ void copySurfaceAsCopyImage(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+
+ void copySurfaceAsDraw(GrSurface* dst,
+ GrSurface* src,
+ const SkIRect& srcRect,
+ const SkIPoint& dstPoint);
+
+ // helper for onCreateTexture and onWritePixels
+ bool uploadTexData(GrVkTexture* tex,
+ int left, int top, int width, int height,
+ GrPixelConfig dataConfig,
+ const void* data,
+ size_t rowBytes);
+
+ SkAutoTUnref<const GrVkInterface> fInterface;
+ SkAutoTUnref<GrVkCaps> fVkCaps;
+ VkPhysicalDeviceMemoryProperties fPhysDevMemProps;
+ VkDevice fDevice;
+ VkQueue fQueue; // for now, one queue
+ VkCommandPool fCmdPool;
+ GrVkCommandBuffer* fCurrentCmdBuffer;
+ GrVkResourceProvider fResourceProvider;
+
+ // Shaderc compiler used for compiling GLSL into SPIR-V. We only want to create the compiler once
+ // since there is significant overhead to the first compile of any compiler.
+ shaderc_compiler_t fCompiler;
+
+ // This is only for our current testing and building. The client should be holding on to the
+ // VkInstance.
+ VkInstance fVkInstance;
+
+ typedef GrGpu INHERITED;
+};
+
+#endif
diff --git a/src/gpu/vk/GrVkImage.cpp b/src/gpu/vk/GrVkImage.cpp
new file mode 100644
index 0000000000..a87a7a5a49
--- /dev/null
+++ b/src/gpu/vk/GrVkImage.cpp
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkGpu.h"
+#include "GrVkImage.h"
+#include "GrVkMemory.h"
+#include "GrVkUtil.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+void GrVkImage::setImageLayout(const GrVkGpu* gpu, VkImageLayout newLayout,
+ VkAccessFlags srcAccessMask,
+ VkAccessFlags dstAccessMask,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion) {
+ // A Vulkan image cannot be transitioned into the UNDEFINED or PREINITIALIZED layouts.
+ SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED != newLayout && VK_IMAGE_LAYOUT_PREINITIALIZED != newLayout);
+ // Is this reasonable? Could someone want to keep the same layout but use the masks to force
+ // a barrier on certain things?
+ if (newLayout == fCurrentLayout) {
+ return;
+ }
+
+ VkImageMemoryBarrier imageMemoryBarrier = {
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
+ NULL, // pNext
+ srcAccessMask, // srcAccessMask
+ dstAccessMask, // dstAccessMask
+ fCurrentLayout, // oldLayout
+ newLayout, // newLayout
+ VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
+ VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
+ fResource->fImage, // image
+ { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 } // subresourceRange
+ };
+
+ // TODO: restrict to area of image we're interested in
+ gpu->addImageMemoryBarrier(srcStageMask, dstStageMask, byRegion, &imageMemoryBarrier);
+
+ fCurrentLayout = newLayout;
+}
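+
+// A typical use, mirroring GrVkGpu: transition an image so it can serve as a transfer
+// destination (a sketch; the src masks are derived from the current layout):
+//   tex->setImageLayout(gpu,
+//                       VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+//                       GrVkMemory::LayoutToSrcAccessMask(tex->currentLayout()),
+//                       VK_ACCESS_TRANSFER_WRITE_BIT,
+//                       GrVkMemory::LayoutToPipelineStageFlags(tex->currentLayout()),
+//                       VK_PIPELINE_STAGE_TRANSFER_BIT,
+//                       false);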
+
+const GrVkImage::Resource* GrVkImage::CreateResource(const GrVkGpu* gpu,
+ const ImageDesc& imageDesc) {
+ VkImage image = 0;
+ VkDeviceMemory alloc;
+
+ VkResult err;
+
+ VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling)
+ ? VK_IMAGE_LAYOUT_PREINITIALIZED
+ : VK_IMAGE_LAYOUT_UNDEFINED;
+
+ // Create Image
+ VkSampleCountFlagBits vkSamples;
+ if (!GrSampleCountToVkSampleCount(imageDesc.fSamples, &vkSamples)) {
+ return nullptr;
+ }
+ const VkImageCreateInfo imageCreateInfo = {
+ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
+ NULL, // pNext
+ 0, // VkImageCreateFlags
+ imageDesc.fImageType, // VkImageType
+ imageDesc.fFormat, // VkFormat
+ { imageDesc.fWidth, imageDesc.fHeight, 1 }, // VkExtent3D
+ imageDesc.fLevels, // mipLevels
+ 1, // arrayLayers
+ vkSamples, // samples
+ imageDesc.fImageTiling, // VkImageTiling
+ imageDesc.fUsageFlags, // VkImageUsageFlags
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode
+ 0, // queueFamilyIndexCount
+ nullptr, // pQueueFamilyIndices
+ initialLayout // initialLayout
+ };
+
+ err = VK_CALL(gpu, CreateImage(gpu->device(), &imageCreateInfo, nullptr, &image));
+ SkASSERT(!err);
+
+ if (!GrVkMemory::AllocAndBindImageMemory(gpu, image, imageDesc.fMemProps, &alloc)) {
+ VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
+ return nullptr;
+ }
+
+ GrVkImage::Resource::Flags flags =
+ (VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling) ? Resource::kLinearTiling_Flag
+ : Resource::kNo_Flags;
+
+ return (new GrVkImage::Resource(image, alloc, flags));
+}
+
+GrVkImage::~GrVkImage() {
+ // should have been released or abandoned first
+ SkASSERT(!fResource);
+}
+
+void GrVkImage::releaseImage(const GrVkGpu* gpu) {
+ if (fResource) {
+ fResource->unref(gpu);
+ fResource = nullptr;
+ }
+}
+
+void GrVkImage::abandonImage() {
+ if (fResource) {
+ fResource->unrefAndAbandon();
+ fResource = nullptr;
+ }
+}
+
+void GrVkImage::Resource::freeGPUData(const GrVkGpu* gpu) const {
+ VK_CALL(gpu, DestroyImage(gpu->device(), fImage, nullptr));
+ VK_CALL(gpu, FreeMemory(gpu->device(), fAlloc, nullptr));
+} \ No newline at end of file
diff --git a/src/gpu/vk/GrVkImage.h b/src/gpu/vk/GrVkImage.h
new file mode 100644
index 0000000000..3467a61f2b
--- /dev/null
+++ b/src/gpu/vk/GrVkImage.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkImage_DEFINED
+#define GrVkImage_DEFINED
+
+#include "GrVkResource.h"
+#include "SkTypes.h"
+
+#include "vulkan/vulkan.h"
+
+class GrVkGpu;
+
+class GrVkImage : SkNoncopyable {
+public:
+ // unlike GrVkBuffer, this needs to be public so GrVkStencilAttachment can use it
+ class Resource : public GrVkResource {
+ public:
+ enum Flags {
+ kNo_Flags = 0,
+ kLinearTiling_Flag = 0x01
+ };
+
+ VkImage fImage;
+ VkDeviceMemory fAlloc;
+ Flags fFlags;
+
+ Resource() : INHERITED(), fImage(VK_NULL_HANDLE), fAlloc(VK_NULL_HANDLE), fFlags(kNo_Flags) {}
+
+ Resource(VkImage image, VkDeviceMemory alloc, Flags flags)
+ : fImage(image), fAlloc(alloc), fFlags(flags) {}
+
+ ~Resource() override {}
+ private:
+ void freeGPUData(const GrVkGpu* gpu) const override;
+
+ typedef GrVkResource INHERITED;
+ };
+
+
+ GrVkImage(const Resource* imageResource) : fResource(imageResource) {
+ if (imageResource->fFlags & Resource::kLinearTiling_Flag) {
+ fCurrentLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
+ } else {
+ fCurrentLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ }
+ imageResource->ref();
+ }
+
+ virtual ~GrVkImage();
+
+ VkImage textureImage() const { return fResource->fImage; }
+ VkDeviceMemory textureMemory() const { return fResource->fAlloc; }
+ const Resource* resource() const { return fResource; }
+ bool isLinearTiled() const {
+ return SkToBool(fResource->fFlags & Resource::kLinearTiling_Flag);
+ }
+
+ VkImageLayout currentLayout() const { return fCurrentLayout; }
+
+ void setImageLayout(const GrVkGpu* gpu, VkImageLayout newLayout,
+ VkAccessFlags srcAccessMask,
+ VkAccessFlags dstAccessMask,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ bool byRegion);
+
+ struct ImageDesc {
+ VkImageType fImageType;
+ VkFormat fFormat;
+ uint32_t fWidth;
+ uint32_t fHeight;
+ uint32_t fLevels;
+ uint32_t fSamples;
+ VkImageTiling fImageTiling;
+ VkImageUsageFlags fUsageFlags;
+ VkFlags fMemProps;
+
+ ImageDesc()
+ : fImageType(VK_IMAGE_TYPE_2D)
+ , fFormat(VK_FORMAT_UNDEFINED)
+ , fWidth(0)
+ , fHeight(0)
+ , fLevels(1)
+ , fSamples(1)
+ , fImageTiling(VK_IMAGE_TILING_OPTIMAL)
+ , fUsageFlags(0)
+ , fMemProps(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {}
+ };
+
+ static const Resource* CreateResource(const GrVkGpu* gpu, const ImageDesc& imageDesc);
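+
+ // A sketch of typical use (hypothetical values; the defaults cover the remaining fields):
+ //   GrVkImage::ImageDesc desc;
+ //   desc.fFormat = VK_FORMAT_R8G8B8A8_UNORM;
+ //   desc.fWidth = w;
+ //   desc.fHeight = h;
+ //   desc.fUsageFlags = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ //   const GrVkImage::Resource* rsrc = GrVkImage::CreateResource(gpu, desc);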
+
+protected:
+
+ void releaseImage(const GrVkGpu* gpu);
+ void abandonImage();
+
+ const Resource* fResource;
+
+ VkImageLayout fCurrentLayout;
+
+};
+
+#endif
diff --git a/src/gpu/vk/GrVkImageView.cpp b/src/gpu/vk/GrVkImageView.cpp
new file mode 100644
index 0000000000..70e6106879
--- /dev/null
+++ b/src/gpu/vk/GrVkImageView.cpp
@@ -0,0 +1,49 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkImageView.h"
+#include "GrVkGpu.h"
+#include "GrVkUtil.h"
+
+const GrVkImageView* GrVkImageView::Create(GrVkGpu* gpu, VkImage image, VkFormat format,
+ Type viewType) {
+ VkImageView imageView;
+
+ // Create the VkImageView
+ VkImageViewCreateInfo viewInfo = {
+ VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // sType
+ NULL, // pNext
+ 0, // flags
+ image, // image
+ VK_IMAGE_VIEW_TYPE_2D, // viewType
+ format, // format
+ { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G,
+ VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A }, // components
+ { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 }, // subresourceRange
+ };
+ if (kStencil_Type == viewType) {
+ viewInfo.components.r = VK_COMPONENT_SWIZZLE_ZERO;
+ viewInfo.components.g = VK_COMPONENT_SWIZZLE_ZERO;
+ viewInfo.components.b = VK_COMPONENT_SWIZZLE_ZERO;
+ viewInfo.components.a = VK_COMPONENT_SWIZZLE_ZERO;
+ viewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
+ }
+
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateImageView(gpu->device(), &viewInfo,
+ nullptr, &imageView));
+ if (err) {
+ return nullptr;
+ }
+
+ return new GrVkImageView(imageView);
+}
+
+void GrVkImageView::freeGPUData(const GrVkGpu* gpu) const {
+ GR_VK_CALL(gpu->vkInterface(), DestroyImageView(gpu->device(), fImageView, nullptr));
+}
+
diff --git a/src/gpu/vk/GrVkImageView.h b/src/gpu/vk/GrVkImageView.h
new file mode 100644
index 0000000000..43e960454e
--- /dev/null
+++ b/src/gpu/vk/GrVkImageView.h
@@ -0,0 +1,41 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkImageView_DEFINED
+#define GrVkImageView_DEFINED
+
+#include "GrTypes.h"
+
+#include "GrVkResource.h"
+
+#include "vulkan/vulkan.h"
+
+class GrVkImageView : public GrVkResource {
+public:
+ enum Type {
+ kColor_Type,
+ kStencil_Type
+ };
+
+ static const GrVkImageView* Create(GrVkGpu* gpu, VkImage image, VkFormat format, Type viewType);
+
+ VkImageView imageView() const { return fImageView; }
+
+private:
+ GrVkImageView(VkImageView imageView) : INHERITED(), fImageView(imageView) {}
+
+ GrVkImageView(const GrVkImageView&);
+ GrVkImageView& operator=(const GrVkImageView&);
+
+ void freeGPUData(const GrVkGpu* gpu) const override;
+
+ VkImageView fImageView;
+
+ typedef GrVkResource INHERITED;
+};
+
+#endif \ No newline at end of file
diff --git a/src/gpu/vk/GrVkIndexBuffer.cpp b/src/gpu/vk/GrVkIndexBuffer.cpp
new file mode 100644
index 0000000000..52f7bd59b7
--- /dev/null
+++ b/src/gpu/vk/GrVkIndexBuffer.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkIndexBuffer.h"
+#include "GrVkGpu.h"
+
+GrVkIndexBuffer::GrVkIndexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* bufferResource)
+ : INHERITED(gpu, desc.fSizeInBytes, desc.fDynamic, false)
+ , GrVkBuffer(desc, bufferResource) {
+ this->registerWithCache();
+}
+
+GrVkIndexBuffer* GrVkIndexBuffer::Create(GrVkGpu* gpu, size_t size, bool dynamic) {
+ GrVkBuffer::Desc desc;
+ desc.fDynamic = dynamic;
+ desc.fType = GrVkBuffer::kIndex_Type;
+ desc.fSizeInBytes = size;
+
+ const GrVkBuffer::Resource* bufferResource = GrVkBuffer::Create(gpu, desc);
+ if (!bufferResource) {
+ return nullptr;
+ }
+
+ GrVkIndexBuffer* buffer = new GrVkIndexBuffer(gpu, desc, bufferResource);
+ if (!buffer) {
+ bufferResource->unref(gpu);
+ }
+ return buffer;
+}
+
+void GrVkIndexBuffer::onRelease() {
+ if (!this->wasDestroyed()) {
+ this->vkRelease(this->getVkGpu());
+ }
+
+ INHERITED::onRelease();
+}
+
+void GrVkIndexBuffer::onAbandon() {
+ this->vkAbandon();
+ INHERITED::onAbandon();
+}
+
+void* GrVkIndexBuffer::onMap() {
+ if (!this->wasDestroyed()) {
+ return this->vkMap(this->getVkGpu());
+ } else {
+ return nullptr;
+ }
+}
+
+void GrVkIndexBuffer::onUnmap() {
+ if (!this->wasDestroyed()) {
+ this->vkUnmap(this->getVkGpu());
+ }
+}
+
+bool GrVkIndexBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
+ if (!this->wasDestroyed()) {
+ return this->vkUpdateData(this->getVkGpu(), src, srcSizeInBytes);
+ } else {
+ return false;
+ }
+}
+
+GrVkGpu* GrVkIndexBuffer::getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+}
+
diff --git a/src/gpu/vk/GrVkIndexBuffer.h b/src/gpu/vk/GrVkIndexBuffer.h
new file mode 100644
index 0000000000..84bbbd3377
--- /dev/null
+++ b/src/gpu/vk/GrVkIndexBuffer.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkIndexBuffer_DEFINED
+#define GrVkIndexBuffer_DEFINED
+
+#include "GrIndexBuffer.h"
+#include "GrVkBuffer.h"
+#include "vk/GrVkInterface.h"
+
+class GrVkGpu;
+
+class GrVkIndexBuffer : public GrIndexBuffer, public GrVkBuffer {
+
+public:
+ static GrVkIndexBuffer* Create(GrVkGpu* gpu, size_t size, bool dynamic);
+
+protected:
+ void onAbandon() override;
+ void onRelease() override;
+
+private:
+ GrVkIndexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* resource);
+
+ void* onMap() override;
+ void onUnmap() override;
+ bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
+
+ GrVkGpu* getVkGpu() const;
+
+ typedef GrIndexBuffer INHERITED;
+};
+
+#endif
diff --git a/src/gpu/vk/GrVkInterface.cpp b/src/gpu/vk/GrVkInterface.cpp
new file mode 100644
index 0000000000..07e85fcd5b
--- /dev/null
+++ b/src/gpu/vk/GrVkInterface.cpp
@@ -0,0 +1,340 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "vk/GrVkInterface.h"
+
+GrVkInterface::GrVkInterface() {
+}
+
+#define GET_PROC(F) functions->f ## F = (PFN_vk ## F) vkGetInstanceProcAddr(instance, "vk" #F)
+
+const GrVkInterface* GrVkCreateInterface(VkInstance instance) {
+
+ GrVkInterface* interface = new GrVkInterface();
+ GrVkInterface::Functions* functions = &interface->fFunctions;
+
+ GET_PROC(CreateInstance);
+ GET_PROC(DestroyInstance);
+ GET_PROC(EnumeratePhysicalDevices);
+ GET_PROC(GetPhysicalDeviceFeatures);
+ GET_PROC(GetPhysicalDeviceFormatProperties);
+ GET_PROC(GetPhysicalDeviceImageFormatProperties);
+ GET_PROC(GetPhysicalDeviceProperties);
+ GET_PROC(GetPhysicalDeviceQueueFamilyProperties);
+ GET_PROC(GetPhysicalDeviceMemoryProperties);
+ GET_PROC(CreateDevice);
+ GET_PROC(DestroyDevice);
+ GET_PROC(EnumerateInstanceExtensionProperties);
+ GET_PROC(EnumerateDeviceExtensionProperties);
+ GET_PROC(EnumerateInstanceLayerProperties);
+ GET_PROC(EnumerateDeviceLayerProperties);
+ GET_PROC(GetDeviceQueue);
+ GET_PROC(QueueSubmit);
+ GET_PROC(QueueWaitIdle);
+ GET_PROC(DeviceWaitIdle);
+ GET_PROC(AllocateMemory);
+ GET_PROC(FreeMemory);
+ GET_PROC(MapMemory);
+ GET_PROC(UnmapMemory);
+ GET_PROC(FlushMappedMemoryRanges);
+ GET_PROC(InvalidateMappedMemoryRanges);
+ GET_PROC(GetDeviceMemoryCommitment);
+ GET_PROC(BindBufferMemory);
+ GET_PROC(BindImageMemory);
+ GET_PROC(GetBufferMemoryRequirements);
+ GET_PROC(GetImageMemoryRequirements);
+ GET_PROC(GetImageSparseMemoryRequirements);
+ GET_PROC(GetPhysicalDeviceSparseImageFormatProperties);
+ GET_PROC(QueueBindSparse);
+ GET_PROC(CreateFence);
+ GET_PROC(DestroyFence);
+ GET_PROC(ResetFences);
+ GET_PROC(GetFenceStatus);
+ GET_PROC(WaitForFences);
+ GET_PROC(CreateSemaphore);
+ GET_PROC(DestroySemaphore);
+ GET_PROC(CreateEvent);
+ GET_PROC(DestroyEvent);
+ GET_PROC(GetEventStatus);
+ GET_PROC(SetEvent);
+ GET_PROC(ResetEvent);
+ GET_PROC(CreateQueryPool);
+ GET_PROC(DestroyQueryPool);
+ GET_PROC(GetQueryPoolResults);
+ GET_PROC(CreateBuffer);
+ GET_PROC(DestroyBuffer);
+ GET_PROC(CreateBufferView);
+ GET_PROC(DestroyBufferView);
+ GET_PROC(CreateImage);
+ GET_PROC(DestroyImage);
+ GET_PROC(GetImageSubresourceLayout);
+ GET_PROC(CreateImageView);
+ GET_PROC(DestroyImageView);
+ GET_PROC(CreateShaderModule);
+ GET_PROC(DestroyShaderModule);
+ GET_PROC(CreatePipelineCache);
+ GET_PROC(DestroyPipelineCache);
+ GET_PROC(GetPipelineCacheData);
+ GET_PROC(MergePipelineCaches);
+ GET_PROC(CreateGraphicsPipelines);
+ GET_PROC(CreateComputePipelines);
+ GET_PROC(DestroyPipeline);
+ GET_PROC(CreatePipelineLayout);
+ GET_PROC(DestroyPipelineLayout);
+ GET_PROC(CreateSampler);
+ GET_PROC(DestroySampler);
+ GET_PROC(CreateDescriptorSetLayout);
+ GET_PROC(DestroyDescriptorSetLayout);
+ GET_PROC(CreateDescriptorPool);
+ GET_PROC(DestroyDescriptorPool);
+ GET_PROC(ResetDescriptorPool);
+ GET_PROC(AllocateDescriptorSets);
+ GET_PROC(FreeDescriptorSets);
+ GET_PROC(UpdateDescriptorSets);
+ GET_PROC(CreateFramebuffer);
+ GET_PROC(DestroyFramebuffer);
+ GET_PROC(CreateRenderPass);
+ GET_PROC(DestroyRenderPass);
+ GET_PROC(GetRenderAreaGranularity);
+ GET_PROC(CreateCommandPool);
+ GET_PROC(DestroyCommandPool);
+ GET_PROC(ResetCommandPool);
+ GET_PROC(AllocateCommandBuffers);
+ GET_PROC(FreeCommandBuffers);
+ GET_PROC(BeginCommandBuffer);
+ GET_PROC(EndCommandBuffer);
+ GET_PROC(ResetCommandBuffer);
+ GET_PROC(CmdBindPipeline);
+ GET_PROC(CmdSetViewport);
+ GET_PROC(CmdSetScissor);
+ GET_PROC(CmdSetLineWidth);
+ GET_PROC(CmdSetDepthBias);
+ GET_PROC(CmdSetBlendConstants);
+ GET_PROC(CmdSetDepthBounds);
+ GET_PROC(CmdSetStencilCompareMask);
+ GET_PROC(CmdSetStencilWriteMask);
+ GET_PROC(CmdSetStencilReference);
+ GET_PROC(CmdBindDescriptorSets);
+ GET_PROC(CmdBindIndexBuffer);
+ GET_PROC(CmdBindVertexBuffers);
+ GET_PROC(CmdDraw);
+ GET_PROC(CmdDrawIndexed);
+ GET_PROC(CmdDrawIndirect);
+ GET_PROC(CmdDrawIndexedIndirect);
+ GET_PROC(CmdDispatch);
+ GET_PROC(CmdDispatchIndirect);
+ GET_PROC(CmdCopyBuffer);
+ GET_PROC(CmdCopyImage);
+ GET_PROC(CmdBlitImage);
+ GET_PROC(CmdCopyBufferToImage);
+ GET_PROC(CmdCopyImageToBuffer);
+ GET_PROC(CmdUpdateBuffer);
+ GET_PROC(CmdFillBuffer);
+ GET_PROC(CmdClearColorImage);
+ GET_PROC(CmdClearDepthStencilImage);
+ GET_PROC(CmdClearAttachments);
+ GET_PROC(CmdResolveImage);
+ GET_PROC(CmdSetEvent);
+ GET_PROC(CmdResetEvent);
+ GET_PROC(CmdWaitEvents);
+ GET_PROC(CmdPipelineBarrier);
+ GET_PROC(CmdBeginQuery);
+ GET_PROC(CmdEndQuery);
+ GET_PROC(CmdResetQueryPool);
+ GET_PROC(CmdWriteTimestamp);
+ GET_PROC(CmdCopyQueryPoolResults);
+ GET_PROC(CmdPushConstants);
+ GET_PROC(CmdBeginRenderPass);
+ GET_PROC(CmdNextSubpass);
+ GET_PROC(CmdEndRenderPass);
+ GET_PROC(CmdExecuteCommands);
+ GET_PROC(DestroySurfaceKHR);
+ GET_PROC(GetPhysicalDeviceSurfaceSupportKHR);
+ GET_PROC(GetPhysicalDeviceSurfaceCapabilitiesKHR);
+ GET_PROC(GetPhysicalDeviceSurfaceFormatsKHR);
+ GET_PROC(GetPhysicalDeviceSurfacePresentModesKHR);
+ GET_PROC(CreateSwapchainKHR);
+ GET_PROC(DestroySwapchainKHR);
+ GET_PROC(GetSwapchainImagesKHR);
+ GET_PROC(AcquireNextImageKHR);
+ GET_PROC(QueuePresentKHR);
+ GET_PROC(GetPhysicalDeviceDisplayPropertiesKHR);
+ GET_PROC(GetPhysicalDeviceDisplayPlanePropertiesKHR);
+ GET_PROC(GetDisplayPlaneSupportedDisplaysKHR);
+ GET_PROC(GetDisplayModePropertiesKHR);
+ GET_PROC(CreateDisplayModeKHR);
+ GET_PROC(GetDisplayPlaneCapabilitiesKHR);
+ GET_PROC(CreateDisplayPlaneSurfaceKHR);
+ GET_PROC(CreateSharedSwapchainsKHR);
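+
+ // Each GET_PROC above expands to an assignment of the form:
+ //   functions->fCreateDevice =
+ //       (PFN_vkCreateDevice) vkGetInstanceProcAddr(instance, "vkCreateDevice");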
+
+ return interface;
+}
+
+#define RETURN_FALSE_INTERFACE \
+ if (kIsDebug) { SkDebugf("%s:%d GrVkInterface::validate() failed.\n", __FILE__, __LINE__); } \
+ return false;
+
+bool GrVkInterface::validate() const {
+ // functions that are always required
+ if (NULL == fFunctions.fCreateInstance ||
+ NULL == fFunctions.fDestroyInstance ||
+ NULL == fFunctions.fEnumeratePhysicalDevices ||
+ NULL == fFunctions.fGetPhysicalDeviceFeatures ||
+ NULL == fFunctions.fGetPhysicalDeviceFormatProperties ||
+ NULL == fFunctions.fGetPhysicalDeviceImageFormatProperties ||
+ NULL == fFunctions.fGetPhysicalDeviceProperties ||
+ NULL == fFunctions.fGetPhysicalDeviceQueueFamilyProperties ||
+ NULL == fFunctions.fGetPhysicalDeviceMemoryProperties ||
+ NULL == fFunctions.fCreateDevice ||
+ NULL == fFunctions.fDestroyDevice ||
+ NULL == fFunctions.fEnumerateInstanceExtensionProperties ||
+ NULL == fFunctions.fEnumerateDeviceExtensionProperties ||
+ NULL == fFunctions.fEnumerateInstanceLayerProperties ||
+ NULL == fFunctions.fEnumerateDeviceLayerProperties ||
+ NULL == fFunctions.fGetDeviceQueue ||
+ NULL == fFunctions.fQueueSubmit ||
+ NULL == fFunctions.fQueueWaitIdle ||
+ NULL == fFunctions.fDeviceWaitIdle ||
+ NULL == fFunctions.fAllocateMemory ||
+ NULL == fFunctions.fFreeMemory ||
+ NULL == fFunctions.fMapMemory ||
+ NULL == fFunctions.fUnmapMemory ||
+ NULL == fFunctions.fFlushMappedMemoryRanges ||
+ NULL == fFunctions.fInvalidateMappedMemoryRanges ||
+ NULL == fFunctions.fGetDeviceMemoryCommitment ||
+ NULL == fFunctions.fBindBufferMemory ||
+ NULL == fFunctions.fBindImageMemory ||
+ NULL == fFunctions.fGetBufferMemoryRequirements ||
+ NULL == fFunctions.fGetImageMemoryRequirements ||
+ NULL == fFunctions.fGetImageSparseMemoryRequirements ||
+ NULL == fFunctions.fGetPhysicalDeviceSparseImageFormatProperties ||
+ NULL == fFunctions.fQueueBindSparse ||
+ NULL == fFunctions.fCreateFence ||
+ NULL == fFunctions.fDestroyFence ||
+ NULL == fFunctions.fResetFences ||
+ NULL == fFunctions.fGetFenceStatus ||
+ NULL == fFunctions.fWaitForFences ||
+ NULL == fFunctions.fCreateSemaphore ||
+ NULL == fFunctions.fDestroySemaphore ||
+ NULL == fFunctions.fCreateEvent ||
+ NULL == fFunctions.fDestroyEvent ||
+ NULL == fFunctions.fGetEventStatus ||
+ NULL == fFunctions.fSetEvent ||
+ NULL == fFunctions.fResetEvent ||
+ NULL == fFunctions.fCreateQueryPool ||
+ NULL == fFunctions.fDestroyQueryPool ||
+ NULL == fFunctions.fGetQueryPoolResults ||
+ NULL == fFunctions.fCreateBuffer ||
+ NULL == fFunctions.fDestroyBuffer ||
+ NULL == fFunctions.fCreateBufferView ||
+ NULL == fFunctions.fDestroyBufferView ||
+ NULL == fFunctions.fCreateImage ||
+ NULL == fFunctions.fDestroyImage ||
+ NULL == fFunctions.fGetImageSubresourceLayout ||
+ NULL == fFunctions.fCreateImageView ||
+ NULL == fFunctions.fDestroyImageView ||
+ NULL == fFunctions.fCreateShaderModule ||
+ NULL == fFunctions.fDestroyShaderModule ||
+ NULL == fFunctions.fCreatePipelineCache ||
+ NULL == fFunctions.fDestroyPipelineCache ||
+ NULL == fFunctions.fGetPipelineCacheData ||
+ NULL == fFunctions.fMergePipelineCaches ||
+ NULL == fFunctions.fCreateGraphicsPipelines ||
+ NULL == fFunctions.fCreateComputePipelines ||
+ NULL == fFunctions.fDestroyPipeline ||
+ NULL == fFunctions.fCreatePipelineLayout ||
+ NULL == fFunctions.fDestroyPipelineLayout ||
+ NULL == fFunctions.fCreateSampler ||
+ NULL == fFunctions.fDestroySampler ||
+ NULL == fFunctions.fCreateDescriptorSetLayout ||
+ NULL == fFunctions.fDestroyDescriptorSetLayout ||
+ NULL == fFunctions.fCreateDescriptorPool ||
+ NULL == fFunctions.fDestroyDescriptorPool ||
+ NULL == fFunctions.fResetDescriptorPool ||
+ NULL == fFunctions.fAllocateDescriptorSets ||
+ NULL == fFunctions.fFreeDescriptorSets ||
+ NULL == fFunctions.fUpdateDescriptorSets ||
+ NULL == fFunctions.fCreateFramebuffer ||
+ NULL == fFunctions.fDestroyFramebuffer ||
+ NULL == fFunctions.fCreateRenderPass ||
+ NULL == fFunctions.fDestroyRenderPass ||
+ NULL == fFunctions.fGetRenderAreaGranularity ||
+ NULL == fFunctions.fCreateCommandPool ||
+ NULL == fFunctions.fDestroyCommandPool ||
+ NULL == fFunctions.fResetCommandPool ||
+ NULL == fFunctions.fAllocateCommandBuffers ||
+ NULL == fFunctions.fFreeCommandBuffers ||
+ NULL == fFunctions.fBeginCommandBuffer ||
+ NULL == fFunctions.fEndCommandBuffer ||
+ NULL == fFunctions.fResetCommandBuffer ||
+ NULL == fFunctions.fCmdBindPipeline ||
+ NULL == fFunctions.fCmdSetViewport ||
+ NULL == fFunctions.fCmdSetScissor ||
+ NULL == fFunctions.fCmdSetLineWidth ||
+ NULL == fFunctions.fCmdSetDepthBias ||
+ NULL == fFunctions.fCmdSetBlendConstants ||
+ NULL == fFunctions.fCmdSetDepthBounds ||
+ NULL == fFunctions.fCmdSetStencilCompareMask ||
+ NULL == fFunctions.fCmdSetStencilWriteMask ||
+ NULL == fFunctions.fCmdSetStencilReference ||
+ NULL == fFunctions.fCmdBindDescriptorSets ||
+ NULL == fFunctions.fCmdBindIndexBuffer ||
+ NULL == fFunctions.fCmdBindVertexBuffers ||
+ NULL == fFunctions.fCmdDraw ||
+ NULL == fFunctions.fCmdDrawIndexed ||
+ NULL == fFunctions.fCmdDrawIndirect ||
+ NULL == fFunctions.fCmdDrawIndexedIndirect ||
+ NULL == fFunctions.fCmdDispatch ||
+ NULL == fFunctions.fCmdDispatchIndirect ||
+ NULL == fFunctions.fCmdCopyBuffer ||
+ NULL == fFunctions.fCmdCopyImage ||
+ NULL == fFunctions.fCmdBlitImage ||
+ NULL == fFunctions.fCmdCopyBufferToImage ||
+ NULL == fFunctions.fCmdCopyImageToBuffer ||
+ NULL == fFunctions.fCmdUpdateBuffer ||
+ NULL == fFunctions.fCmdFillBuffer ||
+ NULL == fFunctions.fCmdClearColorImage ||
+ NULL == fFunctions.fCmdClearDepthStencilImage ||
+ NULL == fFunctions.fCmdClearAttachments ||
+ NULL == fFunctions.fCmdResolveImage ||
+ NULL == fFunctions.fCmdSetEvent ||
+ NULL == fFunctions.fCmdResetEvent ||
+ NULL == fFunctions.fCmdWaitEvents ||
+ NULL == fFunctions.fCmdPipelineBarrier ||
+ NULL == fFunctions.fCmdBeginQuery ||
+ NULL == fFunctions.fCmdEndQuery ||
+ NULL == fFunctions.fCmdResetQueryPool ||
+ NULL == fFunctions.fCmdWriteTimestamp ||
+ NULL == fFunctions.fCmdCopyQueryPoolResults ||
+ NULL == fFunctions.fCmdPushConstants ||
+ NULL == fFunctions.fCmdBeginRenderPass ||
+ NULL == fFunctions.fCmdNextSubpass ||
+ NULL == fFunctions.fCmdEndRenderPass ||
+ NULL == fFunctions.fCmdExecuteCommands ||
+ NULL == fFunctions.fDestroySurfaceKHR ||
+ NULL == fFunctions.fGetPhysicalDeviceSurfaceSupportKHR ||
+ NULL == fFunctions.fGetPhysicalDeviceSurfaceCapabilitiesKHR ||
+ NULL == fFunctions.fGetPhysicalDeviceSurfaceFormatsKHR ||
+ NULL == fFunctions.fGetPhysicalDeviceSurfacePresentModesKHR ||
+ NULL == fFunctions.fCreateSwapchainKHR ||
+ NULL == fFunctions.fDestroySwapchainKHR ||
+ NULL == fFunctions.fGetSwapchainImagesKHR ||
+ NULL == fFunctions.fAcquireNextImageKHR ||
+ NULL == fFunctions.fQueuePresentKHR ||
+ NULL == fFunctions.fGetPhysicalDeviceDisplayPropertiesKHR ||
+ NULL == fFunctions.fGetPhysicalDeviceDisplayPlanePropertiesKHR ||
+ NULL == fFunctions.fGetDisplayPlaneSupportedDisplaysKHR ||
+ NULL == fFunctions.fGetDisplayModePropertiesKHR ||
+ NULL == fFunctions.fCreateDisplayModeKHR ||
+ NULL == fFunctions.fGetDisplayPlaneCapabilitiesKHR ||
+ NULL == fFunctions.fCreateDisplayPlaneSurfaceKHR ||
+ NULL == fFunctions.fCreateSharedSwapchainsKHR) {
+ return false;
+ }
+ return true;
+}
+
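
The validate() routine above is the single gate for missing entry points: every function pointer in GrVkInterface is resolved by name when the interface is built, and a loader that fails to supply one leaves a NULL behind that validate() catches before the backend is used. A minimal sketch of the intended call pattern, assuming the factory defined earlier in this file takes the VkInstance (the factory's declaration is outside this excerpt, so the name below is an assumption):

    VkInstance instance;  // assumed to have been created with vkCreateInstance
    const GrVkInterface* interface = GrVkCreateInterface(instance);  // assumed factory name
    if (nullptr == interface || !interface->validate()) {
        SkDebugf("Required Vulkan entry points are missing; cannot create a GrVkGpu.\n");
        // tear down the VkInstance and fail backend creation
    }
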
diff --git a/src/gpu/vk/GrVkMemory.cpp b/src/gpu/vk/GrVkMemory.cpp
new file mode 100644
index 0000000000..276fd4b0a2
--- /dev/null
+++ b/src/gpu/vk/GrVkMemory.cpp
@@ -0,0 +1,157 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkMemory.h"
+
+#include "GrVkGpu.h"
+#include "GrVkUtil.h"
+
+static bool get_valid_memory_type_index(VkPhysicalDeviceMemoryProperties physDevMemProps,
+ uint32_t typeBits,
+ VkMemoryPropertyFlags requestedMemFlags,
+ uint32_t* typeIndex) {
+ uint32_t checkBit = 1;
+ for (uint32_t i = 0; i < 32; ++i) {
+ if (typeBits & checkBit) {
+ uint32_t supportedFlags = physDevMemProps.memoryTypes[i].propertyFlags &
+ requestedMemFlags;
+ if (supportedFlags == requestedMemFlags) {
+ *typeIndex = i;
+ return true;
+ }
+ }
+ checkBit <<= 1;
+ }
+ return false;
+}
+
+static bool alloc_device_memory(const GrVkGpu* gpu,
+ VkMemoryRequirements* memReqs,
+ const VkMemoryPropertyFlags flags,
+ VkDeviceMemory* memory) {
+ uint32_t typeIndex;
+ if (!get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
+ memReqs->memoryTypeBits,
+ flags,
+ &typeIndex)) {
+ return false;
+ }
+
+ VkMemoryAllocateInfo allocInfo = {
+ VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
+ NULL, // pNext
+ memReqs->size, // allocationSize
+ typeIndex, // memoryTypeIndex
+ };
+
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateMemory(gpu->device(),
+ &allocInfo,
+ nullptr,
+ memory));
+ if (err) {
+ return false;
+ }
+ return true;
+}
+
+bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
+ VkBuffer buffer,
+ const VkMemoryPropertyFlags flags,
+ VkDeviceMemory* memory) {
+ const GrVkInterface* interface = gpu->vkInterface();
+ VkDevice device = gpu->device();
+
+ VkMemoryRequirements memReqs;
+ GR_VK_CALL(interface, GetBufferMemoryRequirements(device, buffer, &memReqs));
+
+ if (!alloc_device_memory(gpu, &memReqs, flags, memory)) {
+ return false;
+ }
+
+ // Bind the allocated memory to the buffer
+ VkResult err = GR_VK_CALL(interface, BindBufferMemory(device, buffer, *memory, 0));
+ if (err) {
+ GR_VK_CALL(interface, FreeMemory(device, *memory, nullptr));
+ return false;
+ }
+ return true;
+}
+
+bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
+ VkImage image,
+ const VkMemoryPropertyFlags flags,
+ VkDeviceMemory* memory) {
+ const GrVkInterface* interface = gpu->vkInterface();
+ VkDevice device = gpu->device();
+
+ VkMemoryRequirements memReqs;
+ GR_VK_CALL(interface, GetImageMemoryRequirements(device, image, &memReqs));
+
+ if (!alloc_device_memory(gpu, &memReqs, flags, memory)) {
+ return false;
+ }
+
+ // Bind the allocated memory to the image
+ VkResult err = GR_VK_CALL(interface, BindImageMemory(device, image, *memory, 0));
+ if (err) {
+ GR_VK_CALL(interface, FreeMemory(device, *memory, nullptr));
+ return false;
+ }
+ return true;
+}
+
+VkPipelineStageFlags GrVkMemory::LayoutToPipelineStageFlags(const VkImageLayout layout) {
+ if (VK_IMAGE_LAYOUT_GENERAL == layout) {
+ return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+ } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
+ return VK_PIPELINE_STAGE_TRANSFER_BIT;
+ } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
+ return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+ } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
+ return VK_PIPELINE_STAGE_HOST_BIT;
+ }
+
+ SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
+ return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+}
+
+VkAccessFlags GrVkMemory::LayoutToSrcAccessMask(const VkImageLayout layout) {
+ // Currently we assume we will never be doing any explicit shader writes (this doesn't include
+ // color attachment or depth/stencil writes), so we ignore VK_ACCESS_SHADER_WRITE_BIT.
+
+ // We can only directly access the host memory if we are in preinitialized or general layout,
+ // and the image is linear.
+ // TODO: Add a check for linear tiling here so we are not always adding host access to general
+ // layout; we should only be in preinitialized layout if the image is linear.
+ VkAccessFlags flags = 0;
+ if (VK_IMAGE_LAYOUT_GENERAL == layout) {
+ flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_TRANSFER_WRITE_BIT |
+ VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
+ } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
+ flags = VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
+ } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
+ flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
+ flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
+ flags = VK_ACCESS_TRANSFER_WRITE_BIT;
+ } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
+ flags = VK_ACCESS_TRANSFER_READ_BIT;
+ } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
+ flags = VK_ACCESS_SHADER_READ_BIT;
+ }
+ return flags;
+}
+
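
get_valid_memory_type_index() above is the standard Vulkan memory-type scan: VkMemoryRequirements::memoryTypeBits has bit i set exactly when memory type i can back the resource, and the first such type that also advertises every requested property flag wins. The same scan as a standalone sketch (find_memory_type is a hypothetical name; bounding by memoryTypeCount instead of a fixed 32 is equivalent, since VK_MAX_MEMORY_TYPES is 32):

    static bool find_memory_type(const VkPhysicalDeviceMemoryProperties& props,
                                 uint32_t typeBits,
                                 VkMemoryPropertyFlags wanted,
                                 uint32_t* outIndex) {
        for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
            // Bit i of typeBits set means memory type i is compatible with the resource.
            if ((typeBits & (1u << i)) &&
                (props.memoryTypes[i].propertyFlags & wanted) == wanted) {
                *outIndex = i;  // first compatible type wins
                return true;
            }
        }
        return false;
    }
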
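The two layout helpers at the bottom of the file exist to fill in the source half of an image barrier from nothing but the image's current layout; the destination masks depend on the new use and are chosen by the caller. A hedged sketch of a transition to shader-read, mirroring what GrVkProgram::writeSamplers() does later in this CL:

    VkImageLayout oldLayout = texture->currentLayout();
    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(oldLayout);
    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(oldLayout);
    // Destination half, picked by the caller for sampling in a shader:
    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
    VkAccessFlags dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    // These four masks feed the VkImageMemoryBarrier and the surrounding
    // vkCmdPipelineBarrier() that records the transition.
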
diff --git a/src/gpu/vk/GrVkMemory.h b/src/gpu/vk/GrVkMemory.h
new file mode 100644
index 0000000000..4d351dcd34
--- /dev/null
+++ b/src/gpu/vk/GrVkMemory.h
@@ -0,0 +1,35 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkMemory_DEFINED
+#define GrVkMemory_DEFINED
+
+#include "vulkan/vulkan.h"
+
+class GrVkGpu;
+
+namespace GrVkMemory {
+ /**
+ * Allocates Vulkan device memory on the gpu's device and binds it to the given object.
+ * Returns true if the allocation succeeded.
+ */
+ bool AllocAndBindBufferMemory(const GrVkGpu* gpu,
+ VkBuffer buffer,
+ const VkMemoryPropertyFlags flags,
+ VkDeviceMemory* memory);
+
+ bool AllocAndBindImageMemory(const GrVkGpu* gpu,
+ VkImage image,
+ const VkMemoryPropertyFlags flags,
+ VkDeviceMemory* memory);
+
+ VkPipelineStageFlags LayoutToPipelineStageFlags(const VkImageLayout layout);
+
+ VkAccessFlags LayoutToSrcAccessMask(const VkImageLayout layout);
+}
+
+#endif \ No newline at end of file
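
A hedged usage sketch for the buffer path: after vkCreateBuffer succeeds, allocation and binding collapse into the single call below, and the caller owns the returned VkDeviceMemory (to be freed with vkFreeMemory once the buffer is destroyed). 'gpu' and 'buffer' are assumed to be a live GrVkGpu* and VkBuffer as elsewhere in this CL:

    VkDeviceMemory memory = VK_NULL_HANDLE;
    // Request host-visible, coherent memory so the buffer can be mapped directly.
    if (!GrVkMemory::AllocAndBindBufferMemory(gpu, buffer,
                                              VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                              VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                                              &memory)) {
        // Allocation or bind failed; destroy the buffer and bail out.
    }
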
diff --git a/src/gpu/vk/GrVkPipeline.cpp b/src/gpu/vk/GrVkPipeline.cpp
new file mode 100644
index 0000000000..520cb1b44d
--- /dev/null
+++ b/src/gpu/vk/GrVkPipeline.cpp
@@ -0,0 +1,507 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkPipeline.h"
+
+#include "GrGeometryProcessor.h"
+#include "GrPipeline.h"
+
+#include "GrVkGpu.h"
+#include "GrVkProgramDesc.h"
+#include "GrVkRenderTarget.h"
+#include "GrVkUtil.h"
+
+static inline const VkFormat& attrib_type_to_vkformat(GrVertexAttribType type) {
+ SkASSERT(type >= 0 && type < kGrVertexAttribTypeCount);
+ static const VkFormat kFormats[kGrVertexAttribTypeCount] = {
+ VK_FORMAT_R32_SFLOAT, // kFloat_GrVertexAttribType
+ VK_FORMAT_R32G32_SFLOAT, // kVec2f_GrVertexAttribType
+ VK_FORMAT_R32G32B32_SFLOAT, // kVec3f_GrVertexAttribType
+ VK_FORMAT_R32G32B32A32_SFLOAT, // kVec4f_GrVertexAttribType
+ VK_FORMAT_R8_UNORM, // kUByte_GrVertexAttribType
+ VK_FORMAT_R8G8B8A8_UNORM, // kVec4ub_GrVertexAttribType
+ VK_FORMAT_R16G16_SSCALED, // kVec2s_GrVertexAttribType
+ };
+ GR_STATIC_ASSERT(0 == kFloat_GrVertexAttribType);
+ GR_STATIC_ASSERT(1 == kVec2f_GrVertexAttribType);
+ GR_STATIC_ASSERT(2 == kVec3f_GrVertexAttribType);
+ GR_STATIC_ASSERT(3 == kVec4f_GrVertexAttribType);
+ GR_STATIC_ASSERT(4 == kUByte_GrVertexAttribType);
+ GR_STATIC_ASSERT(5 == kVec4ub_GrVertexAttribType);
+ GR_STATIC_ASSERT(6 == kVec2s_GrVertexAttribType);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kFormats) == kGrVertexAttribTypeCount);
+ return kFormats[type];
+}
+
+static void setup_vertex_input_state(const GrPrimitiveProcessor& primProc,
+ VkPipelineVertexInputStateCreateInfo* vertexInputInfo,
+ VkVertexInputBindingDescription* bindingDesc,
+ int maxBindingDescCount,
+ VkVertexInputAttributeDescription* attributeDesc,
+ int maxAttributeDescCount) {
+ // for now we have only one vertex buffer and one binding
+ memset(bindingDesc, 0, sizeof(VkVertexInputBindingDescription));
+ bindingDesc->binding = 0;
+ bindingDesc->stride = (uint32_t)primProc.getVertexStride();
+ bindingDesc->inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
+
+ // setup attribute descriptions
+ int vaCount = primProc.numAttribs();
+ SkASSERT(vaCount < maxAttributeDescCount);
+ if (vaCount > 0) {
+ size_t offset = 0;
+ for (int attribIndex = 0; attribIndex < vaCount; attribIndex++) {
+ const GrGeometryProcessor::Attribute& attrib = primProc.getAttrib(attribIndex);
+ GrVertexAttribType attribType = attrib.fType;
+
+ VkVertexInputAttributeDescription& vkAttrib = attributeDesc[attribIndex];
+ vkAttrib.location = attribIndex; // for now assume location = attribIndex
+ vkAttrib.binding = 0; // for now only one vertex buffer & binding
+ vkAttrib.format = attrib_type_to_vkformat(attribType);
+ vkAttrib.offset = static_cast<uint32_t>(offset);
+ offset += attrib.fOffset;
+ }
+ }
+
+ memset(vertexInputInfo, 0, sizeof(VkPipelineVertexInputStateCreateInfo));
+ vertexInputInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
+ vertexInputInfo->pNext = nullptr;
+ vertexInputInfo->flags = 0;
+ vertexInputInfo->vertexBindingDescriptionCount = 1;
+ vertexInputInfo->pVertexBindingDescriptions = bindingDesc;
+ vertexInputInfo->vertexAttributeDescriptionCount = vaCount;
+ vertexInputInfo->pVertexAttributeDescriptions = attributeDesc;
+}
+
+static void setup_input_assembly_state(GrPrimitiveType primitiveType,
+ VkPipelineInputAssemblyStateCreateInfo* inputAssemblyInfo) {
+ static const VkPrimitiveTopology gPrimitiveType2VkTopology[] = {
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN,
+ VK_PRIMITIVE_TOPOLOGY_POINT_LIST,
+ VK_PRIMITIVE_TOPOLOGY_LINE_LIST,
+ VK_PRIMITIVE_TOPOLOGY_LINE_STRIP
+ };
+
+ memset(inputAssemblyInfo, 0, sizeof(VkPipelineInputAssemblyStateCreateInfo));
+ inputAssemblyInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
+ inputAssemblyInfo->pNext = nullptr;
+ inputAssemblyInfo->flags = 0;
+ inputAssemblyInfo->primitiveRestartEnable = VK_FALSE;
+ inputAssemblyInfo->topology = gPrimitiveType2VkTopology[primitiveType];
+}
+
+static VkStencilOp stencil_op_to_vk_stencil_op(GrStencilOp op) {
+ static const VkStencilOp gTable[] = {
+ VK_STENCIL_OP_KEEP, // kKeep_StencilOp
+ VK_STENCIL_OP_REPLACE, // kReplace_StencilOp
+ VK_STENCIL_OP_INCREMENT_AND_WRAP, // kIncWrap_StencilOp
+ VK_STENCIL_OP_INCREMENT_AND_CLAMP, // kIncClamp_StencilOp
+ VK_STENCIL_OP_DECREMENT_AND_WRAP, // kDecWrap_StencilOp
+ VK_STENCIL_OP_DECREMENT_AND_CLAMP, // kDecClamp_StencilOp
+ VK_STENCIL_OP_ZERO, // kZero_StencilOp
+ VK_STENCIL_OP_INVERT, // kInvert_StencilOp
+ };
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kStencilOpCount);
+ GR_STATIC_ASSERT(0 == kKeep_StencilOp);
+ GR_STATIC_ASSERT(1 == kReplace_StencilOp);
+ GR_STATIC_ASSERT(2 == kIncWrap_StencilOp);
+ GR_STATIC_ASSERT(3 == kIncClamp_StencilOp);
+ GR_STATIC_ASSERT(4 == kDecWrap_StencilOp);
+ GR_STATIC_ASSERT(5 == kDecClamp_StencilOp);
+ GR_STATIC_ASSERT(6 == kZero_StencilOp);
+ GR_STATIC_ASSERT(7 == kInvert_StencilOp);
+ SkASSERT((unsigned)op < kStencilOpCount);
+ return gTable[op];
+}
+
+static VkCompareOp stencil_func_to_vk_compare_op(GrStencilFunc basicFunc) {
+ static const VkCompareOp gTable[] = {
+ VK_COMPARE_OP_ALWAYS, // kAlways_StencilFunc
+ VK_COMPARE_OP_NEVER, // kNever_StencilFunc
+ VK_COMPARE_OP_GREATER, // kGreater_StencilFunc
+ VK_COMPARE_OP_GREATER_OR_EQUAL, // kGEqual_StencilFunc
+ VK_COMPARE_OP_LESS, // kLess_StencilFunc
+ VK_COMPARE_OP_LESS_OR_EQUAL, // kLEqual_StencilFunc
+ VK_COMPARE_OP_EQUAL, // kEqual_StencilFunc
+ VK_COMPARE_OP_NOT_EQUAL, // kNotEqual_StencilFunc
+ };
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kBasicStencilFuncCount);
+ GR_STATIC_ASSERT(0 == kAlways_StencilFunc);
+ GR_STATIC_ASSERT(1 == kNever_StencilFunc);
+ GR_STATIC_ASSERT(2 == kGreater_StencilFunc);
+ GR_STATIC_ASSERT(3 == kGEqual_StencilFunc);
+ GR_STATIC_ASSERT(4 == kLess_StencilFunc);
+ GR_STATIC_ASSERT(5 == kLEqual_StencilFunc);
+ GR_STATIC_ASSERT(6 == kEqual_StencilFunc);
+ GR_STATIC_ASSERT(7 == kNotEqual_StencilFunc);
+ SkASSERT((unsigned)basicFunc < kBasicStencilFuncCount);
+
+ return gTable[basicFunc];
+}
+
+static void setup_depth_stencil_state(const GrVkGpu* gpu,
+ const GrStencilSettings& stencilSettings,
+ VkPipelineDepthStencilStateCreateInfo* stencilInfo) {
+ memset(stencilInfo, 0, sizeof(VkPipelineDepthStencilStateCreateInfo));
+ stencilInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
+ stencilInfo->pNext = nullptr;
+ stencilInfo->flags = 0;
+ // set depth testing defaults
+ stencilInfo->depthTestEnable = VK_FALSE;
+ stencilInfo->depthWriteEnable = VK_FALSE;
+ stencilInfo->depthCompareOp = VK_COMPARE_OP_ALWAYS;
+ stencilInfo->depthBoundsTestEnable = VK_FALSE;
+ stencilInfo->stencilTestEnable = !stencilSettings.isDisabled();
+ if (!stencilSettings.isDisabled()) {
+ // Set front face
+ GrStencilSettings::Face face = GrStencilSettings::kFront_Face;
+ stencilInfo->front.failOp = stencil_op_to_vk_stencil_op(stencilSettings.failOp(face));
+ stencilInfo->front.passOp = stencil_op_to_vk_stencil_op(stencilSettings.passOp(face));
+ stencilInfo->front.depthFailOp = stencilInfo->front.failOp;
+ stencilInfo->front.compareOp = stencil_func_to_vk_compare_op(stencilSettings.func(face));
+ stencilInfo->front.compareMask = stencilSettings.funcMask(face);
+ stencilInfo->front.writeMask = 0;
+ stencilInfo->front.reference = 0;
+
+ // Set back face
+ face = GrStencilSettings::kBack_Face;
+ stencilInfo->back.failOp = stencil_op_to_vk_stencil_op(stencilSettings.failOp(face));
+ stencilInfo->back.passOp = stencil_op_to_vk_stencil_op(stencilSettings.passOp(face));
+ stencilInfo->back.depthFailOp = stencilInfo->back.failOp;
+ stencilInfo->back.compareOp = stencil_func_to_vk_compare_op(stencilSettings.func(face));
+ stencilInfo->back.compareMask = stencilSettings.funcMask(face);
+ stencilInfo->back.writeMask = 0;
+ stencilInfo->back.reference = 0;
+ }
+ stencilInfo->minDepthBounds = 0.0f;
+ stencilInfo->maxDepthBounds = 1.0f;
+}
+
+static void setup_viewport_scissor_state(const GrVkGpu* gpu,
+ const GrPipeline& pipeline,
+ const GrVkRenderTarget* vkRT,
+ VkPipelineViewportStateCreateInfo* viewportInfo,
+ VkViewport* viewport,
+ VkRect2D* scissor) {
+ memset(viewportInfo, 0, sizeof(VkPipelineViewportStateCreateInfo));
+ viewportInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
+ viewportInfo->pNext = nullptr;
+ viewportInfo->flags = 0;
+
+ viewport->x = 0.0f;
+ viewport->y = 0.0f;
+ viewport->width = SkIntToScalar(vkRT->width());
+ viewport->height = SkIntToScalar(vkRT->height());
+ viewport->minDepth = 0.0f;
+ viewport->maxDepth = 1.0f;
+ viewportInfo->viewportCount = 1;
+ viewportInfo->pViewports = viewport;
+
+ const GrScissorState& scissorState = pipeline.getScissorState();
+ if (scissorState.enabled() &&
+ !scissorState.rect().contains(0, 0, vkRT->width(), vkRT->height())) {
+ // This all assumes the scissorState has previously been clipped to the device space render
+ // target.
+ scissor->offset.x = scissorState.rect().fLeft;
+ scissor->extent.width = scissorState.rect().width();
+ if (kTopLeft_GrSurfaceOrigin == vkRT->origin()) {
+ scissor->offset.y = scissorState.rect().fTop;
+ } else {
+ SkASSERT(kBottomLeft_GrSurfaceOrigin == vkRT->origin());
+ scissor->offset.y = vkRT->height() - scissorState.rect().fBottom;
+ }
+ scissor->extent.height = scissorState.rect().height();
+
+ viewportInfo->scissorCount = 1;
+ viewportInfo->pScissors = scissor;
+ SkASSERT(scissor->offset.x >= 0);
+ SkASSERT(scissor->offset.x + scissor->extent.width <= (uint32_t)vkRT->width());
+ SkASSERT(scissor->offset.y >= 0);
+ SkASSERT(scissor->offset.y + scissor->extent.height <= (uint32_t)vkRT->height());
+ } else {
+ scissor->extent.width = vkRT->width();
+ scissor->extent.height = vkRT->height();
+ scissor->offset.x = 0;
+ scissor->offset.y = 0;
+ viewportInfo->scissorCount = 1;
+ viewportInfo->pScissors = scissor;
+ }
+ SkASSERT(viewportInfo->viewportCount == viewportInfo->scissorCount);
+}
+
+static void setup_multisample_state(const GrPipeline& pipeline,
+ VkPipelineMultisampleStateCreateInfo* multisampleInfo) {
+ memset(multisampleInfo, 0, sizeof(VkPipelineMultisampleStateCreateInfo));
+ multisampleInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
+ multisampleInfo->pNext = nullptr;
+ multisampleInfo->flags = 0;
+ int numSamples = pipeline.getRenderTarget()->numColorSamples();
+ SkAssertResult(GrSampleCountToVkSampleCount(numSamples,
+ &multisampleInfo->rasterizationSamples));
+ multisampleInfo->sampleShadingEnable = VK_FALSE;
+ multisampleInfo->minSampleShading = 0;
+ multisampleInfo->pSampleMask = nullptr;
+ multisampleInfo->alphaToCoverageEnable = VK_FALSE;
+ multisampleInfo->alphaToOneEnable = VK_FALSE;
+}
+
+static VkBlendFactor blend_coeff_to_vk_blend(GrBlendCoeff coeff) {
+ static const VkBlendFactor gTable[] = {
+ VK_BLEND_FACTOR_ZERO, // kZero_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE, // kOne_GrBlendCoeff
+ VK_BLEND_FACTOR_SRC_COLOR, // kSC_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR, // kISC_GrBlendCoeff
+ VK_BLEND_FACTOR_DST_COLOR, // kDC_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR, // kIDC_GrBlendCoeff
+ VK_BLEND_FACTOR_SRC_ALPHA, // kSA_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA, // kISA_GrBlendCoeff
+ VK_BLEND_FACTOR_DST_ALPHA, // kDA_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA, // kIDA_GrBlendCoeff
+ VK_BLEND_FACTOR_CONSTANT_COLOR, // kConstC_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR, // kIConstC_GrBlendCoeff
+ VK_BLEND_FACTOR_CONSTANT_ALPHA, // kConstA_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA, // kIConstA_GrBlendCoeff
+ VK_BLEND_FACTOR_SRC1_COLOR, // kS2C_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR, // kIS2C_GrBlendCoeff
+ VK_BLEND_FACTOR_SRC1_ALPHA, // kS2A_GrBlendCoeff
+ VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA, // kIS2A_GrBlendCoeff
+ };
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kGrBlendCoeffCnt);
+ GR_STATIC_ASSERT(0 == kZero_GrBlendCoeff);
+ GR_STATIC_ASSERT(1 == kOne_GrBlendCoeff);
+ GR_STATIC_ASSERT(2 == kSC_GrBlendCoeff);
+ GR_STATIC_ASSERT(3 == kISC_GrBlendCoeff);
+ GR_STATIC_ASSERT(4 == kDC_GrBlendCoeff);
+ GR_STATIC_ASSERT(5 == kIDC_GrBlendCoeff);
+ GR_STATIC_ASSERT(6 == kSA_GrBlendCoeff);
+ GR_STATIC_ASSERT(7 == kISA_GrBlendCoeff);
+ GR_STATIC_ASSERT(8 == kDA_GrBlendCoeff);
+ GR_STATIC_ASSERT(9 == kIDA_GrBlendCoeff);
+ GR_STATIC_ASSERT(10 == kConstC_GrBlendCoeff);
+ GR_STATIC_ASSERT(11 == kIConstC_GrBlendCoeff);
+ GR_STATIC_ASSERT(12 == kConstA_GrBlendCoeff);
+ GR_STATIC_ASSERT(13 == kIConstA_GrBlendCoeff);
+ GR_STATIC_ASSERT(14 == kS2C_GrBlendCoeff);
+ GR_STATIC_ASSERT(15 == kIS2C_GrBlendCoeff);
+ GR_STATIC_ASSERT(16 == kS2A_GrBlendCoeff);
+ GR_STATIC_ASSERT(17 == kIS2A_GrBlendCoeff);
+
+ SkASSERT((unsigned)coeff < kGrBlendCoeffCnt);
+ return gTable[coeff];
+}
+
+static VkBlendOp blend_equation_to_vk_blend_op(GrBlendEquation equation) {
+ static const VkBlendOp gTable[] = {
+ VK_BLEND_OP_ADD, // kAdd_GrBlendEquation
+ VK_BLEND_OP_SUBTRACT, // kSubtract_GrBlendEquation
+ VK_BLEND_OP_REVERSE_SUBTRACT, // kReverseSubtract_GrBlendEquation
+ };
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kFirstAdvancedGrBlendEquation);
+ GR_STATIC_ASSERT(0 == kAdd_GrBlendEquation);
+ GR_STATIC_ASSERT(1 == kSubtract_GrBlendEquation);
+ GR_STATIC_ASSERT(2 == kReverseSubtract_GrBlendEquation);
+
+ SkASSERT((unsigned)equation < kFirstAdvancedGrBlendEquation);
+ return gTable[equation];
+}
+
+static bool blend_coeff_refs_constant(GrBlendCoeff coeff) {
+ static const bool gCoeffReferencesBlendConst[] = {
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ false,
+ true,
+ true,
+ true,
+ true,
+
+ // extended blend coeffs
+ false,
+ false,
+ false,
+ false,
+ };
+ GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gCoeffReferencesBlendConst));
+ // Individual enum asserts already made in blend_coeff_to_vk_blend
+ return gCoeffReferencesBlendConst[coeff];
+}
+
+static void setup_color_blend_state(const GrVkGpu* gpu,
+ const GrPipeline& pipeline,
+ VkPipelineColorBlendStateCreateInfo* colorBlendInfo,
+ VkPipelineColorBlendAttachmentState* attachmentState) {
+ GrXferProcessor::BlendInfo blendInfo;
+ pipeline.getXferProcessor().getBlendInfo(&blendInfo);
+
+ GrBlendEquation equation = blendInfo.fEquation;
+ GrBlendCoeff srcCoeff = blendInfo.fSrcBlend;
+ GrBlendCoeff dstCoeff = blendInfo.fDstBlend;
+ bool blendOff = (kAdd_GrBlendEquation == equation || kSubtract_GrBlendEquation == equation) &&
+ kOne_GrBlendCoeff == srcCoeff && kZero_GrBlendCoeff == dstCoeff;
+
+ memset(attachmentState, 0, sizeof(VkPipelineColorBlendAttachmentState));
+ attachmentState->blendEnable = !blendOff;
+ if (!blendOff) {
+ attachmentState->srcColorBlendFactor = blend_coeff_to_vk_blend(srcCoeff);
+ attachmentState->dstColorBlendFactor = blend_coeff_to_vk_blend(dstCoeff);
+ attachmentState->colorBlendOp = blend_equation_to_vk_blend_op(equation);
+ attachmentState->srcAlphaBlendFactor = blend_coeff_to_vk_blend(srcCoeff);
+ attachmentState->dstAlphaBlendFactor = blend_coeff_to_vk_blend(dstCoeff);
+ attachmentState->alphaBlendOp = blend_equation_to_vk_blend_op(equation);
+ }
+ attachmentState->colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
+ VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
+
+ memset(colorBlendInfo, 0, sizeof(VkPipelineColorBlendStateCreateInfo));
+ colorBlendInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
+ colorBlendInfo->pNext = nullptr;
+ colorBlendInfo->flags = 0;
+ colorBlendInfo->logicOpEnable = VK_FALSE;
+ colorBlendInfo->attachmentCount = 1;
+ colorBlendInfo->pAttachments = attachmentState;
+ if (blend_coeff_refs_constant(srcCoeff) || blend_coeff_refs_constant(dstCoeff)) {
+ GrColorToRGBAFloat(blendInfo.fBlendConstant, colorBlendInfo->blendConstants);
+ }
+}
+
+static VkCullModeFlags draw_face_to_vk_cull_mode(GrPipelineBuilder::DrawFace drawFace) {
+ // Assumes that we've set the front face to be ccw
+ static const VkCullModeFlags gTable[] = {
+ VK_CULL_MODE_NONE, // kBoth_DrawFace
+ VK_CULL_MODE_BACK_BIT, // kCCW_DrawFace, cull back face
+ VK_CULL_MODE_FRONT_BIT, // kCW_DrawFace, cull front face
+ };
+ GR_STATIC_ASSERT(0 == GrPipelineBuilder::kBoth_DrawFace);
+ GR_STATIC_ASSERT(1 == GrPipelineBuilder::kCCW_DrawFace);
+ GR_STATIC_ASSERT(2 == GrPipelineBuilder::kCW_DrawFace);
+ SkASSERT((unsigned)drawFace <= 2);
+
+ return gTable[drawFace];
+}
+
+static void setup_raster_state(const GrVkGpu* gpu,
+ const GrPipeline& pipeline,
+ VkPipelineRasterizationStateCreateInfo* rasterInfo) {
+ memset(rasterInfo, 0, sizeof(VkPipelineRasterizationStateCreateInfo));
+ rasterInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
+ rasterInfo->pNext = nullptr;
+ rasterInfo->flags = 0;
+ rasterInfo->depthClampEnable = VK_FALSE;
+ rasterInfo->rasterizerDiscardEnable = VK_FALSE;
+ rasterInfo->polygonMode = VK_POLYGON_MODE_FILL;
+ rasterInfo->cullMode = draw_face_to_vk_cull_mode(pipeline.getDrawFace());
+ rasterInfo->frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
+ rasterInfo->depthBiasEnable = VK_FALSE;
+ rasterInfo->depthBiasConstantFactor = 0.0f;
+ rasterInfo->depthBiasClamp = 0.0f;
+ rasterInfo->depthBiasSlopeFactor = 0.0f;
+ rasterInfo->lineWidth = 1.0f;
+}
+
+static void setup_dynamic_state(const GrVkGpu* gpu,
+ const GrPipeline& pipeline,
+ VkPipelineDynamicStateCreateInfo* dynamicInfo) {
+ memset(dynamicInfo, 0, sizeof(VkPipelineDynamicStateCreateInfo));
+ dynamicInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
+ // TODO: mask out any state we might want to set dynamically
+ dynamicInfo->dynamicStateCount = 0;
+}
+
+GrVkPipeline* GrVkPipeline::Create(GrVkGpu* gpu, const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ VkPipelineShaderStageCreateInfo* shaderStageInfo,
+ int shaderStageCount,
+ GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass,
+ VkPipelineLayout layout) {
+ VkPipelineVertexInputStateCreateInfo vertexInputInfo;
+ VkVertexInputBindingDescription bindingDesc;
+ // TODO: allocate this based on VkPhysicalDeviceLimits::maxVertexInputAttributes
+ static const int kMaxVertexAttributes = 16;
+ static VkVertexInputAttributeDescription attributeDesc[kMaxVertexAttributes];
+ setup_vertex_input_state(primProc, &vertexInputInfo, &bindingDesc, 1,
+ attributeDesc, kMaxVertexAttributes);
+
+ VkPipelineInputAssemblyStateCreateInfo inputAssemblyInfo;
+ setup_input_assembly_state(primitiveType, &inputAssemblyInfo);
+
+ VkPipelineDepthStencilStateCreateInfo depthStencilInfo;
+ setup_depth_stencil_state(gpu, pipeline.getStencil(), &depthStencilInfo);
+
+ GrRenderTarget* rt = pipeline.getRenderTarget();
+ GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
+ VkPipelineViewportStateCreateInfo viewportInfo;
+ VkViewport viewport;
+ VkRect2D scissor;
+ setup_viewport_scissor_state(gpu, pipeline, vkRT, &viewportInfo, &viewport, &scissor);
+
+ VkPipelineMultisampleStateCreateInfo multisampleInfo;
+ setup_multisample_state(pipeline, &multisampleInfo);
+
+ // We will only have one color attachment per pipeline.
+ VkPipelineColorBlendAttachmentState attachmentStates[1];
+ VkPipelineColorBlendStateCreateInfo colorBlendInfo;
+ setup_color_blend_state(gpu, pipeline, &colorBlendInfo, attachmentStates);
+
+ VkPipelineRasterizationStateCreateInfo rasterInfo;
+ setup_raster_state(gpu, pipeline, &rasterInfo);
+
+ VkPipelineDynamicStateCreateInfo dynamicInfo;
+ setup_dynamic_state(gpu, pipeline, &dynamicInfo);
+
+ VkGraphicsPipelineCreateInfo pipelineCreateInfo;
+ memset(&pipelineCreateInfo, 0, sizeof(VkGraphicsPipelineCreateInfo));
+ pipelineCreateInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
+ pipelineCreateInfo.pNext = nullptr;
+ pipelineCreateInfo.flags = 0;
+ pipelineCreateInfo.stageCount = shaderStageCount;
+ pipelineCreateInfo.pStages = shaderStageInfo;
+ pipelineCreateInfo.pVertexInputState = &vertexInputInfo;
+ pipelineCreateInfo.pInputAssemblyState = &inputAssemblyInfo;
+ pipelineCreateInfo.pTessellationState = nullptr;
+ pipelineCreateInfo.pViewportState = &viewportInfo;
+ pipelineCreateInfo.pRasterizationState = &rasterInfo;
+ pipelineCreateInfo.pMultisampleState = &multisampleInfo;
+ pipelineCreateInfo.pDepthStencilState = &depthStencilInfo;
+ pipelineCreateInfo.pColorBlendState = &colorBlendInfo;
+ pipelineCreateInfo.pDynamicState = &dynamicInfo;
+ pipelineCreateInfo.layout = layout;
+ pipelineCreateInfo.renderPass = renderPass.vkRenderPass();
+ pipelineCreateInfo.subpass = 0;
+ pipelineCreateInfo.basePipelineHandle = VK_NULL_HANDLE;
+ pipelineCreateInfo.basePipelineIndex = -1;
+
+ VkPipeline vkPipeline;
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateGraphicsPipelines(gpu->device(),
+ nullptr, 1,
+ &pipelineCreateInfo,
+ nullptr, &vkPipeline));
+ if (err) {
+ return nullptr;
+ }
+
+ return new GrVkPipeline(vkPipeline);
+}
+
+void GrVkPipeline::freeGPUData(const GrVkGpu* gpu) const {
+ GR_VK_CALL(gpu->vkInterface(), DestroyPipeline(gpu->device(), fPipeline, nullptr));
+}
+
+
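All of the Gr-to-Vk converters in this file share one idiom: a lookup table indexed by the Gr enum value, guarded by GR_STATIC_ASSERTs that pin each ordinal so a reordered enum breaks the build instead of silently misindexing the table. Reduced to a two-entry illustration (the enum and function here are invented for the example, not Skia's):

    enum Example { kFirst_Example, kSecond_Example, kExampleCount };

    static VkBool32 example_to_vk(Example e) {
        static const VkBool32 gTable[] = { VK_FALSE, VK_TRUE };
        GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kExampleCount);  // table covers the enum
        GR_STATIC_ASSERT(0 == kFirst_Example);   // pin the ordinals: reordering
        GR_STATIC_ASSERT(1 == kSecond_Example);  // the enum now fails to compile
        SkASSERT((unsigned)e < kExampleCount);
        return gTable[e];
    }
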
diff --git a/src/gpu/vk/GrVkPipeline.h b/src/gpu/vk/GrVkPipeline.h
new file mode 100644
index 0000000000..6bab127916
--- /dev/null
+++ b/src/gpu/vk/GrVkPipeline.h
@@ -0,0 +1,49 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkPipeline_DEFINED
+#define GrVkPipeline_DEFINED
+
+#include "GrTypes.h"
+
+#include "GrVkResource.h"
+
+#include "vulkan/vulkan.h"
+
+class GrNonInstancedVertices;
+class GrPipeline;
+class GrPrimitiveProcessor;
+class GrVkGpu;
+class GrVkRenderPass;
+
+class GrVkPipeline : public GrVkResource {
+public:
+ static GrVkPipeline* Create(GrVkGpu* gpu,
+ const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ VkPipelineShaderStageCreateInfo* shaderStageInfo,
+ int shaderStageCount,
+ GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass,
+ VkPipelineLayout layout);
+
+ VkPipeline pipeline() const { return fPipeline; }
+
+private:
+ GrVkPipeline(VkPipeline pipeline) : INHERITED(), fPipeline(pipeline) {}
+
+ GrVkPipeline(const GrVkPipeline&);
+ GrVkPipeline& operator=(const GrVkPipeline&);
+
+ void freeGPUData(const GrVkGpu* gpu) const override;
+
+ VkPipeline fPipeline;
+
+ typedef GrVkResource INHERITED;
+};
+
+#endif
diff --git a/src/gpu/vk/GrVkProgram.cpp b/src/gpu/vk/GrVkProgram.cpp
new file mode 100644
index 0000000000..7cccc7c46d
--- /dev/null
+++ b/src/gpu/vk/GrVkProgram.cpp
@@ -0,0 +1,367 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkProgram.h"
+
+#include "GrPipeline.h"
+#include "GrVkCommandBuffer.h"
+#include "GrVkDescriptorPool.h"
+#include "GrVkGpu.h"
+#include "GrVkImageView.h"
+#include "GrVkMemory.h"
+#include "GrVkPipeline.h"
+#include "GrVkSampler.h"
+#include "GrVkTexture.h"
+#include "GrVkUniformBuffer.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLGeometryProcessor.h"
+#include "glsl/GrGLSLXferProcessor.h"
+
+GrVkProgram::GrVkProgram(GrVkGpu* gpu,
+ GrVkPipeline* pipeline,
+ VkPipelineLayout layout,
+ VkDescriptorSetLayout dsLayout[2],
+ GrVkDescriptorPool* descriptorPool,
+ VkDescriptorSet descriptorSets[2],
+ const BuiltinUniformHandles& builtinUniformHandles,
+ const UniformInfoArray& uniforms,
+ uint32_t vertexUniformSize,
+ uint32_t fragmentUniformSize,
+ uint32_t numSamplers,
+ GrGLSLPrimitiveProcessor* geometryProcessor,
+ GrGLSLXferProcessor* xferProcessor,
+ const GrGLSLFragProcs& fragmentProcessors)
+ : fDescriptorPool(descriptorPool)
+ , fPipeline(pipeline)
+ , fPipelineLayout(layout)
+ , fBuiltinUniformHandles(builtinUniformHandles)
+ , fGeometryProcessor(geometryProcessor)
+ , fXferProcessor(xferProcessor)
+ , fFragmentProcessors(fragmentProcessors)
+ , fProgramDataManager(uniforms, vertexUniformSize, fragmentUniformSize) {
+ fSamplers.setReserve(numSamplers);
+ fTextureViews.setReserve(numSamplers);
+ fTextures.setReserve(numSamplers);
+
+ memcpy(fDSLayout, dsLayout, 2 * sizeof(VkDescriptorSetLayout));
+ memcpy(fDescriptorSets, descriptorSets, 2 * sizeof(VkDescriptorSet));
+
+ fVertexUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, vertexUniformSize, true));
+ fFragmentUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, fragmentUniformSize, true));
+
+#ifdef SK_DEBUG
+ fNumSamplers = numSamplers;
+#endif
+}
+
+GrVkProgram::~GrVkProgram() {
+ // Must have freed all GPU resources before this is destroyed
+ SkASSERT(!fPipeline);
+ SkASSERT(!fDescriptorPool);
+ SkASSERT(!fPipelineLayout);
+ SkASSERT(!fDSLayout[0]);
+ SkASSERT(!fDSLayout[1]);
+ SkASSERT(!fSamplers.count());
+ SkASSERT(!fTextureViews.count());
+ SkASSERT(!fTextures.count());
+}
+
+void GrVkProgram::freeTempResources(const GrVkGpu* gpu) {
+ for (int i = 0; i < fSamplers.count(); ++i) {
+ fSamplers[i]->unref(gpu);
+ }
+ fSamplers.rewind();
+
+ for (int i = 0; i < fTextureViews.count(); ++i) {
+ fTextureViews[i]->unref(gpu);
+ }
+ fTextureViews.rewind();
+
+ for (int i = 0; i < fTextures.count(); ++i) {
+ fTextures[i]->unref(gpu);
+ }
+ fTextures.rewind();
+}
+
+void GrVkProgram::freeGPUResources(const GrVkGpu* gpu) {
+ if (fPipeline) {
+ fPipeline->unref(gpu);
+ fPipeline = nullptr;
+ }
+ if (fDescriptorPool) {
+ fDescriptorPool->unref(gpu);
+ fDescriptorPool = nullptr;
+ }
+ if (fPipelineLayout) {
+ GR_VK_CALL(gpu->vkInterface(), DestroyPipelineLayout(gpu->device(),
+ fPipelineLayout,
+ nullptr));
+ fPipelineLayout = nullptr;
+ }
+
+ if (fDSLayout[0]) {
+ GR_VK_CALL(gpu->vkInterface(), DestroyDescriptorSetLayout(gpu->device(), fDSLayout[0],
+ nullptr));
+ fDSLayout[0] = nullptr;
+ }
+ if (fDSLayout[1]) {
+ GR_VK_CALL(gpu->vkInterface(), DestroyDescriptorSetLayout(gpu->device(), fDSLayout[1],
+ nullptr));
+ fDSLayout[1] = nullptr;
+ }
+
+ if (fVertexUniformBuffer) {
+ fVertexUniformBuffer->release(gpu);
+ }
+
+ if (fFragmentUniformBuffer) {
+ fFragmentUniformBuffer->release(gpu);
+ }
+ this->freeTempResources(gpu);
+}
+
+void GrVkProgram::abandonGPUResources() {
+ fPipeline->unrefAndAbandon();
+ fPipeline = nullptr;
+ fDescriptorPool->unrefAndAbandon();
+ fDescriptorPool = nullptr;
+ fPipelineLayout = nullptr;
+ fDSLayout[0] = nullptr;
+ fDSLayout[1] = nullptr;
+
+ fVertexUniformBuffer->abandon();
+ fFragmentUniformBuffer->abandon();
+
+ for (int i = 0; i < fSamplers.count(); ++i) {
+ fSamplers[i]->unrefAndAbandon();
+ }
+ fSamplers.rewind();
+
+ for (int i = 0; i < fTextureViews.count(); ++i) {
+ fTextureViews[i]->unrefAndAbandon();
+ }
+ fTextureViews.rewind();
+
+ for (int i = 0; i < fTextures.count(); ++i) {
+ fTextures[i]->unrefAndAbandon();
+ }
+ fTextures.rewind();
+}
+
+static void append_texture_bindings(const GrProcessor& processor,
+ SkTArray<const GrTextureAccess*>* textureBindings) {
+ if (int numTextures = processor.numTextures()) {
+ const GrTextureAccess** bindings = textureBindings->push_back_n(numTextures);
+ int i = 0;
+ do {
+ bindings[i] = &processor.textureAccess(i);
+ } while (++i < numTextures);
+ }
+}
+
+void GrVkProgram::setData(const GrVkGpu* gpu,
+ const GrPrimitiveProcessor& primProc,
+ const GrPipeline& pipeline) {
+ // This is here to protect against someone calling setData multiple times in a row without
+ // freeing the temp resources between calls.
+ this->freeTempResources(gpu);
+
+ this->setRenderTargetState(pipeline);
+
+ SkSTArray<8, const GrTextureAccess*> textureBindings;
+
+ fGeometryProcessor->setData(fProgramDataManager, primProc);
+ append_texture_bindings(primProc, &textureBindings);
+
+ for (int i = 0; i < fFragmentProcessors.count(); ++i) {
+ const GrFragmentProcessor& processor = pipeline.getFragmentProcessor(i);
+ fFragmentProcessors[i]->setData(fProgramDataManager, processor);
+ fGeometryProcessor->setTransformData(primProc, fProgramDataManager, i,
+ processor.coordTransforms());
+ append_texture_bindings(processor, &textureBindings);
+ }
+
+ fXferProcessor->setData(fProgramDataManager, pipeline.getXferProcessor());
+ append_texture_bindings(pipeline.getXferProcessor(), &textureBindings);
+
+ this->writeUniformBuffers(gpu);
+
+ this->writeSamplers(gpu, textureBindings);
+}
+
+void GrVkProgram::writeUniformBuffers(const GrVkGpu* gpu) {
+ fProgramDataManager.uploadUniformBuffers(gpu, fVertexUniformBuffer, fFragmentUniformBuffer);
+
+ VkWriteDescriptorSet descriptorWrites[2];
+ memset(descriptorWrites, 0, 2 * sizeof(VkWriteDescriptorSet));
+
+ uint32_t firstUniformWrite = 0;
+ uint32_t uniformBindingUpdateCount = 0;
+
+ // Vertex Uniform Buffer
+ if (fVertexUniformBuffer.get()) {
+ ++uniformBindingUpdateCount;
+ VkDescriptorBufferInfo vertBufferInfo;
+ memset(&vertBufferInfo, 0, sizeof(VkDescriptorBufferInfo));
+ vertBufferInfo.buffer = fVertexUniformBuffer->buffer();
+ vertBufferInfo.offset = 0;
+ vertBufferInfo.range = fVertexUniformBuffer->size();
+
+ descriptorWrites[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ descriptorWrites[0].pNext = nullptr;
+ descriptorWrites[0].dstSet = fDescriptorSets[1];
+ descriptorWrites[0].dstBinding = GrVkUniformHandler::kVertexBinding;
+ descriptorWrites[0].dstArrayElement = 0;
+ descriptorWrites[0].descriptorCount = 1;
+ descriptorWrites[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ descriptorWrites[0].pImageInfo = nullptr;
+ descriptorWrites[0].pBufferInfo = &vertBufferInfo;
+ descriptorWrites[0].pTexelBufferView = nullptr;
+ }
+
+ // Fragment Uniform Buffer
+ if (fFragmentUniformBuffer.get()) {
+ if (0 == uniformBindingUpdateCount) {
+ firstUniformWrite = 1;
+ }
+ ++uniformBindingUpdateCount;
+ VkDescriptorBufferInfo fragBufferInfo;
+ memset(&fragBufferInfo, 0, sizeof(VkDescriptorBufferInfo));
+ fragBufferInfo.buffer = fFragmentUniformBuffer->buffer();
+ fragBufferInfo.offset = 0;
+ fragBufferInfo.range = fFragmentUniformBuffer->size();
+
+ descriptorWrites[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ descriptorWrites[1].pNext = nullptr;
+ descriptorWrites[1].dstSet = fDescriptorSets[1];
+ descriptorWrites[1].dstBinding = GrVkUniformHandler::kFragBinding;
+ descriptorWrites[1].dstArrayElement = 0;
+ descriptorWrites[1].descriptorCount = 1;
+ descriptorWrites[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ descriptorWrites[1].pImageInfo = nullptr;
+ descriptorWrites[1].pBufferInfo = &fragBufferInfo;
+ descriptorWrites[1].pTexelBufferView = nullptr;
+ }
+
+ if (uniformBindingUpdateCount) {
+ GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(),
+ uniformBindingUpdateCount,
+ &descriptorWrites[firstUniformWrite],
+ 0, nullptr));
+ }
+}
+
+void GrVkProgram::writeSamplers(const GrVkGpu* gpu,
+ const SkTArray<const GrTextureAccess*>& textureBindings) {
+ SkASSERT(fNumSamplers == textureBindings.count());
+
+ for (int i = 0; i < textureBindings.count(); ++i) {
+ fSamplers.push(GrVkSampler::Create(gpu, *textureBindings[i]));
+
+ GrVkTexture* texture = static_cast<GrVkTexture*>(textureBindings[i]->getTexture());
+
+ const GrVkImage::Resource* textureResource = texture->resource();
+ textureResource->ref();
+ fTextures.push(textureResource);
+
+ const GrVkImageView* textureView = texture->textureView();
+ textureView->ref();
+ fTextureViews.push(textureView);
+
+ // Change texture layout so it can be read in shader
+ VkImageLayout layout = texture->currentLayout();
+ VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
+ VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
+ VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
+ VkAccessFlags dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
+ texture->setImageLayout(gpu,
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+ srcAccessMask,
+ dstAccessMask,
+ srcStageMask,
+ dstStageMask,
+ false);
+
+ VkDescriptorImageInfo imageInfo;
+ memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
+ imageInfo.sampler = fSamplers[i]->sampler();
+ imageInfo.imageView = texture->textureView()->imageView();
+ imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+
+ VkWriteDescriptorSet writeInfo;
+ memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
+ writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ writeInfo.pNext = nullptr;
+ writeInfo.dstSet = fDescriptorSets[GrVkUniformHandler::kSamplerDescSet];
+ writeInfo.dstBinding = i;
+ writeInfo.dstArrayElement = 0;
+ writeInfo.descriptorCount = 1;
+ writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ writeInfo.pImageInfo = &imageInfo;
+ writeInfo.pBufferInfo = nullptr;
+ writeInfo.pTexelBufferView = nullptr;
+
+ GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(),
+ 1,
+ &writeInfo,
+ 0,
+ nullptr));
+ }
+}
+
+void GrVkProgram::setRenderTargetState(const GrPipeline& pipeline) {
+ // Load the RT height uniform if it is needed to y-flip gl_FragCoord.
+ if (fBuiltinUniformHandles.fRTHeightUni.isValid() &&
+ fRenderTargetState.fRenderTargetSize.fHeight != pipeline.getRenderTarget()->height()) {
+ fProgramDataManager.set1f(fBuiltinUniformHandles.fRTHeightUni,
+ SkIntToScalar(pipeline.getRenderTarget()->height()));
+ }
+
+ // set RT adjustment
+ const GrRenderTarget* rt = pipeline.getRenderTarget();
+ SkISize size;
+ size.set(rt->width(), rt->height());
+ SkASSERT(fBuiltinUniformHandles.fRTAdjustmentUni.isValid());
+ if (fRenderTargetState.fRenderTargetOrigin != rt->origin() ||
+ fRenderTargetState.fRenderTargetSize != size) {
+ fRenderTargetState.fRenderTargetSize = size;
+ fRenderTargetState.fRenderTargetOrigin = rt->origin();
+
+ float rtAdjustmentVec[4];
+ fRenderTargetState.getRTAdjustmentVec(rtAdjustmentVec);
+ fProgramDataManager.set4fv(fBuiltinUniformHandles.fRTAdjustmentUni, 1, rtAdjustmentVec);
+ }
+}
+
+void GrVkProgram::bind(const GrVkGpu* gpu, GrVkCommandBuffer* commandBuffer) {
+ commandBuffer->bindPipeline(gpu, fPipeline);
+ commandBuffer->bindDescriptorSets(gpu, this, fPipelineLayout, 0, 2, fDescriptorSets, 0,
+ nullptr);
+}
+
+void GrVkProgram::addUniformResources(GrVkCommandBuffer& commandBuffer) {
+ commandBuffer.addResource(fDescriptorPool);
+ if (fVertexUniformBuffer.get()) {
+ commandBuffer.addResource(fVertexUniformBuffer->resource());
+ }
+ if (fFragmentUniformBuffer.get()) {
+ commandBuffer.addResource(fFragmentUniformBuffer->resource());
+ }
+ for (int i = 0; i < fSamplers.count(); ++i) {
+ commandBuffer.addResource(fSamplers[i]);
+ }
+
+ for (int i = 0; i < fTextureViews.count(); ++i) {
+ commandBuffer.addResource(fTextureViews[i]);
+ }
+
+ for (int i = 0; i < fTextures.count(); ++i) {
+ commandBuffer.addResource(fTextures[i]);
+ }
+}
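
For orientation, bind() above reduces to two Vulkan commands; the indirection through GrVkCommandBuffer exists so the pipeline and descriptor pool stay referenced until the GPU has finished with them. A sketch of the underlying calls, assuming 'cmdBuf' stands in for the raw VkCommandBuffer (set 0 carries the combined image samplers, set 1 the vertex and fragment uniform buffers):

    GR_VK_CALL(gpu->vkInterface(),
               CmdBindPipeline(cmdBuf, VK_PIPELINE_BIND_POINT_GRAPHICS,
                               fPipeline->pipeline()));
    GR_VK_CALL(gpu->vkInterface(),
               CmdBindDescriptorSets(cmdBuf, VK_PIPELINE_BIND_POINT_GRAPHICS,
                                     fPipelineLayout,
                                     0,                   // firstSet
                                     2, fDescriptorSets,  // samplers, then uniforms
                                     0, nullptr));        // no dynamic offsets
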
diff --git a/src/gpu/vk/GrVkProgram.h b/src/gpu/vk/GrVkProgram.h
new file mode 100644
index 0000000000..e92a52802f
--- /dev/null
+++ b/src/gpu/vk/GrVkProgram.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrVkProgram_DEFINED
+#define GrVkProgram_DEFINED
+
+#include "GrVkImage.h"
+#include "GrVkProgramDesc.h"
+#include "GrVkProgramDataManager.h"
+#include "glsl/GrGLSLProgramBuilder.h"
+
+#include "vulkan/vulkan.h"
+
+class GrPipeline;
+class GrVkCommandBuffer;
+class GrVkDescriptorPool;
+class GrVkGpu;
+class GrVkImageView;
+class GrVkPipeline;
+class GrVkSampler;
+class GrVkUniformBuffer;
+
+class GrVkProgram : public SkRefCnt {
+public:
+ typedef GrGLSLProgramBuilder::BuiltinUniformHandles BuiltinUniformHandles;
+
+ ~GrVkProgram();
+
+ GrVkPipeline* vkPipeline() const { return fPipeline; }
+
+ void setData(const GrVkGpu*, const GrPrimitiveProcessor&, const GrPipeline&);
+
+ void bind(const GrVkGpu* gpu, GrVkCommandBuffer* commandBuffer);
+
+ void addUniformResources(GrVkCommandBuffer&);
+
+ void freeGPUResources(const GrVkGpu* gpu);
+
+ // This releases resources that only a given instance of a GrVkProgram needs to hold onto
+ // and that don't need to survive across new uses of the program.
+ void freeTempResources(const GrVkGpu* gpu);
+
+ void abandonGPUResources();
+
+private:
+ typedef GrVkProgramDataManager::UniformInfoArray UniformInfoArray;
+ typedef GrGLSLProgramDataManager::UniformHandle UniformHandle;
+
+ GrVkProgram(GrVkGpu* gpu,
+ GrVkPipeline* pipeline,
+ VkPipelineLayout layout,
+ VkDescriptorSetLayout dsLayout[2],
+ GrVkDescriptorPool* descriptorPool,
+ VkDescriptorSet descriptorSets[2],
+ const BuiltinUniformHandles& builtinUniformHandles,
+ const UniformInfoArray& uniforms,
+ uint32_t vertexUniformSize,
+ uint32_t fragmentUniformSize,
+ uint32_t numSamplers,
+ GrGLSLPrimitiveProcessor* geometryProcessor,
+ GrGLSLXferProcessor* xferProcessor,
+ const GrGLSLFragProcs& fragmentProcessors);
+
+ void writeUniformBuffers(const GrVkGpu* gpu);
+
+ void writeSamplers(const GrVkGpu* gpu, const SkTArray<const GrTextureAccess*>& textureBindings);
+
+ /**
+ * We use the RT's size and origin to adjust from Skia device space to OpenGL normalized device
+ * space and to make device space positions have the correct origin for processors that require
+ * them.
+ */
+ struct RenderTargetState {
+ SkISize fRenderTargetSize;
+ GrSurfaceOrigin fRenderTargetOrigin;
+
+ RenderTargetState() { this->invalidate(); }
+ void invalidate() {
+ fRenderTargetSize.fWidth = -1;
+ fRenderTargetSize.fHeight = -1;
+ fRenderTargetOrigin = (GrSurfaceOrigin)-1;
+ }
+
+ /**
+ * Gets a vec4 that adjusts the position from Skia device coords to GL's normalized device
+ * coords. Assuming the transformed position, pos, is a homogeneous vec3, the vec, v, is
+ * applied as such:
+ * pos.x = dot(v.xy, pos.xz)
+ * pos.y = dot(v.zw, pos.yz)
+ */
+ void getRTAdjustmentVec(float* destVec) {
+ destVec[0] = 2.f / fRenderTargetSize.fWidth;
+ destVec[1] = -1.f;
+ if (kBottomLeft_GrSurfaceOrigin == fRenderTargetOrigin) {
+ destVec[2] = -2.f / fRenderTargetSize.fHeight;
+ destVec[3] = 1.f;
+ } else {
+ destVec[2] = 2.f / fRenderTargetSize.fHeight;
+ destVec[3] = -1.f;
+ }
+ }
+ };
+
+ // Helper for setData() that sets the view matrix and loads the render target height uniform
+ void setRenderTargetState(const GrPipeline&);
+
+// GrVkGpu* fGpu;
+
+ // GrVkResources
+ GrVkDescriptorPool* fDescriptorPool;
+ GrVkPipeline* fPipeline;
+
+ // Used for binding DescriptorSets to the command buffer but does not need to survive during
+ // command buffer execution. Thus this does not need to be a GrVkResource.
+ VkPipelineLayout fPipelineLayout;
+
+ // The first set (index 0) will be used for samplers and the second set (index 1) will be
+ // used for uniform buffers.
+ // The DSLayouts are only needed for allocating the descriptor sets and must survive until
+ // after the descriptor sets have been updated. Thus the lifetime of the layouts will just be
+ // the life of the GrVkProgram.
+ VkDescriptorSetLayout fDSLayout[2];
+ // The DescriptorSets need to survive until the gpu has finished all draws that use them.
+ // However, they will only be freed by the descriptor pool. Thus by simply keeping the
+ // descriptor pool alive through the draw, the descriptor sets will also stay alive. Thus we
+ // do not need GrVkResource versions of VkDescriptorSet.
+ VkDescriptorSet fDescriptorSets[2];
+
+ SkAutoTDelete<GrVkUniformBuffer> fVertexUniformBuffer;
+ SkAutoTDelete<GrVkUniformBuffer> fFragmentUniformBuffer;
+
+ // GrVkResources used for sampling textures
+ SkTDArray<GrVkSampler*> fSamplers;
+ SkTDArray<const GrVkImageView*> fTextureViews;
+ SkTDArray<const GrVkImage::Resource*> fTextures;
+
+ // Tracks the current render target uniforms stored in the vertex buffer.
+ RenderTargetState fRenderTargetState;
+ BuiltinUniformHandles fBuiltinUniformHandles;
+
+ // Processors in the program
+ SkAutoTDelete<GrGLSLPrimitiveProcessor> fGeometryProcessor;
+ SkAutoTDelete<GrGLSLXferProcessor> fXferProcessor;
+ GrGLSLFragProcs fFragmentProcessors;
+
+ GrVkProgramDataManager fProgramDataManager;
+
+#ifdef SK_DEBUG
+ int fNumSamplers;
+#endif
+
+ friend class GrVkProgramBuilder;
+};
+
+#endif
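
A worked instance of the formulas in getRTAdjustmentVec()'s comment, for a hypothetical 100x50 render target with bottom-left origin:

    v = (2/100, -1, -2/50, 1) = (0.02, -1, -0.04, 1)
    pos.x = dot(v.xy, pos.xz) = 0.02*x - 1     // maps x in [0, 100] to [-1, 1]
    pos.y = dot(v.zw, pos.yz) = -0.04*y + 1    // maps y in [0, 50] to [1, -1]

so x is scaled into normalized device coordinates and y is both scaled and flipped, which is exactly what the bottom-left-origin branch encodes.
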
diff --git a/src/gpu/vk/GrVkProgramBuilder.cpp b/src/gpu/vk/GrVkProgramBuilder.cpp
new file mode 100644
index 0000000000..13daf714d9
--- /dev/null
+++ b/src/gpu/vk/GrVkProgramBuilder.cpp
@@ -0,0 +1,323 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "vk/GrVkProgramBuilder.h"
+
+#include "vk/GrVkGpu.h"
+#include "vk/GrVkRenderPass.h"
+#include "vk/GrVkProgram.h"
+
+GrVkProgram* GrVkProgramBuilder::CreateProgram(GrVkGpu* gpu,
+ const DrawArgs& args,
+ GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass) {
+ // create a builder. This will be handed off to effects so they can use it to add
+ // uniforms, varyings, textures, etc
+ GrVkProgramBuilder builder(gpu, args);
+
+ GrGLSLExpr4 inputColor;
+ GrGLSLExpr4 inputCoverage;
+
+ if (!builder.emitAndInstallProcs(&inputColor,
+ &inputCoverage,
+ gpu->vkCaps().maxSampledTextures())) {
+ builder.cleanupFragmentProcessors();
+ return nullptr;
+ }
+
+ return builder.finalize(args, primitiveType, renderPass);
+}
+
+GrVkProgramBuilder::GrVkProgramBuilder(GrVkGpu* gpu, const DrawArgs& args)
+ : INHERITED(args)
+ , fGpu(gpu)
+ , fVaryingHandler(this)
+ , fUniformHandler(this) {
+}
+
+const GrCaps* GrVkProgramBuilder::caps() const {
+ return fGpu->caps();
+}
+const GrGLSLCaps* GrVkProgramBuilder::glslCaps() const {
+ return fGpu->vkCaps().glslCaps();
+}
+
+void GrVkProgramBuilder::finalizeFragmentOutputColor(GrGLSLShaderVar& outputColor) {
+ outputColor.setLayoutQualifier("location = 0");
+}
+
+void GrVkProgramBuilder::emitSamplers(const GrProcessor& processor,
+ GrGLSLTextureSampler::TextureSamplerArray* outSamplers) {
+ int numTextures = processor.numTextures();
+ UniformHandle* localSamplerUniforms = fSamplerUniforms.push_back_n(numTextures);
+ SkString name;
+ for (int t = 0; t < numTextures; ++t) {
+ name.printf("%d", t);
+ localSamplerUniforms[t] =
+ fUniformHandler.addUniform(kFragment_GrShaderFlag,
+ kSampler2D_GrSLType, kDefault_GrSLPrecision,
+ name.c_str());
+ outSamplers->emplace_back(localSamplerUniforms[t], processor.textureAccess(t));
+ }
+}
+
+static VkShaderStageFlags visibility_to_vk_stage_flags(uint32_t visibility) {
+ VkShaderStageFlags flags = 0;
+
+ if (visibility & kVertex_GrShaderFlag) {
+ flags |= VK_SHADER_STAGE_VERTEX_BIT;
+ }
+ if (visibility & kGeometry_GrShaderFlag) {
+ flags |= VK_SHADER_STAGE_GEOMETRY_BIT;
+ }
+ if (visibility & kFragment_GrShaderFlag) {
+ flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
+ }
+ return flags;
+}
+
+static shaderc_shader_kind vk_shader_stage_to_shaderc_kind(VkShaderStageFlagBits stage) {
+ if (VK_SHADER_STAGE_VERTEX_BIT == stage) {
+ return shaderc_glsl_vertex_shader;
+ }
+ SkASSERT(VK_SHADER_STAGE_FRAGMENT_BIT == stage);
+ return shaderc_glsl_fragment_shader;
+}
+
+bool GrVkProgramBuilder::CreateVkShaderModule(const GrVkGpu* gpu,
+ VkShaderStageFlagBits stage,
+ const GrGLSLShaderBuilder& builder,
+ VkShaderModule* shaderModule,
+ VkPipelineShaderStageCreateInfo* stageInfo) {
+ SkString shaderString;
+ for (int i = 0; i < builder.fCompilerStrings.count(); ++i) {
+ if (builder.fCompilerStrings[i]) {
+ shaderString.append(builder.fCompilerStrings[i]);
+ shaderString.append("\n");
+ }
+ }
+
+ shaderc_compiler_t compiler = gpu->shadercCompiler();
+
+ shaderc_compile_options_t options = shaderc_compile_options_initialize();
+ shaderc_compile_options_set_forced_version_profile(options, 140, shaderc_profile_none);
+
+ shaderc_shader_kind shadercStage = vk_shader_stage_to_shaderc_kind(stage);
+ shaderc_spv_module_t module = shaderc_compile_into_spv(compiler,
+ shaderString.c_str(),
+ strlen(shaderString.c_str()),
+ shadercStage,
+ "shader",
+ "main",
+ options);
+ shaderc_compile_options_release(options);
+#ifdef SK_DEBUG
+ if (shaderc_module_get_num_errors(module)) {
+ SkDebugf("%s\n", shaderString.c_str());
+ SkDebugf("%s\n", shaderc_module_get_error_message(module));
+ return false;
+ }
+#endif
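+ // Note that compile errors are only checked in the debug block above; release builds
+ // proceed with whatever module shaderc produced.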
+
+ VkShaderModuleCreateInfo moduleCreateInfo;
+ memset(&moduleCreateInfo, 0, sizeof(VkShaderModuleCreateInfo));
+ moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+ moduleCreateInfo.pNext = nullptr;
+ moduleCreateInfo.flags = 0;
+ moduleCreateInfo.codeSize = shaderc_module_get_length(module);
+ moduleCreateInfo.pCode = (const uint32_t*)shaderc_module_get_bytes(module);
+
+ VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateShaderModule(gpu->device(),
+ &moduleCreateInfo,
+ nullptr,
+ shaderModule));
+ shaderc_module_release(module);
+ if (err) {
+ return false;
+ }
+
+ memset(stageInfo, 0, sizeof(VkPipelineShaderStageCreateInfo));
+ stageInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+ stageInfo->pNext = nullptr;
+ stageInfo->flags = 0;
+ stageInfo->stage = stage;
+ stageInfo->module = *shaderModule;
+ stageInfo->pName = "main";
+ stageInfo->pSpecializationInfo = nullptr;
+
+ return true;
+}
+
+GrVkProgram* GrVkProgramBuilder::finalize(const DrawArgs& args,
+ GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass) {
+ VkDescriptorSetLayout dsLayout[2];
+ VkPipelineLayout pipelineLayout;
+ VkShaderModule vertShaderModule;
+ VkShaderModule fragShaderModule;
+
+ uint32_t numSamplers = fSamplerUniforms.count();
+
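+ // Descriptor set 0 (kSamplerDescSet) holds the combined image/samplers and set 1
+ // (kUniformBufferDescSet) holds the vertex and fragment uniform buffers.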
+ SkAutoTDeleteArray<VkDescriptorSetLayoutBinding> dsSamplerBindings(
+ new VkDescriptorSetLayoutBinding[numSamplers]);
+ for (uint32_t i = 0; i < numSamplers; ++i) {
+ UniformHandle uniHandle = fSamplerUniforms[i];
+ GrVkUniformHandler::UniformInfo uniformInfo = fUniformHandler.getUniformInfo(uniHandle);
+ SkASSERT(kSampler2D_GrSLType == uniformInfo.fVariable.getType());
+ SkASSERT(0 == uniformInfo.fSetNumber);
+ SkASSERT(uniformInfo.fBinding == i);
+ dsSamplerBindings[i].binding = uniformInfo.fBinding;
+ dsSamplerBindings[i].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ dsSamplerBindings[i].descriptorCount = 1;
+ dsSamplerBindings[i].stageFlags = visibility_to_vk_stage_flags(uniformInfo.fVisibility);
+ dsSamplerBindings[i].pImmutableSamplers = nullptr;
+ }
+
+ VkDescriptorSetLayoutCreateInfo dsSamplerLayoutCreateInfo;
+ memset(&dsSamplerLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
+ dsSamplerLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+ dsSamplerLayoutCreateInfo.pNext = nullptr;
+ dsSamplerLayoutCreateInfo.flags = 0;
+ dsSamplerLayoutCreateInfo.bindingCount = fSamplerUniforms.count();
+ // Setting pBindings to nullptr fixes an error in the param checker validation layer. Even
+ // when bindingCount is 0 (which is valid), it still tries to validate pBindings unless it
+ // is null.
+ dsSamplerLayoutCreateInfo.pBindings = fSamplerUniforms.count() ? dsSamplerBindings.get() :
+ nullptr;
+
+ GR_VK_CALL_ERRCHECK(fGpu->vkInterface(),
+ CreateDescriptorSetLayout(fGpu->device(),
+ &dsSamplerLayoutCreateInfo,
+ nullptr,
+ &dsLayout[GrVkUniformHandler::kSamplerDescSet]));
+
+ // Create Uniform Buffer Descriptor
+ // We always attach uniform buffers to descriptor set 1. The vertex uniform buffer will have
+ // binding 0 and the fragment binding 1.
+ VkDescriptorSetLayoutBinding dsUniBindings[2];
+ memset(&dsUniBindings, 0, 2 * sizeof(VkDescriptorSetLayoutBinding));
+ dsUniBindings[0].binding = GrVkUniformHandler::kVertexBinding;
+ dsUniBindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ dsUniBindings[0].descriptorCount = fUniformHandler.hasVertexUniforms() ? 1 : 0;
+ dsUniBindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
+ dsUniBindings[0].pImmutableSamplers = nullptr;
+ dsUniBindings[1].binding = GrVkUniformHandler::kFragBinding;
+ dsUniBindings[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ dsUniBindings[1].descriptorCount = fUniformHandler.hasFragmentUniforms() ? 1 : 0;
+ dsUniBindings[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
+ dsUniBindings[1].pImmutableSamplers = nullptr;
+
+ VkDescriptorSetLayoutCreateInfo dsUniformLayoutCreateInfo;
+ memset(&dsUniformLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
+ dsUniformLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+ dsUniformLayoutCreateInfo.pNext = nullptr;
+ dsUniformLayoutCreateInfo.flags = 0;
+ dsUniformLayoutCreateInfo.bindingCount = 2;
+ dsUniformLayoutCreateInfo.pBindings = dsUniBindings;
+
+ GR_VK_CALL_ERRCHECK(fGpu->vkInterface(), CreateDescriptorSetLayout(
+ fGpu->device(),
+ &dsUniformLayoutCreateInfo,
+ nullptr,
+ &dsLayout[GrVkUniformHandler::kUniformBufferDescSet]));
+
+ // Create the VkPipelineLayout
+ VkPipelineLayoutCreateInfo layoutCreateInfo;
+ memset(&layoutCreateInfo, 0, sizeof(VkPipelineLayoutCreateInfo));
+ layoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+ layoutCreateInfo.pNext = nullptr;
+ layoutCreateInfo.flags = 0;
+ layoutCreateInfo.setLayoutCount = 2;
+ layoutCreateInfo.pSetLayouts = dsLayout;
+ layoutCreateInfo.pushConstantRangeCount = 0;
+ layoutCreateInfo.pPushConstantRanges = nullptr;
+
+ GR_VK_CALL_ERRCHECK(fGpu->vkInterface(), CreatePipelineLayout(fGpu->device(),
+ &layoutCreateInfo,
+ nullptr,
+ &pipelineLayout));
+
+ // We need to enable the following extensions so that the compiler can correctly make spir-v
+ // from our glsl shaders.
+ fVS.extensions().appendf("#extension GL_ARB_separate_shader_objects : enable\n");
+ fFS.extensions().appendf("#extension GL_ARB_separate_shader_objects : enable\n");
+ fVS.extensions().appendf("#extension GL_ARB_shading_language_420pack : enable\n");
+ fFS.extensions().appendf("#extension GL_ARB_shading_language_420pack : enable\n");
+
+ this->finalizeShaders();
+
+ VkPipelineShaderStageCreateInfo shaderStageInfo[2];
+ SkAssertResult(CreateVkShaderModule(fGpu,
+ VK_SHADER_STAGE_VERTEX_BIT,
+ fVS,
+ &vertShaderModule,
+ &shaderStageInfo[0]));
+
+ SkAssertResult(CreateVkShaderModule(fGpu,
+ VK_SHADER_STAGE_FRAGMENT_BIT,
+ fFS,
+ &fragShaderModule,
+ &shaderStageInfo[1]));
+
+ GrVkResourceProvider& resourceProvider = fGpu->resourceProvider();
+ GrVkPipeline* pipeline = resourceProvider.createPipeline(*args.fPipeline,
+ *args.fPrimitiveProcessor,
+ shaderStageInfo,
+ 2,
+ primitiveType,
+ renderPass,
+ pipelineLayout);
+ GR_VK_CALL(fGpu->vkInterface(), DestroyShaderModule(fGpu->device(), vertShaderModule,
+ nullptr));
+ GR_VK_CALL(fGpu->vkInterface(), DestroyShaderModule(fGpu->device(), fragShaderModule,
+ nullptr));
+
+ if (!pipeline) {
+ GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineLayout(fGpu->device(), pipelineLayout,
+ nullptr));
+ GR_VK_CALL(fGpu->vkInterface(), DestroyDescriptorSetLayout(fGpu->device(), dsLayout[0],
+ nullptr));
+ GR_VK_CALL(fGpu->vkInterface(), DestroyDescriptorSetLayout(fGpu->device(), dsLayout[1],
+ nullptr));
+ return nullptr;
+ }
+
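+ // Reserve pool capacity for the two uniform buffers (vertex and fragment) plus one
+ // combined image/sampler per texture. Counts are stored as uint8_t, hence the assert below.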
+ GrVkDescriptorPool::DescriptorTypeCounts typeCounts;
+ typeCounts.setTypeCount(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 2);
+ SkASSERT(numSamplers < 256);
+ typeCounts.setTypeCount(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, (uint8_t)numSamplers);
+ GrVkDescriptorPool* descriptorPool =
+ fGpu->resourceProvider().findOrCreateCompatibleDescriptorPool(typeCounts);
+
+ VkDescriptorSetAllocateInfo dsAllocateInfo;
+ memset(&dsAllocateInfo, 0, sizeof(VkDescriptorSetAllocateInfo));
+ dsAllocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+ dsAllocateInfo.pNext = nullptr;
+ dsAllocateInfo.descriptorPool = descriptorPool->descPool();
+ dsAllocateInfo.descriptorSetCount = 2;
+ dsAllocateInfo.pSetLayouts = dsLayout;
+
+ VkDescriptorSet descriptorSets[2];
+ GR_VK_CALL_ERRCHECK(fGpu->vkInterface(), AllocateDescriptorSets(fGpu->device(),
+ &dsAllocateInfo,
+ descriptorSets));
+
+ return new GrVkProgram(fGpu,
+ pipeline,
+ pipelineLayout,
+ dsLayout,
+ descriptorPool,
+ descriptorSets,
+ fUniformHandles,
+ fUniformHandler.fUniforms,
+ fUniformHandler.fCurrentVertexUBOOffset,
+ fUniformHandler.fCurrentFragmentUBOOffset,
+ numSamplers,
+ fGeometryProcessor,
+ fXferProcessor,
+ fFragmentProcessors);
+}
diff --git a/src/gpu/vk/GrVkProgramBuilder.h b/src/gpu/vk/GrVkProgramBuilder.h
new file mode 100644
index 0000000000..1d7bac62b9
--- /dev/null
+++ b/src/gpu/vk/GrVkProgramBuilder.h
@@ -0,0 +1,74 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkProgramBuilder_DEFINED
+#define GrVkProgramBuilder_DEFINED
+
+#include "glsl/GrGLSLProgramBuilder.h"
+
+#include "GrPipeline.h"
+#include "vk/GrVkUniformHandler.h"
+#include "vk/GrVkVaryingHandler.h"
+
+#include "shaderc/shaderc.h"
+#include "vulkan/vulkan.h"
+
+class GrVkGpu;
+class GrVkRenderPass;
+class GrVkProgram;
+
+class GrVkProgramBuilder : public GrGLSLProgramBuilder {
+public:
+ /** Generates a shader program.
+ *
+ * The program implements what is specified in the stages given as input.
+ * After successful generation, the builder result objects are available
+ * to be used.
+ * @return the created program if generation was successful; nullptr otherwise.
+ */
+ static GrVkProgram* CreateProgram(GrVkGpu*,
+ const DrawArgs&,
+ GrPrimitiveType,
+ const GrVkRenderPass& renderPass);
+
+ const GrCaps* caps() const override;
+ const GrGLSLCaps* glslCaps() const override;
+
+ GrVkGpu* gpu() const { return fGpu; }
+
+ void finalizeFragmentOutputColor(GrGLSLShaderVar& outputColor) override;
+
+private:
+ GrVkProgramBuilder(GrVkGpu*, const DrawArgs&);
+
+ void emitSamplers(const GrProcessor&,
+ GrGLSLTextureSampler::TextureSamplerArray* outSamplers) override;
+
+ GrVkProgram* finalize(const DrawArgs& args,
+ GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass);
+
+ static bool CreateVkShaderModule(const GrVkGpu* gpu,
+ VkShaderStageFlagBits stage,
+ const GrGLSLShaderBuilder& builder,
+ VkShaderModule* shaderModule,
+ VkPipelineShaderStageCreateInfo* stageInfo);
+
+ GrGLSLUniformHandler* uniformHandler() override { return &fUniformHandler; }
+ const GrGLSLUniformHandler* uniformHandler() const override { return &fUniformHandler; }
+ GrGLSLVaryingHandler* varyingHandler() override { return &fVaryingHandler; }
+
+ GrVkGpu* fGpu;
+ GrVkVaryingHandler fVaryingHandler;
+ GrVkUniformHandler fUniformHandler;
+
+ SkTArray<UniformHandle> fSamplerUniforms;
+
+ typedef GrGLSLProgramBuilder INHERITED;
+};
+
+#endif \ No newline at end of file
diff --git a/src/gpu/vk/GrVkProgramDataManager.cpp b/src/gpu/vk/GrVkProgramDataManager.cpp
new file mode 100644
index 0000000000..0ca03ad0b1
--- /dev/null
+++ b/src/gpu/vk/GrVkProgramDataManager.cpp
@@ -0,0 +1,315 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkProgramDataManager.h"
+
+#include "GrVkGpu.h"
+#include "GrVkUniformBuffer.h"
+
+GrVkProgramDataManager::GrVkProgramDataManager(const UniformInfoArray& uniforms,
+ uint32_t vertexUniformSize,
+ uint32_t fragmentUniformSize)
+ : fVertexUniformSize(vertexUniformSize)
+ , fFragmentUniformSize(fragmentUniformSize) {
+ fVertexUniformData.reset(vertexUniformSize);
+ fFragmentUniformData.reset(fragmentUniformSize);
+ int count = uniforms.count();
+ fUniforms.push_back_n(count);
+ // We must add uniforms in the same order as the UniformInfoArray so that UniformHandles
+ // already owned by other objects will still match up here.
+ for (int i = 0; i < count; i++) {
+ Uniform& uniform = fUniforms[i];
+ const GrVkUniformHandler::UniformInfo uniformInfo = uniforms[i];
+ SkASSERT(GrGLSLShaderVar::kNonArray == uniformInfo.fVariable.getArrayCount() ||
+ uniformInfo.fVariable.getArrayCount() > 0);
+ SkDEBUGCODE(
+ uniform.fArrayCount = uniformInfo.fVariable.getArrayCount();
+ uniform.fType = uniformInfo.fVariable.getType();
+ );
+ uniform.fBinding = uniformInfo.fBinding;
+ uniform.fOffset = uniformInfo.fUBOffset;
+ SkDEBUGCODE(
+ uniform.fSetNumber = uniformInfo.fSetNumber;
+ );
+ }
+}
+
+void GrVkProgramDataManager::set1f(UniformHandle u, float v0) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat_GrSLType);
+ SkASSERT(GrGLSLShaderVar::kNonArray == uni.fArrayCount);
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ SkASSERT(sizeof(float) == 4);
+ memcpy(buffer, &v0, sizeof(float));
+}
+
+void GrVkProgramDataManager::set1fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kFloat_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrGLSLShaderVar::kNonArray == uni.fArrayCount));
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ SkASSERT(sizeof(float) == 4);
+ memcpy(buffer, v, arrayCount * sizeof(float));
+}
+
+void GrVkProgramDataManager::set2f(UniformHandle u, float v0, float v1) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec2f_GrSLType);
+ SkASSERT(GrGLSLShaderVar::kNonArray == uni.fArrayCount);
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ SkASSERT(sizeof(float) == 4);
+ float v[2] = { v0, v1 };
+ memcpy(buffer, v, 2 * sizeof(float));
+}
+
+void GrVkProgramDataManager::set2fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec2f_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrGLSLShaderVar::kNonArray == uni.fArrayCount));
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ SkASSERT(sizeof(float) == 4);
+ memcpy(buffer, v, arrayCount * 2 * sizeof(float));
+}
+
+void GrVkProgramDataManager::set3f(UniformHandle u, float v0, float v1, float v2) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec3f_GrSLType);
+ SkASSERT(GrGLSLShaderVar::kNonArray == uni.fArrayCount);
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ SkASSERT(sizeof(float) == 4);
+ float v[3] = { v0, v1, v2 };
+ memcpy(buffer, v, 3 * sizeof(float));
+}
+
+void GrVkProgramDataManager::set3fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec3f_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrGLSLShaderVar::kNonArray == uni.fArrayCount));
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ SkASSERT(sizeof(float) == 4);
+ memcpy(buffer, v, arrayCount * 3 * sizeof(float));
+}
+
+void GrVkProgramDataManager::set4f(UniformHandle u, float v0, float v1, float v2, float v3) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec4f_GrSLType);
+ SkASSERT(GrGLSLShaderVar::kNonArray == uni.fArrayCount);
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ SkASSERT(sizeof(float) == 4);
+ float v[4] = { v0, v1, v2, v3 };
+ memcpy(buffer, v, 4 * sizeof(float));
+}
+
+void GrVkProgramDataManager::set4fv(UniformHandle u,
+ int arrayCount,
+ const float v[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kVec4f_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrGLSLShaderVar::kNonArray == uni.fArrayCount));
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ SkASSERT(sizeof(float) == 4);
+ memcpy(buffer, v, arrayCount * 4 * sizeof(float));
+}
+
+void GrVkProgramDataManager::setMatrix3f(UniformHandle u, const float matrix[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kMat33f_GrSLType);
+ SkASSERT(GrGLSLShaderVar::kNonArray == uni.fArrayCount);
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+
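+ // Each 3-float matrix column is written with a 4-float stride, matching the vec4-aligned
+ // column layout a mat3 gets in the uniform buffer.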
+ SkASSERT(sizeof(float) == 4);
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ memcpy(buffer, &matrix[0], 3 * sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4 * sizeof(float);
+ memcpy(buffer, &matrix[3], 3 * sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4 * sizeof(float);
+ memcpy(buffer, &matrix[6], 3 * sizeof(float));
+}
+
+void GrVkProgramDataManager::setMatrix3fv(UniformHandle u,
+ int arrayCount,
+ const float matrices[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kMat33f_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrGLSLShaderVar::kNonArray == uni.fArrayCount));
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+ SkASSERT(sizeof(float) == 4);
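+ // Same vec4-aligned column layout as in setMatrix3f, repeated for each matrix in the array.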
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ for (int i = 0; i < arrayCount; ++i) {
+ const float* matrix = &matrices[9 * i];
+ memcpy(buffer, &matrix[0], 3 * sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4 * sizeof(float);
+ memcpy(buffer, &matrix[3], 3 * sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4 * sizeof(float);
+ memcpy(buffer, &matrix[6], 3 * sizeof(float));
+ buffer = static_cast<char*>(buffer) + 4 * sizeof(float);
+ }
+}
+
+
+void GrVkProgramDataManager::setMatrix4f(UniformHandle u, const float matrix[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kMat44f_GrSLType);
+ SkASSERT(GrGLSLShaderVar::kNonArray == uni.fArrayCount);
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ SkASSERT(sizeof(float) == 4);
+ memcpy(buffer, matrix, 16 * sizeof(float));
+}
+
+void GrVkProgramDataManager::setMatrix4fv(UniformHandle u,
+ int arrayCount,
+ const float matrices[]) const {
+ const Uniform& uni = fUniforms[u.toIndex()];
+ SkASSERT(uni.fType == kMat44f_GrSLType);
+ SkASSERT(arrayCount > 0);
+ SkASSERT(arrayCount <= uni.fArrayCount ||
+ (1 == arrayCount && GrGLSLShaderVar::kNonArray == uni.fArrayCount));
+ SkASSERT(GrVkUniformHandler::kUniformBufferDescSet == uni.fSetNumber);
+
+ void* buffer;
+ if (GrVkUniformHandler::kVertexBinding == uni.fBinding) {
+ buffer = fVertexUniformData.get();
+ } else {
+ SkASSERT(GrVkUniformHandler::kFragBinding == uni.fBinding);
+ buffer = fFragmentUniformData.get();
+ }
+ buffer = static_cast<char*>(buffer) + uni.fOffset;
+ SkASSERT(sizeof(float) == 4);
+ memcpy(buffer, matrices, arrayCount * 16 * sizeof(float));
+}
+
+void GrVkProgramDataManager::uploadUniformBuffers(const GrVkGpu* gpu,
+ GrVkUniformBuffer* vertexBuffer,
+ GrVkUniformBuffer* fragmentBuffer) const {
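+ // The barriers below ensure any in-flight shader reads of the old uniform data complete
+ // before the host overwrites the buffer contents.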
+ if (vertexBuffer) {
+ vertexBuffer->addMemoryBarrier(gpu,
+ VK_ACCESS_UNIFORM_READ_BIT,
+ VK_ACCESS_HOST_WRITE_BIT,
+ VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
+ VK_PIPELINE_STAGE_HOST_BIT,
+ false);
+ SkAssertResult(vertexBuffer->updateData(gpu, fVertexUniformData.get(), fVertexUniformSize));
+ }
+
+ if (fragmentBuffer) {
+ fragmentBuffer->addMemoryBarrier(gpu,
+ VK_ACCESS_UNIFORM_READ_BIT,
+ VK_ACCESS_HOST_WRITE_BIT,
+ VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
+ VK_PIPELINE_STAGE_HOST_BIT,
+ false);
+ SkAssertResult(fragmentBuffer->updateData(gpu, fFragmentUniformData.get(),
+ fFragmentUniformSize));
+ }
+}
diff --git a/src/gpu/vk/GrVkProgramDataManager.h b/src/gpu/vk/GrVkProgramDataManager.h
new file mode 100644
index 0000000000..a0684df025
--- /dev/null
+++ b/src/gpu/vk/GrVkProgramDataManager.h
@@ -0,0 +1,70 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkProgramDataManager_DEFINED
+#define GrVkProgramDataManager_DEFINED
+
+#include "glsl/GrGLSLProgramDataManager.h"
+
+#include "vk/GrVkUniformHandler.h"
+
+class GrVkGpu;
+class GrVkUniformBuffer;
+
+class GrVkProgramDataManager : public GrGLSLProgramDataManager {
+public:
+ typedef GrVkUniformHandler::UniformInfoArray UniformInfoArray;
+
+ GrVkProgramDataManager(const UniformInfoArray&,
+ uint32_t vertexUniformSize,
+ uint32_t fragmentUniformSize);
+
+ void set1f(UniformHandle, float v0) const override;
+ void set1fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set2f(UniformHandle, float, float) const override;
+ void set2fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set3f(UniformHandle, float, float, float) const override;
+ void set3fv(UniformHandle, int arrayCount, const float v[]) const override;
+ void set4f(UniformHandle, float, float, float, float) const override;
+ void set4fv(UniformHandle, int arrayCount, const float v[]) const override;
+ // matrices are column-major, the first two upload a single matrix, the latter two upload
+ // arrayCount matrices into a uniform array.
+ void setMatrix3f(UniformHandle, const float matrix[]) const override;
+ void setMatrix4f(UniformHandle, const float matrix[]) const override;
+ void setMatrix3fv(UniformHandle, int arrayCount, const float matrices[]) const override;
+ void setMatrix4fv(UniformHandle, int arrayCount, const float matrices[]) const override;
+
+ // for nvpr only
+ void setPathFragmentInputTransform(VaryingHandle u, int components,
+ const SkMatrix& matrix) const override {
+ SkFAIL("Only supported in NVPR, which is not in Vulkan");
+ }
+
+ void uploadUniformBuffers(const GrVkGpu* gpu,
+ GrVkUniformBuffer* vertexBuffer,
+ GrVkUniformBuffer* fragmentBuffer) const;
+private:
+ struct Uniform {
+ uint32_t fBinding;
+ uint32_t fOffset;
+ SkDEBUGCODE(
+ GrSLType fType;
+ int fArrayCount;
+ uint32_t fSetNumber;
+ );
+ };
+
+ uint32_t fVertexUniformSize;
+ uint32_t fFragmentUniformSize;
+
+ SkTArray<Uniform, true> fUniforms;
+
+ mutable SkAutoMalloc fVertexUniformData;
+ mutable SkAutoMalloc fFragmentUniformData;
+};
+
+#endif \ No newline at end of file
diff --git a/src/gpu/vk/GrVkProgramDesc.cpp b/src/gpu/vk/GrVkProgramDesc.cpp
new file mode 100644
index 0000000000..346dbb6baa
--- /dev/null
+++ b/src/gpu/vk/GrVkProgramDesc.cpp
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+#include "GrVkProgramDesc.h"
+
+//#include "GrVkProcessor.h"
+#include "GrProcessor.h"
+#include "GrPipeline.h"
+#include "GrVkGpu.h"
+#include "GrVkUtil.h"
+#include "SkChecksum.h"
+#include "glsl/GrGLSLFragmentProcessor.h"
+#include "glsl/GrGLSLFragmentShaderBuilder.h"
+#include "glsl/GrGLSLCaps.h"
+
+#include "shaderc/shaderc.h"
+
+static void add_texture_key(GrProcessorKeyBuilder* b, const GrProcessor& proc,
+ const GrGLSLCaps& caps) {
+ int numTextures = proc.numTextures();
+ // Need two bytes per texture (currently just the swizzle).
+ int word32Count = (numTextures + 1) / 2;
+ if (0 == word32Count) {
+ return;
+ }
+ uint16_t* k16 = SkTCast<uint16_t*>(b->add32n(word32Count));
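+ // Two 16-bit texture keys are packed into each 32-bit word of the key.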
+ for (int i = 0; i < numTextures; ++i) {
+ const GrTextureAccess& access = proc.textureAccess(i);
+ GrTexture* texture = access.getTexture();
+ k16[i] = SkToU16(caps.configTextureSwizzle(texture->config()).asKey());
+ }
+ // zero the last 16 bits if the number of textures is odd.
+ if (numTextures & 0x1) {
+ k16[numTextures] = 0;
+ }
+}
+
+/**
+* A function which emits a meta key into the key builder. This is required because shader code may
+* be dependent on properties of the effect that the effect itself doesn't use
+* in its key (e.g. the pixel format of textures used). So we create a meta-key for
+* every effect using this function. It is also responsible for inserting the effect's class ID
+* which must be different for every GrProcessor subclass. It can fail if an effect uses too many
+* transforms, etc, for the space allotted in the meta-key. NOTE, both FPs and GPs share this
+* function because it is hairy, though FPs do not have attribs, and GPs do not have transforms
+*/
+static bool gen_meta_key(const GrProcessor& proc,
+ const GrGLSLCaps& glslCaps,
+ uint32_t transformKey,
+ GrProcessorKeyBuilder* b) {
+ size_t processorKeySize = b->size();
+ uint32_t classID = proc.classID();
+
+ // Currently we allow 16 bits for the class id and the overall processor key size.
+ static const uint32_t kMetaKeyInvalidMask = ~((uint32_t)SK_MaxU16);
+ if ((processorKeySize | classID) & kMetaKeyInvalidMask) {
+ return false;
+ }
+
+ add_texture_key(b, proc, glslCaps);
+
+ uint32_t* key = b->add32n(2);
+ key[0] = (classID << 16) | SkToU32(processorKeySize);
+ key[1] = transformKey;
+ return true;
+}
+
+static bool gen_frag_proc_and_meta_keys(const GrPrimitiveProcessor& primProc,
+ const GrFragmentProcessor& fp,
+ const GrGLSLCaps& glslCaps,
+ GrProcessorKeyBuilder* b) {
+ for (int i = 0; i < fp.numChildProcessors(); ++i) {
+ if (!gen_frag_proc_and_meta_keys(primProc, fp.childProcessor(i), glslCaps, b)) {
+ return false;
+ }
+ }
+
+ fp.getGLSLProcessorKey(glslCaps, b);
+
+ return gen_meta_key(fp, glslCaps, primProc.getTransformKey(fp.coordTransforms(),
+ fp.numTransformsExclChildren()), b);
+}
+
+bool GrVkProgramDescBuilder::Build(GrProgramDesc* desc,
+ const GrPrimitiveProcessor& primProc,
+ const GrPipeline& pipeline,
+ const GrGLSLCaps& glslCaps) {
+ // The descriptor is used as a cache key. Thus when a field of the
+ // descriptor will not affect program generation (because of the attribute
+ // bindings in use or other descriptor field settings) it should be set
+ // to a canonical value to avoid duplicate programs with different keys.
+
+ GrVkProgramDesc* vkDesc = (GrVkProgramDesc*)desc;
+
+ GR_STATIC_ASSERT(0 == kProcessorKeysOffset % sizeof(uint32_t));
+ // Make room for everything up to the effect keys.
+ vkDesc->key().reset();
+ vkDesc->key().push_back_n(kProcessorKeysOffset);
+
+ GrProcessorKeyBuilder b(&vkDesc->key());
+
+ primProc.getGLSLProcessorKey(glslCaps, &b);
+ if (!gen_meta_key(primProc, glslCaps, 0, &b)) {
+ vkDesc->key().reset();
+ return false;
+ }
+
+ for (int i = 0; i < pipeline.numFragmentProcessors(); ++i) {
+ const GrFragmentProcessor& fp = pipeline.getFragmentProcessor(i);
+ if (!gen_frag_proc_and_meta_keys(primProc, fp, glslCaps, &b)) {
+ vkDesc->key().reset();
+ return false;
+ }
+ }
+
+ const GrXferProcessor& xp = pipeline.getXferProcessor();
+ xp.getGLSLProcessorKey(glslCaps, &b);
+ if (!gen_meta_key(xp, glslCaps, 0, &b)) {
+ vkDesc->key().reset();
+ return false;
+ }
+
+ // --------DO NOT MOVE HEADER ABOVE THIS LINE--------------------------------------------------
+ // Because header is a pointer into the dynamic array, we can't push any new data into the key
+ // below here.
+ KeyHeader* header = vkDesc->atOffset<KeyHeader, kHeaderOffset>();
+
+ // make sure any padding in the header is zeroed.
+ memset(header, 0, kHeaderSize);
+
+ if (pipeline.readsFragPosition()) {
+ header->fFragPosKey =
+ GrGLSLFragmentShaderBuilder::KeyForFragmentPosition(pipeline.getRenderTarget());
+ } else {
+ header->fFragPosKey = 0;
+ }
+
+ header->fOutputSwizzle =
+ glslCaps.configOutputSwizzle(pipeline.getRenderTarget()->config()).asKey();
+
+ if (pipeline.ignoresCoverage()) {
+ header->fIgnoresCoverage = 1;
+ } else {
+ header->fIgnoresCoverage = 0;
+ }
+
+ header->fSnapVerticesToPixelCenters = pipeline.snapVerticesToPixelCenters();
+ header->fColorEffectCnt = pipeline.numColorFragmentProcessors();
+ header->fCoverageEffectCnt = pipeline.numCoverageFragmentProcessors();
+ vkDesc->finalize();
+ return true;
+}
diff --git a/src/gpu/vk/GrVkProgramDesc.h b/src/gpu/vk/GrVkProgramDesc.h
new file mode 100644
index 0000000000..1767a051ba
--- /dev/null
+++ b/src/gpu/vk/GrVkProgramDesc.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkProgramDesc_DEFINED
+#define GrVkProgramDesc_DEFINED
+
+#include "GrColor.h"
+#include "GrProgramDesc.h"
+#include "GrGpu.h"
+#include "GrTypesPriv.h"
+
+#include "shaderc/shaderc.h"
+#include "vulkan/vulkan.h"
+
+class GrVkGpu;
+class GrVkProgramDescBuilder;
+
+class GrVkProgramDesc : public GrProgramDesc {
+private:
+ friend class GrVkProgramDescBuilder;
+};
+
+/**
+ * This class can be used to build a GrProgramDesc. It also provides helpers for accessing
+ * GL specific info in the header.
+ */
+class GrVkProgramDescBuilder {
+public:
+ typedef GrProgramDesc::KeyHeader KeyHeader;
+ // The key, stored in fKey, is composed of four parts (the first two are defined in the key
+ // itself):
+ // 1. uint32_t for total key length.
+ // 2. uint32_t for a checksum.
+ // 3. Header struct defined above.
+ // 4. Backend-specific information including per-processor keys and their key lengths.
+ // Each processor's key is a variable length array of uint32_t.
+ enum {
+ // Part 3.
+ kHeaderOffset = GrVkProgramDesc::kHeaderOffset,
+ kHeaderSize = SkAlign4(sizeof(KeyHeader)),
+ // Part 4.
+ // This is the offset into the backend-specific part of the key, which includes
+ // per-processor keys.
+ kProcessorKeysOffset = kHeaderOffset + kHeaderSize,
+ };
+
+ /**
+ * Builds a Vulkan-specific program descriptor.
+ *
+ * @param GrPrimitiveProcessor The geometry
+ * @param GrPipeline The optimized drawstate. The descriptor will represent a program
+ * which this optstate can use to draw with. The optstate contains
+ * general draw information, as well as the specific color, geometry,
+ * and coverage stages which will be used to generate the program for
+ * this optstate.
+ * @param GrGLSLCaps The caps are used to output processor-specific parts of the
+ * descriptor.
+ * @param GrProgramDesc The built and finalized descriptor
+ **/
+ static bool Build(GrProgramDesc*,
+ const GrPrimitiveProcessor&,
+ const GrPipeline&,
+ const GrGLSLCaps&);
+};
+
+#endif
diff --git a/src/gpu/vk/GrVkRenderPass.cpp b/src/gpu/vk/GrVkRenderPass.cpp
new file mode 100644
index 0000000000..9c4787b865
--- /dev/null
+++ b/src/gpu/vk/GrVkRenderPass.cpp
@@ -0,0 +1,220 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkRenderPass.h"
+
+#include "GrVkFramebuffer.h"
+#include "GrVkGpu.h"
+#include "GrVkRenderTarget.h"
+#include "GrVkUtil.h"
+
+void setup_simple_vk_attachment_description(VkAttachmentDescription* attachment,
+ VkFormat format,
+ uint32_t samples,
+ VkImageLayout layout) {
+ attachment->flags = 0;
+ attachment->format = format;
+ SkAssertResult(GrSampleCountToVkSampleCount(samples, &attachment->samples));
+ attachment->loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ attachment->storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ attachment->stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ attachment->stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
+ attachment->initialLayout = layout;
+ attachment->finalLayout = layout;
+}
+
+void GrVkRenderPass::initSimple(const GrVkGpu* gpu, const GrVkRenderTarget& target) {
+ // Get attachment information from render target. This includes which attachments the render
+ // target has (color, resolve, stencil) and the attachments format and sample count.
+ target.getAttachmentsDescriptor(&fAttachmentsDescriptor, &fAttachmentFlags);
+
+ uint32_t numAttachments = fAttachmentsDescriptor.fAttachmentCount;
+ // Attachment descriptions to be set on the render pass
+ SkTArray<VkAttachmentDescription> attachments(numAttachments);
+ attachments.reset(numAttachments);
+ memset(attachments.begin(), 0, numAttachments*sizeof(VkAttachmentDescription));
+
+ // Refs to attachments on the render pass (as described by the VkAttachmentDescription above),
+ // that are used by the subpass.
+ VkAttachmentReference colorRef;
+ VkAttachmentReference resolveRef;
+ VkAttachmentReference stencilRef;
+ uint32_t currentAttachment = 0;
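+ // Attachments are appended in the fixed order color, resolve, stencil; the
+ // *AttachmentIndex() helpers below rely on that ordering.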
+
+ // Go through each of the attachment types (color, resolve, stencil) and set the necessary
+ // values on the various Vk structs.
+ VkSubpassDescription subpassDesc;
+ memset(&subpassDesc, 0, sizeof(VkSubpassDescription));
+ subpassDesc.flags = 0;
+ subpassDesc.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ subpassDesc.inputAttachmentCount = 0;
+ subpassDesc.pInputAttachments = nullptr;
+ if (fAttachmentFlags & kColor_AttachmentFlag) {
+ // set up color attachment
+ setup_simple_vk_attachment_description(&attachments[currentAttachment],
+ fAttachmentsDescriptor.fColor.fFormat,
+ fAttachmentsDescriptor.fColor.fSamples,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+ // setup subpass use of attachment
+ colorRef.attachment = currentAttachment++;
+ colorRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ subpassDesc.colorAttachmentCount = 1;
+ } else {
+ // I don't think there should ever be a time where we don't have a color attachment
+ SkASSERT(false);
+ colorRef.attachment = VK_ATTACHMENT_UNUSED;
+ colorRef.layout = VK_IMAGE_LAYOUT_UNDEFINED;
+ subpassDesc.colorAttachmentCount = 0;
+ }
+ subpassDesc.pColorAttachments = &colorRef;
+
+ if (fAttachmentFlags & kResolve_AttachmentFlag) {
+ // set up resolve attachment
+ setup_simple_vk_attachment_description(&attachments[currentAttachment],
+ fAttachmentsDescriptor.fResolve.fFormat,
+ fAttachmentsDescriptor.fResolve.fSamples,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+ // setup subpass use of attachment
+ resolveRef.attachment = currentAttachment++;
+ // I'm really not sure what the layout should be for the resolve textures.
+ resolveRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
+ subpassDesc.pResolveAttachments = &resolveRef;
+ } else {
+ subpassDesc.pResolveAttachments = nullptr;
+ }
+
+ if (fAttachmentFlags & kStencil_AttachmentFlag) {
+ // set up stencil attachment
+ setup_simple_vk_attachment_description(&attachments[currentAttachment],
+ fAttachmentsDescriptor.fStencil.fFormat,
+ fAttachmentsDescriptor.fStencil.fSamples,
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
+ // setup subpass use of attachment
+ stencilRef.attachment = currentAttachment++;
+ stencilRef.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ } else {
+ stencilRef.attachment = VK_ATTACHMENT_UNUSED;
+ stencilRef.layout = VK_IMAGE_LAYOUT_UNDEFINED;
+ }
+ subpassDesc.pDepthStencilAttachment = &stencilRef;
+
+ subpassDesc.preserveAttachmentCount = 0;
+ subpassDesc.pPreserveAttachments = nullptr;
+
+ SkASSERT(numAttachments == currentAttachment);
+
+ // Create the VkRenderPass compatible with the attachment descriptions above
+ VkRenderPassCreateInfo createInfo;
+ memset(&createInfo, 0, sizeof(VkRenderPassCreateInfo));
+ createInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+ createInfo.pNext = nullptr;
+ createInfo.flags = 0;
+ createInfo.attachmentCount = numAttachments;
+ createInfo.pAttachments = attachments.begin();
+ createInfo.subpassCount = 1;
+ createInfo.pSubpasses = &subpassDesc;
+ createInfo.dependencyCount = 0;
+ createInfo.pDependencies = nullptr;
+
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateRenderPass(gpu->device(),
+ &createInfo,
+ nullptr,
+ &fRenderPass));
+}
+
+void GrVkRenderPass::freeGPUData(const GrVkGpu* gpu) const {
+ GR_VK_CALL(gpu->vkInterface(), DestroyRenderPass(gpu->device(), fRenderPass, nullptr));
+}
+
+// Works under the assumption that color attachment will always be the first attachment in our
+// attachment array if it exists.
+bool GrVkRenderPass::colorAttachmentIndex(uint32_t* index) const {
+ *index = 0;
+ if (fAttachmentFlags & kColor_AttachmentFlag) {
+ return true;
+ }
+ return false;
+}
+
+// Works under the assumption that resolve attachment will always be after the color attachment.
+bool GrVkRenderPass::resolveAttachmentIndex(uint32_t* index) const {
+ *index = 0;
+ if (fAttachmentFlags & kColor_AttachmentFlag) {
+ ++(*index);
+ }
+ if (fAttachmentFlags & kResolve_AttachmentFlag) {
+ return true;
+ }
+ return false;
+}
+
+// Works under the assumption that stencil attachment will always be after the color and resolve
+// attachment.
+bool GrVkRenderPass::stencilAttachmentIndex(uint32_t* index) const {
+ *index = 0;
+ if (fAttachmentFlags & kColor_AttachmentFlag) {
+ ++(*index);
+ }
+ if (fAttachmentFlags & kResolve_AttachmentFlag) {
+ ++(*index);
+ }
+ if (fAttachmentFlags & kStencil_AttachmentFlag) {
+ return true;
+ }
+ return false;
+}
+
+void GrVkRenderPass::getBeginInfo(const GrVkRenderTarget& target,
+ VkRenderPassBeginInfo* beginInfo,
+ VkSubpassContents* contents) const {
+ SkASSERT(this->isCompatible(target));
+
+ VkRect2D renderArea;
+ renderArea.offset = { 0, 0 };
+ renderArea.extent = { (uint32_t)target.width(), (uint32_t)target.height() };
+
+ memset(beginInfo, 0, sizeof(VkRenderPassBeginInfo));
+ beginInfo->sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+ beginInfo->pNext = nullptr;
+ beginInfo->renderPass = fRenderPass;
+ beginInfo->framebuffer = target.framebuffer()->framebuffer();
+ beginInfo->renderArea = renderArea;
+ beginInfo->clearValueCount = 0;
+ beginInfo->pClearValues = nullptr;
+
+ // Currently just assuming no secondary cmd buffers. This value will need to be updated if
+ // we have them.
+ *contents = VK_SUBPASS_CONTENTS_INLINE;
+}
+
+bool GrVkRenderPass::isCompatible(const GrVkRenderTarget& target) const {
+ AttachmentsDescriptor desc;
+ AttachmentFlags flags;
+ target.getAttachmentsDescriptor(&desc, &flags);
+
+ if (flags != fAttachmentFlags) {
+ return false;
+ }
+
+ if (fAttachmentFlags & kColor_AttachmentFlag) {
+ if (fAttachmentsDescriptor.fColor != desc.fColor) {
+ return false;
+ }
+ }
+ if (fAttachmentFlags & kResolve_AttachmentFlag) {
+ if (fAttachmentsDescriptor.fResolve != desc.fResolve) {
+ return false;
+ }
+ }
+ if (fAttachmentFlags & kStencil_AttachmentFlag) {
+ if (fAttachmentsDescriptor.fStencil != desc.fStencil) {
+ return false;
+ }
+ }
+
+ return true;
+}
diff --git a/src/gpu/vk/GrVkRenderPass.h b/src/gpu/vk/GrVkRenderPass.h
new file mode 100644
index 0000000000..d460741d5e
--- /dev/null
+++ b/src/gpu/vk/GrVkRenderPass.h
@@ -0,0 +1,90 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkRenderPass_DEFINED
+#define GrVkRenderPass_DEFINED
+
+#include "GrTypes.h"
+
+#include "GrVkResource.h"
+
+#include "vulkan/vulkan.h"
+
+class GrVkGpu;
+class GrVkRenderTarget;
+
+class GrVkRenderPass : public GrVkResource {
+public:
+ GrVkRenderPass() : INHERITED(), fRenderPass(VK_NULL_HANDLE) {}
+ void initSimple(const GrVkGpu* gpu, const GrVkRenderTarget& target);
+
+ struct AttachmentsDescriptor {
+ struct AttachmentDesc {
+ VkFormat fFormat;
+ int fSamples;
+ AttachmentDesc() : fFormat(VK_FORMAT_UNDEFINED), fSamples(0) {}
+ bool operator==(const AttachmentDesc& right) const {
+ return (fFormat == right.fFormat && fSamples == right.fSamples);
+ }
+ bool operator!=(const AttachmentDesc& right) const {
+ return !(*this == right);
+ }
+ };
+ AttachmentDesc fColor;
+ AttachmentDesc fResolve;
+ AttachmentDesc fStencil;
+ uint32_t fAttachmentCount;
+ };
+
+ enum AttachmentFlags {
+ kColor_AttachmentFlag = 0x1,
+ kResolve_AttachmentFlag = 0x2,
+ kStencil_AttachmentFlag = 0x4,
+ };
+ GR_DECL_BITFIELD_OPS_FRIENDS(AttachmentFlags);
+
+ // The following return the index into the render pass's attachment array for the given
+ // attachment type. If the render pass does not have the given attachment, they return false
+ // and do not set the index value.
+ bool colorAttachmentIndex(uint32_t* index) const;
+ bool resolveAttachmentIndex(uint32_t* index) const;
+ bool stencilAttachmentIndex(uint32_t* index) const;
+
+ // Sets the VkRenderPassBeginInfo and VkSubpassContents needed to begin a render pass.
+ // TODO: In the future I expect this function will also take an optional render area instead of
+ // defaulting to the entire render target.
+ // TODO: Figure out if load clear values should be passed into this function or should be stored
+ // on the GrVkRenderPass at create time since we'll know at that point if we want to do a load
+ // clear.
+ void getBeginInfo(const GrVkRenderTarget& target,
+ VkRenderPassBeginInfo* beginInfo,
+ VkSubpassContents* contents) const;
+
+ // Returns whether or not the structure of a RenderTarget matches that of the VkRenderPass in
+ // this object. Specifically this compares that the number of attachments, format of
+ // attachments, and sample counts are all the same. This function is used in the creation of
+ // basic RenderPasses that can be used when creating a VkFrameBuffer object.
+ bool isCompatible(const GrVkRenderTarget& target) const;
+
+ VkRenderPass vkRenderPass() const { return fRenderPass; }
+
+private:
+ GrVkRenderPass(const GrVkRenderPass&);
+ GrVkRenderPass& operator=(const GrVkRenderPass&);
+
+ void freeGPUData(const GrVkGpu* gpu) const override;
+
+ VkRenderPass fRenderPass;
+ AttachmentFlags fAttachmentFlags;
+ AttachmentsDescriptor fAttachmentsDescriptor;
+
+ typedef GrVkResource INHERITED;
+};
+
+GR_MAKE_BITFIELD_OPS(GrVkRenderPass::AttachmentFlags);
+
+#endif \ No newline at end of file
diff --git a/src/gpu/vk/GrVkRenderTarget.cpp b/src/gpu/vk/GrVkRenderTarget.cpp
new file mode 100644
index 0000000000..87f1f77e39
--- /dev/null
+++ b/src/gpu/vk/GrVkRenderTarget.cpp
@@ -0,0 +1,391 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkRenderTarget.h"
+
+#include "GrRenderTargetPriv.h"
+#include "GrVkCommandBuffer.h"
+#include "GrVkFramebuffer.h"
+#include "GrVkGpu.h"
+#include "GrVkImageView.h"
+#include "GrVkResourceProvider.h"
+#include "GrVkUtil.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+// We're virtually derived from GrSurface (via GrRenderTarget) so its
+// constructor must be explicitly called.
+GrVkRenderTarget::GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImage::Resource* msaaResource,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView)
+ : GrSurface(gpu, lifeCycle, desc)
+ , GrVkImage(imageResource)
+ // for the moment we only support 1:1 color to stencil
+ , GrRenderTarget(gpu, lifeCycle, desc, kUnified_SampleConfig)
+ , fFramebuffer(nullptr)
+ , fColorAttachmentView(colorAttachmentView)
+ , fMSAAImageResource(msaaResource)
+ , fResolveAttachmentView(resolveAttachmentView)
+ , fCachedSimpleRenderPass(nullptr) {
+ SkASSERT(desc.fSampleCnt);
+ // The plus 1 is to account for the resolve texture.
+ fColorValuesPerPixel = desc.fSampleCnt + 1; // TODO: this still correct?
+ this->createFramebuffer(gpu);
+ this->registerWithCache();
+ msaaResource->ref();
+}
+
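+// The overloads taking the 'Derived' tag skip registerWithCache(); a deriving class (such as a
+// texture render target) is expected to register itself once fully constructed.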
+// We're virtually derived from GrSurface (via GrRenderTarget) so its
+// constructor must be explicitly called.
+GrVkRenderTarget::GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImage::Resource* msaaResource,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView,
+ Derived)
+ : GrSurface(gpu, lifeCycle, desc)
+ , GrVkImage(imageResource)
+ // for the moment we only support 1:1 color to stencil
+ , GrRenderTarget(gpu, lifeCycle, desc, kUnified_SampleConfig)
+ , fFramebuffer(nullptr)
+ , fColorAttachmentView(colorAttachmentView)
+ , fMSAAImageResource(msaaResource)
+ , fResolveAttachmentView(resolveAttachmentView)
+ , fCachedSimpleRenderPass(nullptr) {
+ SkASSERT(desc.fSampleCnt);
+ // The plus 1 is to account for the resolve texture.
+ fColorValuesPerPixel = desc.fSampleCnt + 1; // TODO: this still correct?
+ this->createFramebuffer(gpu);
+ msaaResource->ref();
+}
+
+// We're virtually derived from GrSurface (via GrRenderTarget) so its
+// constructor must be explicitly called.
+GrVkRenderTarget::GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImageView* colorAttachmentView)
+ : GrSurface(gpu, lifeCycle, desc)
+ , GrVkImage(imageResource)
+ , GrRenderTarget(gpu, lifeCycle, desc, kUnified_SampleConfig)
+ , fFramebuffer(nullptr)
+ , fColorAttachmentView(colorAttachmentView)
+ , fMSAAImageResource(nullptr)
+ , fResolveAttachmentView(nullptr)
+ , fCachedSimpleRenderPass(nullptr) {
+ SkASSERT(!desc.fSampleCnt);
+ fColorValuesPerPixel = 1;
+ this->createFramebuffer(gpu);
+ this->registerWithCache();
+}
+
+// We're virtually derived from GrSurface (via GrRenderTarget) so its
+// constructor must be explicitly called.
+GrVkRenderTarget::GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImageView* colorAttachmentView,
+ Derived)
+ : GrSurface(gpu, lifeCycle, desc)
+ , GrVkImage(imageResource)
+ , GrRenderTarget(gpu, lifeCycle, desc, kUnified_SampleConfig)
+ , fFramebuffer(nullptr)
+ , fColorAttachmentView(colorAttachmentView)
+ , fMSAAImageResource(nullptr)
+ , fResolveAttachmentView(nullptr)
+ , fCachedSimpleRenderPass(nullptr) {
+ SkASSERT(!desc.fSampleCnt);
+ fColorValuesPerPixel = 1;
+ this->createFramebuffer(gpu);
+}
+
+GrVkRenderTarget*
+GrVkRenderTarget::Create(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::Resource* imageResource) {
+ VkFormat pixelFormat;
+ GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat);
+
+ VkImage colorImage;
+
+ // create msaa surface if necessary
+ const GrVkImage::Resource* msaaResource = nullptr;
+ const GrVkImageView* resolveAttachmentView = nullptr;
+ if (desc.fSampleCnt) {
+ GrVkImage::ImageDesc msImageDesc;
+ msImageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ msImageDesc.fFormat = pixelFormat;
+ msImageDesc.fWidth = desc.fWidth;
+ msImageDesc.fHeight = desc.fHeight;
+ msImageDesc.fLevels = 1;
+ msImageDesc.fSamples = desc.fSampleCnt;
+ msImageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ msImageDesc.fUsageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ msImageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+ msaaResource = GrVkImage::CreateResource(gpu, msImageDesc);
+
+ if (!msaaResource) {
+ return nullptr;
+ }
+
+ // Set color attachment image
+ colorImage = msaaResource->fImage;
+
+ // Create Resolve attachment view
+ resolveAttachmentView = GrVkImageView::Create(gpu, imageResource->fImage, pixelFormat,
+ GrVkImageView::kColor_Type);
+ if (!resolveAttachmentView) {
+ msaaResource->unref(gpu);
+ return nullptr;
+ }
+ } else {
+ // Set color attachment image
+ colorImage = imageResource->fImage;
+ }
+
+ // Get color attachment view
+ const GrVkImageView* colorAttachmentView = GrVkImageView::Create(gpu, colorImage, pixelFormat,
+ GrVkImageView::kColor_Type);
+ if (!colorAttachmentView) {
+ if (msaaResource) {
+ resolveAttachmentView->unref(gpu);
+ msaaResource->unref(gpu);
+ }
+ return nullptr;
+ }
+
+ GrVkRenderTarget* texRT;
+ if (msaaResource) {
+ texRT = new GrVkRenderTarget(gpu, desc, lifeCycle, imageResource, msaaResource,
+ colorAttachmentView, resolveAttachmentView);
+ msaaResource->unref(gpu);
+ } else {
+ texRT = new GrVkRenderTarget(gpu, desc, lifeCycle, imageResource,
+ colorAttachmentView);
+ }
+
+ return texRT;
+}
+
+GrVkRenderTarget*
+GrVkRenderTarget::CreateNewRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::ImageDesc& imageDesc) {
+ SkASSERT(imageDesc.fUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
+
+ const GrVkImage::Resource* imageResource = GrVkImage::CreateResource(gpu, imageDesc);
+ if (!imageResource) {
+ return nullptr;
+ }
+
+ GrVkRenderTarget* rt = GrVkRenderTarget::Create(gpu, desc, lifeCycle, imageResource);
+ // Create() will increment the refCount of the image resource if it succeeds
+ imageResource->unref(gpu);
+
+ return rt;
+}
+
+GrVkRenderTarget*
+GrVkRenderTarget::CreateWrappedRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::Resource* imageResource) {
+ SkASSERT(imageResource);
+
+ // Note: we assume the caller will unref the imageResource
+ // Create() will increment the refCount, and we'll unref when we're done with it
+ return GrVkRenderTarget::Create(gpu, desc, lifeCycle, imageResource);
+}
+
+bool GrVkRenderTarget::completeStencilAttachment() {
+ this->createFramebuffer(this->getVkGpu());
+ return true;
+}
+
+void GrVkRenderTarget::createFramebuffer(GrVkGpu* gpu) {
+ if (fFramebuffer) {
+ fFramebuffer->unref(gpu);
+ }
+ if (fCachedSimpleRenderPass) {
+ fCachedSimpleRenderPass->unref(gpu);
+ }
+
+ // Vulkan requires us to create a compatible renderpass before we can create our framebuffer,
+ // so we use this to get a (cached) basic renderpass, only for creation.
+ fCachedSimpleRenderPass = gpu->resourceProvider().findOrCreateCompatibleRenderPass(*this);
+
+ // Stencil attachment view is stored in the base RT stencil attachment
+ const GrVkImageView* stencilView = this->stencilAttachmentView();
+ fFramebuffer = GrVkFramebuffer::Create(gpu, this->width(), this->height(),
+ fCachedSimpleRenderPass, fColorAttachmentView,
+ fResolveAttachmentView, stencilView);
+ SkASSERT(fFramebuffer);
+}
+
+void GrVkRenderTarget::getAttachmentsDescriptor(
+ GrVkRenderPass::AttachmentsDescriptor* desc,
+ GrVkRenderPass::AttachmentFlags* attachmentFlags) const {
+ int colorSamples = this->numColorSamples();
+ VkFormat colorFormat;
+ GrPixelConfigToVkFormat(this->config(), &colorFormat);
+ desc->fColor.fFormat = colorFormat;
+ desc->fColor.fSamples = colorSamples ? colorSamples : 1;
+ *attachmentFlags = GrVkRenderPass::kColor_AttachmentFlag;
+ uint32_t attachmentCount = 1;
+ if (colorSamples > 0) {
+ desc->fResolve.fFormat = colorFormat;
+ desc->fResolve.fSamples = 1;
+ *attachmentFlags |= GrVkRenderPass::kResolve_AttachmentFlag;
+ ++attachmentCount;
+ }
+
+ const GrStencilAttachment* stencil = this->renderTargetPriv().getStencilAttachment();
+ if (stencil) {
+ const GrVkStencilAttachment* vkStencil = static_cast<const GrVkStencilAttachment*>(stencil);
+ desc->fStencil.fFormat = vkStencil->vkFormat();
+ desc->fStencil.fSamples = vkStencil->numSamples() ? vkStencil->numSamples() : 1;
+ // Currently in vulkan stencil and color attachments must all have same number of samples
+ SkASSERT(desc->fColor.fSamples == desc->fStencil.fSamples);
+ *attachmentFlags |= GrVkRenderPass::kStencil_AttachmentFlag;
+ ++attachmentCount;
+ }
+ desc->fAttachmentCount = attachmentCount;
+}
+
+GrVkRenderTarget::~GrVkRenderTarget() {
+ // Either release or abandon should have been called by the owner of this object.
+ SkASSERT(!fMSAAImageResource);
+ SkASSERT(!fResolveAttachmentView);
+ SkASSERT(!fColorAttachmentView);
+ SkASSERT(!fFramebuffer);
+ SkASSERT(!fCachedSimpleRenderPass);
+}
+
+void GrVkRenderTarget::addResources(GrVkCommandBuffer& commandBuffer) const {
+ commandBuffer.addResource(this->framebuffer());
+ commandBuffer.addResource(this->resource());
+ commandBuffer.addResource(this->colorAttachmentView());
+ if (this->msaaImageResource()) {
+ commandBuffer.addResource(this->msaaImageResource());
+ commandBuffer.addResource(this->resolveAttachmentView());
+ }
+ if (this->stencilImageResource()) {
+ commandBuffer.addResource(this->stencilImageResource());
+ commandBuffer.addResource(this->stencilAttachmentView());
+ }
+}
+
+void GrVkRenderTarget::releaseInternalObjects() {
+ GrVkGpu* gpu = this->getVkGpu();
+
+ if (fMSAAImageResource) {
+ fMSAAImageResource->unref(gpu);
+ fMSAAImageResource = nullptr;
+ }
+
+ if (fResolveAttachmentView) {
+ fResolveAttachmentView->unref(gpu);
+ fResolveAttachmentView = nullptr;
+ }
+ if (fColorAttachmentView) {
+ fColorAttachmentView->unref(gpu);
+ fColorAttachmentView = nullptr;
+ }
+ if (fFramebuffer) {
+ fFramebuffer->unref(gpu);
+ fFramebuffer = nullptr;
+ }
+ if (fCachedSimpleRenderPass) {
+ fCachedSimpleRenderPass->unref(gpu);
+ fCachedSimpleRenderPass = nullptr;
+ }
+}
+
+void GrVkRenderTarget::abandonInternalObjects() {
+ if (fMSAAImageResource) {
+ fMSAAImageResource->unrefAndAbandon();
+ fMSAAImageResource = nullptr;
+ }
+
+ if (fResolveAttachmentView) {
+ fResolveAttachmentView->unrefAndAbandon();
+ fResolveAttachmentView = nullptr;
+ }
+ if (fColorAttachmentView) {
+ fColorAttachmentView->unrefAndAbandon();
+ fColorAttachmentView = nullptr;
+ }
+ if (fFramebuffer) {
+ fFramebuffer->unrefAndAbandon();
+ fFramebuffer = nullptr;
+ }
+ if (fCachedSimpleRenderPass) {
+ fCachedSimpleRenderPass->unrefAndAbandon();
+ fCachedSimpleRenderPass = nullptr;
+ }
+}
+
+void GrVkRenderTarget::onRelease() {
+ this->releaseInternalObjects();
+ if (this->shouldFreeResources()) {
+ this->releaseImage(this->getVkGpu());
+ } else {
+ this->abandonImage();
+ }
+
+ GrRenderTarget::onRelease();
+}
+
+void GrVkRenderTarget::onAbandon() {
+ this->abandonInternalObjects();
+ this->abandonImage();
+ GrRenderTarget::onAbandon();
+}
+
+
+GrBackendObject GrVkRenderTarget::getRenderTargetHandle() const {
+ // Currently just passing back the pointer to the main Image::Resource as the handle
+ return (GrBackendObject)&fResource;
+}
+
+const GrVkImage::Resource* GrVkRenderTarget::stencilImageResource() const {
+ const GrStencilAttachment* stencil = this->renderTargetPriv().getStencilAttachment();
+ if (stencil) {
+ const GrVkStencilAttachment* vkStencil = static_cast<const GrVkStencilAttachment*>(stencil);
+ return vkStencil->imageResource();
+ }
+
+ return nullptr;
+}
+
+const GrVkImageView* GrVkRenderTarget::stencilAttachmentView() const {
+ const GrStencilAttachment* stencil = this->renderTargetPriv().getStencilAttachment();
+ if (stencil) {
+ const GrVkStencilAttachment* vkStencil = static_cast<const GrVkStencilAttachment*>(stencil);
+ return vkStencil->stencilView();
+ }
+
+ return nullptr;
+}
+
+
+GrVkGpu* GrVkRenderTarget::getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+}
+
diff --git a/src/gpu/vk/GrVkRenderTarget.h b/src/gpu/vk/GrVkRenderTarget.h
new file mode 100644
index 0000000000..b4d72eac73
--- /dev/null
+++ b/src/gpu/vk/GrVkRenderTarget.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrVkRenderTarget_DEFINED
+#define GrVkRenderTarget_DEFINED
+
+#include "GrVkImage.h"
+#include "GrRenderTarget.h"
+
+#include "GrVkRenderPass.h"
+
+class GrVkCommandBuffer;
+class GrVkFramebuffer;
+class GrVkGpu;
+class GrVkImageView;
+class GrVkStencilAttachment;
+
+#ifdef SK_BUILD_FOR_WIN
+// Windows gives bogus warnings about inheriting asTexture/asRenderTarget via dominance.
+#pragma warning(push)
+#pragma warning(disable: 4250)
+#endif
+
+class GrVkRenderTarget: public GrRenderTarget, public virtual GrVkImage {
+public:
+ static GrVkRenderTarget* CreateNewRenderTarget(GrVkGpu*, const GrSurfaceDesc&,
+ GrGpuResource::LifeCycle,
+ const GrVkImage::ImageDesc&);
+
+ static GrVkRenderTarget* CreateWrappedRenderTarget(GrVkGpu*, const GrSurfaceDesc&,
+ GrGpuResource::LifeCycle,
+ const GrVkImage::Resource* resource);
+
+ ~GrVkRenderTarget() override;
+
+ const GrVkFramebuffer* framebuffer() const { return fFramebuffer; }
+ const GrVkImageView* colorAttachmentView() const { return fColorAttachmentView; }
+ const GrVkImage::Resource* msaaImageResource() const { return fMSAAImageResource; }
+ const GrVkImageView* resolveAttachmentView() const { return fResolveAttachmentView; }
+ const GrVkImage::Resource* stencilImageResource() const;
+ const GrVkImageView* stencilAttachmentView() const;
+
+ const GrVkRenderPass* simpleRenderPass() const { return fCachedSimpleRenderPass; }
+
+ // override of GrRenderTarget
+ ResolveType getResolveType() const override {
+ return kCanResolve_ResolveType;
+ }
+
+ bool canAttemptStencilAttachment() const override {
+ return true;
+ }
+
+ GrBackendObject getRenderTargetHandle() const override;
+
+ // Fills in the attachments descriptor and flags for this render target
+ void getAttachmentsDescriptor(GrVkRenderPass::AttachmentsDescriptor* desc,
+ GrVkRenderPass::AttachmentFlags* flags) const;
+
+ void addResources(GrVkCommandBuffer& commandBuffer) const;
+
+protected:
+ enum Derived { kDerived };
+
+ GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImage::Resource* msaaImageResource,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView);
+
+ GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImage::Resource* msaaImageResource,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView,
+ Derived);
+
+ GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImageView* colorAttachmentView);
+
+ GrVkRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImageView* colorAttachmentView,
+ Derived);
+
+ static GrVkRenderTarget* Create(GrVkGpu*, const GrSurfaceDesc&,
+ GrGpuResource::LifeCycle,
+ const GrVkImage::Resource* imageResource);
+
+ GrVkGpu* getVkGpu() const;
+
+ void onAbandon() override;
+ void onRelease() override;
+
+ // This accounts for the color attachment's memory and any MSAA image's memory.
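+ // A rough worked example (assuming fColorValuesPerPixel is 1 for a non-MSAA
+ // target and samples + 1 when there is a separate resolve surface; the exact
+ // value is set by the constructors): a 100x100 kRGBA_8888 target with
+ // fColorValuesPerPixel == 5 reports 5 * 100 * 100 * 4 = 200,000 bytes.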
+ size_t onGpuMemorySize() const override {
+ SkASSERT(kUnknown_GrPixelConfig != fDesc.fConfig);
+ SkASSERT(!GrPixelConfigIsCompressed(fDesc.fConfig));
+ size_t colorBytes = GrBytesPerPixel(fDesc.fConfig);
+ SkASSERT(colorBytes > 0);
+ return fColorValuesPerPixel * fDesc.fWidth * fDesc.fHeight * colorBytes;
+ }
+
+private:
+ bool completeStencilAttachment() override;
+
+ void createFramebuffer(GrVkGpu* gpu);
+
+ void releaseInternalObjects();
+ void abandonInternalObjects();
+
+ const GrVkFramebuffer* fFramebuffer;
+ const GrVkImageView* fColorAttachmentView;
+ const GrVkImage::Resource* fMSAAImageResource;
+ const GrVkImageView* fResolveAttachmentView;
+ int fColorValuesPerPixel;
+
+ // This is a cached pointer to a simple render pass. The render target should unref it
+ // once it is done with it.
+ const GrVkRenderPass* fCachedSimpleRenderPass;
+};
+
+#endif
diff --git a/src/gpu/vk/GrVkResource.h b/src/gpu/vk/GrVkResource.h
new file mode 100644
index 0000000000..8387c4ef96
--- /dev/null
+++ b/src/gpu/vk/GrVkResource.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkResource_DEFINED
+#define GrVkResource_DEFINED
+
+#include "SkAtomics.h"
+#include "SkTDynamicHash.h"
+#include "SkRandom.h"
+
+class GrVkGpu;
+
+// uncomment to enable tracing of resource refs
+//#ifdef SK_DEBUG
+//#define SK_TRACE_VK_RESOURCES
+//#endif
+
+/** \class GrVkResource
+
+ GrVkResource is the base class for Vulkan resources that may be shared by multiple
+ objects. When an existing owner wants to share a reference, it calls ref().
+ When an owner wants to release its reference, it calls unref(). When the
+ shared object's reference count goes to zero as the result of an unref()
+ call, its (virtual) destructor is called. It is an error for the
+ destructor to be called explicitly (or via the object going out of scope on
+ the stack or calling delete) if getRefCnt() > 1.
+
+ This is nearly identical to SkRefCntBase. The exceptions are that unref()
+ takes a GrVkGpu, and any derived classes must implement freeGPUData() and
+ possibly abandonSubResources().
+*/
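+// A minimal usage sketch (hypothetical TestResource subclass; 'gpu' is assumed
+// to outlive the resource):
+//
+//   class TestResource : public GrVkResource {
+//   private:
+//       void freeGPUData(const GrVkGpu* gpu) const override { /* destroy Vk handles */ }
+//   };
+//
+//   TestResource* res = new TestResource;  // refCnt == 1
+//   res->ref();                            // a second owner shares it, refCnt == 2
+//   res->unref(gpu);                       // refCnt == 1
+//   res->unref(gpu);                       // freeGPUData(gpu) runs, then delete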
+
+class GrVkResource : SkNoncopyable {
+public:
+ // Simple refCount tracing, to ensure that everything ref'ed is unref'ed.
+#ifdef SK_TRACE_VK_RESOURCES
+ static const uint32_t& GetKey(const GrVkResource& r) { return r.fKey; }
+ static uint32_t Hash(const uint32_t& k) { return k; }
+ static SkTDynamicHash<GrVkResource, uint32_t> fTrace;
+ static SkRandom fRandom;
+#endif
+
+ /** Default construct, initializing the reference count to 1.
+ */
+ GrVkResource() : fRefCnt(1) {
+#ifdef SK_TRACE_VK_RESOURCES
+ fKey = fRandom.nextU();
+ fTrace.add(this);
+#endif
+ }
+
+ /** Destruct, asserting that the reference count is 1.
+ */
+ virtual ~GrVkResource() {
+#ifdef SK_DEBUG
+ SkASSERTF(fRefCnt == 1, "fRefCnt was %d", fRefCnt);
+ fRefCnt = 0; // illegal value, to catch us if we reuse after delete
+#endif
+ }
+
+#ifdef SK_DEBUG
+ /** Return the reference count. Use only for debugging. */
+ int32_t getRefCnt() const { return fRefCnt; }
+#endif
+
+ /** May return true if the caller is the only owner.
+ * Ensures that all previous owner's actions are complete.
+ */
+ bool unique() const {
+ if (1 == sk_atomic_load(&fRefCnt, sk_memory_order_acquire)) {
+ // The acquire barrier is only really needed if we return true. It
+ // prevents code conditioned on the result of unique() from running
+ // until previous owners are all totally done calling unref().
+ return true;
+ }
+ return false;
+ }
+
+ /** Increment the reference count.
+ Must be balanced by a call to unref() or unrefAndAbandon().
+ */
+ void ref() const {
+ SkASSERT(fRefCnt > 0);
+ (void)sk_atomic_fetch_add(&fRefCnt, +1, sk_memory_order_relaxed); // No barrier required.
+ }
+
+ /** Decrement the reference count. If the reference count is 1 before the
+ decrement, then delete the object. Note that if this is the case, then
+ the object needs to have been allocated via new, and not on the stack.
+ Any GPU data associated with this resource will be freed before it's deleted.
+ */
+ void unref(const GrVkGpu* gpu) const {
+ SkASSERT(fRefCnt > 0);
+ SkASSERT(gpu);
+ // A release here acts in place of all releases we "should" have been doing in ref().
+ if (1 == sk_atomic_fetch_add(&fRefCnt, -1, sk_memory_order_acq_rel)) {
+ // Like unique(), the acquire is only needed on success, to make sure
+ // code in internal_dispose() doesn't happen before the decrement.
+ this->internal_dispose(gpu);
+ }
+ }
+
+ /** Unref without freeing GPU data. Used only when we're abandoning the resource */
+ void unrefAndAbandon() const {
+ SkASSERT(fRefCnt > 0);
+ // A release here acts in place of all releases we "should" have been doing in ref().
+ if (1 == sk_atomic_fetch_add(&fRefCnt, -1, sk_memory_order_acq_rel)) {
+ // Like unique(), the acquire is only needed on success, to make sure
+ // code in internal_dispose() doesn't happen before the decrement.
+ this->internal_dispose();
+ }
+ }
+
+#ifdef SK_DEBUG
+ void validate() const {
+ SkASSERT(fRefCnt > 0);
+ }
+#endif
+
+private:
+ /** Must be implemented by any subclasses.
+ * Deletes any Vk data associated with this resource
+ */
+ virtual void freeGPUData(const GrVkGpu* gpu) const = 0;
+
+ /** Must be overridden by subclasses that themselves store GrVkResources.
+ * Will unrefAndAbandon those resources without deleting the underlying Vk data
+ */
+ virtual void abandonSubResources() const {}
+
+ /**
+ * Called when the ref count goes to 0. Will free Vk resources.
+ */
+ void internal_dispose(const GrVkGpu* gpu) const {
+ this->freeGPUData(gpu);
+#ifdef SK_TRACE_VK_RESOURCES
+ fTrace.remove(GetKey(*this));
+#endif
+ SkASSERT(0 == fRefCnt);
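+ // Set back to 1 so the SkASSERTF(fRefCnt == 1) in ~GrVkResource passes.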
+ fRefCnt = 1;
+ delete this;
+ }
+
+ /**
+ * Internal_dispose without freeing Vk resources. Used when we've lost context.
+ */
+ void internal_dispose() const {
+ this->abandonSubResources();
+#ifdef SK_TRACE_VK_RESOURCES
+ fTrace.remove(GetKey(*this));
+#endif
+ SkASSERT(0 == fRefCnt);
+ fRefCnt = 1;
+ delete this;
+ }
+
+ mutable int32_t fRefCnt;
+#ifdef SK_TRACE_VK_RESOURCES
+ uint32_t fKey;
+#endif
+
+ typedef SkNoncopyable INHERITED;
+};
+
+
+#endif
diff --git a/src/gpu/vk/GrVkResourceProvider.cpp b/src/gpu/vk/GrVkResourceProvider.cpp
new file mode 100644
index 0000000000..f4cfa27224
--- /dev/null
+++ b/src/gpu/vk/GrVkResourceProvider.cpp
@@ -0,0 +1,118 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkResourceProvider.h"
+
+#include "GrVkCommandBuffer.h"
+#include "GrVkPipeline.h"
+#include "GrVkRenderPass.h"
+#include "GrVkUtil.h"
+
+#ifdef SK_TRACE_VK_RESOURCES
+SkTDynamicHash<GrVkResource, uint32_t> GrVkResource::fTrace;
+SkRandom GrVkResource::fRandom;
+#endif
+
+GrVkResourceProvider::GrVkResourceProvider(GrVkGpu* gpu) : fGpu(gpu) {
+}
+
+GrVkResourceProvider::~GrVkResourceProvider() {
+ SkASSERT(0 == fSimpleRenderPasses.count());
+}
+
+GrVkPipeline* GrVkResourceProvider::createPipeline(const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ VkPipelineShaderStageCreateInfo* shaderStageInfo,
+ int shaderStageCount,
+ GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass,
+ VkPipelineLayout layout) {
+
+ return GrVkPipeline::Create(fGpu, pipeline, primProc, shaderStageInfo, shaderStageCount,
+ primitiveType, renderPass, layout);
+}
+
+
+// To create framebuffers, we first need to create a simple RenderPass that is
+// only used for framebuffer creation. When we actually render we will create
+// RenderPasses as needed that are compatible with the framebuffer.
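+// Callers of findOrCreateCompatibleRenderPass receive a ref on the returned pass
+// and must unref it when done; e.g. GrVkRenderTarget caches it in
+// fCachedSimpleRenderPass and unrefs it in releaseInternalObjects().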
+const GrVkRenderPass*
+GrVkResourceProvider::findOrCreateCompatibleRenderPass(const GrVkRenderTarget& target) {
+ for (int i = 0; i < fSimpleRenderPasses.count(); ++i) {
+ GrVkRenderPass* renderPass = fSimpleRenderPasses[i];
+ if (renderPass->isCompatible(target)) {
+ renderPass->ref();
+ return renderPass;
+ }
+ }
+
+ GrVkRenderPass* renderPass = new GrVkRenderPass();
+ renderPass->initSimple(fGpu, target);
+ fSimpleRenderPasses.push_back(renderPass);
+ renderPass->ref();
+ return renderPass;
+}
+
+GrVkDescriptorPool* GrVkResourceProvider::findOrCreateCompatibleDescriptorPool(
+ const GrVkDescriptorPool::DescriptorTypeCounts& typeCounts) {
+ return new GrVkDescriptorPool(fGpu, typeCounts);
+}
+
+GrVkCommandBuffer* GrVkResourceProvider::createCommandBuffer() {
+ GrVkCommandBuffer* cmdBuffer = GrVkCommandBuffer::Create(fGpu, fGpu->cmdPool());
+ fActiveCommandBuffers.push_back(cmdBuffer);
+ cmdBuffer->ref();
+ return cmdBuffer;
+}
+
+void GrVkResourceProvider::checkCommandBuffers() {
+ for (int i = fActiveCommandBuffers.count()-1; i >= 0; --i) {
+ if (fActiveCommandBuffers[i]->finished(fGpu)) {
+ fActiveCommandBuffers[i]->unref(fGpu);
+ fActiveCommandBuffers.removeShuffle(i);
+ }
+ }
+}
+
+void GrVkResourceProvider::destroyResources() {
+ // release our current command buffers
+ for (int i = 0; i < fActiveCommandBuffers.count(); ++i) {
+ SkASSERT(fActiveCommandBuffers[i]->finished(fGpu));
+ SkASSERT(fActiveCommandBuffers[i]->unique());
+ fActiveCommandBuffers[i]->unref(fGpu);
+ }
+ fActiveCommandBuffers.reset();
+
+ // loop over all render passes to make sure we destroy all the internal VkRenderPasses
+ for (int i = 0; i < fSimpleRenderPasses.count(); ++i) {
+ fSimpleRenderPasses[i]->unref(fGpu);
+ }
+ fSimpleRenderPasses.reset();
+
+#ifdef SK_TRACE_VK_RESOURCES
+ SkASSERT(0 == GrVkResource::fTrace.count());
+#endif
+}
+
+void GrVkResourceProvider::abandonResources() {
+ // release our current command buffers
+ for (int i = 0; i < fActiveCommandBuffers.count(); ++i) {
+ SkASSERT(fActiveCommandBuffers[i]->finished(fGpu));
+ fActiveCommandBuffers[i]->unrefAndAbandon();
+ }
+ fActiveCommandBuffers.reset();
+
+ for (int i = 0; i < fSimpleRenderPasses.count(); ++i) {
+ fSimpleRenderPasses[i]->unrefAndAbandon();
+ }
+ fSimpleRenderPasses.reset();
+
+#ifdef SK_TRACE_VK_RESOURCES
+ SkASSERT(0 == GrVkResource::fTrace.count());
+#endif
+}
diff --git a/src/gpu/vk/GrVkResourceProvider.h b/src/gpu/vk/GrVkResourceProvider.h
new file mode 100644
index 0000000000..245062ea8d
--- /dev/null
+++ b/src/gpu/vk/GrVkResourceProvider.h
@@ -0,0 +1,77 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkResourceProvider_DEFINED
+#define GrVkResourceProvider_DEFINED
+
+#include "GrVkDescriptorPool.h"
+#include "GrVkResource.h"
+#include "GrVkUtil.h"
+#include "SkTArray.h"
+
+#include "vulkan/vulkan.h"
+
+class GrPipeline;
+class GrPrimitiveProcessor;
+class GrVkCommandBuffer;
+class GrVkGpu;
+class GrVkPipeline;
+class GrVkRenderPass;
+class GrVkRenderTarget;
+
+class GrVkResourceProvider {
+public:
+ GrVkResourceProvider(GrVkGpu* gpu);
+ ~GrVkResourceProvider();
+
+ GrVkPipeline* createPipeline(const GrPipeline& pipeline,
+ const GrPrimitiveProcessor& primProc,
+ VkPipelineShaderStageCreateInfo* shaderStageInfo,
+ int shaderStageCount,
+ GrPrimitiveType primitiveType,
+ const GrVkRenderPass& renderPass,
+ VkPipelineLayout layout);
+
+ // Finds or creates a simple render pass that matches the target, increments the refcount,
+ // and returns it.
+ const GrVkRenderPass* findOrCreateCompatibleRenderPass(const GrVkRenderTarget& target);
+
+ GrVkCommandBuffer* createCommandBuffer();
+ void checkCommandBuffers();
+
+ // Finds or creates a compatible GrVkDescriptorPool for the requested DescriptorTypeCounts.
+ // The refcount is incremented and a pointer returned.
+ // TODO: Currently this will just create a descriptor pool without holding onto a ref itself,
+ // so we do not reuse them. Reuse requires knowing whether another draw is currently using
+ // the GrVkDescriptorPool, the ability to reset pools, and the ability to purge pools out
+ // of our cache of GrVkDescriptorPools.
+ GrVkDescriptorPool* findOrCreateCompatibleDescriptorPool(
+ const GrVkDescriptorPool::DescriptorTypeCounts& typeCounts);
+
+ // Destroy any cached resources. To be called before destroying the VkDevice.
+ // The assumption is that all queues are idle and all command buffers are finished.
+ // For resource tracing to work properly, this should be called after unrefing all other
+ // resource usages.
+ void destroyResources();
+
+ // Abandon any cached resources. To be used when the context/VkDevice is lost.
+ // For resource tracing to work properly, this should be called after unrefing all other
+ // resource usages.
+ void abandonResources();
+
+private:
+ GrVkGpu* fGpu;
+
+ // Array of RenderPasses that only have a single color attachment, optional stencil attachment,
+ // optional resolve attachment, and only one subpass
+ SkSTArray<4, GrVkRenderPass*> fSimpleRenderPasses;
+
+ // Array of CommandBuffers that are currently in flight
+ SkSTArray<4, GrVkCommandBuffer*> fActiveCommandBuffers;
+};
+
+#endif
diff --git a/src/gpu/vk/GrVkSampler.cpp b/src/gpu/vk/GrVkSampler.cpp
new file mode 100644
index 0000000000..d50c7f3744
--- /dev/null
+++ b/src/gpu/vk/GrVkSampler.cpp
@@ -0,0 +1,74 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkSampler.h"
+
+#include "GrTextureAccess.h"
+#include "GrVkGpu.h"
+
+static inline VkSamplerAddressMode tile_to_vk_sampler_address(SkShader::TileMode tm) {
+ static const VkSamplerAddressMode gWrapModes[] = {
+ VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
+ VK_SAMPLER_ADDRESS_MODE_REPEAT,
+ VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT
+ };
+ GR_STATIC_ASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gWrapModes));
+ GR_STATIC_ASSERT(0 == SkShader::kClamp_TileMode);
+ GR_STATIC_ASSERT(1 == SkShader::kRepeat_TileMode);
+ GR_STATIC_ASSERT(2 == SkShader::kMirror_TileMode);
+ return gWrapModes[tm];
+}
+
+GrVkSampler* GrVkSampler::Create(const GrVkGpu* gpu, const GrTextureAccess& textureAccess) {
+
+ static VkFilter vkMinFilterModes[] = {
+ VK_FILTER_NEAREST,
+ VK_FILTER_LINEAR,
+ VK_FILTER_LINEAR
+ };
+ static VkFilter vkMagFilterModes[] = {
+ VK_FILTER_NEAREST,
+ VK_FILTER_LINEAR,
+ VK_FILTER_LINEAR
+ };
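+
+ // These two tables are indexed by GrTextureParams::FilterMode (kNone, kBilerp,
+ // kMipMap). Mipmapped filtering is not wired up yet (mipmapMode stays NEAREST
+ // and maxLod stays 0 below), so kMipMap falls back to plain linear filtering.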
+
+ const GrTextureParams& params = textureAccess.getParams();
+
+ VkSamplerCreateInfo createInfo;
+ memset(&createInfo, 0, sizeof(VkSamplerCreateInfo));
+ createInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+ createInfo.pNext = 0;
+ createInfo.flags = 0;
+ createInfo.magFilter = vkMagFilterModes[params.filterMode()];
+ createInfo.minFilter = vkMinFilterModes[params.filterMode()];
+ createInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
+ createInfo.addressModeU = tile_to_vk_sampler_address(params.getTileModeX());
+ createInfo.addressModeV = tile_to_vk_sampler_address(params.getTileModeY());
+ createInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; // Shouldn't matter
+ createInfo.mipLodBias = 0.0f;
+ createInfo.anisotropyEnable = VK_FALSE;
+ createInfo.maxAnisotropy = 1.0f;
+ createInfo.compareEnable = VK_FALSE;
+ createInfo.compareOp = VK_COMPARE_OP_NEVER;
+ createInfo.minLod = 0.0f;
+ createInfo.maxLod = 0.0f;
+ createInfo.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
+ createInfo.unnormalizedCoordinates = VK_FALSE;
+
+ VkSampler sampler;
+ GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateSampler(gpu->device(),
+ &createInfo,
+ nullptr,
+ &sampler));
+
+ return new GrVkSampler(sampler);
+}
+
+void GrVkSampler::freeGPUData(const GrVkGpu* gpu) const {
+ SkASSERT(fSampler);
+ GR_VK_CALL(gpu->vkInterface(), DestroySampler(gpu->device(), fSampler, nullptr));
+}
diff --git a/src/gpu/vk/GrVkSampler.h b/src/gpu/vk/GrVkSampler.h
new file mode 100644
index 0000000000..d3212cbc53
--- /dev/null
+++ b/src/gpu/vk/GrVkSampler.h
@@ -0,0 +1,35 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkSampler_DEFINED
+#define GrVkSampler_DEFINED
+
+#include "GrVkResource.h"
+
+#include "vulkan/vulkan.h"
+
+class GrTextureAccess;
+class GrVkGpu;
+
+
+class GrVkSampler : public GrVkResource {
+public:
+ static GrVkSampler* Create(const GrVkGpu* gpu, const GrTextureAccess& textureAccess);
+
+ VkSampler sampler() const { return fSampler; }
+
+private:
+ GrVkSampler(VkSampler sampler) : INHERITED(), fSampler(sampler) {}
+
+ void freeGPUData(const GrVkGpu* gpu) const override;
+
+ VkSampler fSampler;
+
+ typedef GrVkResource INHERITED;
+};
+
+#endif
diff --git a/src/gpu/vk/GrVkStencilAttachment.cpp b/src/gpu/vk/GrVkStencilAttachment.cpp
new file mode 100644
index 0000000000..0b234f2a9b
--- /dev/null
+++ b/src/gpu/vk/GrVkStencilAttachment.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkStencilAttachment.h"
+#include "GrVkGpu.h"
+#include "GrVkImage.h"
+#include "GrVkImageView.h"
+#include "GrVkUtil.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+GrVkStencilAttachment::GrVkStencilAttachment(GrVkGpu* gpu,
+ GrGpuResource::LifeCycle lifeCycle,
+ const Format& format,
+ const GrVkImage::ImageDesc& desc,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImageView* stencilView)
+ : INHERITED(gpu, lifeCycle, desc.fWidth, desc.fHeight, format.fStencilBits, desc.fSamples)
+ , fFormat(format)
+ , fImageResource(imageResource)
+ , fStencilView(stencilView) {
+ this->registerWithCache();
+ imageResource->ref();
+ stencilView->ref();
+}
+
+GrVkStencilAttachment* GrVkStencilAttachment::Create(GrVkGpu* gpu,
+ GrGpuResource::LifeCycle lifeCycle,
+ int width,
+ int height,
+ int sampleCnt,
+ const Format& format) {
+ GrVkImage::ImageDesc imageDesc;
+ imageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ imageDesc.fFormat = format.fInternalFormat;
+ imageDesc.fWidth = width;
+ imageDesc.fHeight = height;
+ imageDesc.fLevels = 1;
+ imageDesc.fSamples = sampleCnt;
+ imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ imageDesc.fUsageFlags = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+ imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+ const GrVkImage::Resource* imageResource = GrVkImage::CreateResource(gpu, imageDesc);
+ if (!imageResource) {
+ return nullptr;
+ }
+
+ const GrVkImageView* imageView = GrVkImageView::Create(gpu, imageResource->fImage,
+ format.fInternalFormat,
+ GrVkImageView::kStencil_Type);
+ if (!imageView) {
+ imageResource->unref(gpu);
+ return nullptr;
+ }
+
+ GrVkStencilAttachment* stencil = new GrVkStencilAttachment(gpu, lifeCycle, format, imageDesc,
+ imageResource, imageView);
+ imageResource->unref(gpu);
+ imageView->unref(gpu);
+
+ return stencil;
+}
+
+GrVkStencilAttachment::~GrVkStencilAttachment() {
+ // should have been released or abandoned first
+ SkASSERT(!fImageResource);
+ SkASSERT(!fStencilView);
+}
+
+size_t GrVkStencilAttachment::onGpuMemorySize() const {
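+ // e.g. a 100x100 attachment with fTotalBits == 8 and 4 samples reports
+ // 100 * 100 * 8 * 4 / 8 = 40,000 bytes.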
+ uint64_t size = this->width();
+ size *= this->height();
+ size *= fFormat.fTotalBits;
+ size *= SkTMax(1, this->numSamples());
+ return static_cast<size_t>(size / 8);
+}
+
+void GrVkStencilAttachment::onRelease() {
+ GrVkGpu* gpu = this->getVkGpu();
+
+ fImageResource->unref(gpu);
+ fImageResource = nullptr;
+
+ fStencilView->unref(gpu);
+ fStencilView = nullptr;
+ INHERITED::onRelease();
+}
+
+void GrVkStencilAttachment::onAbandon() {
+ fImageResource->unrefAndAbandon();
+ fImageResource = nullptr;
+ fStencilView->unrefAndAbandon();
+ fStencilView = nullptr;
+ INHERITED::onAbandon();
+}
+
+GrVkGpu* GrVkStencilAttachment::getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+}
diff --git a/src/gpu/vk/GrVkStencilAttachment.h b/src/gpu/vk/GrVkStencilAttachment.h
new file mode 100644
index 0000000000..ab1b32bc70
--- /dev/null
+++ b/src/gpu/vk/GrVkStencilAttachment.h
@@ -0,0 +1,62 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkStencil_DEFINED
+#define GrVkStencil_DEFINED
+
+#include "GrStencilAttachment.h"
+#include "GrVkImage.h"
+#include "vulkan/vulkan.h"
+
+class GrVkImageView;
+class GrVkGpu;
+
+class GrVkStencilAttachment : public GrStencilAttachment {
+public:
+ struct Format {
+ VkFormat fInternalFormat;
+ int fStencilBits;
+ int fTotalBits;
+ bool fPacked;
+ };
+
+ static GrVkStencilAttachment* Create(GrVkGpu* gpu, GrGpuResource::LifeCycle lifeCycle,
+ int width, int height,
+ int sampleCnt, const Format& format);
+
+ ~GrVkStencilAttachment() override;
+
+ const GrVkImage::Resource* imageResource() const { return fImageResource; }
+ const GrVkImageView* stencilView() const { return fStencilView; }
+
+ VkFormat vkFormat() const { return fFormat.fInternalFormat; }
+
+protected:
+ void onRelease() override;
+ void onAbandon() override;
+
+private:
+ size_t onGpuMemorySize() const override;
+
+ GrVkStencilAttachment(GrVkGpu* gpu,
+ GrGpuResource::LifeCycle lifeCycle,
+ const Format& format,
+ const GrVkImage::ImageDesc&,
+ const GrVkImage::Resource*,
+ const GrVkImageView* stencilView);
+
+ GrVkGpu* getVkGpu() const;
+
+ Format fFormat;
+
+ const GrVkImage::Resource* fImageResource;
+ const GrVkImageView* fStencilView;
+
+ typedef GrStencilAttachment INHERITED;
+};
+
+#endif
diff --git a/src/gpu/vk/GrVkTexture.cpp b/src/gpu/vk/GrVkTexture.cpp
new file mode 100644
index 0000000000..24157be6f6
--- /dev/null
+++ b/src/gpu/vk/GrVkTexture.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkTexture.h"
+#include "GrVkGpu.h"
+#include "GrVkImageView.h"
+#include "GrVkUtil.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+// Because this class is virtually derived from GrSurface we must explicitly call its constructor.
+GrVkTexture::GrVkTexture(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImageView* view)
+ : GrSurface(gpu, lifeCycle, desc)
+ , GrVkImage(imageResource)
+ , INHERITED(gpu, lifeCycle, desc)
+ , fTextureView(view) {
+ this->registerWithCache();
+}
+
+// Because this class is virtually derived from GrSurface we must explicitly call its constructor.
+GrVkTexture::GrVkTexture(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImageView* view,
+ Derived)
+ : GrSurface(gpu, lifeCycle, desc)
+ , GrVkImage(imageResource)
+ , INHERITED(gpu, lifeCycle, desc)
+ , fTextureView(view) {}
+
+
+GrVkTexture* GrVkTexture::Create(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ VkFormat format,
+ const GrVkImage::Resource* imageResource) {
+ VkImage image = imageResource->fImage;
+ const GrVkImageView* imageView = GrVkImageView::Create(gpu, image, format,
+ GrVkImageView::kColor_Type);
+ if (!imageView) {
+ return nullptr;
+ }
+
+ return new GrVkTexture(gpu, desc, lifeCycle, imageResource, imageView);
+}
+
+GrVkTexture* GrVkTexture::CreateNewTexture(GrVkGpu* gpu, const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::ImageDesc& imageDesc) {
+ SkASSERT(imageDesc.fUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT);
+
+ const GrVkImage::Resource* imageResource = GrVkImage::CreateResource(gpu, imageDesc);
+ if (!imageResource) {
+ return nullptr;
+ }
+
+ GrVkTexture* texture = Create(gpu, desc, lifeCycle, imageDesc.fFormat, imageResource);
+ // Create() will increment the refCount of the image resource if it succeeds
+ imageResource->unref(gpu);
+
+ return texture;
+}
+
+GrVkTexture* GrVkTexture::CreateWrappedTexture(GrVkGpu* gpu, const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ VkFormat format,
+ const GrVkImage::Resource* imageResource) {
+ SkASSERT(imageResource);
+
+ // Note: we assume the caller will unref the imageResource
+ // Create() will increment the refCount, and we'll unref when we're done with it
+ return Create(gpu, desc, lifeCycle, format, imageResource);
+}
+
+GrVkTexture::~GrVkTexture() {
+ // either release or abandon should have been called by the owner of this object.
+ SkASSERT(!fTextureView);
+}
+
+void GrVkTexture::onRelease() {
+ // we create this and don't hand it off, so we should always destroy it
+ if (fTextureView) {
+ fTextureView->unref(this->getVkGpu());
+ fTextureView = nullptr;
+ }
+
+ if (this->shouldFreeResources()) {
+ this->releaseImage(this->getVkGpu());
+ } else {
+ this->abandonImage();
+ }
+
+ INHERITED::onRelease();
+}
+
+void GrVkTexture::onAbandon() {
+ if (fTextureView) {
+ fTextureView->unrefAndAbandon();
+ fTextureView = nullptr;
+ }
+
+ this->abandonImage();
+ INHERITED::onAbandon();
+}
+
+GrBackendObject GrVkTexture::getTextureHandle() const {
+ // Currently just passing back the pointer to the Resource as the handle
+ return (GrBackendObject)&fResource;
+}
+
+GrVkGpu* GrVkTexture::getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+}
+
diff --git a/src/gpu/vk/GrVkTexture.h b/src/gpu/vk/GrVkTexture.h
new file mode 100644
index 0000000000..5e31c9da4b
--- /dev/null
+++ b/src/gpu/vk/GrVkTexture.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkTexture_DEFINED
+#define GrVkTexture_DEFINED
+
+#include "GrGpu.h"
+#include "GrTexture.h"
+#include "GrVkImage.h"
+
+class GrVkGpu;
+class GrVkImageView;
+
+class GrVkTexture : public GrTexture, public virtual GrVkImage {
+public:
+ static GrVkTexture* CreateNewTexture(GrVkGpu*, const GrSurfaceDesc&,
+ GrGpuResource::LifeCycle,
+ const GrVkImage::ImageDesc&);
+
+
+ static GrVkTexture* CreateWrappedTexture(GrVkGpu*, const GrSurfaceDesc&,
+ GrGpuResource::LifeCycle,
+ VkFormat, const GrVkImage::Resource*);
+
+ ~GrVkTexture() override;
+
+ GrBackendObject getTextureHandle() const override;
+
+ void textureParamsModified() override {}
+
+ const GrVkImageView* textureView() const { return fTextureView; }
+
+protected:
+ enum Derived { kDerived };
+
+ GrVkTexture(GrVkGpu*, const GrSurfaceDesc&, GrGpuResource::LifeCycle,
+ const GrVkImage::Resource*, const GrVkImageView* imageView);
+
+ GrVkTexture(GrVkGpu*, const GrSurfaceDesc&, GrGpuResource::LifeCycle,
+ const GrVkImage::Resource*, const GrVkImageView* imageView, Derived);
+
+ static GrVkTexture* Create(GrVkGpu*, const GrSurfaceDesc&,
+ GrGpuResource::LifeCycle, VkFormat,
+ const GrVkImage::Resource* texImpl);
+
+ GrVkGpu* getVkGpu() const;
+
+ void onAbandon() override;
+ void onRelease() override;
+
+private:
+ const GrVkImageView* fTextureView;
+
+ typedef GrTexture INHERITED;
+};
+
+#endif
diff --git a/src/gpu/vk/GrVkTextureRenderTarget.cpp b/src/gpu/vk/GrVkTextureRenderTarget.cpp
new file mode 100644
index 0000000000..79ba90481e
--- /dev/null
+++ b/src/gpu/vk/GrVkTextureRenderTarget.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkTextureRenderTarget.h"
+
+#include "GrRenderTargetPriv.h"
+#include "GrVkGpu.h"
+#include "GrVkImageView.h"
+#include "GrVkUtil.h"
+
+#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
+
+GrVkTextureRenderTarget*
+GrVkTextureRenderTarget::Create(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ VkFormat format,
+ const GrVkImage::Resource* imageResource) {
+
+ VkImage image = imageResource->fImage;
+ // Create the texture ImageView
+ const GrVkImageView* imageView = GrVkImageView::Create(gpu, image, format,
+ GrVkImageView::kColor_Type);
+ if (!imageView) {
+ return nullptr;
+ }
+
+ VkFormat pixelFormat;
+ SkAssertResult(GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat));
+
+ VkImage colorImage;
+
+ // create msaa surface if necessary
+ const GrVkImage::Resource* msaaImageResource = nullptr;
+ const GrVkImageView* resolveAttachmentView = nullptr;
+ if (desc.fSampleCnt) {
+ GrVkImage::ImageDesc msImageDesc;
+ msImageDesc.fImageType = VK_IMAGE_TYPE_2D;
+ msImageDesc.fFormat = pixelFormat;
+ msImageDesc.fWidth = desc.fWidth;
+ msImageDesc.fHeight = desc.fHeight;
+ msImageDesc.fLevels = 1;
+ msImageDesc.fSamples = desc.fSampleCnt;
+ msImageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+ msImageDesc.fUsageFlags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ msImageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+ msaaImageResource = GrVkImage::CreateResource(gpu, msImageDesc);
+
+ if (!msaaImageResource) {
+ imageView->unref(gpu);
+ return nullptr;
+ }
+
+ // Set color attachment image
+ colorImage = msaaImageResource->fImage;
+
+ // Create resolve attachment view if necessary.
+ // If the format matches, this is the same as the texture imageView.
+ if (pixelFormat == format) {
+ resolveAttachmentView = imageView;
+ resolveAttachmentView->ref();
+ } else {
+ resolveAttachmentView = GrVkImageView::Create(gpu, image, pixelFormat,
+ GrVkImageView::kColor_Type);
+ if (!resolveAttachmentView) {
+ msaaImageResource->unref(gpu);
+ imageView->unref(gpu);
+ return nullptr;
+ }
+ }
+ } else {
+ // Set color attachment image
+ colorImage = imageResource->fImage;
+ }
+
+ const GrVkImageView* colorAttachmentView;
+ // Get color attachment view.
+ // If the format matches and there's no multisampling,
+ // this is the same as the texture imageView
+ if (pixelFormat == format && !resolveAttachmentView) {
+ colorAttachmentView = imageView;
+ colorAttachmentView->ref();
+ } else {
+ colorAttachmentView = GrVkImageView::Create(gpu, colorImage, pixelFormat,
+ GrVkImageView::kColor_Type);
+ if (!colorAttachmentView) {
+ if (msaaImageResource) {
+ resolveAttachmentView->unref(gpu);
+ msaaImageResource->unref(gpu);
+ }
+ imageView->unref(gpu);
+ return nullptr;
+ }
+ }
+
+ GrVkTextureRenderTarget* texRT;
+ if (msaaImageResource) {
+ texRT = new GrVkTextureRenderTarget(gpu, desc, lifeCycle,
+ imageResource, imageView, msaaImageResource,
+ colorAttachmentView,
+ resolveAttachmentView);
+ msaaImageResource->unref(gpu);
+ } else {
+ texRT = new GrVkTextureRenderTarget(gpu, desc, lifeCycle,
+ imageResource, imageView,
+ colorAttachmentView);
+ }
+ return texRT;
+}
+
+GrVkTextureRenderTarget*
+GrVkTextureRenderTarget::CreateNewTextureRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::ImageDesc& imageDesc) {
+ SkASSERT(imageDesc.fUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
+ SkASSERT(imageDesc.fUsageFlags & VK_IMAGE_USAGE_SAMPLED_BIT);
+
+ const GrVkImage::Resource* imageRsrc = GrVkImage::CreateResource(gpu, imageDesc);
+
+ if (!imageRsrc) {
+ return nullptr;
+ }
+
+ GrVkTextureRenderTarget* trt = GrVkTextureRenderTarget::Create(gpu, desc, lifeCycle,
+ imageDesc.fFormat, imageRsrc);
+ // Create() will increment the refCount of the image resource if it succeeds
+ imageRsrc->unref(gpu);
+
+ return trt;
+}
+
+GrVkTextureRenderTarget*
+GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ VkFormat format,
+ GrVkImage::Resource* imageRsrc) {
+ SkASSERT(imageRsrc);
+
+ // Note: we assume the caller will unref the imageResource
+ // Create() will increment the refCount, and we'll unref when we're done with it
+ return GrVkTextureRenderTarget::Create(gpu, desc, lifeCycle, format, imageRsrc);
+}
+
diff --git a/src/gpu/vk/GrVkTextureRenderTarget.h b/src/gpu/vk/GrVkTextureRenderTarget.h
new file mode 100644
index 0000000000..6e0d89968f
--- /dev/null
+++ b/src/gpu/vk/GrVkTextureRenderTarget.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+
+#ifndef GrVkTextureRenderTarget_DEFINED
+#define GrVkTextureRenderTarget_DEFINED
+
+#include "GrVkTexture.h"
+#include "GrVkRenderTarget.h"
+#include "GrVkGpu.h"
+
+#ifdef SK_BUILD_FOR_WIN
+// Windows gives bogus warnings about inheriting asTexture/asRenderTarget via dominance.
+#pragma warning(push)
+#pragma warning(disable: 4250)
+#endif
+
+class GrVkImageView;
+
+class GrVkTextureRenderTarget: public GrVkTexture, public GrVkRenderTarget {
+public:
+ static GrVkTextureRenderTarget* CreateNewTextureRenderTarget(GrVkGpu*, const GrSurfaceDesc&,
+ GrGpuResource::LifeCycle,
+ const GrVkImage::ImageDesc&);
+
+ static GrVkTextureRenderTarget* CreateWrappedTextureRenderTarget(GrVkGpu*,
+ const GrSurfaceDesc&,
+ GrGpuResource::LifeCycle,
+ VkFormat,
+ GrVkImage::Resource*);
+
+protected:
+ void onAbandon() override {
+ GrVkRenderTarget::onAbandon();
+ GrVkTexture::onAbandon();
+ }
+
+ void onRelease() override {
+ GrVkRenderTarget::onRelease();
+ GrVkTexture::onRelease();
+ }
+
+private:
+ GrVkTextureRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImageView* texView,
+ const GrVkImage::Resource* msaaResource,
+ const GrVkImageView* colorAttachmentView,
+ const GrVkImageView* resolveAttachmentView)
+ : GrSurface(gpu, lifeCycle, desc)
+ , GrVkImage(imageResource)
+ , GrVkTexture(gpu, desc, lifeCycle, imageResource, texView, GrVkTexture::kDerived)
+ , GrVkRenderTarget(gpu, desc, lifeCycle, imageResource, msaaResource, colorAttachmentView,
+ resolveAttachmentView, GrVkRenderTarget::kDerived) {
+ this->registerWithCache();
+ }
+
+ GrVkTextureRenderTarget(GrVkGpu* gpu,
+ const GrSurfaceDesc& desc,
+ GrGpuResource::LifeCycle lifeCycle,
+ const GrVkImage::Resource* imageResource,
+ const GrVkImageView* texView,
+ const GrVkImageView* colorAttachmentView)
+ : GrSurface(gpu, lifeCycle, desc)
+ , GrVkImage(imageResource)
+ , GrVkTexture(gpu, desc, lifeCycle, imageResource, texView, GrVkTexture::kDerived)
+ , GrVkRenderTarget(gpu, desc, lifeCycle, imageResource, colorAttachmentView,
+ GrVkRenderTarget::kDerived) {
+ this->registerWithCache();
+ }
+
+ static GrVkTextureRenderTarget* Create(GrVkGpu*, const GrSurfaceDesc&,
+ GrGpuResource::LifeCycle,
+ VkFormat format,
+ const GrVkImage::Resource* imageResource);
+
+ // GrVkRenderTarget accounts for the texture's memory and any MSAA image's memory.
+ size_t onGpuMemorySize() const override {
+ return GrVkRenderTarget::onGpuMemorySize();
+ }
+};
+
+#endif
diff --git a/src/gpu/vk/GrVkTransferBuffer.cpp b/src/gpu/vk/GrVkTransferBuffer.cpp
new file mode 100644
index 0000000000..3730627764
--- /dev/null
+++ b/src/gpu/vk/GrVkTransferBuffer.cpp
@@ -0,0 +1,58 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkTransferBuffer.h"
+#include "GrVkGpu.h"
+#include "SkTraceMemoryDump.h"
+
+
+GrVkTransferBuffer* GrVkTransferBuffer::Create(GrVkGpu* gpu, size_t size, GrVkBuffer::Type type) {
+ GrVkBuffer::Desc desc;
+ desc.fDynamic = false;
+ SkASSERT(GrVkBuffer::kCopyRead_Type == type || GrVkBuffer::kCopyWrite_Type == type);
+ desc.fType = type;
+ desc.fSizeInBytes = size;
+
+ const GrVkBuffer::Resource* bufferResource = GrVkBuffer::Create(gpu, desc);
+ if (!bufferResource) {
+ return nullptr;
+ }
+
+ GrVkTransferBuffer* buffer = new GrVkTransferBuffer(gpu, desc, bufferResource);
+ if (!buffer) {
+ bufferResource->unref(gpu);
+ }
+ return buffer;
+}
+
+GrVkTransferBuffer::GrVkTransferBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* bufferResource)
+ : INHERITED(gpu, desc.fSizeInBytes)
+ , GrVkBuffer(desc, bufferResource) {
+ this->registerWithCache();
+}
+
+void GrVkTransferBuffer::onRelease() {
+ if (!this->wasDestroyed()) {
+ this->vkRelease(this->getVkGpu());
+ }
+
+ INHERITED::onRelease();
+}
+
+void GrVkTransferBuffer::onAbandon() {
+ this->vkAbandon();
+ INHERITED::onAbandon();
+}
+
+void GrVkTransferBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
+ const SkString& dumpName) const {
+ SkString buffer_id;
+ buffer_id.appendU64((uint64_t)this->buffer());
+ traceMemoryDump->setMemoryBacking(dumpName.c_str(), "vk_buffer",
+ buffer_id.c_str());
+}
diff --git a/src/gpu/vk/GrVkTransferBuffer.h b/src/gpu/vk/GrVkTransferBuffer.h
new file mode 100644
index 0000000000..f978df95fd
--- /dev/null
+++ b/src/gpu/vk/GrVkTransferBuffer.h
@@ -0,0 +1,54 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkTransferBuffer_DEFINED
+#define GrVkTransferBuffer_DEFINED
+
+#include "GrTransferBuffer.h"
+#include "GrVkBuffer.h"
+#include "vk/GrVkInterface.h"
+
+class GrVkGpu;
+
+class GrVkTransferBuffer : public GrTransferBuffer, public GrVkBuffer {
+
+public:
+ static GrVkTransferBuffer* Create(GrVkGpu* gpu, size_t size, GrVkBuffer::Type type);
+
+protected:
+ void onAbandon() override;
+ void onRelease() override;
+
+private:
+ GrVkTransferBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* resource);
+ void setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
+ const SkString& dumpName) const override;
+
+ void* onMap() override {
+ if (!this->wasDestroyed()) {
+ return this->vkMap(this->getVkGpu());
+ } else {
+ return nullptr;
+ }
+ }
+
+ void onUnmap() override {
+ if (!this->wasDestroyed()) {
+ this->vkUnmap(this->getVkGpu());
+ }
+ }
+
+ GrVkGpu* getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+ }
+
+ typedef GrTransferBuffer INHERITED;
+};
+
+#endif
diff --git a/src/gpu/vk/GrVkUniformBuffer.cpp b/src/gpu/vk/GrVkUniformBuffer.cpp
new file mode 100644
index 0000000000..022e2e33bd
--- /dev/null
+++ b/src/gpu/vk/GrVkUniformBuffer.cpp
@@ -0,0 +1,31 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkUniformBuffer.h"
+#include "GrVkGpu.h"
+
+
+GrVkUniformBuffer* GrVkUniformBuffer::Create(GrVkGpu* gpu, size_t size, bool dynamic) {
+ if (0 == size) {
+ return nullptr;
+ }
+ GrVkBuffer::Desc desc;
+ desc.fDynamic = dynamic;
+ desc.fType = GrVkBuffer::kUniform_Type;
+ desc.fSizeInBytes = size;
+
+ const GrVkBuffer::Resource* bufferResource = GrVkBuffer::Create(gpu, desc);
+ if (!bufferResource) {
+ return nullptr;
+ }
+
+ GrVkUniformBuffer* buffer = new GrVkUniformBuffer(desc, bufferResource);
+ if (!buffer) {
+ bufferResource->unref(gpu);
+ }
+ return buffer;
+}
diff --git a/src/gpu/vk/GrVkUniformBuffer.h b/src/gpu/vk/GrVkUniformBuffer.h
new file mode 100644
index 0000000000..0eae47b30b
--- /dev/null
+++ b/src/gpu/vk/GrVkUniformBuffer.h
@@ -0,0 +1,45 @@
+/*
+* Copyright 2015 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkUniformBuffer_DEFINED
+#define GrVkUniformBuffer_DEFINED
+
+#include "GrVkBuffer.h"
+#include "vk/GrVkInterface.h"
+
+class GrVkGpu;
+
+class GrVkUniformBuffer : public GrVkBuffer {
+
+public:
+ static GrVkUniformBuffer* Create(GrVkGpu* gpu, size_t size, bool dynamic);
+
+ void* map(const GrVkGpu* gpu) {
+ return this->vkMap(gpu);
+ }
+ void unmap(const GrVkGpu* gpu) {
+ this->vkUnmap(gpu);
+ }
+ bool updateData(const GrVkGpu* gpu, const void* src, size_t srcSizeInBytes) {
+ return this->vkUpdateData(gpu, src, srcSizeInBytes);
+ }
+ void release(const GrVkGpu* gpu) {
+ this->vkRelease(gpu);
+ }
+ void abandon() {
+ this->vkAbandon();
+ }
+
+private:
+ GrVkUniformBuffer(const GrVkBuffer::Desc& desc, const GrVkBuffer::Resource* resource)
+ : INHERITED(desc, resource) {
+ }
+
+ typedef GrVkBuffer INHERITED;
+};
+
+#endif
diff --git a/src/gpu/vk/GrVkUniformHandler.cpp b/src/gpu/vk/GrVkUniformHandler.cpp
new file mode 100644
index 0000000000..20ef9c6c40
--- /dev/null
+++ b/src/gpu/vk/GrVkUniformHandler.cpp
@@ -0,0 +1,149 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkUniformHandler.h"
+#include "glsl/GrGLSLProgramBuilder.h"
+
+// To determine whether a current offset is aligned, we can just 'and' the lowest bits with the
+// alignment mask. A value of 0 means aligned, any other value is how many bytes past alignment we
+// are. This works since all alignments are powers of 2. The mask is always (alignment - 1).
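+// For example, a vec4 has 16-byte alignment (mask 0xF): an offset of 20 gives
+// 20 & 0xF == 4, i.e. 4 bytes past the previous 16-byte boundary.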
+uint32_t grsltype_to_alignment_mask(GrSLType type) {
+ SkASSERT(GrSLTypeIsFloatType(type));
+ static const uint32_t kAlignments[kGrSLTypeCount] = {
+ 0x0, // kVoid_GrSLType, should never return this
+ 0x3, // kFloat_GrSLType
+ 0x7, // kVec2f_GrSLType
+ 0xF, // kVec3f_GrSLType
+ 0xF, // kVec4f_GrSLType
+ 0xF, // kMat33f_GrSLType
+ 0xF, // kMat44f_GrSLType
+ 0x0, // Sampler2D_GrSLType, should never return this
+ 0x0, // SamplerExternal_GrSLType, should never return this
+ };
+ GR_STATIC_ASSERT(0 == kVoid_GrSLType);
+ GR_STATIC_ASSERT(1 == kFloat_GrSLType);
+ GR_STATIC_ASSERT(2 == kVec2f_GrSLType);
+ GR_STATIC_ASSERT(3 == kVec3f_GrSLType);
+ GR_STATIC_ASSERT(4 == kVec4f_GrSLType);
+ GR_STATIC_ASSERT(5 == kMat33f_GrSLType);
+ GR_STATIC_ASSERT(6 == kMat44f_GrSLType);
+ GR_STATIC_ASSERT(7 == kSampler2D_GrSLType);
+ GR_STATIC_ASSERT(8 == kSamplerExternal_GrSLType);
+ GR_STATIC_ASSERT(SK_ARRAY_COUNT(kAlignments) == kGrSLTypeCount);
+ return kAlignments[type];
+}
+
+// Given the current offset into the ubo, calculate the offset for the uniform we're trying to add
+// taking into consideration all alignment requirements. The uniformOffset is set to the offset for
+// the new uniform, and currentOffset is updated to be the offset to the end of the new uniform.
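+// e.g. with *currentOffset == 20 and a (non-array) vec4: offsetDiff = 20 & 0xF = 4,
+// so we pad by 0xF - 4 + 1 = 12 bytes; uniformOffset becomes 32 and currentOffset 48.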
+void get_ubo_aligned_offset(uint32_t* uniformOffset,
+ uint32_t* currentOffset,
+ GrSLType type,
+ int arrayCount) {
+ uint32_t alignmentMask = grsltype_to_alignment_mask(type);
+ uint32_t offsetDiff = *currentOffset & alignmentMask;
+ if (offsetDiff != 0) {
+ offsetDiff = alignmentMask - offsetDiff + 1;
+ }
+ *uniformOffset = *currentOffset + offsetDiff;
+ SkASSERT(sizeof(float) == 4);
+ // We use a 0 arrayCount to indicate it is not an array type but we still need to count the one
+ // object.
+ int count = arrayCount ? arrayCount : 1;
+ *currentOffset = *uniformOffset + count * (uint32_t)GrSLTypeSize(type);
+}
+
+GrGLSLUniformHandler::UniformHandle GrVkUniformHandler::internalAddUniformArray(
+ uint32_t visibility,
+ GrSLType type,
+ GrSLPrecision precision,
+ const char* name,
+ bool mangleName,
+ int arrayCount,
+ const char** outName) {
+ SkASSERT(name && strlen(name));
+ SkDEBUGCODE(static const uint32_t kVisibilityMask = kVertex_GrShaderFlag|kFragment_GrShaderFlag);
+ SkASSERT(0 == (~kVisibilityMask & visibility));
+ SkASSERT(0 != visibility);
+ SkASSERT(kDefault_GrSLPrecision == precision || GrSLTypeIsFloatType(type));
+
+ UniformInfo& uni = fUniforms.push_back();
+ uni.fVariable.setType(type);
+ // TODO: this is a bit hacky; let's think of a better way. Basically we need to be able to use
+ // the uniform view matrix name in the GP, and the GP is immutable, so it has to tell the PB
+ // exactly what name it wants to use for the uniform view matrix. If we prefix anything, the
+ // names will mismatch. I think the correct solution is for all GPs which need the uniform
+ // view matrix to upload it in their setData along with the regular uniforms.
+ char prefix = 'u';
+ if ('u' == name[0]) {
+ prefix = '\0';
+ }
+ fProgramBuilder->nameVariable(uni.fVariable.accessName(), prefix, name, mangleName);
+ uni.fVariable.setArrayCount(arrayCount);
+ // For now we assert that the visibility is either vertex-only or fragment-only.
+ SkASSERT(kVertex_GrShaderFlag == visibility || kFragment_GrShaderFlag == visibility);
+ uni.fVisibility = visibility;
+ uni.fVariable.setPrecision(precision);
+ if (GrSLTypeIsFloatType(type)) {
+ // When outputting the GLSL, only the outer uniform block will get the Uniform modifier. Thus
+ // we set the modifier to none for all uniforms declared inside the block.
+ uni.fVariable.setTypeModifier(GrGLSLShaderVar::kNone_TypeModifier);
+
+ uint32_t* currentOffset = kVertex_GrShaderFlag == visibility ? &fCurrentVertexUBOOffset
+ : &fCurrentFragmentUBOOffset;
+ get_ubo_aligned_offset(&uni.fUBOffset, currentOffset, type, arrayCount);
+ uni.fSetNumber = kUniformBufferDescSet;
+ uni.fBinding = kVertex_GrShaderFlag == visibility ? kVertexBinding : kFragBinding;
+
+ if (outName) {
+ *outName = uni.fVariable.c_str();
+ }
+ } else {
+ SkASSERT(type == kSampler2D_GrSLType);
+ uni.fVariable.setTypeModifier(GrGLSLShaderVar::kUniform_TypeModifier);
+
+ uni.fSetNumber = kSamplerDescSet;
+ uni.fBinding = fCurrentSamplerBinding++;
+ uni.fUBOffset = 0; // This value will be ignored, but initializing to avoid any errors.
+ SkString layoutQualifier;
+ layoutQualifier.appendf("set=%d, binding=%d", uni.fSetNumber, uni.fBinding);
+ uni.fVariable.setLayoutQualifier(layoutQualifier.c_str());
+ }
+
+ return GrGLSLUniformHandler::UniformHandle(fUniforms.count() - 1);
+}
+
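+// For a fragment stage holding a single vec4 color uniform, the block emitted
+// below looks roughly like this (the uniform name is illustrative):
+//
+// layout (set=1, binding=1) uniform fragmentUniformBuffer
+// {
+// vec4 uColor;
+// };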
+void GrVkUniformHandler::appendUniformDecls(GrShaderFlags visibility, SkString* out) const {
+ // Used to collect the declarations of all variables that will be placed inside the
+ // uniform buffer.
+ SkString uniformsString;
+ SkASSERT(kVertex_GrShaderFlag == visibility || kFragment_GrShaderFlag == visibility);
+ uint32_t uniformBinding = (visibility == kVertex_GrShaderFlag) ? kVertexBinding : kFragBinding;
+ for (int i = 0; i < fUniforms.count(); ++i) {
+ const UniformInfo& localUniform = fUniforms[i];
+ if (visibility == localUniform.fVisibility) {
+ if (GrSLTypeIsFloatType(localUniform.fVariable.getType())) {
+ SkASSERT(uniformBinding == localUniform.fBinding);
+ SkASSERT(kUniformBufferDescSet == localUniform.fSetNumber);
+ localUniform.fVariable.appendDecl(fProgramBuilder->glslCaps(), &uniformsString);
+ uniformsString.append(";\n");
+ } else {
+ SkASSERT(localUniform.fVariable.getType() == kSampler2D_GrSLType);
+ SkASSERT(kSamplerDescSet == localUniform.fSetNumber);
+ localUniform.fVariable.appendDecl(fProgramBuilder->glslCaps(), out);
+ out->append(";\n");
+ }
+ }
+ }
+ if (!uniformsString.isEmpty()) {
+ const char* stage = (visibility == kVertex_GrShaderFlag) ? "vertex" : "fragment";
+ out->appendf("layout (set=%d, binding=%d) uniform %sUniformBuffer\n{\n",
+ kUniformBufferDescSet, uniformBinding, stage);
+ out->appendf("%s\n};\n", uniformsString.c_str());
+ }
+}
diff --git a/src/gpu/vk/GrVkUniformHandler.h b/src/gpu/vk/GrVkUniformHandler.h
new file mode 100644
index 0000000000..f84bcff0a2
--- /dev/null
+++ b/src/gpu/vk/GrVkUniformHandler.h
@@ -0,0 +1,85 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkUniformHandler_DEFINED
+#define GrVkUniformHandler_DEFINED
+
+#include "glsl/GrGLSLUniformHandler.h"
+
+#include "GrAllocator.h"
+#include "glsl/GrGLSLShaderVar.h"
+
+static const int kUniformsPerBlock = 8;
+
+class GrVkUniformHandler : public GrGLSLUniformHandler {
+public:
+ enum {
+ kSamplerDescSet = 0,
+ kUniformBufferDescSet = 1,
+ };
+ enum {
+ kVertexBinding = 0,
+ kFragBinding = 1,
+ };
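+ // i.e. all samplers live in descriptor set 0, while the vertex and fragment
+ // uniform buffers share set 1 at bindings 0 and 1 respectively.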
+
+ // fUBOffset is only valid if the GrSLType of the fVariable is not a sampler
+ struct UniformInfo {
+ GrGLSLShaderVar fVariable;
+ uint32_t fVisibility;
+ uint32_t fSetNumber;
+ uint32_t fBinding;
+ uint32_t fUBOffset;
+ };
+ typedef GrTAllocator<UniformInfo> UniformInfoArray;
+
+ const GrGLSLShaderVar& getUniformVariable(UniformHandle u) const override {
+ return fUniforms[u.toIndex()].fVariable;
+ }
+
+ const char* getUniformCStr(UniformHandle u) const override {
+ return this->getUniformVariable(u).c_str();
+ }
+
+private:
+ explicit GrVkUniformHandler(GrGLSLProgramBuilder* program)
+ : INHERITED(program)
+ , fUniforms(kUniformsPerBlock)
+ , fCurrentVertexUBOOffset(0)
+ , fCurrentFragmentUBOOffset(0)
+ , fCurrentSamplerBinding(0) {
+ }
+
+ UniformHandle internalAddUniformArray(uint32_t visibility,
+ GrSLType type,
+ GrSLPrecision precision,
+ const char* name,
+ bool mangleName,
+ int arrayCount,
+ const char** outName) override;
+
+ void appendUniformDecls(GrShaderFlags, SkString*) const override;
+
+ bool hasVertexUniforms() const { return fCurrentVertexUBOOffset > 0; }
+ bool hasFragmentUniforms() const { return fCurrentFragmentUBOOffset > 0; }
+
+
+ const UniformInfo& getUniformInfo(UniformHandle u) const {
+ return fUniforms[u.toIndex()];
+ }
+
+
+ UniformInfoArray fUniforms;
+ uint32_t fCurrentVertexUBOOffset;
+ uint32_t fCurrentFragmentUBOOffset;
+ uint32_t fCurrentSamplerBinding;
+
+ friend class GrVkProgramBuilder;
+
+ typedef GrGLSLUniformHandler INHERITED;
+};
+
+#endif
diff --git a/src/gpu/vk/GrVkUtil.cpp b/src/gpu/vk/GrVkUtil.cpp
new file mode 100644
index 0000000000..ec3ec234d4
--- /dev/null
+++ b/src/gpu/vk/GrVkUtil.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkUtil.h"
+
+bool GrPixelConfigToVkFormat(GrPixelConfig config, VkFormat* format) {
+ VkFormat dontCare;
+ if (!format) {
+ format = &dontCare;
+ }
+
+ switch (config) {
+ case kRGBA_8888_GrPixelConfig:
+ *format = VK_FORMAT_R8G8B8A8_UNORM;
+ break;
+ case kBGRA_8888_GrPixelConfig:
+ *format = VK_FORMAT_B8G8R8A8_UNORM;
+ break;
+ case kSRGBA_8888_GrPixelConfig:
+ *format = VK_FORMAT_R8G8B8A8_SRGB;
+ break;
+ case kRGB_565_GrPixelConfig:
+ *format = VK_FORMAT_R5G6B5_UNORM_PACK16;
+ break;
+ case kRGBA_4444_GrPixelConfig:
+ *format = VK_FORMAT_R4G4B4A4_UNORM_PACK16;
+ break;
+ case kIndex_8_GrPixelConfig:
+ // No current Vulkan support for this config
+ return false;
+ case kAlpha_8_GrPixelConfig:
+ *format = VK_FORMAT_R8_UNORM;
+ break;
+ case kETC1_GrPixelConfig:
+ // converting to ETC2 which is a superset of ETC1
+ *format = VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
+ break;
+ case kLATC_GrPixelConfig:
+ // No current Vulkan support for this config
+ return false;
+ case kR11_EAC_GrPixelConfig:
+ *format = VK_FORMAT_EAC_R11_UNORM_BLOCK;
+ break;
+ case kASTC_12x12_GrPixelConfig:
+ *format = VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
+ break;
+ case kRGBA_float_GrPixelConfig:
+ *format = VK_FORMAT_R32G32B32A32_SFLOAT;
+ break;
+ case kRGBA_half_GrPixelConfig:
+ *format = VK_FORMAT_R16G16B16A16_SFLOAT;
+ break;
+ case kAlpha_half_GrPixelConfig:
+ *format = VK_FORMAT_R16_SFLOAT;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+bool GrSampleCountToVkSampleCount(uint32_t samples, VkSampleCountFlagBits* vkSamples) {
+ switch (samples) {
+ case 0: // fall through
+ case 1:
+ *vkSamples = VK_SAMPLE_COUNT_1_BIT;
+ return true;
+ case 2:
+ *vkSamples = VK_SAMPLE_COUNT_2_BIT;
+ return true;
+ case 4:
+ *vkSamples = VK_SAMPLE_COUNT_4_BIT;
+ return true;
+ case 8:
+ *vkSamples = VK_SAMPLE_COUNT_8_BIT;
+ return true;
+ case 16:
+ *vkSamples = VK_SAMPLE_COUNT_16_BIT;
+ return true;
+ case 32:
+ *vkSamples = VK_SAMPLE_COUNT_32_BIT;
+ return true;
+ case 64:
+ *vkSamples = VK_SAMPLE_COUNT_64_BIT;
+ return true;
+ default:
+ return false;
+ }
+}
+
diff --git a/src/gpu/vk/GrVkUtil.h b/src/gpu/vk/GrVkUtil.h
new file mode 100644
index 0000000000..4fee31046d
--- /dev/null
+++ b/src/gpu/vk/GrVkUtil.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkUtil_DEFINED
+#define GrVkUtil_DEFINED
+
+#include "GrColor.h"
+#include "GrTypes.h"
+#include "vk/GrVkInterface.h"
+
+#include "vulkan/vulkan.h"
+
+// makes a Vk call on the interface
+#define GR_VK_CALL(IFACE, X) (IFACE)->fFunctions.f##X;
+// same as GR_VK_CALL but checks for success
+#ifdef SK_DEBUG
+#define GR_VK_CALL_ERRCHECK(IFACE, X) \
+ VkResult SK_MACRO_APPEND_LINE(ret) = GR_VK_CALL(IFACE, X); \
+ SkASSERT(VK_SUCCESS == SK_MACRO_APPEND_LINE(ret));
+#else
+#define GR_VK_CALL_ERRCHECK(IFACE, X) (void) GR_VK_CALL(IFACE, X);
+#endif
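+
+// Example usage (hypothetical 'iface', 'device', and 'sampler' locals; see
+// GrVkSampler.cpp for real call sites):
+// GR_VK_CALL(iface, DestroySampler(device, sampler, nullptr));
+// GR_VK_CALL_ERRCHECK(iface, CreateSampler(device, &createInfo, nullptr, &sampler));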
+
+/**
+ * Returns the vulkan texture format for the given GrPixelConfig
+ */
+bool GrPixelConfigToVkFormat(GrPixelConfig config, VkFormat* format);
+
+bool GrSampleCountToVkSampleCount(uint32_t samples, VkSampleCountFlagBits* vkSamples);
+
+#endif
+
diff --git a/src/gpu/vk/GrVkVaryingHandler.cpp b/src/gpu/vk/GrVkVaryingHandler.cpp
new file mode 100644
index 0000000000..c923f6684c
--- /dev/null
+++ b/src/gpu/vk/GrVkVaryingHandler.cpp
@@ -0,0 +1,26 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrVkVaryingHandler.h"
+
+
+void finalize_helper(GrVkVaryingHandler::VarArray& vars) {
+ for (int i = 0; i < vars.count(); ++i) {
+ SkString location;
+ location.appendf("location = %d", i);
+ vars[i].setLayoutQualifier(location.c_str());
+ }
+}
+
+void GrVkVaryingHandler::onFinalize() {
+ finalize_helper(fVertexInputs);
+ finalize_helper(fVertexOutputs);
+ finalize_helper(fGeomInputs);
+ finalize_helper(fGeomOutputs);
+ finalize_helper(fFragInputs);
+ finalize_helper(fFragOutputs);
+}
diff --git a/src/gpu/vk/GrVkVaryingHandler.h b/src/gpu/vk/GrVkVaryingHandler.h
new file mode 100644
index 0000000000..d47194149e
--- /dev/null
+++ b/src/gpu/vk/GrVkVaryingHandler.h
@@ -0,0 +1,27 @@
+/*
+* Copyright 2016 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrVkVaryingHandler_DEFINED
+#define GrVkVaryingHandler_DEFINED
+
+#include "glsl/GrGLSLVarying.h"
+
+class GrVkVaryingHandler : public GrGLSLVaryingHandler {
+public:
+ GrVkVaryingHandler(GrGLSLProgramBuilder* program) : INHERITED(program) {}
+
+ typedef GrGLSLVaryingHandler::VarArray VarArray;
+
+private:
+ void onFinalize() override;
+
+ friend class GrVkProgramBuilder;
+
+ typedef GrGLSLVaryingHandler INHERITED;
+};
+
+#endif
\ No newline at end of file
diff --git a/src/gpu/vk/GrVkVertexBuffer.cpp b/src/gpu/vk/GrVkVertexBuffer.cpp
new file mode 100644
index 0000000000..46c6d28f23
--- /dev/null
+++ b/src/gpu/vk/GrVkVertexBuffer.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrVkVertexBuffer.h"
+#include "GrVkGpu.h"
+
+GrVkVertexBuffer::GrVkVertexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* bufferResource)
+ : INHERITED(gpu, desc.fSizeInBytes, desc.fDynamic, false)
+ , GrVkBuffer(desc, bufferResource) {
+ this->registerWithCache();
+}
+
+GrVkVertexBuffer* GrVkVertexBuffer::Create(GrVkGpu* gpu, size_t size, bool dynamic) {
+ GrVkBuffer::Desc desc;
+ desc.fDynamic = dynamic;
+ desc.fType = GrVkBuffer::kVertex_Type;
+ desc.fSizeInBytes = size;
+
+ const GrVkBuffer::Resource* bufferResource = GrVkBuffer::Create(gpu, desc);
+ if (!bufferResource) {
+ return nullptr;
+ }
+
+ GrVkVertexBuffer* buffer = new GrVkVertexBuffer(gpu, desc, bufferResource);
+ if (!buffer) {
+ bufferResource->unref(gpu);
+ }
+ return buffer;
+}
+
+void GrVkVertexBuffer::onRelease() {
+ if (!this->wasDestroyed()) {
+ this->vkRelease(this->getVkGpu());
+ }
+
+ INHERITED::onRelease();
+}
+
+void GrVkVertexBuffer::onAbandon() {
+ this->vkAbandon();
+ INHERITED::onAbandon();
+}
+
+void* GrVkVertexBuffer::onMap() {
+ if (!this->wasDestroyed()) {
+ return this->vkMap(this->getVkGpu());
+ } else {
+ return nullptr;
+ }
+}
+
+void GrVkVertexBuffer::onUnmap() {
+ if (!this->wasDestroyed()) {
+ this->vkUnmap(this->getVkGpu());
+ }
+}
+
+bool GrVkVertexBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
+ if (!this->wasDestroyed()) {
+ return this->vkUpdateData(this->getVkGpu(), src, srcSizeInBytes);
+ } else {
+ return false;
+ }
+}
+
+GrVkGpu* GrVkVertexBuffer::getVkGpu() const {
+ SkASSERT(!this->wasDestroyed());
+ return static_cast<GrVkGpu*>(this->getGpu());
+}
+
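[Editor's note] A hedged usage sketch for the class above: Create() hands back a cache-registered buffer or nullptr, and the map/update/release hooks route through the GrVkBuffer base. The helper below is illustrative, not part of this change; updateData() and unref() are assumed to come from the Gr buffer/resource base classes.

    #include <cstddef>
    #include "GrVkVertexBuffer.h"

    // Hypothetical helper, assuming a valid GrVkGpu* is in hand.
    static bool upload_vertices(GrVkGpu* gpu, const void* verts, size_t bytes) {
        GrVkVertexBuffer* buffer = GrVkVertexBuffer::Create(gpu, bytes, /*dynamic=*/true);
        if (!buffer) {
            return false;  // VkBuffer creation or memory allocation failed
        }
        // updateData() maps, copies, and unmaps via the onUpdateData override.
        bool success = buffer->updateData(verts, bytes);
        buffer->unref();  // resource is ref-counted; drop our ownership
        return success;
    }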
diff --git a/src/gpu/vk/GrVkVertexBuffer.h b/src/gpu/vk/GrVkVertexBuffer.h
new file mode 100644
index 0000000000..82f00597b7
--- /dev/null
+++ b/src/gpu/vk/GrVkVertexBuffer.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrVkVertexBuffer_DEFINED
+#define GrVkVertexBuffer_DEFINED
+
+#include "GrVertexBuffer.h"
+#include "GrVkBuffer.h"
+#include "vk/GrVkInterface.h"
+
+class GrVkGpu;
+
+class GrVkVertexBuffer : public GrVertexBuffer, public GrVkBuffer {
+public:
+ static GrVkVertexBuffer* Create(GrVkGpu* gpu, size_t size, bool dynamic);
+
+protected:
+ void onAbandon() override;
+ void onRelease() override;
+
+private:
+ GrVkVertexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
+ const GrVkBuffer::Resource* resource);
+
+ void* onMap() override;
+ void onUnmap() override;
+ bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
+
+ GrVkGpu* getVkGpu() const;
+
+ typedef GrVertexBuffer INHERITED;
+};
+
+#endif