1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
|
/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "GrVkUtil.h"
#include "vk/GrVkGpu.h"
#include "SkSLCompiler.h"
// Translates a GrPixelConfig into the VkFormat Ganesh uses to back it.
// Returns false for configs with no Vulkan mapping. 'format' may be null when
// the caller only needs to know whether a mapping exists.
bool GrPixelConfigToVkFormat(GrPixelConfig config, VkFormat* format) {
    VkFormat scratch;
    VkFormat* out = format ? format : &scratch;  // tolerate a null out-param
    switch (config) {
        case kUnknown_GrPixelConfig:
            return false;
        case kRGBA_8888_GrPixelConfig:      *out = VK_FORMAT_R8G8B8A8_UNORM;      return true;
        case kBGRA_8888_GrPixelConfig:      *out = VK_FORMAT_B8G8R8A8_UNORM;      return true;
        case kSRGBA_8888_GrPixelConfig:     *out = VK_FORMAT_R8G8B8A8_SRGB;       return true;
        case kSBGRA_8888_GrPixelConfig:     *out = VK_FORMAT_B8G8R8A8_SRGB;       return true;
        case kRGBA_8888_sint_GrPixelConfig: *out = VK_FORMAT_R8G8B8A8_SINT;       return true;
        case kRGB_565_GrPixelConfig:        *out = VK_FORMAT_R5G6B5_UNORM_PACK16; return true;
        case kRGBA_4444_GrPixelConfig:
            // R4G4B4A4 is not required to be supported so we actually store the
            // data as if it were B4G4R4A4 and swizzle in shaders.
            *out = VK_FORMAT_B4G4R4A4_UNORM_PACK16;
            return true;
        case kAlpha_8_GrPixelConfig:        *out = VK_FORMAT_R8_UNORM;            return true;
        case kGray_8_GrPixelConfig:         *out = VK_FORMAT_R8_UNORM;            return true;
        case kRGBA_float_GrPixelConfig:     *out = VK_FORMAT_R32G32B32A32_SFLOAT; return true;
        case kRG_float_GrPixelConfig:       *out = VK_FORMAT_R32G32_SFLOAT;       return true;
        case kRGBA_half_GrPixelConfig:      *out = VK_FORMAT_R16G16B16A16_SFLOAT; return true;
        case kAlpha_half_GrPixelConfig:     *out = VK_FORMAT_R16_SFLOAT;          return true;
    }
    // Only reachable with a value outside the GrPixelConfig enum.
    SK_ABORT("Unexpected config");
    return false;
}
// Maps a VkFormat back to the GrPixelConfig Ganesh treats it as; formats Ganesh
// does not use map to kUnknown_GrPixelConfig. Inverse of GrPixelConfigToVkFormat,
// except VK_FORMAT_R8_UNORM (which backs both kAlpha_8 and kGray_8) maps to kAlpha_8.
GrPixelConfig GrVkFormatToPixelConfig(VkFormat format) {
    switch (format) {
        case VK_FORMAT_R8G8B8A8_UNORM:
            return kRGBA_8888_GrPixelConfig;
        case VK_FORMAT_B8G8R8A8_UNORM:
            return kBGRA_8888_GrPixelConfig;
        case VK_FORMAT_R8G8B8A8_SRGB:
            return kSRGBA_8888_GrPixelConfig;
        case VK_FORMAT_B8G8R8A8_SRGB:
            return kSBGRA_8888_GrPixelConfig;
        case VK_FORMAT_R8G8B8A8_SINT:
            return kRGBA_8888_sint_GrPixelConfig;
        case VK_FORMAT_R5G6B5_UNORM_PACK16:
            // Note: unreachable 'break' after this return was removed.
            return kRGB_565_GrPixelConfig;
        case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
            // R4G4B4A4 is not required to be supported so we actually
            // store RGBA_4444 data as B4G4R4A4.
            return kRGBA_4444_GrPixelConfig;
        case VK_FORMAT_R8_UNORM:
            return kAlpha_8_GrPixelConfig;
        case VK_FORMAT_R32G32B32A32_SFLOAT:
            return kRGBA_float_GrPixelConfig;
        case VK_FORMAT_R32G32_SFLOAT:
            return kRG_float_GrPixelConfig;
        case VK_FORMAT_R16G16B16A16_SFLOAT:
            return kRGBA_half_GrPixelConfig;
        case VK_FORMAT_R16_SFLOAT:
            return kAlpha_half_GrPixelConfig;
        default:
            return kUnknown_GrPixelConfig;
    }
}
// Returns true if 'format' is an sRGB-encoded format. If 'linearFormat' is
// non-null it receives the linear (UNORM) counterpart for sRGB formats, or
// 'format' itself for everything else.
bool GrVkFormatIsSRGB(VkFormat format, VkFormat* linearFormat) {
    // Maps each sRGB format to its linear twin; non-sRGB formats map to themselves.
    auto linearVersionOf = [](VkFormat fmt) -> VkFormat {
        switch (fmt) {
            case VK_FORMAT_R8_SRGB:                  return VK_FORMAT_R8_UNORM;
            case VK_FORMAT_R8G8_SRGB:                return VK_FORMAT_R8G8_UNORM;
            case VK_FORMAT_R8G8B8_SRGB:              return VK_FORMAT_R8G8B8_UNORM;
            case VK_FORMAT_B8G8R8_SRGB:              return VK_FORMAT_B8G8R8_UNORM;
            case VK_FORMAT_R8G8B8A8_SRGB:            return VK_FORMAT_R8G8B8A8_UNORM;
            case VK_FORMAT_B8G8R8A8_SRGB:            return VK_FORMAT_B8G8R8A8_UNORM;
            case VK_FORMAT_A8B8G8R8_SRGB_PACK32:     return VK_FORMAT_A8B8G8R8_UNORM_PACK32;
            case VK_FORMAT_BC1_RGB_SRGB_BLOCK:       return VK_FORMAT_BC1_RGB_UNORM_BLOCK;
            case VK_FORMAT_BC1_RGBA_SRGB_BLOCK:      return VK_FORMAT_BC1_RGBA_UNORM_BLOCK;
            case VK_FORMAT_BC2_SRGB_BLOCK:           return VK_FORMAT_BC2_UNORM_BLOCK;
            case VK_FORMAT_BC3_SRGB_BLOCK:           return VK_FORMAT_BC3_UNORM_BLOCK;
            case VK_FORMAT_BC7_SRGB_BLOCK:           return VK_FORMAT_BC7_UNORM_BLOCK;
            case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:   return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK;
            case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK: return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK;
            case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK: return VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
            case VK_FORMAT_ASTC_4x4_SRGB_BLOCK:      return VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
            case VK_FORMAT_ASTC_5x4_SRGB_BLOCK:      return VK_FORMAT_ASTC_5x4_UNORM_BLOCK;
            case VK_FORMAT_ASTC_5x5_SRGB_BLOCK:      return VK_FORMAT_ASTC_5x5_UNORM_BLOCK;
            case VK_FORMAT_ASTC_6x5_SRGB_BLOCK:      return VK_FORMAT_ASTC_6x5_UNORM_BLOCK;
            case VK_FORMAT_ASTC_6x6_SRGB_BLOCK:      return VK_FORMAT_ASTC_6x6_UNORM_BLOCK;
            case VK_FORMAT_ASTC_8x5_SRGB_BLOCK:      return VK_FORMAT_ASTC_8x5_UNORM_BLOCK;
            case VK_FORMAT_ASTC_8x6_SRGB_BLOCK:      return VK_FORMAT_ASTC_8x6_UNORM_BLOCK;
            case VK_FORMAT_ASTC_8x8_SRGB_BLOCK:      return VK_FORMAT_ASTC_8x8_UNORM_BLOCK;
            case VK_FORMAT_ASTC_10x5_SRGB_BLOCK:     return VK_FORMAT_ASTC_10x5_UNORM_BLOCK;
            case VK_FORMAT_ASTC_10x6_SRGB_BLOCK:     return VK_FORMAT_ASTC_10x6_UNORM_BLOCK;
            case VK_FORMAT_ASTC_10x8_SRGB_BLOCK:     return VK_FORMAT_ASTC_10x8_UNORM_BLOCK;
            case VK_FORMAT_ASTC_10x10_SRGB_BLOCK:    return VK_FORMAT_ASTC_10x10_UNORM_BLOCK;
            case VK_FORMAT_ASTC_12x10_SRGB_BLOCK:    return VK_FORMAT_ASTC_12x10_UNORM_BLOCK;
            case VK_FORMAT_ASTC_12x12_SRGB_BLOCK:    return VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
            default:                                 return fmt;
        }
    };
    const VkFormat linearFmt = linearVersionOf(format);
    if (linearFormat) {
        *linearFormat = linearFmt;
    }
    return linearFmt != format;
}
bool GrSampleCountToVkSampleCount(uint32_t samples, VkSampleCountFlagBits* vkSamples) {
switch (samples) {
case 0: // fall through
case 1:
*vkSamples = VK_SAMPLE_COUNT_1_BIT;
return true;
case 2:
*vkSamples = VK_SAMPLE_COUNT_2_BIT;
return true;
case 4:
*vkSamples = VK_SAMPLE_COUNT_4_BIT;
return true;
case 8:
*vkSamples = VK_SAMPLE_COUNT_8_BIT;
return true;
case 16:
*vkSamples = VK_SAMPLE_COUNT_16_BIT;
return true;
case 32:
*vkSamples = VK_SAMPLE_COUNT_32_BIT;
return true;
case 64:
*vkSamples = VK_SAMPLE_COUNT_64_BIT;
return true;
default:
return false;
}
}
// Converts a Vulkan shader-stage flag to the SkSL program kind used by the
// compiler. Only vertex, geometry, and fragment stages are expected; anything
// else asserts in debug builds and is treated as fragment.
SkSL::Program::Kind vk_shader_stage_to_skiasl_kind(VkShaderStageFlagBits stage) {
    switch (stage) {
        case VK_SHADER_STAGE_VERTEX_BIT:
            return SkSL::Program::kVertex_Kind;
        case VK_SHADER_STAGE_GEOMETRY_BIT:
            return SkSL::Program::kGeometry_Kind;
        default:
            SkASSERT(VK_SHADER_STAGE_FRAGMENT_BIT == stage);
            return SkSL::Program::kFragment_Kind;
    }
}
// Converts an SkSL program kind back to the corresponding Vulkan shader-stage
// flag. Only vertex, geometry, and fragment kinds are expected; anything else
// asserts in debug builds and is treated as fragment.
VkShaderStageFlagBits skiasl_kind_to_vk_shader_stage(SkSL::Program::Kind kind) {
    switch (kind) {
        case SkSL::Program::kVertex_Kind:
            return VK_SHADER_STAGE_VERTEX_BIT;
        case SkSL::Program::kGeometry_Kind:
            return VK_SHADER_STAGE_GEOMETRY_BIT;
        default:
            SkASSERT(SkSL::Program::kFragment_Kind == kind);
            return VK_SHADER_STAGE_FRAGMENT_BIT;
    }
}
// Compiles 'shaderString' (SkSL) for the given stage into SPIR-V, creates a
// VkShaderModule from it, and fills in 'stageInfo' for pipeline creation.
// On success, '*shaderModule' owns a new module the caller must destroy, and
// '*outInputs' receives the program's input descriptions. Returns false on
// SkSL compile error, SPIR-V generation error, or vkCreateShaderModule failure.
bool GrCompileVkShaderModule(const GrVkGpu* gpu,
                             const char* shaderString,
                             VkShaderStageFlagBits stage,
                             VkShaderModule* shaderModule,
                             VkPipelineShaderStageCreateInfo* stageInfo,
                             const SkSL::Program::Settings& settings,
                             SkSL::Program::Inputs* outInputs) {
    std::unique_ptr<SkSL::Program> program = gpu->shaderCompiler()->convertProgram(
            vk_shader_stage_to_skiasl_kind(stage),
            SkSL::String(shaderString),
            settings);
    if (!program) {
        SkDebugf("SkSL error:\n%s\n", gpu->shaderCompiler()->errorText().c_str());
        SkASSERT(false);
        // Bug fix: previously fell through and dereferenced the null program below,
        // crashing release builds (SkASSERT is a no-op there).
        return false;
    }
    *outInputs = program->fInputs;
    SkSL::String code;
    if (!gpu->shaderCompiler()->toSPIRV(*program, &code)) {
        SkDebugf("%s\n", gpu->shaderCompiler()->errorText().c_str());
        return false;
    }
    VkShaderModuleCreateInfo moduleCreateInfo;
    memset(&moduleCreateInfo, 0, sizeof(VkShaderModuleCreateInfo));
    moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
    moduleCreateInfo.pNext = nullptr;
    moduleCreateInfo.flags = 0;
    moduleCreateInfo.codeSize = code.size();
    moduleCreateInfo.pCode = (const uint32_t*)code.c_str();
    VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateShaderModule(gpu->device(),
                                                                     &moduleCreateInfo,
                                                                     nullptr,
                                                                     shaderModule));
    if (err) {
        return false;
    }
    // Pre-populate the pipeline stage info so callers can plug it straight into
    // VkGraphicsPipelineCreateInfo. The stage is derived from the program in
    // case the compiler adjusted the kind.
    memset(stageInfo, 0, sizeof(VkPipelineShaderStageCreateInfo));
    stageInfo->sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
    stageInfo->pNext = nullptr;
    stageInfo->flags = 0;
    stageInfo->stage = skiasl_kind_to_vk_shader_stage(program->fKind);
    stageInfo->module = *shaderModule;
    stageInfo->pName = "main";
    stageInfo->pSpecializationInfo = nullptr;
    return true;
}
|