path: root/src/gpu/gl
author    Chris Dalton <csmartdalton@google.com>  2017-11-15 18:27:09 -0700
committer Skia Commit-Bot <skia-commit-bot@chromium.org>  2017-11-16 19:29:56 +0000
commit    47c8ed3c064f5176750f370b88119735163c0e8a (patch)
tree      576fa3f0d64463f1d5c3b3b5e4872438c70688fc /src/gpu/gl
parent    3bc00fe8100948e99deb3dea3f833fd026d39bd5 (diff)
Reland "Fix precision caps and rrect/ellipse effect precisions"
This is a reland of e42180022720f2fcfd3c634cad855506a7940591

Original change's description:
> Fix precision caps and rrect/ellipse effect precisions
>
> Replaces all the complex precision caps with a single flag that says
> whether "float" == fp32. Updates the ellipse and rrect effects to
> use float coords, and use the scale workaround when float != fp32.
>
> Bug: skia:7190
> Change-Id: Ieccff9f38acd05e5cec78fe90d01a5da901a9307
> Reviewed-on: https://skia-review.googlesource.com/70961
> Commit-Queue: Chris Dalton <csmartdalton@google.com>
> Reviewed-by: Ethan Nicholas <ethannicholas@google.com>
> Reviewed-by: Brian Salomon <bsalomon@google.com>

TBR=bsalomon@google.com

Bug: skia:7190
Change-Id: I7ced37a64164b83d86f6a957c35e10ce9085aba0
Reviewed-on: https://skia-review.googlesource.com/72760
Reviewed-by: Chris Dalton <csmartdalton@google.com>
Commit-Queue: Chris Dalton <csmartdalton@google.com>
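The core of the change is the precision query itself: GL reports, per shader stage and per precision qualifier, the log2 of the representable magnitude range and the number of significand bits that qualifier actually delivers, and a qualifier is full fp32 only if those reach 127/127/23. The sketch below is a self-contained illustration of that check against the raw GL ES API; it assumes a current GL context, and the helper names (precision_is_fp32, float_is_fp32) are illustrative only. It is not the code in this patch, which goes through Skia's GR_GL_GetShaderPrecisionFormat wrapper instead.

#include <GLES2/gl2.h>

// Returns true if the given precision qualifier (GL_HIGH_FLOAT, GL_MEDIUM_FLOAT, ...)
// behaves like a full IEEE-754 binary32 float in the given shader stage.
// Requires a current GL context; glGetShaderPrecisionFormat is core in ES 2.0+
// and in desktop GL 4.1 / GL_ARB_ES2_compatibility.
static bool precision_is_fp32(GLenum shaderStage, GLenum precisionQualifier) {
    GLint range[2] = {0, 0};   // log2 of the min/max representable magnitudes
    GLint bits = 0;            // number of significand bits
    glGetShaderPrecisionFormat(shaderStage, precisionQualifier, range, &bits);
    return range[0] >= 127 && range[1] >= 127 && bits >= 23;
}

// Usage mirroring the new caps bit: "float" (highp) is only treated as 32-bit
// when both the vertex and fragment stages say so.
static bool float_is_fp32() {
    return precision_is_fp32(GL_VERTEX_SHADER, GL_HIGH_FLOAT) &&
           precision_is_fp32(GL_FRAGMENT_SHADER, GL_HIGH_FLOAT);
}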
Diffstat (limited to 'src/gpu/gl')
-rw-r--r--  src/gpu/gl/GrGLCaps.cpp  | 115
-rw-r--r--  src/gpu/gl/GrGLCaps.h    |   4
2 files changed, 26 insertions(+), 93 deletions(-)
diff --git a/src/gpu/gl/GrGLCaps.cpp b/src/gpu/gl/GrGLCaps.cpp
index fe9bce8528..0c185e4a78 100644
--- a/src/gpu/gl/GrGLCaps.cpp
+++ b/src/gpu/gl/GrGLCaps.cpp
@@ -291,7 +291,7 @@ void GrGLCaps::init(const GrContextOptions& contextOptions,
**************************************************************************/
// This must be called after fCoreProfile is set on the GrGLCaps
-    this->initGLSL(ctxInfo);
+    this->initGLSL(ctxInfo, gli);
GrShaderCaps* shaderCaps = fShaderCaps.get();
shaderCaps->fPathRenderingSupport = this->hasPathRenderingSupport(ctxInfo, gli);
@@ -634,8 +634,6 @@ void GrGLCaps::init(const GrContextOptions& contextOptions,
fDrawRangeElementsSupport = version >= GR_GL_VER(3,0);
}
-    this->initShaderPrecisionTable(ctxInfo, gli, shaderCaps);
-
if (kGL_GrGLStandard == standard) {
if ((version >= GR_GL_VER(4, 0) || ctxInfo.hasExtension("GL_ARB_sample_shading")) &&
ctxInfo.vendor() != kIntel_GrGLVendor) {
@@ -760,7 +758,27 @@ const char* get_glsl_version_decl_string(GrGLStandard standard, GrGLSLGeneration
return "<no version>";
}
-void GrGLCaps::initGLSL(const GrGLContextInfo& ctxInfo) {
+bool is_float_fp32(const GrGLContextInfo& ctxInfo, const GrGLInterface* gli, GrGLenum precision) {
+    if (kGLES_GrGLStandard != ctxInfo.standard() &&
+        ctxInfo.version() < GR_GL_VER(4,1) &&
+        !ctxInfo.hasExtension("GL_ARB_ES2_compatibility")) {
+        // We're on a desktop GL that doesn't have precision info. Assume they're all 32bit float.
+        return true;
+    }
+    // glGetShaderPrecisionFormat doesn't accept GL_GEOMETRY_SHADER as a shader type. Hopefully the
+    // geometry shaders don't have lower precision than vertex and fragment.
+    for (GrGLenum shader : {GR_GL_FRAGMENT_SHADER, GR_GL_VERTEX_SHADER}) {
+        GrGLint range[2];
+        GrGLint bits;
+        GR_GL_GetShaderPrecisionFormat(gli, shader, precision, range, &bits);
+        if (range[0] < 127 || range[1] < 127 || bits < 23) {
+            return false;
+        }
+    }
+    return true;
+}
+
+void GrGLCaps::initGLSL(const GrGLContextInfo& ctxInfo, const GrGLInterface* gli) {
GrGLStandard standard = ctxInfo.standard();
GrGLVersion version = ctxInfo.version();
@@ -937,6 +955,9 @@ void GrGLCaps::initGLSL(const GrGLContextInfo& ctxInfo) {
shaderCaps->fVertexIDSupport = ctxInfo.glslGeneration() >= k330_GrGLSLGeneration;
}
+    shaderCaps->fFloatIs32Bits = is_float_fp32(ctxInfo, gli, GR_GL_HIGH_FLOAT);
+    shaderCaps->fHalfIs32Bits = is_float_fp32(ctxInfo, gli, GR_GL_MEDIUM_FLOAT);
+
if (kTegra3_GrGLRenderer == ctxInfo.renderer()) {
// The Tegra3 compiler will sometimes never return if we have min(abs(x), 1.0),
// so we must do the abs first in a separate expression.
@@ -1408,92 +1429,6 @@ void GrGLCaps::onDumpJSON(SkJSONWriter* writer) const {
writer->endObject();
}
-static GrGLenum precision_to_gl_float_type(GrSLPrecision p) {
-    switch (p) {
-        case kLow_GrSLPrecision:
-            return GR_GL_LOW_FLOAT;
-        case kMedium_GrSLPrecision:
-            return GR_GL_MEDIUM_FLOAT;
-        case kHigh_GrSLPrecision:
-            return GR_GL_HIGH_FLOAT;
-        default:
-            SK_ABORT("Unexpected precision type.");
-            return -1;
-    }
-}
-
-static GrGLenum shader_type_to_gl_shader(GrShaderType type) {
-    switch (type) {
-        case kVertex_GrShaderType:
-            return GR_GL_VERTEX_SHADER;
-        case kGeometry_GrShaderType:
-            return GR_GL_GEOMETRY_SHADER;
-        case kFragment_GrShaderType:
-            return GR_GL_FRAGMENT_SHADER;
-    }
-    SK_ABORT("Unknown shader type.");
-    return -1;
-}
-
-void GrGLCaps::initShaderPrecisionTable(const GrGLContextInfo& ctxInfo,
-                                        const GrGLInterface* intf,
-                                        GrShaderCaps* shaderCaps) {
-    if (kGLES_GrGLStandard == ctxInfo.standard() || ctxInfo.version() >= GR_GL_VER(4, 1) ||
-        ctxInfo.hasExtension("GL_ARB_ES2_compatibility")) {
-        for (int s = 0; s < kGrShaderTypeCount; ++s) {
-            if (kGeometry_GrShaderType != s) {
-                GrShaderType shaderType = static_cast<GrShaderType>(s);
-                GrGLenum glShader = shader_type_to_gl_shader(shaderType);
-                GrShaderCaps::PrecisionInfo* first = nullptr;
-                shaderCaps->fShaderPrecisionVaries = false;
-                for (int p = 0; p < kGrSLPrecisionCount; ++p) {
-                    GrSLPrecision precision = static_cast<GrSLPrecision>(p);
-                    GrGLenum glPrecision = precision_to_gl_float_type(precision);
-                    GrGLint range[2];
-                    GrGLint bits;
-                    GR_GL_GetShaderPrecisionFormat(intf, glShader, glPrecision, range, &bits);
-                    if (bits) {
-                        shaderCaps->fFloatPrecisions[s][p].fLogRangeLow = range[0];
-                        shaderCaps->fFloatPrecisions[s][p].fLogRangeHigh = range[1];
-                        shaderCaps->fFloatPrecisions[s][p].fBits = bits;
-                        if (!first) {
-                            first = &shaderCaps->fFloatPrecisions[s][p];
-                        }
-                        else if (!shaderCaps->fShaderPrecisionVaries) {
-                            shaderCaps->fShaderPrecisionVaries =
-                                (*first != shaderCaps->fFloatPrecisions[s][p]);
-                        }
-                    }
-                }
-            }
-        }
-    }
-    else {
-        // We're on a desktop GL that doesn't have precision info. Assume they're all 32bit float.
-        shaderCaps->fShaderPrecisionVaries = false;
-        for (int s = 0; s < kGrShaderTypeCount; ++s) {
-            if (kGeometry_GrShaderType != s) {
-                for (int p = 0; p < kGrSLPrecisionCount; ++p) {
-                    shaderCaps->fFloatPrecisions[s][p].fLogRangeLow = 127;
-                    shaderCaps->fFloatPrecisions[s][p].fLogRangeHigh = 127;
-                    shaderCaps->fFloatPrecisions[s][p].fBits = 23;
-                }
-            }
-        }
-    }
-    // GetShaderPrecisionFormat doesn't accept GL_GEOMETRY_SHADER as a shader type. Assume they're
-    // the same as the vertex shader. Only fragment shaders were ever allowed to omit support for
-    // highp. GS was added after GetShaderPrecisionFormat was added to the list of features that
-    // are recommended against.
-    if (shaderCaps->fGeometryShaderSupport) {
-        for (int p = 0; p < kGrSLPrecisionCount; ++p) {
-            shaderCaps->fFloatPrecisions[kGeometry_GrShaderType][p] =
-                shaderCaps->fFloatPrecisions[kVertex_GrShaderType][p];
-        }
-    }
-    shaderCaps->initSamplerPrecisionTable();
-}
-
bool GrGLCaps::bgraIsInternalFormat() const {
return fConfigTable[kBGRA_8888_GrPixelConfig].fFormats.fBaseInternalFormat == GR_GL_BGRA;
}
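For reference, the 127/127/23 thresholds used by is_float_fp32() above are exactly what an IEEE-754 binary32 implementation reports through glGetShaderPrecisionFormat: a largest representable exponent of 2^127 and 23 explicitly stored mantissa bits. A quick compile-time cross-check against the host compiler's float, assuming the host uses IEEE binary32 (the static_asserts below would fire otherwise):

#include <cfloat>

// FLT_MAX_EXP is one past the largest binary exponent, so the largest
// representable power of two for binary32 is 2^127.
static_assert(FLT_MAX_EXP - 1 == 127, "host float is not IEEE-754 binary32");
// FLT_MANT_DIG counts the implicit leading bit, so 23 bits are actually stored.
static_assert(FLT_MANT_DIG - 1 == 23, "host float is not IEEE-754 binary32");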
diff --git a/src/gpu/gl/GrGLCaps.h b/src/gpu/gl/GrGLCaps.h
index 923b3f416d..3a50e8a393 100644
--- a/src/gpu/gl/GrGLCaps.h
+++ b/src/gpu/gl/GrGLCaps.h
@@ -428,7 +428,7 @@ private:
GrGLenum* externalType) const;
void init(const GrContextOptions&, const GrGLContextInfo&, const GrGLInterface*);
-    void initGLSL(const GrGLContextInfo&);
+    void initGLSL(const GrGLContextInfo&, const GrGLInterface*);
bool hasPathRenderingSupport(const GrGLContextInfo&, const GrGLInterface*);
void onApplyOptionsOverrides(const GrContextOptions& options) override;
@@ -441,8 +441,6 @@ private:
void initConfigTable(const GrContextOptions&, const GrGLContextInfo&, const GrGLInterface*,
GrShaderCaps*);
-    void initShaderPrecisionTable(const GrGLContextInfo&, const GrGLInterface*, GrShaderCaps*);
-
GrGLStandard fStandard;
SkTArray<StencilFormat, true> fStencilFormats;
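
The commit message notes that the rrect/ellipse effects only apply the scale workaround when float != fp32. How those effects consume the new bits is outside this diff; purely as an illustration (the helper name and the 0.5f factor below are assumptions, not Skia API), a caller would branch on the cap roughly like this:

// Illustrative only (not Skia code): pre-scale coordinates when the shader's
// "float" cannot represent full fp32, then undo the scale in the shader.
static float choose_coord_scale(const GrShaderCaps& caps) {
    // fFloatIs32Bits is the bit added by this change; the 0.5f factor is a
    // placeholder, not the value the real effects use.
    return caps.fFloatIs32Bits ? 1.0f : 0.5f;
}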