author     bsalomon <bsalomon@google.com>          2016-03-09 06:25:15 -0800
committer  Commit bot <commit-bot@chromium.org>    2016-03-09 06:25:16 -0800
commit     e699d0cd25fd059e3f0c4949e613e50c83a52179 (patch)
tree       ca881e973092698a1a8d1ad13a8dd912479a596e /src
parent     790f99ae729803ddf827251bbbb9bb21fc165e35 (diff)
Don't allow nullptr in texels array params (unless using a transfer buffer).
Diffstat (limited to 'src')
-rw-r--r--  src/gpu/GrGpu.cpp              30
-rw-r--r--  src/gpu/GrGpu.h                28
-rw-r--r--  src/gpu/GrTextureProvider.cpp  31
-rw-r--r--  src/gpu/gl/GrGLGpu.cpp        101
-rw-r--r--  src/gpu/vk/GrVkGpu.cpp          3
5 files changed, 100 insertions(+), 93 deletions(-)
diff --git a/src/gpu/GrGpu.cpp b/src/gpu/GrGpu.cpp
index c593f311e4..afeeda7ae6 100644
--- a/src/gpu/GrGpu.cpp
+++ b/src/gpu/GrGpu.cpp
@@ -99,7 +99,7 @@ static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin, bool renderTarget)
* @param isRT Indicates if the texture can be a render target.
*/
static bool check_texture_creation_params(const GrCaps& caps, const GrSurfaceDesc& desc,
- bool* isRT) {
+ bool* isRT, const SkTArray<GrMipLevel>& texels) {
if (!caps.isConfigTexturable(desc.fConfig)) {
return false;
}
@@ -125,6 +125,12 @@ static bool check_texture_creation_params(const GrCaps& caps, const GrSurfaceDes
return false;
}
}
+
+ for (int i = 0; i < texels.count(); ++i) {
+ if (!texels[i].fPixels) {
+ return false;
+ }
+ }
return true;
}
@@ -134,7 +140,7 @@ GrTexture* GrGpu::createTexture(const GrSurfaceDesc& origDesc, SkBudgeted budget
const GrCaps* caps = this->caps();
bool isRT = false;
- bool textureCreationParamsValid = check_texture_creation_params(*caps, desc, &isRT);
+ bool textureCreationParamsValid = check_texture_creation_params(*caps, desc, &isRT, texels);
if (!textureCreationParamsValid) {
return nullptr;
}
@@ -180,17 +186,6 @@ GrTexture* GrGpu::createTexture(const GrSurfaceDesc& origDesc, SkBudgeted budget
return tex;
}
-GrTexture* GrGpu::createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
- const void* srcData, size_t rowBytes) {
- GrMipLevel level;
- level.fPixels = srcData;
- level.fRowBytes = rowBytes;
- SkSTArray<1, GrMipLevel> levels;
- levels.push_back(level);
-
- return this->createTexture(desc, budgeted, levels);
-}
-
GrTexture* GrGpu::wrapBackendTexture(const GrBackendTextureDesc& desc, GrWrapOwnership ownership) {
this->handleDirtyContext();
if (!this->caps()->isConfigTexturable(desc.fConfig)) {
@@ -392,16 +387,11 @@ bool GrGpu::writePixels(GrSurface* surface,
if (!surface) {
return false;
}
- bool validMipDataFound = false;
for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
- if (texels[currentMipLevel].fPixels != nullptr) {
- validMipDataFound = true;
- break;
+ if (!texels[currentMipLevel].fPixels) {
+ return false;
}
}
- if (!validMipDataFound) {
- return false;
- }
this->handleDirtyContext();
if (this->onWritePixels(surface, left, top, width, height, config, texels)) {
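
The GrGpu.cpp hunks above invert the validation rule for mip data: check_texture_creation_params() now rejects any GrMipLevel whose fPixels is null, and writePixels() fails as soon as one level lacks data instead of succeeding when at least one level has it. A minimal standalone sketch of that all-or-nothing check, using a hypothetical MipLevel struct and std::vector in place of Skia's GrMipLevel and SkTArray:

    #include <cstddef>
    #include <vector>

    // Hypothetical stand-in for GrMipLevel: pixel data plus a row pitch.
    struct MipLevel {
        const void* fPixels = nullptr;
        size_t      fRowBytes = 0;
    };

    // New rule: an upload array is valid only if every level supplies data.
    // An empty array (no initial data at all) is still acceptable.
    bool allLevelsHaveData(const std::vector<MipLevel>& texels) {
        for (const MipLevel& level : texels) {
            if (!level.fPixels) {
                return false;   // previously this level was merely skipped
            }
        }
        return true;
    }
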
diff --git a/src/gpu/GrGpu.h b/src/gpu/GrGpu.h
index ebe4116ebe..9725590544 100644
--- a/src/gpu/GrGpu.h
+++ b/src/gpu/GrGpu.h
@@ -97,21 +97,21 @@ public:
const SkTArray<GrMipLevel>& texels);
/**
- * This function is a shim which creates a SkTArray<GrMipLevel> of size 1.
- * It then calls createTexture with that SkTArray.
- *
- * @param srcData texel data to load texture. Begins with full-size
- * palette data for paletted texture. For compressed
- * formats it contains the compressed pixel data. Otherwise,
- * it contains width*height texels. If nullptr texture data
- * is uninitialized.
- * @param rowBytes the number of bytes between consecutive rows. Zero
- * means rows are tightly packed. This field is ignored
- * for compressed pixel formats.
- * @return The texture object if successful, otherwise, nullptr.
+ * Simplified createTexture() interface for when there is no initial texel data to upload.
*/
- GrTexture* createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
- const void* srcData, size_t rowBytes);
+ GrTexture* createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted) {
+ return this->createTexture(desc, budgeted, SkTArray<GrMipLevel>());
+ }
+
+ /** Simplified createTexture() interface for when there is only a base level */
+ GrTexture* createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted, const void* level0Data,
+ size_t rowBytes) {
+ SkASSERT(level0Data);
+ GrMipLevel level = { level0Data, rowBytes };
+ SkSTArray<1, GrMipLevel> array;
+ array.push_back() = level;
+ return this->createTexture(desc, budgeted, array);
+ }
/**
* Implements GrTextureProvider::wrapBackendTexture
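
The header change replaces the old (srcData, rowBytes) shim with two inline convenience overloads: one for a texture with no initial data, and one that asserts a non-null base level before wrapping it in a one-element array. A sketch of the same overload pattern on a hypothetical TextureFactory class, since the real GrGpu methods only build inside the Skia tree:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Hypothetical stand-in for GrMipLevel.
    struct MipLevel {
        const void* fPixels = nullptr;
        size_t      fRowBytes = 0;
    };

    class TextureFactory {
    public:
        // Full interface: the caller supplies a (possibly empty) mip chain.
        bool createTexture(int width, int height, const std::vector<MipLevel>& texels) {
            for (const MipLevel& level : texels) {
                if (!level.fPixels) {
                    return false;   // nullptr entries are no longer accepted
                }
            }
            // Real code would allocate the texture and upload each level here.
            return width > 0 && height > 0;
        }

        // No initial texel data: forward an empty array, mirroring the new
        // two-argument createTexture() overload above.
        bool createTexture(int width, int height) {
            return this->createTexture(width, height, std::vector<MipLevel>());
        }

        // A single base level: nullptr is no longer a way to ask for an
        // uninitialized texture, so the pointer is asserted.
        bool createTexture(int width, int height, const void* level0Data, size_t rowBytes) {
            assert(level0Data);
            MipLevel level;
            level.fPixels = level0Data;
            level.fRowBytes = rowBytes;
            return this->createTexture(width, height, std::vector<MipLevel>(1, level));
        }
    };
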
diff --git a/src/gpu/GrTextureProvider.cpp b/src/gpu/GrTextureProvider.cpp
index 2d422f717f..f1775aba78 100644
--- a/src/gpu/GrTextureProvider.cpp
+++ b/src/gpu/GrTextureProvider.cpp
@@ -37,6 +37,15 @@ GrTexture* GrTextureProvider::createMipMappedTexture(const GrSurfaceDesc& desc,
if (this->isAbandoned()) {
return nullptr;
}
+ if (mipLevelCount && !texels) {
+ return nullptr;
+ }
+ for (int i = 0; i < mipLevelCount; ++i) {
+ if (!texels[i].fPixels) {
+ return nullptr;
+ }
+ }
+
if ((desc.fFlags & kRenderTarget_GrSurfaceFlag) &&
!fGpu->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
return nullptr;
@@ -48,8 +57,8 @@ GrTexture* GrTextureProvider::createMipMappedTexture(const GrSurfaceDesc& desc,
static const uint32_t kFlags = kExact_ScratchTextureFlag |
kNoCreate_ScratchTextureFlag;
if (GrTexture* texture = this->refScratchTexture(desc, kFlags)) {
- if (texture->writePixels(0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
- baseMipLevel.fPixels, baseMipLevel.fRowBytes)) {
+ if (!texels || texture->writePixels(0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
+ baseMipLevel.fPixels, baseMipLevel.fRowBytes)) {
if (SkBudgeted::kNo == budgeted) {
texture->resourcePriv().makeUnbudgeted();
}
@@ -69,12 +78,16 @@ GrTexture* GrTextureProvider::createMipMappedTexture(const GrSurfaceDesc& desc,
GrTexture* GrTextureProvider::createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
const void* srcData, size_t rowBytes) {
- const int mipLevelCount = 1;
- GrMipLevel texels[mipLevelCount];
- texels[0].fPixels = srcData;
- texels[0].fRowBytes = rowBytes;
-
- return this->createMipMappedTexture(desc, budgeted, texels, mipLevelCount);
+ GrMipLevel tempTexels;
+ GrMipLevel* texels = nullptr;
+ int levelCount = 0;
+ if (srcData) {
+ tempTexels.fPixels = srcData;
+ tempTexels.fRowBytes = rowBytes;
+ texels = &tempTexels;
+ levelCount = 1;
+ }
+ return this->createMipMappedTexture(desc, budgeted, texels, levelCount);
}
GrTexture* GrTextureProvider::createApproxTexture(const GrSurfaceDesc& desc) {
@@ -137,7 +150,7 @@ GrTexture* GrTextureProvider::refScratchTexture(const GrSurfaceDesc& inDesc,
}
if (!(kNoCreate_ScratchTextureFlag & flags)) {
- return fGpu->createTexture(*desc, SkBudgeted::kYes, nullptr, 0);
+ return fGpu->createTexture(*desc, SkBudgeted::kYes);
}
return nullptr;
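
GrTextureProvider::createTexture() now turns a null srcData into an empty level list rather than a one-element list holding a nullptr, and createMipMappedTexture() rejects null entries up front. A standalone sketch of that adapter plus the guard, with hypothetical names standing in for the Skia entry points:

    #include <cstddef>

    // Hypothetical stand-in for GrMipLevel.
    struct MipLevel {
        const void* fPixels = nullptr;
        size_t      fRowBytes = 0;
    };

    // Hypothetical analogue of createMipMappedTexture(): rejects null entries
    // before doing any work, as the new guard at the top of that function does.
    bool createMipMapped(const MipLevel* texels, int levelCount) {
        if (levelCount && !texels) {
            return false;
        }
        for (int i = 0; i < levelCount; ++i) {
            if (!texels[i].fPixels) {
                return false;
            }
        }
        // Allocation and upload would happen here.
        return true;
    }

    // Adapter: a null srcData becomes "zero levels", so the callee never
    // sees a level whose fPixels is nullptr.
    bool createFromOptionalData(const void* srcData, size_t rowBytes) {
        MipLevel tempTexel;
        const MipLevel* texels = nullptr;
        int levelCount = 0;
        if (srcData) {
            tempTexel.fPixels = srcData;
            tempTexel.fRowBytes = rowBytes;
            texels = &tempTexel;
            levelCount = 1;
        }
        return createMipMapped(texels, levelCount);
    }
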
diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
index 95f34a9cf9..86c4cc3bbc 100644
--- a/src/gpu/gl/GrGLGpu.cpp
+++ b/src/gpu/gl/GrGLGpu.cpp
@@ -926,7 +926,7 @@ static inline GrGLenum check_alloc_error(const GrSurfaceDesc& desc,
* @param succeeded Set to true if allocating and populating the texture completed
* without error.
*/
-static void allocate_and_populate_uncompressed_texture(const GrSurfaceDesc& desc,
+static bool allocate_and_populate_uncompressed_texture(const GrSurfaceDesc& desc,
const GrGLInterface& interface,
const GrGLCaps& caps,
GrGLenum target,
@@ -934,8 +934,7 @@ static void allocate_and_populate_uncompressed_texture(const GrSurfaceDesc& desc
GrGLenum externalFormat,
GrGLenum externalType,
const SkTArray<GrMipLevel>& texels,
- int baseWidth, int baseHeight,
- bool* succeeded) {
+ int baseWidth, int baseHeight) {
CLEAR_ERROR_BEFORE_ALLOC(&interface);
bool useTexStorage = caps.isConfigTexSupportEnabled(desc.fConfig);
@@ -955,7 +954,7 @@ static void allocate_and_populate_uncompressed_texture(const GrSurfaceDesc& desc
desc.fWidth, desc.fHeight));
GrGLenum error = check_alloc_error(desc, &interface);
if (error != GR_GL_NO_ERROR) {
- *succeeded = false;
+ return false;
} else {
for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
const void* currentMipData = texels[currentMipLevel].fPixels;
@@ -976,33 +975,48 @@ static void allocate_and_populate_uncompressed_texture(const GrSurfaceDesc& desc
externalFormat, externalType,
currentMipData));
}
- *succeeded = true;
+ return true;
}
} else {
- *succeeded = true;
- for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
- int twoToTheMipLevel = 1 << currentMipLevel;
- int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel);
- int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel);
- const void* currentMipData = texels[currentMipLevel].fPixels;
- // Even if currentMipData is nullptr, continue to call TexImage2D.
- // This will allocate texture memory which we can later populate.
+ if (texels.empty()) {
GL_ALLOC_CALL(&interface,
TexImage2D(target,
- currentMipLevel,
+ 0,
internalFormat,
- currentWidth,
- currentHeight,
+ baseWidth,
+ baseHeight,
0, // border
externalFormat, externalType,
- currentMipData));
+ nullptr));
GrGLenum error = check_alloc_error(desc, &interface);
if (error != GR_GL_NO_ERROR) {
- *succeeded = false;
- break;
+ return false;
+ }
+ } else {
+ for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
+ int twoToTheMipLevel = 1 << currentMipLevel;
+ int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel);
+ int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel);
+ const void* currentMipData = texels[currentMipLevel].fPixels;
+ // Even if currentMipData is nullptr, continue to call TexImage2D.
+ // This will allocate texture memory which we can later populate.
+ GL_ALLOC_CALL(&interface,
+ TexImage2D(target,
+ currentMipLevel,
+ internalFormat,
+ currentWidth,
+ currentHeight,
+ 0, // border
+ externalFormat, externalType,
+ currentMipData));
+ GrGLenum error = check_alloc_error(desc, &interface);
+ if (error != GR_GL_NO_ERROR) {
+ return false;
+ }
}
}
}
+ return true;
}
/**
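
Both branches of the rewritten allocation loop derive each level's dimensions the same way: divide the base size by two per level and clamp at one texel, matching the SkTMax(1, baseWidth / twoToTheMipLevel) pattern above. A minimal standalone version of that computation:

    #include <algorithm>
    #include <cstdio>

    // Width or height of a mip level: the base size divided by 2^level,
    // never smaller than a single texel.
    int mipDimension(int baseSize, int level) {
        return std::max(1, baseSize / (1 << level));
    }

    int main() {
        const int baseWidth = 20, baseHeight = 13;
        for (int level = 0; level < 5; ++level) {
            std::printf("level %d: %d x %d\n", level,
                        mipDimension(baseWidth, level),
                        mipDimension(baseHeight, level));
        }
        return 0;
    }
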
@@ -1136,8 +1150,7 @@ bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
for (int currentMipLevel = texelsShallowCopy.count() - 1; currentMipLevel >= 0;
currentMipLevel--) {
- SkASSERT(texelsShallowCopy[currentMipLevel].fPixels ||
- kNewTexture_UploadType == uploadType || kTransfer_UploadType == uploadType);
+ SkASSERT(texelsShallowCopy[currentMipLevel].fPixels || kTransfer_UploadType == uploadType);
}
const GrGLInterface* interface = this->glInterface();
@@ -1154,10 +1167,6 @@ bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
int currentWidth = SkTMax(1, width / twoToTheMipLevel);
int currentHeight = SkTMax(1, height / twoToTheMipLevel);
- if (texelsShallowCopy[currentMipLevel].fPixels == nullptr) {
- continue;
- }
-
if (currentHeight > SK_MaxS32 ||
currentWidth > SK_MaxS32) {
return false;
@@ -1193,7 +1202,7 @@ bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
bool swFlipY = false;
bool glFlipY = false;
- if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
+ if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin && !texelsShallowCopy.empty()) {
if (caps.unpackFlipYSupport()) {
glFlipY = true;
} else {
@@ -1219,10 +1228,6 @@ bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
char* buffer = (char*)tempStorage.reset(combined_buffer_size);
for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
- if (texelsShallowCopy[currentMipLevel].fPixels == nullptr) {
- continue;
- }
-
int twoToTheMipLevel = 1 << currentMipLevel;
int currentWidth = SkTMax(1, width / twoToTheMipLevel);
int currentHeight = SkTMax(1, height / twoToTheMipLevel);
@@ -1269,6 +1274,9 @@ bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
} else {
return false;
}
+ }
+
+ if (!texelsShallowCopy.empty()) {
if (glFlipY) {
GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE));
}
@@ -1281,10 +1289,10 @@ bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
0 == left && 0 == top &&
desc.fWidth == width && desc.fHeight == height &&
!desc.fTextureStorageAllocator.fAllocateTextureStorage) {
- allocate_and_populate_uncompressed_texture(desc, *interface, caps, target,
- internalFormat, externalFormat,
- externalType, texelsShallowCopy,
- width, height, &succeeded);
+ succeeded = allocate_and_populate_uncompressed_texture(desc, *interface, caps, target,
+ internalFormat, externalFormat,
+ externalType, texelsShallowCopy,
+ width, height);
} else {
if (swFlipY || glFlipY) {
top = desc.fHeight - (top + height);
@@ -1294,9 +1302,6 @@ bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
int twoToTheMipLevel = 1 << currentMipLevel;
int currentWidth = SkTMax(1, width / twoToTheMipLevel);
int currentHeight = SkTMax(1, height / twoToTheMipLevel);
- if (texelsShallowCopy[currentMipLevel].fPixels == nullptr) {
- continue;
- }
GL_CALL(TexSubImage2D(target,
currentMipLevel,
@@ -1324,8 +1329,6 @@ bool GrGLGpu::uploadCompressedTexData(const GrSurfaceDesc& desc,
UploadType uploadType,
int left, int top, int width, int height) {
SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));
- SkASSERT(kTransfer_UploadType != uploadType &&
- (texels[0].fPixels || kNewTexture_UploadType != uploadType));
// No support for software flip y, yet...
SkASSERT(kBottomLeft_GrSurfaceOrigin != desc.fOrigin);
@@ -1366,9 +1369,7 @@ bool GrGLGpu::uploadCompressedTexData(const GrSurfaceDesc& desc,
return false;
}
for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
- if (texels[currentMipLevel].fPixels == nullptr) {
- continue;
- }
+ SkASSERT(texels[currentMipLevel].fPixels || kTransfer_UploadType == uploadType);
int twoToTheMipLevel = 1 << currentMipLevel;
int currentWidth = SkTMax(1, width / twoToTheMipLevel);
@@ -1847,14 +1848,15 @@ bool GrGLGpu::createTextureExternalAllocatorImpl(const GrSurfaceDesc& desc,
// We do not make SkTArray available outside of Skia,
// and so we do not want to allow mipmaps to external
// allocators just yet.
- SkASSERT(texels.count() == 1);
- SkSTArray<1, GrMipLevel> texelsShallowCopy(1);
- texelsShallowCopy.push_back(texels[0]);
+ SkASSERT(texels.count() < 2);
+ const void* pixels = nullptr;
+ if (!texels.empty()) {
+ pixels = texels.begin()->fPixels;
+ }
switch (desc.fTextureStorageAllocator.fAllocateTextureStorage(
desc.fTextureStorageAllocator.fCtx, reinterpret_cast<GrBackendObject>(info),
- desc.fWidth, desc.fHeight, desc.fConfig, texelsShallowCopy[0].fPixels,
- desc.fOrigin)) {
+ desc.fWidth, desc.fHeight, desc.fConfig, pixels, desc.fOrigin)) {
case GrTextureStorageAllocator::Result::kSucceededAndUploaded:
return true;
case GrTextureStorageAllocator::Result::kFailed:
@@ -1865,7 +1867,7 @@ bool GrGLGpu::createTextureExternalAllocatorImpl(const GrSurfaceDesc& desc,
if (!this->uploadTexData(desc, info->fTarget, kNewTexture_UploadType, 0, 0,
desc.fWidth, desc.fHeight,
- desc.fConfig, texelsShallowCopy)) {
+ desc.fConfig, texels)) {
desc.fTextureStorageAllocator.fDeallocateTextureStorage(
desc.fTextureStorageAllocator.fCtx, reinterpret_cast<GrBackendObject>(info));
return false;
@@ -2479,7 +2481,8 @@ bool GrGLGpu::readPixelsSupported(GrPixelConfig rtConfig, GrPixelConfig readConf
desc.fConfig = rtConfig;
desc.fWidth = desc.fHeight = 16;
desc.fFlags = kRenderTarget_GrSurfaceFlag;
- SkAutoTUnref<GrTexture> temp(this->createTexture(desc, SkBudgeted::kNo, nullptr, 0));
+ SkAutoTUnref<GrTexture> temp(this->createTexture(desc,
+ SkBudgeted::kNo));
if (!temp) {
return false;
}
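
createTextureExternalAllocatorImpl() now tolerates an empty texel array: it hands the allocator the base level's pixels when one is present and nullptr otherwise, instead of unconditionally copying element zero. A standalone sketch of that extraction, again with a stand-in MipLevel type:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Hypothetical stand-in for GrMipLevel.
    struct MipLevel {
        const void* fPixels = nullptr;
        size_t      fRowBytes = 0;
    };

    // External allocators accept at most a single level; an empty array
    // means "allocate storage without uploading".
    const void* baseLevelPixelsOrNull(const std::vector<MipLevel>& texels) {
        assert(texels.size() < 2);
        return texels.empty() ? nullptr : texels.front().fPixels;
    }
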
diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
index 1ce78fd192..d357f32e1f 100644
--- a/src/gpu/vk/GrVkGpu.cpp
+++ b/src/gpu/vk/GrVkGpu.cpp
@@ -542,7 +542,8 @@ GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::Li
}
// TODO: We're ignoring MIP levels here.
- if (!texels.empty() && texels.begin()->fPixels) {
+ if (!texels.empty()) {
+ SkASSERT(texels.begin()->fPixels);
if (!this->uploadTexData(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
texels.begin()->fPixels, texels.begin()->fRowBytes)) {
tex->unref();