path: root/src/core/SkPixelRef.cpp
Diffstat (limited to 'src/core/SkPixelRef.cpp')
-rw-r--r--  src/core/SkPixelRef.cpp | 89
1 file changed, 76 insertions(+), 13 deletions(-)
diff --git a/src/core/SkPixelRef.cpp b/src/core/SkPixelRef.cpp
index e7f5c8d947..90e2a40ea4 100644
--- a/src/core/SkPixelRef.cpp
+++ b/src/core/SkPixelRef.cpp
@@ -172,27 +172,48 @@ void SkPixelRef::setPreLocked(void* pixels, size_t rowBytes, SkColorTable* ctabl
#endif
}
+bool SkPixelRef::lockPixelsInsideMutex(LockRec* rec) {
+ fMutex->assertHeld();
+
+ // For historical reasons, lockPixels() wants fLockCount incremented even on
+ // failure, but newer callers (e.g. onRequestLock) do not, so this helper undoes
+ // its increment when it fails and leaves that compatibility bump to lockPixels().
+ if (1 == ++fLockCount) {
+ SkASSERT(fRec.isZero());
+
+ LockRec newRec; // a distinct local name, so the out-param 'rec' is not shadowed
+ if (!this->onNewLockPixels(&newRec)) {
+ fLockCount -= 1; // we return fLockCount unchanged if we fail.
+ return false;
+ }
+ SkASSERT(!newRec.isZero()); // else why did onNewLockPixels return true?
+ fRec = newRec;
+ }
+ *rec = fRec;
+ return true;
+}
+
bool SkPixelRef::lockPixels(LockRec* rec) {
SkASSERT(!fPreLocked || SKPIXELREF_PRELOCKED_LOCKCOUNT == fLockCount);
- if (!fPreLocked) {
+ if (fPreLocked) {
+ *rec = fRec;
+ return true;
+ } else {
TRACE_EVENT_BEGIN0("skia", "SkPixelRef::lockPixelsMutex");
SkAutoMutexAcquire ac(*fMutex);
TRACE_EVENT_END0("skia", "SkPixelRef::lockPixelsMutex");
-
- if (1 == ++fLockCount) {
- SkASSERT(fRec.isZero());
-
- LockRec rec;
- if (!this->onNewLockPixels(&rec)) {
- return false;
- }
- SkASSERT(!rec.isZero()); // else why did onNewLock return true?
- fRec = rec;
+ SkDEBUGCODE(int oldCount = fLockCount;)
+ bool success = this->lockPixelsInsideMutex(rec);
+ // lockPixelsInsideMutex only increments the count if it succeeds.
+ SkASSERT(oldCount + (int)success == fLockCount);
+
+ if (!success) {
+ // For compatibility with SkBitmap calling lockPixels, we still want to increment
+ // fLockCount even if we failed. If we updated SkBitmap we could remove this oddity.
+ fLockCount += 1;
}
+ return success;
}
- *rec = fRec;
- return true;
}
bool SkPixelRef::lockPixels() {
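The two functions in the hunk above split one job: lockPixelsInsideMutex() does the real locking and leaves fLockCount untouched on failure, while the public lockPixels(LockRec*) re-adds the increment on failure purely so existing SkBitmap-style callers can keep pairing every lock with an unconditional unlock. A minimal caller-side sketch of that pairing, assuming the LockRec overload is reachable from the call site; read_from() and use_pixels() are hypothetical stand-ins, not Skia API:

    #include "SkPixelRef.h"

    // Hypothetical consumer, stubbed so the sketch stands alone.
    static void read_from(const void* pixels, size_t rowBytes) {
        (void)pixels; (void)rowBytes;
    }

    static void use_pixels(SkPixelRef* pr) {
        SkPixelRef::LockRec rec;
        const bool ok = pr->lockPixels(&rec); // fLockCount += 1 on success AND failure
        if (ok) {
            read_from(rec.fPixels, rec.fRowBytes);
        }
        pr->unlockPixels(); // safe unconditionally, because of the compatibility bump
    }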
@@ -219,6 +240,26 @@ void SkPixelRef::unlockPixels() {
}
}
+bool SkPixelRef::requestLock(const LockRequest& request, LockResult* result) {
+ SkASSERT(result);
+ if (request.fSize.isEmpty()) {
+ return false;
+ }
+
+ if (fPreLocked) {
+ result->fUnlockProc = NULL;
+ result->fUnlockContext = NULL;
+ result->fCTable = fRec.fColorTable;
+ result->fPixels = fRec.fPixels;
+ result->fRowBytes = fRec.fRowBytes;
+ result->fSize.set(fInfo.width(), fInfo.height());
+ return true;
+ } else {
+ SkAutoMutexAcquire ac(*fMutex);
+ return this->onRequestLock(request, result);
+ }
+}
+
bool SkPixelRef::lockPixelsAreWritable() const {
return this->onLockPixelsAreWritable();
}
@@ -291,6 +332,8 @@ bool SkPixelRef::readPixels(SkBitmap* dst, const SkIRect* subset) {
return this->onReadPixels(dst, subset);
}
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
bool SkPixelRef::onReadPixels(SkBitmap* dst, const SkIRect* subset) {
return false;
}
@@ -308,3 +351,23 @@ size_t SkPixelRef::getAllocatedSizeInBytes() const {
return 0;
}
+static void unlock_legacy_result(void* ctx) {
+ SkPixelRef* pr = (SkPixelRef*)ctx;
+ pr->unlockPixels();
+ pr->unref(); // balancing the ref taken in onRequestLock
+}
+
+bool SkPixelRef::onRequestLock(const LockRequest& request, LockResult* result) {
+ LockRec rec;
+ if (!this->lockPixelsInsideMutex(&rec)) {
+ return false;
+ }
+
+ result->fUnlockProc = unlock_legacy_result;
+ result->fUnlockContext = SkRef(this); // this is balanced in our fUnlockProc
+ result->fCTable = rec.fColorTable;
+ result->fPixels = rec.fPixels;
+ result->fRowBytes = rec.fRowBytes;
+ result->fSize.set(fInfo.width(), fInfo.height());
+ return true;
+}
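The default onRequestLock() bridges the legacy lock to the new API, and the SkRef(this)/unref() pair has a useful consequence: the LockResult, not the caller's own reference, keeps the pixel data alive until the unlock proc runs. A sketch of that lifetime guarantee, reusing the hypothetical read_from() stub above and guarding the pre-locked case, where no extra ref is taken:

    static void outlive_caller_ref(SkPixelRef* pr) { // caller owns a ref on entry
        SkPixelRef::LockRequest req;
        req.fSize.set(pr->info().width(), pr->info().height()); // assumed accessor

        SkPixelRef::LockResult result;
        if (!pr->requestLock(req, &result) || NULL == result.fUnlockProc) {
            pr->unref(); // failed, or pre-locked: no independent ref was taken
            return;
        }
        pr->unref(); // drop the caller's ref early...
        pr = NULL;
        // ...result.fPixels stays valid: onRequestLock ref'ed the pixel ref and
        // unlock_legacy_result() will unlockPixels() and then unref it.
        read_from(result.fPixels, result.fRowBytes);
        result.fUnlockProc(result.fUnlockContext);
    }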