summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorcommit-bot@chromium.org <commit-bot@chromium.org@2bbb7eff-a529-9590-31e7-b0007b416f81>2014-04-27 19:21:51 +0000
committercommit-bot@chromium.org <commit-bot@chromium.org@2bbb7eff-a529-9590-31e7-b0007b416f81>2014-04-27 19:21:51 +0000
commit01b0da4230d5ffe8a656a413058037194606d45f (patch)
treef3471243e83b9225837c9a2b8204b7eac527210c
parentee285a41e56b90e52c9077ab57412dffc46fc872 (diff)
downloadinclude-01b0da4230d5ffe8a656a413058037194606d45f.tar.gz
teach TSAN about SkSpinlock, SkRefCnt, and SkOnce
BUG=skia: Committed: http://code.google.com/p/skia/source/detail?r=14353 NOTRY=true NOTREECHECKS=true Committed: http://code.google.com/p/skia/source/detail?r=14354 R=bsalomon@google.com, bungeman@google.com, mtklein@google.com Author: mtklein@chromium.org Review URL: https://codereview.chromium.org/247813005 git-svn-id: http://skia.googlecode.com/svn/trunk/include@14390 2bbb7eff-a529-9590-31e7-b0007b416f81
-rw-r--r--core/SkDynamicAnnotations.h14
-rw-r--r--core/SkOnce.h4
-rw-r--r--core/SkRefCnt.h14
3 files changed, 4 insertions, 28 deletions
diff --git a/core/SkDynamicAnnotations.h b/core/SkDynamicAnnotations.h
index e4493c8..6d21cdd 100644
--- a/core/SkDynamicAnnotations.h
+++ b/core/SkDynamicAnnotations.h
@@ -19,10 +19,6 @@ extern "C" {
// TSAN provides these hooks.
void AnnotateIgnoreReadsBegin(const char* file, int line);
void AnnotateIgnoreReadsEnd(const char* file, int line);
-void AnnotateHappensBefore(const char* file, int line, const volatile void* ptr);
-void AnnotateHappensAfter(const char* file, int line, const volatile void* ptr);
-void AnnotateRWLockAcquired(const char* file, int line, const volatile void* lock, long is_w);
-void AnnotateRWLockReleased(const char* file, int line, const volatile void* lock, long is_w);
} // extern "C"
// SK_ANNOTATE_UNPROTECTED_READ can wrap any variable read to tell TSAN to ignore that it appears to
@@ -41,19 +37,9 @@ inline T SK_ANNOTATE_UNPROTECTED_READ(const volatile T& x) {
return read;
}
-#define SK_ANNOTATE_HAPPENS_BEFORE(obj) AnnotateHappensBefore(__FILE__, __LINE__, obj)
-#define SK_ANNOTATE_HAPPENS_AFTER(obj) AnnotateHappensAfter(__FILE__, __LINE__, obj)
-
-#define SK_ANNOTATE_RWLOCK_ACQUIRED(lock, w) AnnotateRWLockAcquired(__FILE__, __LINE__, lock, w)
-#define SK_ANNOTATE_RWLOCK_RELEASED(lock, w) AnnotateRWLockReleased(__FILE__, __LINE__, lock, w)
-
#else // !DYNAMIC_ANNOTATIONS_ENABLED
#define SK_ANNOTATE_UNPROTECTED_READ(x) (x)
-#define SK_ANNOTATE_HAPPENS_BEFORE(obj)
-#define SK_ANNOTATE_HAPPENS_AFTER(obj)
-#define SK_ANNOTATE_RWLOCK_ACQUIRED(lock, w)
-#define SK_ANNOTATE_RWLOCK_RELEASED(lock, w)
#endif
diff --git a/core/SkOnce.h b/core/SkOnce.h
index a42e702..d5330b9 100644
--- a/core/SkOnce.h
+++ b/core/SkOnce.h
@@ -54,12 +54,10 @@ struct SkSpinlock {
while (!sk_atomic_cas(&thisIsPrivate, 0, 1)) {
// spin
}
- SK_ANNOTATE_RWLOCK_ACQUIRED(this, true);
}
void release() {
SkASSERT(shouldBeZero == 0);
- SK_ANNOTATE_RWLOCK_RELEASED(this, true);
// This requires a release memory barrier before storing, which sk_atomic_cas guarantees.
SkAssertResult(sk_atomic_cas(&thisIsPrivate, 1, 0));
}
@@ -147,7 +145,6 @@ static void sk_once_slow(bool* done, Lock* lock, Func f, Arg arg, void (*atExit)
// observable whenever we observe *done == true.
release_barrier();
*done = true;
- SK_ANNOTATE_HAPPENS_BEFORE(done);
}
}
@@ -168,7 +165,6 @@ inline void SkOnce(bool* done, Lock* lock, Func f, Arg arg, void(*atExit)()) {
// happens after f(arg), so by syncing to once->done = true here we're
// forcing ourselves to also wait until the effects of f(arg) are readable.
acquire_barrier();
- SK_ANNOTATE_HAPPENS_AFTER(done);
}
template <typename Func, typename Arg>
diff --git a/core/SkRefCnt.h b/core/SkRefCnt.h
index 2ac68dd..41c7829 100644
--- a/core/SkRefCnt.h
+++ b/core/SkRefCnt.h
@@ -37,7 +37,7 @@ public:
*/
virtual ~SkRefCntBase() {
#ifdef SK_DEBUG
- SkASSERT(this->unique());
+ SkASSERT(fRefCnt == 1);
fRefCnt = 0; // illegal value, to catch us if we reuse after delete
#endif
}
@@ -53,7 +53,6 @@ public:
// an unprotected read. Generally, don't read fRefCnt, and don't stifle this warning.
bool const unique = (1 == SK_ANNOTATE_UNPROTECTED_READ(fRefCnt));
if (unique) {
- SK_ANNOTATE_HAPPENS_AFTER(this);
// Acquire barrier (L/SL), if not provided by load of fRefCnt.
// Prevents user's 'unique' code from happening before decrements.
//TODO: issue the barrier.
@@ -64,7 +63,7 @@ public:
/** Increment the reference count. Must be balanced by a call to unref().
*/
void ref() const {
- SkASSERT(this->unsafeGetRefCnt() > 0);
+ SkASSERT(fRefCnt > 0);
sk_atomic_inc(&fRefCnt); // No barrier required.
}
@@ -73,11 +72,9 @@ public:
the object needs to have been allocated via new, and not on the stack.
*/
void unref() const {
- SkASSERT(this->unsafeGetRefCnt() > 0);
- SK_ANNOTATE_HAPPENS_BEFORE(this);
+ SkASSERT(fRefCnt > 0);
// Release barrier (SL/S), if not provided below.
if (sk_atomic_dec(&fRefCnt) == 1) {
- SK_ANNOTATE_HAPPENS_AFTER(this);
// Acquire barrier (L/SL), if not provided above.
// Prevents code in dispose from happening before the decrement.
sk_membar_acquire__after_atomic_dec();
@@ -87,7 +84,7 @@ public:
#ifdef SK_DEBUG
void validate() const {
- SkASSERT(this->unsafeGetRefCnt() > 0);
+ SkASSERT(fRefCnt > 0);
}
#endif
@@ -106,9 +103,6 @@ protected:
}
private:
- // OK for use in asserts, but not much else.
- int32_t unsafeGetRefCnt() { return SK_ANNOTATE_UNPROTECTED_READ(fRefCnt); }
-
/**
* Called when the ref count goes to 0.
*/