commit     a0a7358caef50a06622cc72e5b9dc44594ebea4a  (HEAD, master, main)
Merge:     ab7cfa50cee8463f0dc077cbeca69107303793e1 7b176c0d5d822759c0d1660c8f7d882d72d64643
Tree:      bc7f117fd6a44db83a9f64d4632dcf9f77b00119
Author:    Torne (Richard Coles) <torne@google.com>  2014-05-14 12:13:14 +0100
Committer: Torne (Richard Coles) <torne@google.com>  2014-05-14 12:13:14 +0100
    Merge from Chromium at DEPS revision 269336

    This commit was generated by merge_to_master.py.

    Change-Id: I07848840df4da46e0aa5b11fb8533dafb972af91
-rw-r--r--  core/SkColorPriv.h                                |  9
-rw-r--r--  core/SkColorShader.h                              |  9
-rw-r--r--  core/SkComposeShader.h                            |  8
-rw-r--r--  core/SkEmptyShader.h                              | 20
-rw-r--r--  core/SkPostConfig.h                               | 10
-rw-r--r--  core/SkRect.h                                     | 18
-rw-r--r--  core/SkScalar.h                                   | 20
-rw-r--r--  core/SkShader.h                                   | 74
-rw-r--r--  core/SkSurface.h                                  | 23
-rw-r--r--  core/SkTLazy.h                                    |  5
-rw-r--r--  core/SkTypes.h                                    |  4
-rw-r--r--  core/SkUtils.h                                    | 12
-rw-r--r--  core/SkXfermode.h                                 | 52
-rw-r--r--  effects/SkBlurMaskFilter.h                        |  6
-rw-r--r--  effects/SkPerlinNoiseShader.h                     |  7
-rw-r--r--  effects/SkTransparentShader.h                     |  9
-rw-r--r--  gpu/GrCacheable.h                                 | 63
-rw-r--r--  gpu/GrColor.h                                     |  2
-rw-r--r--  gpu/GrConfig.h                                    |  9
-rw-r--r--  gpu/GrContext.h                                   | 16
-rw-r--r--  gpu/GrContextFactory.h                            |  2
-rw-r--r--  gpu/GrGpuObject.h (renamed from gpu/GrResource.h) | 75
-rw-r--r--  gpu/GrRenderTarget.h                              |  2
-rw-r--r--  gpu/GrSurface.h                                   |  6
-rw-r--r--  gpu/GrTexture.h                                   | 22
-rw-r--r--  gpu/GrTypesPriv.h                                 |  2
-rw-r--r--  gpu/SkGpuDevice.h                                 |  1
-rw-r--r--  gpu/gl/GrGLFunctions.h                            |  2
-rw-r--r--  gpu/gl/GrGLInterface.h                            |  2
-rw-r--r--  utils/SkThreadPool.h                              | 27

30 files changed, 305 insertions(+), 212 deletions(-)
diff --git a/core/SkColorPriv.h b/core/SkColorPriv.h
index 9591f22..d5571df 100644
--- a/core/SkColorPriv.h
+++ b/core/SkColorPriv.h
@@ -181,6 +181,15 @@ static inline unsigned SkAlpha255To256(U8CPU alpha) {
return alpha + 1;
}
+/**
+ * Turn a 0..255 value into a 0..256 value, rounding up if the value is >= 0x80.
+ * This is slightly more accurate than SkAlpha255To256.
+ */
+static inline unsigned Sk255To256(U8CPU value) {
+ SkASSERT(SkToU8(value) == value);
+ return value + (value >> 7);
+}
+
/** Multiplify value by 0..256, and shift the result down 8
(i.e. return (value * alpha256) >> 8)
*/
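
The new Sk255To256() above widens an 8-bit value to 0..256 by adding the value's own top bit (value >> 7), so 0 stays 0 and 255 becomes 256, whereas the existing SkAlpha255To256() always adds 1. A minimal sketch of how such a 0..256 scale is typically consumed, based only on the declarations shown above (the helper function name is hypothetical, not part of the commit):

    #include "SkColorPriv.h"

    // Scale an 8-bit channel by an 8-bit alpha without dividing by 255:
    // widen alpha to 0..256, multiply, then shift down 8.
    static unsigned scale_by_alpha(unsigned value, U8CPU alpha) {
        unsigned alpha256 = Sk255To256(alpha);   // 0 -> 0, 128 -> 129, 255 -> 256
        return (value * alpha256) >> 8;          // full alpha returns 'value' unchanged
    }
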
diff --git a/core/SkColorShader.h b/core/SkColorShader.h
index 9e19a71..be59627 100644
--- a/core/SkColorShader.h
+++ b/core/SkColorShader.h
@@ -27,19 +27,13 @@ public:
virtual bool isOpaque() const SK_OVERRIDE;
- virtual SkShader::Context* createContext(const SkBitmap& device,
- const SkPaint& paint,
- const SkMatrix& matrix,
- void* storage) const SK_OVERRIDE;
-
virtual size_t contextSize() const SK_OVERRIDE {
return sizeof(ColorShaderContext);
}
class ColorShaderContext : public SkShader::Context {
public:
- ColorShaderContext(const SkColorShader& shader, const SkBitmap& device,
- const SkPaint& paint, const SkMatrix& matrix);
+ ColorShaderContext(const SkColorShader& shader, const ContextRec&);
virtual uint32_t getFlags() const SK_OVERRIDE;
virtual uint8_t getSpan16Alpha() const SK_OVERRIDE;
@@ -68,6 +62,7 @@ public:
protected:
SkColorShader(SkReadBuffer&);
virtual void flatten(SkWriteBuffer&) const SK_OVERRIDE;
+ virtual Context* onCreateContext(const ContextRec&, void* storage) const SK_OVERRIDE;
private:
SkColor fColor; // ignored if fInheritColor is true
diff --git a/core/SkComposeShader.h b/core/SkComposeShader.h
index d42da0c..cfb03b9 100644
--- a/core/SkComposeShader.h
+++ b/core/SkComposeShader.h
@@ -34,18 +34,13 @@ public:
SkComposeShader(SkShader* sA, SkShader* sB, SkXfermode* mode = NULL);
virtual ~SkComposeShader();
- virtual bool validContext(const SkBitmap&, const SkPaint&,
- const SkMatrix&, SkMatrix* totalInverse = NULL) const SK_OVERRIDE;
- virtual SkShader::Context* createContext(const SkBitmap&, const SkPaint&,
- const SkMatrix&, void*) const SK_OVERRIDE;
virtual size_t contextSize() const SK_OVERRIDE;
class ComposeShaderContext : public SkShader::Context {
public:
// When this object gets destroyed, it will call contextA and contextB's destructor
// but it will NOT free the memory.
- ComposeShaderContext(const SkComposeShader&, const SkBitmap&,
- const SkPaint&, const SkMatrix&,
+ ComposeShaderContext(const SkComposeShader&, const ContextRec&,
SkShader::Context* contextA, SkShader::Context* contextB);
SkShader::Context* getShaderContextA() const { return fShaderContextA; }
@@ -73,6 +68,7 @@ public:
protected:
SkComposeShader(SkReadBuffer& );
virtual void flatten(SkWriteBuffer&) const SK_OVERRIDE;
+ virtual Context* onCreateContext(const ContextRec&, void*) const SK_OVERRIDE;
private:
SkShader* fShaderA;
diff --git a/core/SkEmptyShader.h b/core/SkEmptyShader.h
index 7494eff..7de3bc1 100644
--- a/core/SkEmptyShader.h
+++ b/core/SkEmptyShader.h
@@ -1,4 +1,3 @@
-
/*
* Copyright 2011 Google Inc.
*
@@ -6,13 +5,13 @@
* found in the LICENSE file.
*/
-
-
#ifndef SkEmptyShader_DEFINED
#define SkEmptyShader_DEFINED
#include "SkShader.h"
+// TODO: move this to private, as there is a public factory on SkShader
+
/**
* \class SkEmptyShader
* A Shader that always draws nothing. Its createContext always returns NULL.
@@ -27,23 +26,16 @@ public:
return sizeof(SkShader::Context);
}
- virtual bool validContext(const SkBitmap&, const SkPaint&,
- const SkMatrix&, SkMatrix* totalInverse = NULL) const SK_OVERRIDE {
- return false;
- }
-
- virtual SkShader::Context* createContext(const SkBitmap&, const SkPaint&,
- const SkMatrix&, void*) const SK_OVERRIDE {
- // validContext returns false.
- return NULL;
- }
-
SK_TO_STRING_OVERRIDE()
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkEmptyShader)
protected:
SkEmptyShader(SkReadBuffer& buffer) : INHERITED(buffer) {}
+ virtual SkShader::Context* onCreateContext(const ContextRec&, void*) const SK_OVERRIDE {
+ return NULL;
+ }
+
private:
typedef SkShader INHERITED;
};
diff --git a/core/SkPostConfig.h b/core/SkPostConfig.h
index bee87e6..88a2bfc 100644
--- a/core/SkPostConfig.h
+++ b/core/SkPostConfig.h
@@ -154,8 +154,8 @@
# undef NOMINMAX
# endif
#
-# ifndef SK_DEBUGBREAK
-# define SK_DEBUGBREAK(p) do { if (!(p)) { SkNO_RETURN_HINT(); __debugbreak(); }} while (false)
+# ifndef SK_ALWAYSBREAK
+# define SK_ALWAYSBREAK(p) do { if (!(p)) { SkNO_RETURN_HINT(); __debugbreak(); }} while (false)
# endif
#
# ifndef SK_A32_SHIFT
@@ -166,14 +166,14 @@
# endif
#
#else
-# ifndef SK_DEBUGBREAK
+# ifndef SK_ALWAYSBREAK
# ifdef SK_DEBUG
# include <stdio.h>
-# define SK_DEBUGBREAK(cond) do { if (cond) break; \
+# define SK_ALWAYSBREAK(cond) do { if (cond) break; \
SkDebugf("%s:%d: failed assertion \"%s\"\n", \
__FILE__, __LINE__, #cond); SK_CRASH(); } while (false)
# else
-# define SK_DEBUGBREAK(cond) do { if (cond) break; SK_CRASH(); } while (false)
+# define SK_ALWAYSBREAK(cond) do { if (cond) break; SK_CRASH(); } while (false)
# endif
# endif
#endif
diff --git a/core/SkRect.h b/core/SkRect.h
index 397e4a0..fd8cb16 100644
--- a/core/SkRect.h
+++ b/core/SkRect.h
@@ -732,6 +732,24 @@ struct SK_API SkRect {
}
/**
+ * Variant of round() that explicitly performs the rounding step (i.e. floor(x + 0.5)) using
+ * double instead of SkScalar (float). It does this by calling SkDScalarRoundToInt(), which
+ * may be slower than calling SkScalarRoundToInt(), but gives slightly more accurate results.
+ *
+ * e.g.
+ * SkScalar x = 0.49999997f;
+ * int ix = SkScalarRoundToInt(x);
+ * SkASSERT(0 == ix); // <--- fails
+ * ix = SkDScalarRoundToInt(x);
+ * SkASSERT(0 == ix); // <--- succeeds
+ */
+ void dround(SkIRect* dst) const {
+ SkASSERT(dst);
+ dst->set(SkDScalarRoundToInt(fLeft), SkDScalarRoundToInt(fTop),
+ SkDScalarRoundToInt(fRight), SkDScalarRoundToInt(fBottom));
+ }
+
+ /**
* Set the dst rectangle by rounding "out" this rectangle, choosing the
* SkScalarFloor of top and left, and the SkScalarCeil of right and bottom.
*/
diff --git a/core/SkScalar.h b/core/SkScalar.h
index b9256ba..b37cf5c 100644
--- a/core/SkScalar.h
+++ b/core/SkScalar.h
@@ -83,6 +83,26 @@ static inline bool SkScalarIsFinite(float x) {
#define SkScalarRoundToInt(x) sk_float_round2int(x)
#define SkScalarTruncToInt(x) static_cast<int>(x)
+/**
+ * Variant of SkScalarRoundToInt, that performs the rounding step (adding 0.5) explicitly using
+ * double, to avoid possibly losing the low bit(s) of the answer before calling floor().
+ *
+ * This routine will likely be slower than SkScalarRoundToInt(), and should only be used when the
+ * extra precision is known to be valuable.
+ *
+ * In particular, this catches the following case:
+ * SkScalar x = 0.49999997;
+ * int ix = SkScalarRoundToInt(x);
+ * SkASSERT(0 == ix); // <--- fails
+ * ix = SkDScalarRoundToInt(x);
+ * SkASSERT(0 == ix); // <--- succeeds
+ */
+static inline int SkDScalarRoundToInt(SkScalar x) {
+ double xx = x;
+ xx += 0.5;
+ return (int)floor(xx);
+}
+
/** Returns the absolute value of the specified SkScalar
*/
#define SkScalarAbs(x) sk_float_abs(x)
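
Spelled out, the failure case in the comment above comes from single precision: the float nearest to 0.49999997 plus 0.5f rounds to exactly 1.0f before floor() runs, so the fast path returns 1, while the same sum carried out in double stays just below 1.0 and floors to 0. A small sketch (not part of the commit) using the two functions as declared above:

    #include "SkScalar.h"

    static void round_comparison() {
        SkScalar x = 0.49999997f;
        int fast = SkScalarRoundToInt(x);   // float path: x + 0.5f == 1.0f, so this is 1
        int slow = SkDScalarRoundToInt(x);  // double path: x + 0.5 < 1.0, so this is 0
        SkASSERT(1 == fast);
        SkASSERT(0 == slow);
    }
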
diff --git a/core/SkShader.h b/core/SkShader.h
index 4af8f78..dc93b84 100644
--- a/core/SkShader.h
+++ b/core/SkShader.h
@@ -119,10 +119,26 @@ public:
*/
virtual bool isOpaque() const { return false; }
+ /**
+ * ContextRec acts as a parameter bundle for creating Contexts.
+ */
+ struct ContextRec {
+ ContextRec() : fDevice(NULL), fPaint(NULL), fMatrix(NULL), fLocalMatrix(NULL) {}
+ ContextRec(const SkBitmap& device, const SkPaint& paint, const SkMatrix& matrix)
+ : fDevice(&device)
+ , fPaint(&paint)
+ , fMatrix(&matrix)
+ , fLocalMatrix(NULL) {}
+
+ const SkBitmap* fDevice; // the bitmap we are drawing into
+ const SkPaint* fPaint; // the current paint associated with the draw
+ const SkMatrix* fMatrix; // the current matrix in the canvas
+ const SkMatrix* fLocalMatrix; // optional local matrix
+ };
+
class Context : public ::SkNoncopyable {
public:
- Context(const SkShader& shader, const SkBitmap& device,
- const SkPaint& paint, const SkMatrix& matrix);
+ Context(const SkShader& shader, const ContextRec&);
virtual ~Context();
@@ -184,39 +200,32 @@ public:
};
static MatrixClass ComputeMatrixClass(const SkMatrix&);
- uint8_t getPaintAlpha() const { return fPaintAlpha; }
- const SkMatrix& getTotalInverse() const { return fTotalInverse; }
- MatrixClass getInverseClass() const { return (MatrixClass)fTotalInverseClass; }
-
+ uint8_t getPaintAlpha() const { return fPaintAlpha; }
+ const SkMatrix& getTotalInverse() const { return fTotalInverse; }
+ MatrixClass getInverseClass() const { return (MatrixClass)fTotalInverseClass; }
+ const SkMatrix& getCTM() const { return fCTM; }
private:
- SkMatrix fTotalInverse;
- uint8_t fPaintAlpha;
- uint8_t fTotalInverseClass;
+ SkMatrix fCTM;
+ SkMatrix fTotalInverse;
+ uint8_t fPaintAlpha;
+ uint8_t fTotalInverseClass;
typedef SkNoncopyable INHERITED;
};
/**
- * Subclasses should be sure to call their INHERITED::validContext() if
- * they override this method.
- */
- virtual bool validContext(const SkBitmap& device, const SkPaint& paint,
- const SkMatrix& matrix, SkMatrix* totalInverse = NULL) const;
-
- /**
* Create the actual object that does the shading.
- * Returns NULL if validContext() returns false.
* Size of storage must be >= contextSize.
*/
- virtual Context* createContext(const SkBitmap& device,
- const SkPaint& paint,
- const SkMatrix& matrix,
- void* storage) const = 0;
+ Context* createContext(const ContextRec&, void* storage) const;
/**
* Return the size of a Context returned by createContext.
+ *
+ * Override this if your subclass overrides createContext, to return the correct size of
+ * your subclass' context.
*/
- virtual size_t contextSize() const = 0;
+ virtual size_t contextSize() const;
/**
* Helper to check the flags to know if it is legal to call shadeSpan16()
@@ -356,6 +365,11 @@ public:
//////////////////////////////////////////////////////////////////////////
// Factory methods for stock shaders
+ /**
+ * Call this to create a new "empty" shader, that will not draw anything.
+ */
+ static SkShader* CreateEmptyShader();
+
/** Call this to create a new shader that will draw with the specified bitmap.
*
* If the bitmap cannot be used (e.g. has no pixels, or its dimensions
@@ -384,20 +398,26 @@ public:
* @param tmy The tiling mode to use when sampling the bitmap in the y-direction.
* @return Returns a new shader object. Note: this function never returns null.
*/
- static SkShader* CreatePictureShader(SkPicture* src, TileMode tmx, TileMode tmy);
+ static SkShader* CreatePictureShader(SkPicture* src, TileMode tmx, TileMode tmy,
+ const SkMatrix* localMatrix = NULL);
SK_TO_STRING_VIRT()
SK_DEFINE_FLATTENABLE_TYPE(SkShader)
protected:
-
SkShader(SkReadBuffer& );
virtual void flatten(SkWriteBuffer&) const SK_OVERRIDE;
-private:
- SkMatrix fLocalMatrix;
+ bool computeTotalInverse(const ContextRec&, SkMatrix* totalInverse) const;
- bool computeTotalInverse(const SkMatrix& matrix, SkMatrix* totalInverse) const;
+ /**
+ * Your subclass must also override contextSize() if it overrides onCreateContext().
+ * Base class impl returns NULL.
+ */
+ virtual Context* onCreateContext(const ContextRec&, void* storage) const;
+
+private:
+ SkMatrix fLocalMatrix;
typedef SkFlattenable INHERITED;
};
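
Taken together, the SkShader changes above fold the old public validContext()/createContext() pair into a single non-virtual createContext(const ContextRec&, void*) that calls the new protected onCreateContext() hook; subclasses now override onCreateContext() and contextSize() instead. A rough sketch of the new subclass shape, using only declarations visible in this diff (class and behavior are hypothetical; flattening boilerplate omitted):

    #include "SkColorPriv.h"
    #include "SkShader.h"

    class MyShader : public SkShader {
    public:
        // Report how many bytes onCreateContext() will placement-new into.
        virtual size_t contextSize() const SK_OVERRIDE {
            return sizeof(MyShaderContext);
        }

        class MyShaderContext : public SkShader::Context {
        public:
            MyShaderContext(const MyShader& shader, const ContextRec& rec)
                : INHERITED(shader, rec) {}

            virtual void shadeSpan(int x, int y, SkPMColor span[], int count) SK_OVERRIDE {
                for (int i = 0; i < count; ++i) {
                    span[i] = SkPackARGB32(0xFF, 0, 0, 0);  // opaque black, for illustration
                }
            }

        private:
            typedef SkShader::Context INHERITED;
        };

    protected:
        // Build the context in the caller-supplied storage (>= contextSize()
        // bytes), or return NULL if this shader cannot draw with 'rec'.
        virtual Context* onCreateContext(const ContextRec& rec, void* storage) const SK_OVERRIDE {
            return SkNEW_PLACEMENT_ARGS(storage, MyShaderContext, (*this, rec));
        }

    private:
        typedef SkShader INHERITED;
    };
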
diff --git a/core/SkSurface.h b/core/SkSurface.h
index 542c9a0..d049d8c 100644
--- a/core/SkSurface.h
+++ b/core/SkSurface.h
@@ -56,15 +56,31 @@ public:
}
/**
+ * Text rendering modes that can be passed to NewRenderTarget*
+ */
+ enum TextRenderMode {
+ /**
+ * This will use the standard text rendering method
+ */
+ kStandard_TextRenderMode,
+ /**
+ * This will use signed distance fields for text rendering when possible
+ */
+ kDistanceField_TextRenderMode,
+ };
+
+ /**
* Return a new surface using the specified render target.
*/
- static SkSurface* NewRenderTargetDirect(GrRenderTarget*);
+ static SkSurface* NewRenderTargetDirect(GrRenderTarget*,
+ TextRenderMode trm = kStandard_TextRenderMode);
/**
* Return a new surface whose contents will be drawn to an offscreen
* render target, allocated by the surface.
*/
- static SkSurface* NewRenderTarget(GrContext*, const SkImageInfo&, int sampleCount = 0);
+ static SkSurface* NewRenderTarget(GrContext*, const SkImageInfo&, int sampleCount = 0,
+ TextRenderMode trm = kStandard_TextRenderMode);
/**
* Return a new surface whose contents will be drawn to an offscreen
@@ -78,7 +94,8 @@ public:
* Note: Scratch textures count against the GrContext's cached resource
* budget.
*/
- static SkSurface* NewScratchRenderTarget(GrContext*, const SkImageInfo&, int sampleCount = 0);
+ static SkSurface* NewScratchRenderTarget(GrContext*, const SkImageInfo&, int sampleCount = 0,
+ TextRenderMode trm = kStandard_TextRenderMode);
int width() const { return fWidth; }
int height() const { return fHeight; }
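
All three factories default the new parameter to kStandard_TextRenderMode, so existing callers keep compiling unchanged; distance-field text is opt-in per surface. A small sketch of opting in (not from the commit; assumes a valid GrContext and the SkImageInfo::MakeN32Premul() factory from this era of the API):

    #include "SkSurface.h"

    static SkSurface* make_df_text_surface(GrContext* context) {
        SkImageInfo info = SkImageInfo::MakeN32Premul(256, 256);
        // Third argument is the sample count, fourth is the new text render mode.
        return SkSurface::NewRenderTarget(context, info, 0,
                                          SkSurface::kDistanceField_TextRenderMode);
    }
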
diff --git a/core/SkTLazy.h b/core/SkTLazy.h
index a291e22..518beec 100644
--- a/core/SkTLazy.h
+++ b/core/SkTLazy.h
@@ -66,6 +66,11 @@ public:
* contents.
*/
T* set(const T& src) {
+ // Diagnostic. May remove later. See crbug.com/364224
+ if (NULL == &src) {
+ sk_throw();
+ }
+
if (this->isValid()) {
*fPtr = src;
} else {
diff --git a/core/SkTypes.h b/core/SkTypes.h
index 13450cd..15de8a4 100644
--- a/core/SkTypes.h
+++ b/core/SkTypes.h
@@ -93,7 +93,7 @@ inline void operator delete(void* p) {
#endif
#ifdef SK_DEBUG
- #define SkASSERT(cond) SK_DEBUGBREAK(cond)
+ #define SkASSERT(cond) SK_ALWAYSBREAK(cond)
#define SkDEBUGFAIL(message) SkASSERT(false && message)
#define SkDEBUGCODE(code) code
#define SkDECLAREPARAM(type, var) , type var
@@ -113,6 +113,8 @@ inline void operator delete(void* p) {
#define SkAssertResult(cond) cond
#endif
+#define SkFAIL(message) SK_ALWAYSBREAK(false && message)
+
#ifdef SK_DEVELOPER
#define SkDEVCODE(code) code
#else
diff --git a/core/SkUtils.h b/core/SkUtils.h
index d6bf8dd..996a82e 100644
--- a/core/SkUtils.h
+++ b/core/SkUtils.h
@@ -17,7 +17,7 @@
@param value The 16bit value to be copied into buffer
@param count The number of times value should be copied into the buffer.
*/
-void sk_memset16_portable(uint16_t dst[], uint16_t value, int count);
+void sk_memset16(uint16_t dst[], uint16_t value, int count);
typedef void (*SkMemset16Proc)(uint16_t dst[], uint16_t value, int count);
SkMemset16Proc SkMemset16GetPlatformProc();
@@ -26,18 +26,10 @@ SkMemset16Proc SkMemset16GetPlatformProc();
@param value The 32bit value to be copied into buffer
@param count The number of times value should be copied into the buffer.
*/
-void sk_memset32_portable(uint32_t dst[], uint32_t value, int count);
+void sk_memset32(uint32_t dst[], uint32_t value, int count);
typedef void (*SkMemset32Proc)(uint32_t dst[], uint32_t value, int count);
SkMemset32Proc SkMemset32GetPlatformProc();
-#ifndef sk_memset16
-extern SkMemset16Proc sk_memset16;
-#endif
-
-#ifndef sk_memset32
-extern SkMemset32Proc sk_memset32;
-#endif
-
///////////////////////////////////////////////////////////////////////////////
#define kMaxBytesInUTF8Sequence 4
diff --git a/core/SkXfermode.h b/core/SkXfermode.h
index 9bad1e8..88f3f62 100644
--- a/core/SkXfermode.h
+++ b/core/SkXfermode.h
@@ -17,8 +17,6 @@ class GrEffectRef;
class GrTexture;
class SkString;
-//#define SK_SUPPORT_LEGACY_PROCXFERMODE
-
/** \class SkXfermode
*
* SkXfermode is the base class for objects that are called to implement custom
@@ -246,54 +244,4 @@ private:
typedef SkFlattenable INHERITED;
};
-///////////////////////////////////////////////////////////////////////////////
-
-#ifdef SK_SUPPORT_LEGACY_PROCXFERMODE
-/** \class SkProcXfermode
-
- SkProcXfermode is a xfermode that applies the specified proc to its colors.
- This class is not exported to java.
-*/
-class SK_API SkProcXfermode : public SkXfermode {
-public:
- static SkProcXfermode* Create(SkXfermodeProc proc) {
- return SkNEW_ARGS(SkProcXfermode, (proc));
- }
-
- // overrides from SkXfermode
- virtual void xfer32(SkPMColor dst[], const SkPMColor src[], int count,
- const SkAlpha aa[]) const SK_OVERRIDE;
- virtual void xfer16(uint16_t dst[], const SkPMColor src[], int count,
- const SkAlpha aa[]) const SK_OVERRIDE;
- virtual void xferA8(SkAlpha dst[], const SkPMColor src[], int count,
- const SkAlpha aa[]) const SK_OVERRIDE;
-
- SK_TO_STRING_OVERRIDE()
- SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkProcXfermode)
-
-protected:
- SkProcXfermode(SkReadBuffer&);
- virtual void flatten(SkWriteBuffer&) const SK_OVERRIDE;
-
- // allow subclasses to update this after we unflatten
- void setProc(SkXfermodeProc proc) {
- fProc = proc;
- }
-
- SkXfermodeProc getProc() const {
- return fProc;
- }
-
-#ifdef SK_SUPPORT_LEGACY_PUBLICEFFECTCONSTRUCTORS
-public:
-#endif
- SkProcXfermode(SkXfermodeProc proc) : fProc(proc) {}
-
-private:
- SkXfermodeProc fProc;
-
- typedef SkXfermode INHERITED;
-};
-#endif
-
#endif
diff --git a/effects/SkBlurMaskFilter.h b/effects/SkBlurMaskFilter.h
index ddb163e..5fcf463 100644
--- a/effects/SkBlurMaskFilter.h
+++ b/effects/SkBlurMaskFilter.h
@@ -26,6 +26,12 @@ public:
};
#endif
+ /**
+ * If radius > 0, return the corresponding sigma, else return 0. Use this to convert from the
+ * (legacy) idea of specifying the blur "radius" to the standard notion of specifying its sigma.
+ */
+ static SkScalar ConvertRadiusToSigma(SkScalar radius);
+
enum BlurFlags {
kNone_BlurFlag = 0x00,
/** The blur layer's radius is not affected by transforms */
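
ConvertRadiusToSigma() gives code that still thinks in the legacy blur "radius" a direct path to the sigma the current factories take. A hedged usage sketch (not part of the commit; assumes the Create(SkBlurStyle, sigma, flags) factory from this era of the API and the usual set-and-unref paint idiom):

    #include "SkBlurMaskFilter.h"
    #include "SkPaint.h"

    static void apply_legacy_radius_blur(SkPaint* paint, SkScalar radius) {
        // Map the legacy "radius" onto the sigma the factory expects.
        SkScalar sigma = SkBlurMaskFilter::ConvertRadiusToSigma(radius);
        SkMaskFilter* mf = SkBlurMaskFilter::Create(kNormal_SkBlurStyle, sigma);
        paint->setMaskFilter(mf)->unref();  // the paint now holds its own ref
    }
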
diff --git a/effects/SkPerlinNoiseShader.h b/effects/SkPerlinNoiseShader.h
index 5b27029..5082a07 100644
--- a/effects/SkPerlinNoiseShader.h
+++ b/effects/SkPerlinNoiseShader.h
@@ -72,15 +72,11 @@ public:
}
- virtual SkShader::Context* createContext(
- const SkBitmap& device, const SkPaint& paint,
- const SkMatrix& matrix, void* storage) const SK_OVERRIDE;
virtual size_t contextSize() const SK_OVERRIDE;
class PerlinNoiseShaderContext : public SkShader::Context {
public:
- PerlinNoiseShaderContext(const SkPerlinNoiseShader& shader, const SkBitmap& device,
- const SkPaint& paint, const SkMatrix& matrix);
+ PerlinNoiseShaderContext(const SkPerlinNoiseShader& shader, const ContextRec&);
virtual ~PerlinNoiseShaderContext() {}
virtual void shadeSpan(int x, int y, SkPMColor[], int count) SK_OVERRIDE;
@@ -107,6 +103,7 @@ public:
protected:
SkPerlinNoiseShader(SkReadBuffer&);
virtual void flatten(SkWriteBuffer&) const SK_OVERRIDE;
+ virtual Context* onCreateContext(const ContextRec&, void* storage) const SK_OVERRIDE;
private:
SkPerlinNoiseShader(SkPerlinNoiseShader::Type type, SkScalar baseFrequencyX,
diff --git a/effects/SkTransparentShader.h b/effects/SkTransparentShader.h
index 790e5ae..d9a3e5d 100644
--- a/effects/SkTransparentShader.h
+++ b/effects/SkTransparentShader.h
@@ -14,15 +14,11 @@ class SK_API SkTransparentShader : public SkShader {
public:
SkTransparentShader() {}
- virtual SkShader::Context* createContext(const SkBitmap& device, const SkPaint& paint,
- const SkMatrix& matrix, void* storage) const
- SK_OVERRIDE;
virtual size_t contextSize() const SK_OVERRIDE;
class TransparentShaderContext : public SkShader::Context {
public:
- TransparentShaderContext(const SkTransparentShader& shader, const SkBitmap& device,
- const SkPaint& paint, const SkMatrix& matrix);
+ TransparentShaderContext(const SkTransparentShader& shader, const ContextRec&);
virtual ~TransparentShaderContext();
virtual uint32_t getFlags() const SK_OVERRIDE;
@@ -38,6 +34,9 @@ public:
SK_TO_STRING_OVERRIDE()
SK_DECLARE_PUBLIC_FLATTENABLE_DESERIALIZATION_PROCS(SkTransparentShader)
+protected:
+ virtual Context* onCreateContext(const ContextRec&, void* storage) const SK_OVERRIDE;
+
private:
SkTransparentShader(SkReadBuffer& buffer) : INHERITED(buffer) {}
diff --git a/gpu/GrCacheable.h b/gpu/GrCacheable.h
new file mode 100644
index 0000000..39c62b1
--- /dev/null
+++ b/gpu/GrCacheable.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2014 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef GrCacheable_DEFINED
+#define GrCacheable_DEFINED
+
+#include "SkRefCnt.h"
+
+class GrResourceCacheEntry;
+
+/**
+ * Base class for objects that can be kept in the GrResourceCache.
+ */
+class GrCacheable : public SkRefCnt {
+public:
+ SK_DECLARE_INST_COUNT(GrCacheable)
+
+ /**
+ * Retrieves the amount of GPU memory used by this resource in bytes. It is
+ * approximate since we aren't aware of additional padding or copies made
+ * by the driver.
+ *
+ * @return the amount of GPU memory used in bytes
+ */
+ virtual size_t gpuMemorySize() const = 0;
+
+ /**
+ * Checks whether the GPU memory allocated to this resource is still in effect.
+ * It can become invalid if its context is destroyed or lost, in which case it
+ * should no longer count against the GrResourceCache budget.
+ *
+ * @return true if this resource is still holding GPU memory
+ * false otherwise.
+ */
+ virtual bool isValidOnGpu() const = 0;
+
+ void setCacheEntry(GrResourceCacheEntry* cacheEntry) { fCacheEntry = cacheEntry; }
+ GrResourceCacheEntry* getCacheEntry() { return fCacheEntry; }
+
+protected:
+ GrCacheable() : fCacheEntry(NULL) {}
+
+ bool isInCache() const { return NULL != fCacheEntry; }
+
+ /**
+ * This entry point should be called whenever gpuMemorySize() begins
+ * reporting a different size. If the object is in the cache, it will call
+ * gpuMemorySize() immediately and pass the new size on to the resource
+ * cache.
+ */
+ void didChangeGpuMemorySize() const;
+
+private:
+ GrResourceCacheEntry* fCacheEntry; // NULL if not in cache
+
+ typedef SkRefCnt INHERITED;
+};
+
+#endif
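
GrCacheable is the new minimal contract for anything the GrResourceCache can hold: report an (approximate) GPU byte size, say whether the underlying allocation is still live, and notify the cache when the size changes. A bare-bones sketch of a custom cacheable object (hypothetical class, not part of the commit):

    #include "GrCacheable.h"

    class MyCachedBlob : public GrCacheable {
    public:
        explicit MyCachedBlob(size_t byteSize)
            : fByteSize(byteSize), fContextLost(false) {}

        virtual size_t gpuMemorySize() const SK_OVERRIDE { return fByteSize; }

        // Once the owning context is lost, stop counting against the budget.
        virtual bool isValidOnGpu() const SK_OVERRIDE { return !fContextLost; }

        void markContextLost() { fContextLost = true; }

        void resize(size_t newByteSize) {
            fByteSize = newByteSize;
            // Tell the cache (if this object is in one) that the size changed.
            this->didChangeGpuMemorySize();
        }

    private:
        size_t fByteSize;
        bool   fContextLost;

        typedef GrCacheable INHERITED;
    };
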
diff --git a/gpu/GrColor.h b/gpu/GrColor.h
index b0bce3f..183781a 100644
--- a/gpu/GrColor.h
+++ b/gpu/GrColor.h
@@ -117,7 +117,7 @@ static inline char GrColorComponentFlagToChar(GrColorComponentFlags component) {
case kA_GrColorComponentFlag:
return 'a';
default:
- GrCrash("Invalid color component flag.");
+ SkFAIL("Invalid color component flag.");
return '\0';
}
}
diff --git a/gpu/GrConfig.h b/gpu/GrConfig.h
index fc464c5..cbdb350 100644
--- a/gpu/GrConfig.h
+++ b/gpu/GrConfig.h
@@ -178,15 +178,6 @@ typedef unsigned __int64 uint64_t;
#define GrAlwaysAssert(COND) GR_ALWAYSASSERT(COND)
/**
- * Crash from unrecoverable condition, optionally with a message. The debug variants only
- * crash in a debug build. The message versions print the message regardless of release vs debug.
- */
-inline void GrCrash() { GrAlwaysAssert(false); }
-inline void GrCrash(const char* msg) { GrPrintf(msg); GrAlwaysAssert(false); }
-inline void GrDebugCrash() { SkASSERT(false); }
-inline void GrDebugCrash(const char* msg) { GrPrintf(msg); SkASSERT(false); }
-
-/**
* GR_STATIC_ASSERT is a compile time assertion. Depending on the platform
* it may print the message in the compiler log. Obviously, the condition must
* be evaluatable at compile time.
diff --git a/gpu/GrContext.h b/gpu/GrContext.h
index c88f469..195ab72 100644
--- a/gpu/GrContext.h
+++ b/gpu/GrContext.h
@@ -20,6 +20,7 @@
class GrAARectRenderer;
class GrAutoScratchTexture;
+class GrCacheable;
class GrDrawState;
class GrDrawTarget;
class GrEffect;
@@ -87,8 +88,8 @@ public:
* buffer, etc. references/IDs are now invalid. Should be called even when
* GrContext is no longer going to be used for two reasons:
* 1) ~GrContext will not try to free the objects in the 3D API.
- * 2) If you've created GrResources that outlive the GrContext they will
- * be marked as invalid (GrResource::isValid()) and won't attempt to
+ * 2) If you've created GrGpuObjects that outlive the GrContext they will
+ * be marked as invalid (GrGpuObjects::isValid()) and won't attempt to
* free their underlying resource in the 3D API.
* Content drawn since the last GrContext::flush() may be lost.
*/
@@ -898,6 +899,17 @@ public:
GrPathRendererChain::DrawType drawType = GrPathRendererChain::kColor_DrawType,
GrPathRendererChain::StencilSupport* stencilSupport = NULL);
+ /**
+ * Stores a custom resource in the cache, based on the specified key.
+ */
+ void addResourceToCache(const GrResourceKey&, GrCacheable*);
+
+ /**
+ * Finds a resource in the cache, based on the specified key. This is intended for use in
+ * conjunction with addResourceToCache(). The return value will be NULL if not found. The
+ * caller must balance with a call to unref().
+ */
+ GrCacheable* findAndRefCachedResource(const GrResourceKey&);
#if GR_CACHE_STATS
void printCacheStats() const;
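
The two new entry points let client code park a GrCacheable in the resource cache under a GrResourceKey and retrieve it later; per the comment above, a successful lookup returns a ref'ed pointer that the caller must unref. A short sketch (not from the commit) assuming a valid context, key, and cacheable object:

    #include "GrContext.h"

    static void cache_roundtrip(GrContext* context, const GrResourceKey& key,
                                GrCacheable* blob) {
        // Store the object in the cache under 'key'.
        context->addResourceToCache(key, blob);

        // Later: look it up. NULL means it was never added or has been purged.
        GrCacheable* found = context->findAndRefCachedResource(key);
        if (NULL != found) {
            // ... use 'found' ...
            found->unref();   // balance the ref taken by the lookup
        }
    }
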
diff --git a/gpu/GrContextFactory.h b/gpu/GrContextFactory.h
index 916d5df..f09bad9 100644
--- a/gpu/GrContextFactory.h
+++ b/gpu/GrContextFactory.h
@@ -84,7 +84,7 @@ public:
case kDebug_GLContextType:
return "debug";
default:
- GrCrash("Unknown GL Context type.");
+ SkFAIL("Unknown GL Context type.");
}
}
diff --git a/gpu/GrResource.h b/gpu/GrGpuObject.h
index 93dec58..72d2f89 100644
--- a/gpu/GrResource.h
+++ b/gpu/GrGpuObject.h
@@ -5,26 +5,25 @@
* found in the LICENSE file.
*/
-#ifndef GrResource_DEFINED
-#define GrResource_DEFINED
+#ifndef GrGpuObject_DEFINED
+#define GrGpuObject_DEFINED
-#include "SkRefCnt.h"
+#include "GrCacheable.h"
#include "SkTInternalLList.h"
class GrGpu;
class GrContext;
-class GrResourceEntry;
/**
- * Base class for the GPU resources created by a GrContext.
+ * Base class for the GPU objects created by a GrContext.
*/
-class GrResource : public SkRefCnt {
+class GrGpuObject : public GrCacheable {
public:
- SK_DECLARE_INST_COUNT(GrResource)
+ SK_DECLARE_INST_COUNT(GrGpuObject)
/**
- * Frees the resource in the underlying 3D API. It must be safe to call this
- * when the resource has been previously abandoned.
+ * Frees the object in the underlying 3D API. It must be safe to call this
+ * when the object has been previously abandoned.
*/
void release();
@@ -35,37 +34,26 @@ public:
void abandon();
/**
- * Tests whether a resource has been abandoned or released. All resources
- * will be in this state after their creating GrContext is destroyed or has
- * contextLost called. It's up to the client to test isValid() before
- * attempting to use a resource if it holds refs on resources across
+ * Tests whether an object has been abandoned or released. All objects will
+ * be in this state after their creating GrContext is destroyed or has
+ * contextLost called. It's up to the client to test wasDestroyed() before
+ * attempting to use an object if it holds refs on objects across
* ~GrContext, freeResources with the force flag, or contextLost.
*
- * @return true if the resource has been released or abandoned,
+ * @return true if the object has been released or abandoned,
* false otherwise.
*/
- bool isValid() const { return NULL != fGpu; }
+ bool wasDestroyed() const { return NULL == fGpu; }
/**
- * Retrieves the size of the object in GPU memory. This is approximate since
- * we aren't aware of additional padding or copies made by the driver.
- *
- * @return the size of the buffer in bytes
- */
- virtual size_t sizeInBytes() const = 0;
-
- /**
- * Retrieves the context that owns the resource. Note that it is possible
- * for this to return NULL. When resources have been release()ed or
- * abandon()ed they no longer have an owning context. Destroying a
- * GrContext automatically releases all its resources.
+ * Retrieves the context that owns the object. Note that it is possible for
+ * this to return NULL. When objects have been release()ed or abandon()ed
+ * they no longer have an owning context. Destroying a GrContext
+ * automatically releases all its resources.
*/
const GrContext* getContext() const;
GrContext* getContext();
- void setCacheEntry(GrResourceEntry* cacheEntry) { fCacheEntry = cacheEntry; }
- GrResourceEntry* getCacheEntry() { return fCacheEntry; }
-
void incDeferredRefCount() const {
SkASSERT(fDeferredRefCount >= 0);
++fDeferredRefCount;
@@ -84,14 +72,16 @@ public:
void setNeedsDeferredUnref() { fFlags |= kDeferredUnref_FlagBit; }
+ virtual bool isValidOnGpu() const SK_OVERRIDE { return !this->wasDestroyed(); }
+
protected:
/**
- * isWrapped indicates we have wrapped a client-created backend resource in a GrResource. If it
- * is true then the client is responsible for the lifetime of the underlying backend resource.
- * Otherwise, our onRelease() should free the resource.
+ * isWrapped indicates we have wrapped a client-created backend object in a GrGpuObject. If it
+ * is true then the client is responsible for the lifetime of the underlying backend object.
+ * Otherwise, our onRelease() should free the object.
*/
- GrResource(GrGpu* gpu, bool isWrapped);
- virtual ~GrResource();
+ GrGpuObject(GrGpu* gpu, bool isWrapped);
+ virtual ~GrGpuObject();
GrGpu* getGpu() const { return fGpu; }
@@ -100,7 +90,6 @@ protected:
virtual void onRelease() {};
virtual void onAbandon() {};
- bool isInCache() const { return NULL != fCacheEntry; }
bool isWrapped() const { return kWrapped_FlagBit & fFlags; }
bool needsDeferredUnref() const { return SkToBool(kDeferredUnref_FlagBit & fFlags); }
@@ -110,18 +99,16 @@ private:
#endif
// We're in an internal doubly linked list
- SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrResource);
+ SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrGpuObject);
GrGpu* fGpu; // not reffed. The GrGpu can be deleted while there
- // are still live GrResources. It will call
- // release() on all such resources in its
- // destructor.
- GrResourceEntry* fCacheEntry; // NULL if not in cache
+ // are still live GrGpuObjects. It will call
+ // release() on all such objects in its destructor.
mutable int fDeferredRefCount; // How many references in deferred drawing buffers.
enum Flags {
/**
- * This resource wraps a GPU resource given to us by the user.
+ * This object wraps a GPU object given to us by the user.
* Lifetime management is left up to the user (i.e., we will not
* free it).
*/
@@ -129,7 +116,7 @@ private:
/**
* This texture should be de-refed when the deferred ref count goes
- * to zero. A resource gets into this state when the resource cache
+ * to zero. An object gets into this state when the resource cache
* is holding a ref-of-obligation (i.e., someone needs to own it but
* no one else wants to) but doesn't really want to keep it around.
*/
@@ -137,7 +124,7 @@ private:
};
uint32_t fFlags;
- typedef SkRefCnt INHERITED;
+ typedef GrCacheable INHERITED;
};
#endif
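
With the rename, call sites test for the dead state directly: wasDestroyed() is true exactly when the old isValid() would have been false. A small sketch of the guarded-use pattern the comment describes (hypothetical caller, not part of the commit):

    #include "GrGpuObject.h"

    static void use_if_alive(GrGpuObject* obj) {
        // After ~GrContext, freeResources with the force flag, or contextLost,
        // the C++ object still exists but its 3D-API handle is gone.
        if (obj->wasDestroyed()) {      // old code: if (!obj->isValid())
            return;
        }
        // ... safe to touch the underlying GPU object here ...
    }
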
diff --git a/gpu/GrRenderTarget.h b/gpu/GrRenderTarget.h
index ac3cbee..6a3f26f 100644
--- a/gpu/GrRenderTarget.h
+++ b/gpu/GrRenderTarget.h
@@ -26,7 +26,7 @@ public:
SK_DECLARE_INST_COUNT(GrRenderTarget)
// GrResource overrides
- virtual size_t sizeInBytes() const SK_OVERRIDE;
+ virtual size_t gpuMemorySize() const SK_OVERRIDE;
// GrSurface overrides
/**
diff --git a/gpu/GrSurface.h b/gpu/GrSurface.h
index 15e44ab..f741c77 100644
--- a/gpu/GrSurface.h
+++ b/gpu/GrSurface.h
@@ -10,14 +10,14 @@
#define GrSurface_DEFINED
#include "GrTypes.h"
-#include "GrResource.h"
+#include "GrGpuObject.h"
#include "SkRect.h"
class GrTexture;
class GrRenderTarget;
struct SkImageInfo;
-class GrSurface : public GrResource {
+class GrSurface : public GrGpuObject {
public:
SK_DECLARE_INST_COUNT(GrSurface);
@@ -144,7 +144,7 @@ protected:
GrTextureDesc fDesc;
private:
- typedef GrResource INHERITED;
+ typedef GrGpuObject INHERITED;
};
#endif // GrSurface_DEFINED
diff --git a/gpu/GrTexture.h b/gpu/GrTexture.h
index 1df9dc6..ac31f51 100644
--- a/gpu/GrTexture.h
+++ b/gpu/GrTexture.h
@@ -44,22 +44,16 @@ public:
return 0 != (fDesc.fFlags & flags);
}
- void dirtyMipMaps(bool mipMapsDirty) {
- fMipMapsDirty = mipMapsDirty;
- }
+ void dirtyMipMaps(bool mipMapsDirty);
bool mipMapsAreDirty() const {
- return fMipMapsDirty;
+ return kValid_MipMapsStatus != fMipMapsStatus;
}
/**
* Approximate number of bytes used by the texture
*/
- virtual size_t sizeInBytes() const SK_OVERRIDE {
- return (size_t) fDesc.fWidth *
- fDesc.fHeight *
- GrBytesPerPixel(fDesc.fConfig);
- }
+ virtual size_t gpuMemorySize() const SK_OVERRIDE;
// GrSurface overrides
virtual bool readPixels(int left, int top, int width, int height,
@@ -144,7 +138,7 @@ protected:
GrTexture(GrGpu* gpu, bool isWrapped, const GrTextureDesc& desc)
: INHERITED(gpu, isWrapped, desc)
, fRenderTarget(NULL)
- , fMipMapsDirty(true) {
+ , fMipMapsStatus(kNotAllocated_MipMapsStatus) {
// only make sense if alloc size is pow2
fShiftFixedX = 31 - SkCLZ(fDesc.fWidth);
@@ -159,12 +153,18 @@ protected:
void validateDesc() const;
private:
+ enum MipMapsStatus {
+ kNotAllocated_MipMapsStatus,
+ kAllocated_MipMapsStatus,
+ kValid_MipMapsStatus
+ };
+
// these two shift a fixed-point value into normalized coordinates
// for this texture if the texture is power of two sized.
int fShiftFixedX;
int fShiftFixedY;
- bool fMipMapsDirty;
+ MipMapsStatus fMipMapsStatus;
virtual void internal_dispose() const SK_OVERRIDE;
diff --git a/gpu/GrTypesPriv.h b/gpu/GrTypesPriv.h
index f09f95d..dfe4153 100644
--- a/gpu/GrTypesPriv.h
+++ b/gpu/GrTypesPriv.h
@@ -212,7 +212,7 @@ static inline GrEffectEdgeType GrInvertEffectEdgeType(const GrEffectEdgeType edg
case kInverseFillAA_GrEffectEdgeType:
return kFillAA_GrEffectEdgeType;
case kHairlineAA_GrEffectEdgeType:
- GrCrash("Hairline fill isn't invertible.");
+ SkFAIL("Hairline fill isn't invertible.");
}
return kFillAA_GrEffectEdgeType; // suppress warning.
}
diff --git a/gpu/SkGpuDevice.h b/gpu/SkGpuDevice.h
index 7f564de..8042ed3 100644
--- a/gpu/SkGpuDevice.h
+++ b/gpu/SkGpuDevice.h
@@ -32,6 +32,7 @@ public:
enum Flags {
kNeedClear_Flag = 1 << 0, //!< Surface requires an initial clear
kCached_Flag = 1 << 1, //!< Surface is cached and needs to be unlocked when released
+ kDFFonts_Flag = 1 << 2, //!< Surface should render fonts using signed distance fields
};
/**
diff --git a/gpu/gl/GrGLFunctions.h b/gpu/gl/GrGLFunctions.h
index 3f87365..51db053 100644
--- a/gpu/gl/GrGLFunctions.h
+++ b/gpu/gl/GrGLFunctions.h
@@ -111,6 +111,7 @@ extern "C" {
typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLEndQueryProc)(GrGLenum target);
typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLFinishProc)();
typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLFlushProc)();
+ typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLFlushMappedBufferRangeProc)(GrGLenum target, GrGLintptr offset, GrGLsizeiptr length);
typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLFramebufferRenderbufferProc)(GrGLenum target, GrGLenum attachment, GrGLenum renderbuffertarget, GrGLuint renderbuffer);
typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLFramebufferTexture2DProc)(GrGLenum target, GrGLenum attachment, GrGLenum textarget, GrGLuint texture, GrGLint level);
typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLFramebufferTexture2DMultisampleProc)(GrGLenum target, GrGLenum attachment, GrGLenum textarget, GrGLuint texture, GrGLint level, GrGLsizei samples);
@@ -150,6 +151,7 @@ extern "C" {
typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLLineWidthProc)(GrGLfloat width);
typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLLinkProgramProc)(GrGLuint program);
typedef GrGLvoid* (GR_GL_FUNCTION_TYPE* GrGLMapBufferProc)(GrGLenum target, GrGLenum access);
+ typedef GrGLvoid* (GR_GL_FUNCTION_TYPE* GrGLMapBufferRangeProc)(GrGLenum target, GrGLintptr offset, GrGLsizeiptr length, GrGLbitfield access);
typedef GrGLvoid* (GR_GL_FUNCTION_TYPE* GrGLMapBufferSubDataProc)(GrGLuint target, GrGLintptr offset, GrGLsizeiptr size, GrGLenum access);
typedef GrGLvoid* (GR_GL_FUNCTION_TYPE* GrGLMapTexSubImage2DProc)(GrGLenum target, GrGLint level, GrGLint xoffset, GrGLint yoffset, GrGLsizei width, GrGLsizei height, GrGLenum format, GrGLenum type, GrGLenum access);
typedef GrGLvoid (GR_GL_FUNCTION_TYPE* GrGLPixelStoreiProc)(GrGLenum pname, GrGLint param);
diff --git a/gpu/gl/GrGLInterface.h b/gpu/gl/GrGLInterface.h
index 9070af2..2b33e8e 100644
--- a/gpu/gl/GrGLInterface.h
+++ b/gpu/gl/GrGLInterface.h
@@ -192,6 +192,7 @@ public:
GLPtr<GrGLEndQueryProc> fEndQuery;
GLPtr<GrGLFinishProc> fFinish;
GLPtr<GrGLFlushProc> fFlush;
+ GLPtr<GrGLFlushMappedBufferRangeProc> fFlushMappedBufferRange;
GLPtr<GrGLFramebufferRenderbufferProc> fFramebufferRenderbuffer;
GLPtr<GrGLFramebufferTexture2DProc> fFramebufferTexture2D;
GLPtr<GrGLFramebufferTexture2DMultisampleProc> fFramebufferTexture2DMultisample;
@@ -231,6 +232,7 @@ public:
GLPtr<GrGLLineWidthProc> fLineWidth;
GLPtr<GrGLLinkProgramProc> fLinkProgram;
GLPtr<GrGLMapBufferProc> fMapBuffer;
+ GLPtr<GrGLMapBufferRangeProc> fMapBufferRange;
GLPtr<GrGLMapBufferSubDataProc> fMapBufferSubData;
GLPtr<GrGLMapTexSubImage2DProc> fMapTexSubImage2D;
GLPtr<GrGLMatrixLoadfProc> fMatrixLoadf;
diff --git a/utils/SkThreadPool.h b/utils/SkThreadPool.h
index a75bed8..c99c5c4 100644
--- a/utils/SkThreadPool.h
+++ b/utils/SkThreadPool.h
@@ -26,7 +26,7 @@ static inline int num_cores() {
GetSystemInfo(&sysinfo);
return sysinfo.dwNumberOfProcessors;
#elif defined(SK_BUILD_FOR_UNIX) || defined(SK_BUILD_FOR_MAC) || defined(SK_BUILD_FOR_ANDROID)
- return sysconf(_SC_NPROCESSORS_ONLN);
+ return (int) sysconf(_SC_NPROCESSORS_ONLN);
#else
return 1;
#endif
@@ -50,6 +50,11 @@ public:
void add(SkTRunnable<T>*);
/**
+ * Same as add, but adds the runnable as the very next to run rather than enqueueing it.
+ */
+ void addNext(SkTRunnable<T>*);
+
+ /**
* Block until all added SkRunnables have completed. Once called, calling add() is undefined.
*/
void wait();
@@ -66,6 +71,9 @@ public:
kHalting_State, // There's no work to do and no thread is busy. All threads can shut down.
};
+ void addSomewhere(SkTRunnable<T>* r,
+ void (SkTInternalLList<LinkedRunnable>::*)(LinkedRunnable*));
+
SkTInternalLList<LinkedRunnable> fQueue;
SkCondVar fReady;
SkTDArray<SkThread*> fThreads;
@@ -111,7 +119,8 @@ struct ThreadLocal<void> {
} // namespace SkThreadPoolPrivate
template <typename T>
-void SkTThreadPool<T>::add(SkTRunnable<T>* r) {
+void SkTThreadPool<T>::addSomewhere(SkTRunnable<T>* r,
+ void (SkTInternalLList<LinkedRunnable>::* f)(LinkedRunnable*)) {
if (r == NULL) {
return;
}
@@ -126,11 +135,21 @@ void SkTThreadPool<T>::add(SkTRunnable<T>* r) {
linkedRunnable->fRunnable = r;
fReady.lock();
SkASSERT(fState != kHalting_State); // Shouldn't be able to add work when we're halting.
- fQueue.addToHead(linkedRunnable);
+ (fQueue.*f)(linkedRunnable);
fReady.signal();
fReady.unlock();
}
+template <typename T>
+void SkTThreadPool<T>::add(SkTRunnable<T>* r) {
+ this->addSomewhere(r, &SkTInternalLList<LinkedRunnable>::addToTail);
+}
+
+template <typename T>
+void SkTThreadPool<T>::addNext(SkTRunnable<T>* r) {
+ this->addSomewhere(r, &SkTInternalLList<LinkedRunnable>::addToHead);
+}
+
template <typename T>
void SkTThreadPool<T>::wait() {
@@ -174,7 +193,7 @@ template <typename T>
// We've got the lock back here, no matter if we ran wait or not.
// The queue is not empty, so we have something to run. Claim it.
- LinkedRunnable* r = pool->fQueue.tail();
+ LinkedRunnable* r = pool->fQueue.head();
pool->fQueue.remove(r);
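
add() and addNext() now share one implementation: addSomewhere() receives a pointer to a member function of SkTInternalLList, so the caller picks which end of the queue to use (addToTail for FIFO add(), addToHead for run-next), and the worker loop pops from head() to match. A standalone illustration of the (object.*member)(args) dispatch used in (fQueue.*f)(linkedRunnable), with a plain stand-in queue rather than Skia types:

    #include <cstdio>

    struct Queue {
        void addToHead(int v) { std::printf("head <- %d\n", v); }
        void addToTail(int v) { std::printf("tail <- %d\n", v); }
    };

    // Same shape as addSomewhere(): which end gets the item is decided by the
    // pointer-to-member-function the caller passes in.
    static void add_somewhere(Queue& q, void (Queue::*where)(int), int v) {
        (q.*where)(v);
    }

    int main() {
        Queue q;
        add_somewhere(q, &Queue::addToTail, 1);  // what add() does
        add_somewhere(q, &Queue::addToHead, 2);  // what addNext() does
        return 0;
    }
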