Merge third_party/skia/src from https://chromium.googlesource.com/external/skia/src.git at d1d24d273a220b6b76c4918f04cbb1b1769f3588

This commit was generated by merge_from_chromium.py.

Change-Id: Icd49d069b5a6bde2641b9774e3f9a70108a8faa5
diff --git a/core/SkBitmapProcShader.cpp b/core/SkBitmapProcShader.cpp
index 503269a..00d938b 100644
--- a/core/SkBitmapProcShader.cpp
+++ b/core/SkBitmapProcShader.cpp
@@ -101,13 +101,13 @@
     if (!fRawBitmap.getTexture() && !valid_for_drawing(fRawBitmap)) {
         return NULL;
     }
-    
+
     SkMatrix totalInverse;
     // Do this first, so we know the matrix can be inverted.
     if (!this->computeTotalInverse(rec, &totalInverse)) {
         return NULL;
     }
-    
+
     void* stateStorage = (char*)storage + sizeof(BitmapProcShaderContext);
     SkBitmapProcState* state = SkNEW_PLACEMENT(stateStorage, SkBitmapProcState);
 
diff --git a/core/SkComposeShader.cpp b/core/SkComposeShader.cpp
index 7a7dce6..b2f69b4 100644
--- a/core/SkComposeShader.cpp
+++ b/core/SkComposeShader.cpp
@@ -93,7 +93,7 @@
     // sub-shaders.
     SkPaint opaquePaint(*rec.fPaint);
     opaquePaint.setAlpha(0xFF);
-    
+
     ContextRec newRec(rec);
     newRec.fMatrix = &tmpM;
     newRec.fPaint = &opaquePaint;
diff --git a/core/SkDraw.cpp b/core/SkDraw.cpp
index a74e3c0..9347efe 100644
--- a/core/SkDraw.cpp
+++ b/core/SkDraw.cpp
@@ -2432,6 +2432,8 @@
     return sizeof(TriColorShaderContext);
 }
 void SkTriColorShader::TriColorShaderContext::shadeSpan(int x, int y, SkPMColor dstC[], int count) {
+    const int alphaScale = Sk255To256(this->getPaintAlpha());
+
     SkPoint src;
 
     for (int i = 0; i < count; i++) {
@@ -2450,9 +2452,15 @@
             scale0 = 0;
         }
 
+        if (256 != alphaScale) {
+            scale0 = SkAlphaMul(scale0, alphaScale);
+            scale1 = SkAlphaMul(scale1, alphaScale);
+            scale2 = SkAlphaMul(scale2, alphaScale);
+        }
+
         dstC[i] = SkAlphaMulQ(fColors[0], scale0) +
-        SkAlphaMulQ(fColors[1], scale1) +
-        SkAlphaMulQ(fColors[2], scale2);
+                  SkAlphaMulQ(fColors[1], scale1) +
+                  SkAlphaMulQ(fColors[2], scale2);
     }
 }
 
diff --git a/core/SkFilterShader.h b/core/SkFilterShader.h
index 3f983e2..1a4b71f 100644
--- a/core/SkFilterShader.h
+++ b/core/SkFilterShader.h
@@ -43,7 +43,7 @@
     SkFilterShader(SkReadBuffer& );
     virtual void flatten(SkWriteBuffer&) const SK_OVERRIDE;
     virtual Context* onCreateContext(const ContextRec&, void* storage) const SK_OVERRIDE;
-    
+
 
 private:
     SkShader*       fShader;
diff --git a/core/SkPaintPriv.cpp b/core/SkPaintPriv.cpp
index 65fd0e7..ce05389 100644
--- a/core/SkPaintPriv.cpp
+++ b/core/SkPaintPriv.cpp
@@ -76,24 +76,3 @@
     }
     return false;
 }
-
-bool NeedsDeepCopy(const SkPaint& paint) {
-    /*
-     *  These fields are known to be immutable, and so can be shallow-copied
-     *
-     *  getTypeface()
-     *  getAnnotation()
-     *  paint.getColorFilter()
-     *  getXfermode()
-     *  getPathEffect()
-     *  getMaskFilter()
-     */
-
-    return paint.getShader() ||
-#ifdef SK_SUPPORT_LEGACY_LAYERRASTERIZER_API
-           paint.getRasterizer() ||
-#endif
-           paint.getLooper() || // needs to hide its addLayer...
-           paint.getImageFilter();
-}
-
diff --git a/core/SkPaintPriv.h b/core/SkPaintPriv.h
index 9668fef..38c9063 100644
--- a/core/SkPaintPriv.h
+++ b/core/SkPaintPriv.h
@@ -22,11 +22,4 @@
 */
 bool isPaintOpaque(const SkPaint* paint,
                    const SkBitmap* bmpReplacesShader = NULL);
-
-/** Returns true if the provided paint has fields which are not
-    immutable (and will thus require deep copying).
-    @param paint the paint to be analyzed
-    @return true if the paint requires a deep copy
-*/
-bool NeedsDeepCopy(const SkPaint& paint);
 #endif
diff --git a/core/SkPicture.cpp b/core/SkPicture.cpp
index 6843430..3b04906 100644
--- a/core/SkPicture.cpp
+++ b/core/SkPicture.cpp
@@ -15,7 +15,6 @@
 #include "SkBitmapDevice.h"
 #include "SkCanvas.h"
 #include "SkChunkAlloc.h"
-#include "SkPaintPriv.h"
 #include "SkPicture.h"
 #include "SkRegion.h"
 #include "SkStream.h"
@@ -218,6 +217,26 @@
     return clonedPicture;
 }
 
+static bool needs_deep_copy(const SkPaint& paint) {
+    /*
+     *  These fields are known to be immutable, and so can be shallow-copied
+     *
+     *  getTypeface()
+     *  getAnnotation()
+     *  paint.getColorFilter()
+     *  getXfermode()
+     *  getPathEffect()
+     *  getMaskFilter()
+     */
+
+    return paint.getShader() ||
+#ifdef SK_SUPPORT_LEGACY_LAYERRASTERIZER_API
+           paint.getRasterizer() ||
+#endif
+           paint.getLooper() || // needs to hide its addLayer...
+           paint.getImageFilter();
+}
+
 void SkPicture::clone(SkPicture* pictures, int count) const {
     SkPictCopyInfo copyInfo;
     SkPictInfo info;
@@ -263,7 +282,7 @@
 
                 SkDEBUGCODE(int heapSize = SafeCount(fPlayback->fBitmapHeap.get());)
                 for (int i = 0; i < paintCount; i++) {
-                    if (NeedsDeepCopy(fPlayback->fPaints->at(i))) {
+                    if (needs_deep_copy(fPlayback->fPaints->at(i))) {
                         copyInfo.paintData[i] =
                             SkFlatData::Create<SkPaint::FlatteningTraits>(&copyInfo.controller,
                                                               fPlayback->fPaints->at(i), 0);
diff --git a/core/SkPictureShader.h b/core/SkPictureShader.h
index 1788205..0fbfbee 100644
--- a/core/SkPictureShader.h
+++ b/core/SkPictureShader.h
@@ -57,23 +57,23 @@
                                SkShader* bitmapShader);
 
         virtual ~PictureShaderContext();
-        
+
         virtual uint32_t getFlags() const SK_OVERRIDE;
-        
+
         virtual ShadeProc asAShadeProc(void** ctx) SK_OVERRIDE;
         virtual void shadeSpan(int x, int y, SkPMColor dstC[], int count) SK_OVERRIDE;
         virtual void shadeSpan16(int x, int y, uint16_t dstC[], int count) SK_OVERRIDE;
-        
+
     private:
         PictureShaderContext(const SkPictureShader&, const ContextRec&, SkShader* bitmapShader);
 
         SkAutoTUnref<SkShader>  fBitmapShader;
         SkShader::Context*      fBitmapShaderContext;
         void*                   fBitmapShaderContextStorage;
-        
+
         typedef SkShader::Context INHERITED;
     };
-    
+
     typedef SkShader INHERITED;
 };
 
diff --git a/gpu/GrContext.cpp b/gpu/GrContext.cpp
index c518a1c..d2664c3 100644
--- a/gpu/GrContext.cpp
+++ b/gpu/GrContext.cpp
@@ -1825,6 +1825,17 @@
     return path;
 }
 
+void GrContext::addResourceToCache(const GrResourceKey& resourceKey, GrCacheable* resource) {
+    fTextureCache->purgeAsNeeded(1, resource->gpuMemorySize());
+    fTextureCache->addResource(resourceKey, resource);
+}
+
+GrCacheable* GrContext::findAndRefCachedResource(const GrResourceKey& resourceKey) {
+    GrCacheable* resource = fTextureCache->find(resourceKey);
+    SkSafeRef(resource);
+    return resource;
+}
+
 ///////////////////////////////////////////////////////////////////////////////
 #if GR_CACHE_STATS
 void GrContext::printCacheStats() const {
diff --git a/gpu/GrPictureUtils.cpp b/gpu/GrPictureUtils.cpp
index d8e4161..e8c3b50 100644
--- a/gpu/GrPictureUtils.cpp
+++ b/gpu/GrPictureUtils.cpp
@@ -7,8 +7,6 @@
 
 #include "GrPictureUtils.h"
 #include "SkDevice.h"
-#include "SkDraw.h"
-#include "SkPaintPriv.h"
 
 // The GrGather device performs GPU-backend-specific preprocessing on
 // a picture. The results are stored in a GPUAccelData.
@@ -22,17 +20,12 @@
 public:
     SK_DECLARE_INST_COUNT(GrGatherDevice)
 
-    GrGatherDevice(int width, int height, SkPicture* picture, GPUAccelData* accelData,
-                   int saveLayerDepth) {
+    GrGatherDevice(int width, int height, SkPicture* picture, GPUAccelData* accelData) {
         fPicture = picture;
-        fSaveLayerDepth = saveLayerDepth;
-        fInfo.fValid = true;
         fInfo.fSize.set(width, height);
-        fInfo.fPaint = NULL;
         fInfo.fSaveLayerOpID = fPicture->EXPERIMENTAL_curOpID();
         fInfo.fRestoreOpID = 0;
         fInfo.fHasNestedLayers = false;
-        fInfo.fIsNested = (2 == fSaveLayerDepth);
 
         fEmptyBitmap.setConfig(SkImageInfo::Make(fInfo.fSize.fWidth,
                                                  fInfo.fSize.fHeight,
@@ -117,8 +110,7 @@
                               const SkPaint& paint) SK_OVERRIDE {
     }
     virtual void drawDevice(const SkDraw& draw, SkBaseDevice* deviceIn, int x, int y,
-                            const SkPaint& paint) SK_OVERRIDE {
-        // deviceIn is the one that is being "restored" back to its parent
+                            const SkPaint&) SK_OVERRIDE {
         GrGatherDevice* device = static_cast<GrGatherDevice*>(deviceIn);
 
         if (device->fAlreadyDrawn) {
@@ -126,29 +118,6 @@
         }
 
         device->fInfo.fRestoreOpID = fPicture->EXPERIMENTAL_curOpID();
-        device->fInfo.fCTM = *draw.fMatrix;
-        device->fInfo.fCTM.postTranslate(SkIntToScalar(-device->getOrigin().fX),
-                                         SkIntToScalar(-device->getOrigin().fY));
-
-        // We need the x & y values that will yield 'getOrigin' when transformed
-        // by 'draw.fMatrix'.
-        device->fInfo.fOffset.iset(device->getOrigin());
-
-        SkMatrix invMatrix;
-        if (draw.fMatrix->invert(&invMatrix)) {
-            invMatrix.mapPoints(&device->fInfo.fOffset, 1);
-        } else {
-            device->fInfo.fValid = false;
-        }
-
-        if (NeedsDeepCopy(paint)) {
-            // This NULL acts as a signal that the paint was uncopyable (for now)
-            device->fInfo.fPaint = NULL;
-            device->fInfo.fValid = false;
-        } else {
-            device->fInfo.fPaint = SkNEW_ARGS(SkPaint, (paint));
-        }
-
         fAccelData->addSaveLayerInfo(device->fInfo);
         device->fAlreadyDrawn = true;
     }
@@ -189,9 +158,6 @@
     // The information regarding the saveLayer call this device represents.
     GPUAccelData::SaveLayerInfo fInfo;
 
-    // The depth of this device in the saveLayer stack
-    int fSaveLayerDepth;
-
     virtual void replaceBitmapBackendForRasterSurface(const SkBitmap&) SK_OVERRIDE {
         NotSupported();
     }
@@ -201,8 +167,7 @@
         SkASSERT(kSaveLayer_Usage == usage);
 
         fInfo.fHasNestedLayers = true;
-        return SkNEW_ARGS(GrGatherDevice, (info.width(), info.height(), fPicture, 
-                                           fAccelData, fSaveLayerDepth+1));
+        return SkNEW_ARGS(GrGatherDevice, (info.width(), info.height(), fPicture, fAccelData));
     }
 
     virtual void flush() SK_OVERRIDE {}
@@ -274,7 +239,7 @@
         return ;
     }
 
-    GrGatherDevice device(pict->width(), pict->height(), pict, accelData, 0);
+    GrGatherDevice device(pict->width(), pict->height(), pict, accelData);
     GrGatherCanvas canvas(&device, pict);
 
     canvas.gather();
diff --git a/gpu/GrPictureUtils.h b/gpu/GrPictureUtils.h
index 5ca4132..6b4d901 100644
--- a/gpu/GrPictureUtils.h
+++ b/gpu/GrPictureUtils.h
@@ -17,21 +17,8 @@
 public:
     // Information about a given saveLayer in an SkPicture
     struct SaveLayerInfo {
-        // True if the SaveLayerInfo is valid. False if either 'fOffset' is
-        // invalid (due to a non-invertible CTM) or 'fPaint' is NULL (due
-        // to a non-copyable paint).
-        bool fValid;
         // The size of the saveLayer
         SkISize fSize;
-        // The CTM in which this layer's draws must occur. It already incorporates
-        // the translation needed to map the layer's top-left point to the origin.
-        SkMatrix fCTM;
-        // The offset that needs to be passed to drawBitmap to correctly
-        // position the pre-rendered layer.
-        SkPoint fOffset;
-        // The paint to use on restore. NULL if the paint was not copyable (and
-        // thus that this layer should not be pulled forward).
-        const SkPaint* fPaint;
         // The ID of this saveLayer in the picture. 0 is an invalid ID.
         size_t  fSaveLayerOpID;
         // The ID of the matching restore in the picture. 0 is an invalid ID.
@@ -39,8 +26,6 @@
         // True if this saveLayer has at least one other saveLayer nested within it.
         // False otherwise.
         bool    fHasNestedLayers;
-        // True if this saveLayer is nested within another. False otherwise.
-        bool    fIsNested;
     };
 
     GPUAccelData(Key key) : INHERITED(key) { }
@@ -58,14 +43,6 @@
         return fSaveLayerInfo[index];
     }
 
-    // We may, in the future, need to pass in the GPUDevice in order to
-    // incorporate the clip and matrix state into the key
-    static SkPicture::AccelData::Key ComputeAccelDataKey() {
-        static const SkPicture::AccelData::Key gGPUID = SkPicture::AccelData::GenerateDomain();
-
-        return gGPUID;
-    }
-
 protected:
     SkTDArray<SaveLayerInfo> fSaveLayerInfo;
 
diff --git a/gpu/GrResourceCache.cpp b/gpu/GrResourceCache.cpp
index 26f7592..529c3a5 100644
--- a/gpu/GrResourceCache.cpp
+++ b/gpu/GrResourceCache.cpp
@@ -13,6 +13,16 @@
 
 DECLARE_SKMESSAGEBUS_MESSAGE(GrResourceInvalidatedMessage);
 
+///////////////////////////////////////////////////////////////////////////////
+
+void GrCacheable::didChangeGpuMemorySize() const {
+    if (this->isInCache()) {
+        fCacheEntry->didChangeResourceSize();
+    }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
 GrResourceKey::ResourceType GrResourceKey::GenerateResourceType() {
     static int32_t gNextType = 0;
 
@@ -26,8 +36,14 @@
 
 ///////////////////////////////////////////////////////////////////////////////
 
-GrResourceCacheEntry::GrResourceCacheEntry(const GrResourceKey& key, GrCacheable* resource)
-        : fKey(key), fResource(resource) {
+GrResourceCacheEntry::GrResourceCacheEntry(GrResourceCache* resourceCache,
+                                           const GrResourceKey& key,
+                                           GrCacheable* resource)
+        : fResourceCache(resourceCache),
+          fKey(key),
+          fResource(resource),
+          fCachedSize(resource->gpuMemorySize()),
+          fIsExclusive(false) {
     // we assume ownership of the resource, and will unref it when we die
     SkASSERT(resource);
     resource->ref();
@@ -40,12 +56,24 @@
 
 #ifdef SK_DEBUG
 void GrResourceCacheEntry::validate() const {
+    SkASSERT(fResourceCache);
     SkASSERT(fResource);
     SkASSERT(fResource->getCacheEntry() == this);
+    SkASSERT(fResource->gpuMemorySize() == fCachedSize);
     fResource->validate();
 }
 #endif
 
+void GrResourceCacheEntry::didChangeResourceSize() {
+    size_t oldSize = fCachedSize;
+    fCachedSize = fResource->gpuMemorySize();
+    if (fCachedSize > oldSize) {
+        fResourceCache->didIncreaseResourceSize(this, fCachedSize - oldSize);
+    } else if (fCachedSize < oldSize) {
+        fResourceCache->didDecreaseResourceSize(this, oldSize - fCachedSize);
+    }
+}
+
 ///////////////////////////////////////////////////////////////////////////////
 
 GrResourceCache::GrResourceCache(int maxCount, size_t maxBytes) :
@@ -115,7 +143,7 @@
     // update our stats
     if (kIgnore_BudgetBehavior == behavior) {
         fClientDetachedCount += 1;
-        fClientDetachedBytes += entry->resource()->gpuMemorySize();
+        fClientDetachedBytes += entry->fCachedSize;
 
 #if GR_CACHE_STATS
         if (fHighWaterClientDetachedCount < fClientDetachedCount) {
@@ -130,7 +158,7 @@
         SkASSERT(kAccountFor_BudgetBehavior == behavior);
 
         fEntryCount -= 1;
-        fEntryBytes -= entry->resource()->gpuMemorySize();
+        fEntryBytes -= entry->fCachedSize;
     }
 }
 
@@ -141,12 +169,12 @@
     // update our stats
     if (kIgnore_BudgetBehavior == behavior) {
         fClientDetachedCount -= 1;
-        fClientDetachedBytes -= entry->resource()->gpuMemorySize();
+        fClientDetachedBytes -= entry->fCachedSize;
     } else {
         SkASSERT(kAccountFor_BudgetBehavior == behavior);
 
         fEntryCount += 1;
-        fEntryBytes += entry->resource()->gpuMemorySize();
+        fEntryBytes += entry->fCachedSize;
 
 #if GR_CACHE_STATS
         if (fHighWaterEntryCount < fEntryCount) {
@@ -208,7 +236,7 @@
     SkASSERT(!fPurging);
     GrAutoResourceCacheValidate atcv(this);
 
-    GrResourceCacheEntry* entry = SkNEW_ARGS(GrResourceCacheEntry, (key, resource));
+    GrResourceCacheEntry* entry = SkNEW_ARGS(GrResourceCacheEntry, (this, key, resource));
     resource->setCacheEntry(entry);
 
     this->attachToHead(entry);
@@ -223,6 +251,9 @@
 void GrResourceCache::makeExclusive(GrResourceCacheEntry* entry) {
     GrAutoResourceCacheValidate atcv(this);
 
+    SkASSERT(!entry->fIsExclusive);
+    entry->fIsExclusive = true;
+
     // When scratch textures are detached (to hide them from future finds) they
     // still count against the resource budget
     this->internalDetach(entry, kIgnore_BudgetBehavior);
@@ -239,11 +270,12 @@
     // the client called GrContext::contextDestroyed() to notify Gr,
     // and then later an SkGpuDevice's destructor releases its backing
     // texture (which was invalidated at contextDestroyed time).
+    // TODO: Safely delete the GrResourceCacheEntry as well.
     fClientDetachedCount -= 1;
     fEntryCount -= 1;
-    size_t size = entry->resource()->gpuMemorySize();
-    fClientDetachedBytes -= size;
-    fEntryBytes -= size;
+    fClientDetachedBytes -= entry->fCachedSize;
+    fEntryBytes -= entry->fCachedSize;
+    entry->fCachedSize = 0;
 }
 
 void GrResourceCache::makeNonExclusive(GrResourceCacheEntry* entry) {
@@ -259,11 +291,32 @@
         // alter the budget information.
         attachToHead(entry, kIgnore_BudgetBehavior);
         fCache.insert(entry->key(), entry);
+
+        SkASSERT(entry->fIsExclusive);
+        entry->fIsExclusive = false;
     } else {
         this->removeInvalidResource(entry);
     }
 }
 
+void GrResourceCache::didIncreaseResourceSize(const GrResourceCacheEntry* entry, size_t amountInc) {
+    fEntryBytes += amountInc;
+    if (entry->fIsExclusive) {
+        fClientDetachedBytes += amountInc;
+    }
+    this->purgeAsNeeded();
+}
+
+void GrResourceCache::didDecreaseResourceSize(const GrResourceCacheEntry* entry, size_t amountDec) {
+    fEntryBytes -= amountDec;
+    if (entry->fIsExclusive) {
+        fClientDetachedBytes -= amountDec;
+    }
+#ifdef SK_DEBUG
+    this->validate();
+#endif
+}
+
 /**
  * Destroying a resource may potentially trigger the unlock of additional
  * resources which in turn will trigger a nested purge. We block the nested
diff --git a/gpu/GrResourceCache.h b/gpu/GrResourceCache.h
index b2f91cd..1a81fe6 100644
--- a/gpu/GrResourceCache.h
+++ b/gpu/GrResourceCache.h
@@ -19,6 +19,7 @@
 #include "SkTInternalLList.h"
 
 class GrCacheable;
+class GrResourceCache;
 class GrResourceCacheEntry;
 
 class GrResourceKey {
@@ -128,12 +129,24 @@
     void validate() const {}
 #endif
 
+    /**
+     *  Update the cached size for this entry and inform the resource cache that
+     *  it has changed. Usually invoked from GrCacheable::didChangeGpuMemorySize,
+     *  not directly from here.
+     */
+    void didChangeResourceSize();
+
 private:
-    GrResourceCacheEntry(const GrResourceKey& key, GrCacheable* resource);
+    GrResourceCacheEntry(GrResourceCache* resourceCache,
+                         const GrResourceKey& key,
+                         GrCacheable* resource);
     ~GrResourceCacheEntry();
 
+    GrResourceCache* fResourceCache;
     GrResourceKey    fKey;
     GrCacheable*     fResource;
+    size_t           fCachedSize;
+    bool             fIsExclusive;
 
     // Linked list for the LRU ordering.
     SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrResourceCacheEntry);
@@ -272,6 +285,12 @@
     void makeNonExclusive(GrResourceCacheEntry* entry);
 
     /**
+     * Notify the cache that the size of a resource has changed.
+     */
+    void didIncreaseResourceSize(const GrResourceCacheEntry*, size_t amountInc);
+    void didDecreaseResourceSize(const GrResourceCacheEntry*, size_t amountDec);
+
+    /**
      * Remove a resource from the cache and delete it!
      */
     void deleteResource(GrResourceCacheEntry* entry);
diff --git a/gpu/GrTexture.cpp b/gpu/GrTexture.cpp
index f851515..3186d89 100644
--- a/gpu/GrTexture.cpp
+++ b/gpu/GrTexture.cpp
@@ -44,6 +44,33 @@
     this->INHERITED::internal_dispose();
 }
 
+void GrTexture::dirtyMipMaps(bool mipMapsDirty) {
+    if (mipMapsDirty) {
+        if (kValid_MipMapsStatus == fMipMapsStatus) {
+            fMipMapsStatus = kAllocated_MipMapsStatus;
+        }
+    } else {
+        const bool sizeChanged = kNotAllocated_MipMapsStatus == fMipMapsStatus;
+        fMipMapsStatus = kValid_MipMapsStatus;
+        if (sizeChanged) {
+            // This must not be called until after changing fMipMapsStatus.
+            this->didChangeGpuMemorySize();
+        }
+    }
+}
+
+size_t GrTexture::gpuMemorySize() const {
+    size_t textureSize =  (size_t) fDesc.fWidth *
+                                   fDesc.fHeight *
+                                   GrBytesPerPixel(fDesc.fConfig);
+    if (kNotAllocated_MipMapsStatus != fMipMapsStatus) {
+        // We don't have to worry about the mipmaps being a different size than
+        // we'd expect because we never change fDesc.fWidth/fHeight.
+        textureSize *= 2;
+    }
+    return textureSize;
+}
+
 bool GrTexture::readPixels(int left, int top, int width, int height,
                            GrPixelConfig config, void* buffer,
                            size_t rowBytes, uint32_t pixelOpsFlags) {
diff --git a/gpu/SkGpuDevice.cpp b/gpu/SkGpuDevice.cpp
index 3119a9e..714a6da 100644
--- a/gpu/SkGpuDevice.cpp
+++ b/gpu/SkGpuDevice.cpp
@@ -1910,8 +1910,16 @@
     return SkSurface::NewRenderTarget(fContext, info, fRenderTarget->numSamples());
 }
 
+// In the future this may not be a static method if we need to incorporate the
+// clip and matrix state into the key
+SkPicture::AccelData::Key SkGpuDevice::ComputeAccelDataKey() {
+    static const SkPicture::AccelData::Key gGPUID = SkPicture::AccelData::GenerateDomain();
+
+    return gGPUID;
+}
+
 void SkGpuDevice::EXPERIMENTAL_optimize(SkPicture* picture) {
-    SkPicture::AccelData::Key key = GPUAccelData::ComputeAccelDataKey();
+    SkPicture::AccelData::Key key = ComputeAccelDataKey();
 
     GPUAccelData* data = SkNEW_ARGS(GPUAccelData, (key));
 
@@ -1926,7 +1934,7 @@
 
 bool SkGpuDevice::EXPERIMENTAL_drawPicture(SkCanvas* canvas, SkPicture* picture) {
 
-    SkPicture::AccelData::Key key = GPUAccelData::ComputeAccelDataKey();
+    SkPicture::AccelData::Key key = ComputeAccelDataKey();
 
     const SkPicture::AccelData* data = picture->EXPERIMENTAL_getAccelData(key);
     if (NULL == data) {
@@ -1935,6 +1943,27 @@
 
     const GPUAccelData *gpuData = static_cast<const GPUAccelData*>(data);
 
+//#define SK_PRINT_PULL_FORWARD_INFO 1
+
+#ifdef SK_PRINT_PULL_FORWARD_INFO
+    static bool gPrintedAccelData = false;
+
+    if (!gPrintedAccelData) {
+        for (int i = 0; i < gpuData->numSaveLayers(); ++i) {
+            const GPUAccelData::SaveLayerInfo& info = gpuData->saveLayerInfo(i);
+
+            SkDebugf("%d: Width: %d Height: %d SL: %d R: %d hasNestedLayers: %s\n",
+                                            i,
+                                            info.fSize.fWidth,
+                                            info.fSize.fHeight,
+                                            info.fSaveLayerOpID,
+                                            info.fRestoreOpID,
+                                            info.fHasNestedLayers ? "T" : "F");
+        }
+        gPrintedAccelData = true;
+    }
+#endif
+
     SkAutoTArray<bool> pullForward(gpuData->numSaveLayers());
     for (int i = 0; i < gpuData->numSaveLayers(); ++i) {
         pullForward[i] = false;
@@ -1955,6 +1984,10 @@
 
     const SkPicture::OperationList& ops = picture->EXPERIMENTAL_getActiveOps(clip);
 
+#ifdef SK_PRINT_PULL_FORWARD_INFO
+    SkDebugf("rect: %d %d %d %d\n", clip.fLeft, clip.fTop, clip.fRight, clip.fBottom);
+#endif
+
     for (int i = 0; i < ops.numOps(); ++i) {
         for (int j = 0; j < gpuData->numSaveLayers(); ++j) {
             const GPUAccelData::SaveLayerInfo& info = gpuData->saveLayerInfo(j);
@@ -1965,5 +1998,17 @@
         }
     }
 
+#ifdef SK_PRINT_PULL_FORWARD_INFO
+    SkDebugf("Need SaveLayers: ");
+    for (int i = 0; i < gpuData->numSaveLayers(); ++i) {
+        if (pullForward[i]) {
+            const GrCachedLayer* layer = fContext->getLayerCache()->findLayerOrCreate(picture, i);
+
+            SkDebugf("%d (%d), ", i, layer->layerID());
+        }
+    }
+    SkDebugf("\n");
+#endif
+
     return false;
 }
diff --git a/ports/SkFontConfigInterface_direct.cpp b/ports/SkFontConfigInterface_direct.cpp
index 13993f1..80ee56e 100644
--- a/ports/SkFontConfigInterface_direct.cpp
+++ b/ports/SkFontConfigInterface_direct.cpp
@@ -15,6 +15,7 @@
 
 #include "SkBuffer.h"
 #include "SkFontConfigInterface.h"
+#include "SkOnce.h"
 #include "SkStream.h"
 
 size_t SkFontConfigInterface::FontIdentity::writeToMemory(void* addr) const {
@@ -123,16 +124,13 @@
     SkMutex mutex_;
 };
 
+static void create_singleton_direct_interface(SkFontConfigInterface** singleton) {
+    *singleton = new SkFontConfigInterfaceDirect;
+}
 SkFontConfigInterface* SkFontConfigInterface::GetSingletonDirectInterface() {
     static SkFontConfigInterface* gDirect;
-    if (NULL == gDirect) {
-        static SkMutex gMutex;
-        SkAutoMutexAcquire ac(gMutex);
-
-        if (NULL == gDirect) {
-            gDirect = new SkFontConfigInterfaceDirect;
-        }
-    }
+    SK_DECLARE_STATIC_ONCE(once);
+    SkOnce(&once, create_singleton_direct_interface, &gDirect);
     return gDirect;
 }
 
diff --git a/record/SkRecordOpts.cpp b/record/SkRecordOpts.cpp
index 5b537de..aaa611c 100644
--- a/record/SkRecordOpts.cpp
+++ b/record/SkRecordOpts.cpp
@@ -7,10 +7,12 @@
 
 #include "SkRecordOpts.h"
 
-#include "SkRecordTraits.h"
+#include "SkRecordPattern.h"
 #include "SkRecords.h"
 #include "SkTDArray.h"
 
+using namespace SkRecords;
+
 void SkRecordOptimize(SkRecord* record) {
     // TODO(mtklein): fuse independent optimizations to reduce number of passes?
     SkRecordNoopSaveRestores(record);
@@ -19,205 +21,180 @@
     SkRecordBoundDrawPosTextH(record);
 }
 
-namespace {
+// Most of the optimizations in this file are pattern-based.  These are all defined as structs with:
+//   - a Pattern typedef
+//   - a bool onMatch(SkRecord*, Pattern*, unsigned begin, unsigned end) method,
+//     which returns true if it made changes and false if not.
 
-// Convenience base class to share some common implementation code.
-class Common : SkNoncopyable {
-public:
-    explicit Common(SkRecord* record) : fRecord(record), fIndex(0) {}
+// Run a pattern-based optimization once across the SkRecord, returning true if it made any changes.
+// It looks for spans which match Pass::Pattern, and when found calls onMatch() with the pattern,
+// record, and [begin,end) span of the commands that matched.
+template <typename Pass>
+static bool apply(Pass* pass, SkRecord* record) {
+    typename Pass::Pattern pattern;
+    bool changed = false;
+    unsigned begin, end = 0;
 
-    unsigned index() const { return fIndex; }
-    void next() { ++fIndex; }
-
-protected:
-    SkRecord* fRecord;
-    unsigned fIndex;
-};
+    while (pattern.search(record, &begin, &end)) {
+        changed |= pass->onMatch(record, &pattern, begin, end);
+    }
+    return changed;
+}
 
 // Turns logical no-op Save-[non-drawing command]*-Restore patterns into actual no-ops.
-// TODO(mtklein): state machine diagram
-class SaveRestoreNooper : public Common {
-public:
-    explicit SaveRestoreNooper(SkRecord* record)
-        : Common(record), fSave(kInactive), fChanged(false) {}
+struct SaveRestoreNooper {
+    // Star matches greedily, so we also have to exclude Save and Restore.
+    typedef Pattern3<Is<Save>,
+                     Star<Not<Or3<Is<Save>,
+                                  Is<Restore>,
+                                  IsDraw> > >,
+                     Is<Restore> >
+        Pattern;
 
-    // Drawing commands reset state to inactive without nooping.
-    template <typename T>
-    SK_WHEN(SkRecords::IsDraw<T>, void) operator()(T*) { fSave = kInactive; }
-
-    // Most non-drawing commands can be ignored.
-    template <typename T>
-    SK_WHEN(!SkRecords::IsDraw<T>, void) operator()(T*) {}
-
-    void operator()(SkRecords::Save* r) {
-        fSave = SkCanvas::kMatrixClip_SaveFlag == r->flags ? this->index() : kInactive;
-    }
-
-    void operator()(SkRecords::Restore* r) {
-        if (fSave != kInactive) {
-            // Remove everything between the save and restore, inclusive on both sides.
-            fChanged = true;
-            for (unsigned i = fSave; i <= this->index(); i++) {
-                fRecord->replace<SkRecords::NoOp>(i);
-            }
-            fSave = kInactive;
+    bool onMatch(SkRecord* record, Pattern* pattern, unsigned begin, unsigned end) {
+        // If restore doesn't revert both matrix and clip, this isn't safe to noop away.
+        if (pattern->first<Save>()->flags != SkCanvas::kMatrixClip_SaveFlag) {
+            return false;
         }
+
+        // The entire span between Save and Restore (inclusively) does nothing.
+        for (unsigned i = begin; i < end; i++) {
+            record->replace<NoOp>(i);
+        }
+        return true;
     }
-
-    bool changed() const { return fChanged; }
-
-private:
-    static const unsigned kInactive = ~0;
-    unsigned fSave;
-    bool fChanged;
 };
-
-// Tries to replace PushCull with PairedPushCull, which lets us skip to the paired PopCull
-// when the canvas can quickReject the cull rect.
-class CullAnnotator : public Common {
-public:
-    explicit CullAnnotator(SkRecord* record) : Common(record) {}
-
-    // Do nothing to most ops.
-    template <typename T> void operator()(T*) {}
-
-    void operator()(SkRecords::PushCull* push) {
-        Pair pair = { this->index(), push };
-        fPushStack.push(pair);
-    }
-
-    void operator()(SkRecords::PopCull* pop) {
-        Pair push = fPushStack.top();
-        fPushStack.pop();
-
-        SkASSERT(this->index() > push.index);
-        unsigned skip = this->index() - push.index;
-
-        SkRecords::Adopted<SkRecords::PushCull> adopted(push.command);
-        SkNEW_PLACEMENT_ARGS(fRecord->replace<SkRecords::PairedPushCull>(push.index, adopted),
-                             SkRecords::PairedPushCull, (&adopted, skip));
-    }
-
-private:
-    struct Pair {
-        unsigned index;
-        SkRecords::PushCull* command;
-    };
-
-    SkTDArray<Pair> fPushStack;
-};
+void SkRecordNoopSaveRestores(SkRecord* record) {
+    SaveRestoreNooper pass;
+    while (apply(&pass, record));  // Run until it stops changing things.
+}
 
 // Replaces DrawPosText with DrawPosTextH when all Y coordinates are equal.
-class StrengthReducer : public Common {
-public:
-    explicit StrengthReducer(SkRecord* record) : Common(record) {}
+struct StrengthReducer {
+    typedef Pattern1<Is<DrawPosText> > Pattern;
 
-    // Do nothing to most ops.
-    template <typename T> void operator()(T*) {}
+    bool onMatch(SkRecord* record, Pattern* pattern, unsigned begin, unsigned end) {
+        SkASSERT(end == begin + 1);
+        DrawPosText* draw = pattern->first<DrawPosText>();
 
-    void operator()(SkRecords::DrawPosText* r) {
-        const unsigned points = r->paint.countText(r->text, r->byteLength);
+        const unsigned points = draw->paint.countText(draw->text, draw->byteLength);
         if (points == 0) {
-            // No point (ha!).
-            return;
+            return false;  // No point (ha!).
         }
 
-        const SkScalar firstY = r->pos[0].fY;
+        const SkScalar firstY = draw->pos[0].fY;
         for (unsigned i = 1; i < points; i++) {
-            if (r->pos[i].fY != firstY) {
-                // Needs the full strength of DrawPosText.
-                return;
+            if (draw->pos[i].fY != firstY) {
+                return false;  // Needs full power of DrawPosText.
             }
         }
         // All ys are the same.  We can replace DrawPosText with DrawPosTextH.
 
-        // r->pos is points SkPoints, [(x,y),(x,y),(x,y),(x,y), ... ].
+        // draw->pos is points SkPoints, [(x,y),(x,y),(x,y),(x,y), ... ].
         // We're going to squint and look at that as 2*points SkScalars, [x,y,x,y,x,y,x,y, ...].
         // Then we'll rearrange things so all the xs are in order up front, clobbering the ys.
         SK_COMPILE_ASSERT(sizeof(SkPoint) == 2 * sizeof(SkScalar), SquintingIsNotSafe);
-        SkScalar* scalars = &r->pos[0].fX;
+        SkScalar* scalars = &draw->pos[0].fX;
         for (unsigned i = 0; i < 2*points; i += 2) {
             scalars[i/2] = scalars[i];
         }
 
-        // Extend lifetime of r to the end of the method so we can copy its parts.
-        SkRecords::Adopted<SkRecords::DrawPosText> adopted(r);
-        SkNEW_PLACEMENT_ARGS(fRecord->replace<SkRecords::DrawPosTextH>(this->index(), adopted),
-                             SkRecords::DrawPosTextH,
-                             (r->text, r->byteLength, scalars, firstY, r->paint));
+        // Extend lifetime of draw to the end of the loop so we can copy its paint.
+        Adopted<DrawPosText> adopted(draw);
+        SkNEW_PLACEMENT_ARGS(record->replace<DrawPosTextH>(begin, adopted),
+                             DrawPosTextH,
+                             (draw->text, draw->byteLength, scalars, firstY, draw->paint));
+        return true;
     }
 };
+void SkRecordReduceDrawPosTextStrength(SkRecord* record) {
+    StrengthReducer pass;
+    apply(&pass, record);
+}
 
 // Tries to replace DrawPosTextH with BoundedDrawPosTextH, which knows conservative upper and lower
 // bounds to use with SkCanvas::quickRejectY.
-class TextBounder : public Common {
-public:
-    explicit TextBounder(SkRecord* record) : Common(record) {}
+struct TextBounder {
+    typedef Pattern1<Is<DrawPosTextH> > Pattern;
 
-    // Do nothing to most ops.
-    template <typename T> void operator()(T*) {}
+    bool onMatch(SkRecord* record, Pattern* pattern, unsigned begin, unsigned end) {
+        SkASSERT(end == begin + 1);
+        DrawPosTextH* draw = pattern->first<DrawPosTextH>();
 
-    void operator()(SkRecords::DrawPosTextH* r) {
         // If we're drawing vertical text, none of the checks we're about to do make any sense.
         // We'll need to call SkPaint::computeFastBounds() later, so bail if that's not possible.
-        if (r->paint.isVerticalText() || !r->paint.canComputeFastBounds()) {
-            return;
+        if (draw->paint.isVerticalText() || !draw->paint.canComputeFastBounds()) {
+            return false;
         }
 
         // Rather than checking the top and bottom font metrics, we guess.  Actually looking up the
         // top and bottom metrics is slow, and this overapproximation should be good enough.
-        const SkScalar buffer = r->paint.getTextSize() * 1.5f;
+        const SkScalar buffer = draw->paint.getTextSize() * 1.5f;
         SkDEBUGCODE(SkPaint::FontMetrics metrics;)
-        SkDEBUGCODE(r->paint.getFontMetrics(&metrics);)
+        SkDEBUGCODE(draw->paint.getFontMetrics(&metrics);)
         SkASSERT(-buffer <= metrics.fTop);
         SkASSERT(+buffer >= metrics.fBottom);
 
         // Let the paint adjust the text bounds.  We don't care about left and right here, so we use
         // 0 and 1 respectively just so the bounds rectangle isn't empty.
         SkRect bounds;
-        bounds.set(0, r->y - buffer, SK_Scalar1, r->y + buffer);
-        SkRect adjusted = r->paint.computeFastBounds(bounds, &bounds);
+        bounds.set(0, draw->y - buffer, SK_Scalar1, draw->y + buffer);
+        SkRect adjusted = draw->paint.computeFastBounds(bounds, &bounds);
 
-        SkRecords::Adopted<SkRecords::DrawPosTextH> adopted(r);
-        SkNEW_PLACEMENT_ARGS(
-                fRecord->replace<SkRecords::BoundedDrawPosTextH>(this->index(), adopted),
-                SkRecords::BoundedDrawPosTextH,
-                (&adopted, adjusted.fTop, adjusted.fBottom));
+        Adopted<DrawPosTextH> adopted(draw);
+        SkNEW_PLACEMENT_ARGS(record->replace<BoundedDrawPosTextH>(begin, adopted),
+                             BoundedDrawPosTextH,
+                             (&adopted, adjusted.fTop, adjusted.fBottom));
+        return true;
     }
 };
-
-
-template <typename Pass>
-static void run_pass(Pass& pass, SkRecord* record) {
-    for (; pass.index() < record->count(); pass.next()) {
-        record->mutate(pass.index(), pass);
-    }
-}
-
-}  // namespace
-
-
-void SkRecordNoopSaveRestores(SkRecord* record) {
-    // Run SaveRestoreNooper until it doesn't make any more changes.
-    bool changed;
-    do {
-        SaveRestoreNooper nooper(record);
-        run_pass(nooper, record);
-        changed = nooper.changed();
-    } while (changed);
-}
-
-void SkRecordAnnotateCullingPairs(SkRecord* record) {
-    CullAnnotator annotator(record);
-    run_pass(annotator, record);
-}
-
-void SkRecordReduceDrawPosTextStrength(SkRecord* record) {
-    StrengthReducer reducer(record);
-    run_pass(reducer, record);
-}
-
 void SkRecordBoundDrawPosTextH(SkRecord* record) {
-    TextBounder bounder(record);
-    run_pass(bounder, record);
+    TextBounder pass;
+    apply(&pass, record);
+}
+
+// Replaces PushCull with PairedPushCull, which lets us skip to the paired PopCull when the canvas
+// can quickReject the cull rect.
+// There's no efficient way (yet?) to express this one as a pattern, so we write a custom pass.
+class CullAnnotator {
+public:
+    // Do nothing to most ops.
+    template <typename T> void operator()(T*) {}
+
+    void operator()(PushCull* push) {
+        Pair pair = { fIndex, push };
+        fPushStack.push(pair);
+    }
+
+    void operator()(PopCull* pop) {
+        Pair push = fPushStack.top();
+        fPushStack.pop();
+
+        SkASSERT(fIndex > push.index);
+        unsigned skip = fIndex - push.index;
+
+        Adopted<PushCull> adopted(push.command);
+        SkNEW_PLACEMENT_ARGS(fRecord->replace<PairedPushCull>(push.index, adopted),
+                             PairedPushCull, (&adopted, skip));
+    }
+
+    void apply(SkRecord* record) {
+        for (fRecord = record, fIndex = 0; fIndex < record->count(); fIndex++) {
+            fRecord->mutate(fIndex, *this);
+        }
+    }
+
+private:
+    struct Pair {
+        unsigned index;
+        PushCull* command;
+    };
+
+    SkTDArray<Pair> fPushStack;
+    SkRecord* fRecord;
+    unsigned fIndex;
+};
+void SkRecordAnnotateCullingPairs(SkRecord* record) {
+    CullAnnotator pass;
+    pass.apply(record);
 }
diff --git a/record/SkRecordPattern.h b/record/SkRecordPattern.h
new file mode 100644
index 0000000..2023a90
--- /dev/null
+++ b/record/SkRecordPattern.h
@@ -0,0 +1,219 @@
+#ifndef SkRecordPattern_DEFINED
+#define SkRecordPattern_DEFINED
+
+#include "SkTLogic.h"
+
+namespace SkRecords {
+
+// First, some matchers.  These match a single command in the SkRecord,
+// and may hang onto some data from it.  If so, you can get the data by calling .get().
+
+// Matches a command of type T, and stores that command.
+template <typename T>
+class Is {
+public:
+    Is() : fPtr(NULL) {}
+
+    typedef T type;
+    type* get() { return fPtr; }
+
+    bool match(T* ptr) {
+        fPtr = ptr;
+        return true;
+    }
+
+    template <typename U>
+    bool match(U*) {
+        fPtr = NULL;
+        return false;
+    }
+
+private:
+    type* fPtr;
+};
+
+// Matches any command that draws, and stores its paint.
+class IsDraw {
+    SK_CREATE_MEMBER_DETECTOR(paint);
+public:
+    IsDraw() : fPaint(NULL) {}
+
+    typedef SkPaint type;
+    type* get() { return fPaint; }
+
+    template <typename T>
+    SK_WHEN(HasMember_paint<T>, bool) match(T* draw) {
+        fPaint = AsPtr(draw->paint);
+        return true;
+    }
+
+    template <typename T>
+    SK_WHEN(!HasMember_paint<T>, bool) match(T*) {
+        fPaint = NULL;
+        return false;
+    }
+
+private:
+    // Abstracts away whether the paint is always part of the command or optional.
+    template <typename T> static T* AsPtr(SkRecords::Optional<T>& x) { return x; }
+    template <typename T> static T* AsPtr(T& x) { return &x; }
+
+    type* fPaint;
+};
+
+// Matches if Matcher doesn't.  Stores nothing.
+template <typename Matcher>
+struct Not {
+    template <typename T>
+    bool match(T* ptr) { return !Matcher().match(ptr); }
+};
+
+// Matches if either of A or B does.  Stores nothing.
+template <typename A, typename B>
+struct Or {
+    template <typename T>
+    bool match(T* ptr) { return A().match(ptr) || B().match(ptr); }
+};
+
+// Matches if any of A, B or C does.  Stores nothing.
+template <typename A, typename B, typename C>
+struct Or3 : Or<A, Or<B, C> > {};
+
+// We'll use this to choose which implementation of Star suits each Matcher.
+SK_CREATE_TYPE_DETECTOR(type);
+
+// Star is a special matcher that matches Matcher 0 or more times _greedily_ in the SkRecord.
+// This version stores nothing.  It's enabled when Matcher stores nothing.
+template <typename Matcher, typename = void>
+class Star {
+public:
+    void reset() {}
+
+    template <typename T>
+    bool match(T* ptr) { return Matcher().match(ptr); }
+};
+
+// This version stores a list of matches.  It's enabled if Matcher stores something.
+template <typename Matcher>
+class Star<Matcher, SK_WHEN(HasType_type<Matcher>, void)> {
+public:
+    typedef SkTDArray<typename Matcher::type*> type;
+    type* get() { return &fMatches; }
+
+    void reset() { fMatches.rewind(); }
+
+    template <typename T>
+    bool match(T* ptr) {
+        Matcher matcher;
+        if (matcher.match(ptr)) {
+            fMatches.push(matcher.get());
+            return true;
+        }
+        return false;
+    }
+
+private:
+    type fMatches;
+};
+
+
+// Cons builds a list of Matchers.
+// It first matches Matcher (something from above), then Pattern (another Cons or Nil).
+//
+// This is the main entry point to pattern matching, and so provides a couple of extra API bits:
+//  - search scans through the record to look for matches;
+//  - first, second, and third return the data stored by their respective matchers in the pattern.
+//
+// These Cons build lists analogously to Lisp's "cons".  See Pattern# for the "list" equivalent.
+template <typename Matcher, typename Pattern>
+class Cons {
+public:
+    // If this pattern matches the SkRecord starting at i,
+    // return the index just past the end of the pattern, otherwise return 0.
+    SK_ALWAYS_INLINE unsigned match(SkRecord* record, unsigned i) {
+        i = this->matchHead(&fHead, record, i);
+        return i == 0 ? 0 : fTail.match(record, i);
+    }
+
+    // Starting from *end, walk through the SkRecord to find the first span matching this pattern.
+    // If there is no such span, return false.  If there is, return true and set [*begin, *end).
+    SK_ALWAYS_INLINE bool search(SkRecord* record, unsigned* begin, unsigned* end) {
+        for (*begin = *end; *begin < record->count(); ++(*begin)) {
+            *end = this->match(record, *begin);
+            if (*end != 0) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    // Once either match or search has succeeded, access the stored data of the first, second,
+    // or third matcher in this pattern.  Add as needed for longer patterns.
+    // T is checked statically at compile time; no casting is involved.  It's just an API wart.
+    template <typename T> T* first()  { return fHead.get(); }
+    template <typename T> T* second() { return fTail.fHead.get(); }
+    template <typename T> T* third()  { return fTail.fTail.fHead.get(); }
+
+private:
+    template <typename T>
+    void operator()(T* r) { fHeadMatched = fHead.match(r); }
+
+    // If head isn't a Star, try to match at i once.
+    template <typename T>
+    unsigned matchHead(T*, SkRecord* record, unsigned i) {
+        if (i < record->count()) {
+            fHeadMatched = false;
+            record->mutate(i, *this);
+            if (fHeadMatched) {
+                return i+1;
+            }
+        }
+        return 0;
+    }
+
+    // If head is a Star, walk i until it doesn't match.
+    template <typename T>
+    unsigned matchHead(Star<T>*, SkRecord* record, unsigned i) {
+        fHead.reset();
+        while (i < record->count()) {
+            fHeadMatched = false;
+            record->mutate(i, *this);
+            if (!fHeadMatched) {
+                return i;
+            }
+            i++;
+        }
+        return 0;
+    }
+
+    Matcher fHead;
+    Pattern fTail;
+    bool fHeadMatched;
+
+    friend class ::SkRecord;  // So operator() can otherwise stay private.
+
+    // All Cons are friends with each other.  This lets first, second, and third work.
+    template <typename, typename> friend class Cons;
+};
+
+// Nil is the end of every pattern Cons chain.
+struct Nil {
+    // Bottoms out recursion down the fTail chain.  Just return whatever i the front decided on.
+    unsigned match(SkRecord*, unsigned i) { return i; }
+};
+
+// These Pattern# types are syntax sugar over Cons and Nil, just to help eliminate some of the
+// template noise.  Use these if you can.  Feel free to add more for longer patterns.
+// All types A, B, C, ... are Matchers.
+template <typename A>
+struct Pattern1 : Cons<A, Nil> {};
+
+template <typename A, typename B>
+struct Pattern2 : Cons<A, Pattern1<B> > {};
+
+template <typename A, typename B, typename C>
+struct Pattern3 : Cons<A, Pattern2<B, C> > {};
+
+}  // namespace SkRecords
+
+#endif  // SkRecordPattern_DEFINED
diff --git a/record/SkRecordTraits.h b/record/SkRecordTraits.h
deleted file mode 100644
index 570a717..0000000
--- a/record/SkRecordTraits.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#include "SkRecords.h"
-#include "SkTLogic.h"
-
-// Type traits that are useful for working with SkRecords.
-
-namespace SkRecords {
-
-namespace {
-
-// Abstracts away whether the T is optional or not.
-template <typename T> const T* as_ptr(const SkRecords::Optional<T>& x) { return x; }
-template <typename T> const T* as_ptr(const T& x) { return &x; }
-
-}  // namespace
-
-// Gets the paint from any command that may have one.
-template <typename Command> const SkPaint* GetPaint(const Command& x) { return as_ptr(x.paint); }
-
-// Have a paint?  You are a draw command!
-template <typename Command> struct IsDraw {
-    SK_CREATE_MEMBER_DETECTOR(paint);
-    static const bool value = HasMember_paint<Command>::value;
-};
-
-// Have a clip op?  You are a clip command.
-template <typename Command> struct IsClip {
-    SK_CREATE_MEMBER_DETECTOR(op);
-    static const bool value = HasMember_op<Command>::value;
-};
-
-}  // namespace SkRecords
diff --git a/utils/SkTLogic.h b/utils/SkTLogic.h
index 62952ad..925d4bd 100644
--- a/utils/SkTLogic.h
+++ b/utils/SkTLogic.h
@@ -89,4 +89,14 @@
     static const bool value = sizeof(func<Derived>(NULL)) == sizeof(uint16_t);      \
 }
 
+// Same sort of thing as SK_CREATE_MEMBER_DETECTOR, but checks for the existence of a nested type.
+#define SK_CREATE_TYPE_DETECTOR(type)                                   \
+template <typename T>                                                   \
+class HasType_##type {                                                  \
+    template <typename U> static uint8_t func(typename U::type*);       \
+    template <typename U> static uint16_t func(...);                    \
+public:                                                                 \
+    static const bool value = sizeof(func<T>(NULL)) == sizeof(uint8_t); \
+}
+
 #endif